text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2010 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains an implementation of a graph data structure (the
:class:`Graph` class) and functions for manipulating that graph, including
efficient isomorphism functions.
"""
import cython
import logging
################################################################################
class Vertex(object):
    """
    A base class for vertices in a graph. Contains several connectivity values
    useful for accelerating isomorphism searches, as proposed by
    `Morgan (1965) <http://dx.doi.org/10.1021/c160017a018>`_.

    ================== ========================================================
    Attribute          Description
    ================== ========================================================
    `connectivity1`    The number of nearest neighbors
    `connectivity2`    The sum of the neighbors' `connectivity1` values
    `connectivity3`    The sum of the neighbors' `connectivity2` values
    `sortingLabel`     An integer used to sort the vertices
    ================== ========================================================
    """

    def __init__(self):
        # Every vertex starts with invalidated (-1) connectivity caches.
        self.resetConnectivityValues()

    def equivalent(self, other):
        """
        Return ``True`` if the two vertices `self` and `other` are
        semantically equivalent, or ``False`` if not. Derived classes whose
        vertices carry semantic information should override this method; the
        base implementation treats all vertices as equivalent.
        """
        return True

    def isSpecificCaseOf(self, other):
        """
        Return ``True`` if `self` is semantically more specific than `other`,
        or ``False`` if not. Derived classes whose vertices carry semantic
        information should override this method; the base implementation
        always succeeds.
        """
        return True

    def resetConnectivityValues(self):
        """
        Reset the cached structure information for this vertex: all three
        connectivity values and the sorting label are set to -1, meaning
        "not yet computed".
        """
        self.connectivity1 = -1
        self.connectivity2 = -1
        self.connectivity3 = -1
        self.sortingLabel = -1
def getVertexConnectivityValue(vertex):
    """
    Return a value used to sort vertices prior to proposing candidate pairs in
    :meth:`__VF2_pairs`. The value is a negatively-weighted combination of the
    vertex's connectivity values (assumed to be set properly), with the
    nearest-neighborhood count weighted most heavily.
    """
    c1, c2, c3 = vertex.connectivity1, vertex.connectivity2, vertex.connectivity3
    return -(256 * c1 + 16 * c2 + c3)
def getVertexSortingLabel(vertex):
    """
    Return the sorting label of `vertex`, used as a sort key when ordering
    vertices prior to proposing candidate pairs in :meth:`__VF2_pairs`.
    Assumes the labels have been assigned (e.g. by :meth:`Graph.sortVertices`).
    """
    return vertex.sortingLabel
################################################################################
class Edge(object):
    """
    A base class for edges in a graph. This class does *not* store the vertex
    pair that comprises the edge; that responsibility is left to derived
    classes.
    """

    def __init__(self):
        pass

    def equivalent(self, other):
        """
        Return ``True`` if the two edges `self` and `other` are semantically
        equivalent, or ``False`` if not. Derived classes whose edges carry
        semantic information should override this method; the base
        implementation treats all edges as equivalent.
        """
        return True

    def isSpecificCaseOf(self, other):
        """
        Return ``True`` if `self` is semantically more specific than `other`,
        or ``False`` if not. Derived classes whose edges carry semantic
        information should override this method; the base implementation
        always succeeds.
        """
        return True
################################################################################
class Graph:
    """
    A graph data type. The vertices of the graph are stored in a list
    `vertices`; this provides a consistent traversal order. The edges of the
    graph are stored in a dictionary of dictionaries `edges`. A single edge can
    be accessed using ``graph.edges[vertex1][vertex2]`` or the :meth:`getEdge`
    method; in either case, an exception will be raised if the edge does not
    exist. All edges of a vertex can be accessed using ``graph.edges[vertex]``
    or the :meth:`getEdges` method.
    """

    def __init__(self, vertices=None, edges=None):
        self.vertices = vertices or []
        self.edges = edges or {}

    def addVertex(self, vertex):
        """
        Add a `vertex` to the graph. The vertex is initialized with no edges.
        Returns the vertex for convenience.
        """
        self.vertices.append(vertex)
        self.edges[vertex] = dict()
        return vertex

    def addEdge(self, vertex1, vertex2, edge):
        """
        Add an `edge` to the graph as an edge connecting the two vertices
        `vertex1` and `vertex2`. The edge object is stored symmetrically so
        it can be looked up from either endpoint. Returns the edge.
        """
        self.edges[vertex1][vertex2] = edge
        self.edges[vertex2][vertex1] = edge
        return edge

    def getEdges(self, vertex):
        """
        Return the edges involving the specified `vertex` as a dictionary
        mapping each neighboring vertex to the connecting edge object.
        """
        return self.edges[vertex]

    def getEdge(self, vertex1, vertex2):
        """
        Return the edge connecting vertices `vertex1` and `vertex2`. Raises
        :class:`KeyError` if no such edge exists.
        """
        return self.edges[vertex1][vertex2]

    def hasVertex(self, vertex):
        """
        Return ``True`` if `vertex` is a vertex in the graph, or ``False`` if
        not.
        """
        return vertex in self.vertices

    def hasEdge(self, vertex1, vertex2):
        """
        Return ``True`` if vertices `vertex1` and `vertex2` are connected
        by an edge, or ``False`` if not (including when `vertex1` is not in
        the graph at all).
        """
        return vertex2 in self.edges[vertex1] if vertex1 in self.edges else False

    def removeVertex(self, vertex):
        """
        Remove `vertex` and all edges associated with it from the graph. Does
        not remove vertices that no longer have any edges as a result of this
        removal.
        """
        for vertex2 in self.vertices:
            if vertex2 is not vertex:
                if vertex in self.edges[vertex2]:
                    del self.edges[vertex2][vertex]
        del self.edges[vertex]
        self.vertices.remove(vertex)

    def removeEdge(self, vertex1, vertex2):
        """
        Remove the edge having vertices `vertex1` and `vertex2` from the graph.
        Does not remove vertices that no longer have any edges as a result of
        this removal. Raises :class:`KeyError` if the edge does not exist.
        """
        del self.edges[vertex1][vertex2]
        del self.edges[vertex2][vertex1]

    def copy(self, deep=False):
        """
        Create a copy of the current graph. If `deep` is ``True``, a deep copy
        is made: copies of the vertices and edges are used in the new graph
        (the vertex and edge classes are expected to provide a ``copy()``
        method — TODO confirm for each derived class). If `deep` is ``False``
        or not specified, a shallow copy is made: the original vertex and edge
        objects are shared, but the adjacency structure itself is new.
        """
        other = cython.declare(Graph)
        other = Graph()
        for vertex in self.vertices:
            other.addVertex(vertex.copy() if deep else vertex)
        for vertex1 in self.vertices:
            for vertex2 in self.edges[vertex1]:
                if deep:
                    # Map each original vertex to its copy via its position
                    # in the vertex list.
                    index1 = self.vertices.index(vertex1)
                    index2 = self.vertices.index(vertex2)
                    other.addEdge(other.vertices[index1], other.vertices[index2],
                        self.edges[vertex1][vertex2].copy())
                else:
                    other.addEdge(vertex1, vertex2, self.edges[vertex1][vertex2])
        return other

    def merge(self, other):
        """
        Merge two graphs so as to store them in a single :class:`Graph`
        object. The vertex and edge objects are shared with the source
        graphs, not copied.
        """
        # Create output graph
        new = cython.declare(Graph)
        new = Graph()
        # Add vertices to output graph
        for vertex in self.vertices:
            new.addVertex(vertex)
        for vertex in other.vertices:
            new.addVertex(vertex)
        # Add edges to output graph; each edge object is already stored in
        # both directions of the adjacency dicts we iterate, so a direct
        # assignment preserves the symmetric structure
        for v1 in self.vertices:
            for v2 in self.edges[v1]:
                new.edges[v1][v2] = self.edges[v1][v2]
        for v1 in other.vertices:
            for v2 in other.edges[v1]:
                new.edges[v1][v2] = other.edges[v1][v2]
        return new

    def split(self):
        """
        Convert a single :class:`Graph` object containing two or more
        unconnected graphs into separate graphs, returned as a list. A
        connected graph is returned as a single-element list.
        """
        # Create potential output graphs
        new1 = cython.declare(Graph)
        new2 = cython.declare(Graph)
        verticesToMove = cython.declare(list)
        index = cython.declare(cython.int)
        new1 = self.copy()
        new2 = Graph()
        if len(self.vertices) == 0:
            return [new1]
        # Arbitrarily choose last atom as starting point
        verticesToMove = [ self.vertices[-1] ]
        # Iterate until there are no more atoms to move: a breadth-first walk
        # collecting the connected component of the starting vertex
        index = 0
        while index < len(verticesToMove):
            for v2 in self.edges[verticesToMove[index]]:
                if v2 not in verticesToMove:
                    verticesToMove.append(v2)
            index += 1
        # If all atoms are to be moved, simply return new1
        if len(new1.vertices) == len(verticesToMove):
            return [new1]
        # Copy the component to the new graph
        for vertex in verticesToMove:
            new2.addVertex(vertex)
        for v1 in verticesToMove:
            for v2, edge in new1.edges[v1].iteritems():
                new2.edges[v1][v2] = edge
        # Remove the component's edges, then its vertices, from the old graph
        for v1 in new2.vertices:
            for v2 in new2.edges[v1]:
                if v1 in verticesToMove and v2 in verticesToMove:
                    del new1.edges[v1][v2]
        for vertex in verticesToMove:
            new1.removeVertex(vertex)
        # Recurse in case the remainder is itself disconnected
        new = [new2]
        new.extend(new1.split())
        return new

    def resetConnectivityValues(self):
        """
        Reset any cached connectivity information. Call this method when you
        have modified the graph.
        """
        vertex = cython.declare(Vertex)
        for vertex in self.vertices: vertex.resetConnectivityValues()

    def updateConnectivityValues(self):
        """
        Update the connectivity values for each vertex in the graph. These are
        used to accelerate the isomorphism checking.
        """
        cython.declare(count=cython.short, edges=dict)
        cython.declare(vertex1=Vertex, vertex2=Vertex)
        # Debug guard: a Molecule with implicit hydrogens would produce
        # misleading connectivity values.
        assert str(self.__class__) != 'chempy.molecule.Molecule' or not self.implicitHydrogens, "%s has implicit hydrogens" % self
        # Pass 1: number of nearest neighbors
        for vertex1 in self.vertices:
            count = len(self.edges[vertex1])
            vertex1.connectivity1 = count
        # Pass 2: sum of neighbors' connectivity1 values
        for vertex1 in self.vertices:
            count = 0
            edges = self.edges[vertex1]
            for vertex2 in edges: count += vertex2.connectivity1
            vertex1.connectivity2 = count
        # Pass 3: sum of neighbors' connectivity2 values
        for vertex1 in self.vertices:
            count = 0
            edges = self.edges[vertex1]
            for vertex2 in edges: count += vertex2.connectivity2
            vertex1.connectivity3 = count

    def sortVertices(self):
        """
        Sort the vertices in the graph. This can make certain operations, e.g.
        the isomorphism functions, much more efficient.
        """
        cython.declare(index=cython.int, vertex=Vertex)
        # Only need to conduct sort if there is an invalid sorting label on any vertex
        for vertex in self.vertices:
            if vertex.sortingLabel < 0: break
        else:
            return
        self.vertices.sort(key=getVertexConnectivityValue)
        for index, vertex in enumerate(self.vertices):
            vertex.sortingLabel = index

    def isIsomorphic(self, other, initialMap=None):
        """
        Return ``True`` if the two graphs are isomorphic and ``False``
        otherwise. Uses the VF2 algorithm of Vento and Foggia.
        """
        ismatch, mapList = VF2_isomorphism(self, other, subgraph=False, findAll=False, initialMap=initialMap)
        return ismatch

    def findIsomorphism(self, other, initialMap=None):
        """
        Return a tuple ``(isMatch, mapList)``: whether `other` is isomorphic
        to this graph, and the list of all valid mappings found. Uses the VF2
        algorithm of Vento and Foggia.
        """
        return VF2_isomorphism(self, other, subgraph=False, findAll=True, initialMap=initialMap)

    def isSubgraphIsomorphic(self, other, initialMap=None):
        """
        Return ``True`` if `other` is subgraph isomorphic to this graph and
        ``False`` otherwise. Uses the VF2 algorithm of Vento and Foggia.
        """
        ismatch, mapList = VF2_isomorphism(self, other, subgraph=True, findAll=False, initialMap=initialMap)
        return ismatch

    def findSubgraphIsomorphisms(self, other, initialMap=None):
        """
        Return a tuple ``(isMatch, mapList)``: whether `other` is subgraph
        isomorphic to this graph, and the list of all valid mappings found.
        Uses the VF2 algorithm of Vento and Foggia.
        """
        return VF2_isomorphism(self, other, subgraph=True, findAll=True, initialMap=initialMap)

    def isCyclic(self):
        """
        Return ``True`` if one or more cycles are present in the graph and
        ``False`` otherwise.
        """
        for vertex in self.vertices:
            if self.isVertexInCycle(vertex):
                return True
        return False

    def isVertexInCycle(self, vertex):
        """
        Return ``True`` if `vertex` is in one or more cycles in the graph,
        or ``False`` if not.
        """
        chain = cython.declare(list)
        chain = [vertex]
        return self.__isChainInCycle(chain)

    def isEdgeInCycle(self, vertex1, vertex2):
        """
        Return ``True`` if the edge between vertices `vertex1` and `vertex2`
        is in one or more cycles in the graph, or ``False`` if not.
        """
        cycle_list = self.getAllCycles(vertex1)
        for cycle in cycle_list:
            if vertex2 in cycle:
                return True
        return False

    def __isChainInCycle(self, chain):
        """
        Return ``True`` if the `chain` of connected vertices can be closed
        into a cycle back to its first vertex, or ``False`` if not.
        Recursively extends the chain by one vertex at a time.
        """
        # Note that this function does not return the cycle; just True/False
        vertex2 = cython.declare(Vertex)
        edge = cython.declare(Edge)
        found = cython.declare(cython.bint)
        for vertex2, edge in self.edges[chain[-1]].iteritems():
            if vertex2 is chain[0] and len(chain) > 2:
                # Closed back to the start with at least 3 vertices: a cycle
                return True
            elif vertex2 not in chain:
                # Make the chain a little longer and explore again
                chain.append(vertex2)
                found = self.__isChainInCycle(chain)
                if found: return True
                # Didn't find a cycle down this path, so remove the vertex
                # from the chain before trying the next neighbor
                chain.remove(vertex2)
        return False

    def getAllCycles(self, startingVertex):
        """
        Given a starting vertex, returns a list of all the cycles containing
        that vertex. Each cycle is a list of vertices.
        """
        chain = cython.declare(list)
        cycleList = cython.declare(list)
        cycleList = list()
        chain = [startingVertex]
        cycleList = self.__exploreCyclesRecursively(chain, cycleList)
        return cycleList

    def __exploreCyclesRecursively(self, chain, cycleList):
        """
        Finds cycles by spidering through a graph.
        Give it a chain of atoms that are connected, `chain`,
        and a list of cycles found so far `cycleList`.
        If `chain` is a cycle, it is appended to `cycleList`.
        Then chain is expanded by one atom (in each available direction)
        and the function is called again. This recursively spiders outwards
        from the starting chain, finding all the cycles.
        """
        vertex2 = cython.declare(Vertex)
        edge = cython.declare(Edge)
        for vertex2, edge in self.edges[chain[-1]].iteritems():
            # vertex2 will loop through each of the atoms
            # that are bonded to the last atom in the chain.
            if vertex2 is chain[0] and len(chain) > 2:
                # it is the first atom in the chain - so the chain IS a cycle!
                cycleList.append(chain[:])
            elif vertex2 not in chain:
                # make the chain a little longer and explore again
                chain.append(vertex2)
                cycleList = self.__exploreCyclesRecursively(chain, cycleList)
                # any cycles down this path (-vertex2) have now been found,
                # so remove the vertex from the chain
                chain.pop(-1)
        return cycleList

    def getSmallestSetOfSmallestRings(self):
        """
        Return a list of the smallest set of smallest rings in the graph. The
        algorithm implemented was adapted from a description by Fan, Panaye,
        Doucet, and Barbu (doi: 10.1021/ci00015a002)

        B. T. Fan, A. Panaye, J. P. Doucet, and A. Barbu. "Ring Perception: A
        New Algorithm for Directly Finding the Smallest Set of Smallest Rings
        from a Connection Table." *J. Chem. Inf. Comput. Sci.* **33**,
        p. 657-662 (1993).
        """
        graph = cython.declare(Graph)
        done = cython.declare(cython.bint)
        verticesToRemove = cython.declare(list)
        cycleList = cython.declare(list)
        cycles = cython.declare(list)
        vertex = cython.declare(Vertex)
        rootVertex = cython.declare(Vertex)
        found = cython.declare(cython.bint)
        cycle = cython.declare(list)
        graphs = cython.declare(list)
        # Make a copy of the graph so we don't modify the original
        graph = self.copy()
        # Step 1: Remove all terminal vertices; repeat because removing one
        # terminal vertex can expose another
        done = False
        while not done:
            verticesToRemove = []
            for vertex1, value in graph.edges.iteritems():
                if len(value) == 1: verticesToRemove.append(vertex1)
            done = len(verticesToRemove) == 0
            # Remove identified vertices from graph
            for vertex in verticesToRemove:
                graph.removeVertex(vertex)
        # Step 2: Remove all other vertices that are not part of cycles
        verticesToRemove = []
        for vertex in graph.vertices:
            found = graph.isVertexInCycle(vertex)
            if not found:
                verticesToRemove.append(vertex)
        # Remove identified vertices from graph
        for vertex in verticesToRemove:
            graph.removeVertex(vertex)
        ### also need to remove EDGES that are not in ring
        # Step 3: Split graph into remaining subgraphs
        graphs = graph.split()
        # Step 4: Find ring sets in each subgraph
        cycleList = []
        for graph in graphs:
            while len(graph.vertices) > 0:
                # Choose root vertex as vertex with smallest number of edges
                rootVertex = None
                for vertex in graph.vertices:
                    if rootVertex is None:
                        rootVertex = vertex
                    elif len(graph.edges[vertex]) < len(graph.edges[rootVertex]):
                        rootVertex = vertex
                # Get all cycles involving the root vertex
                cycles = graph.getAllCycles(rootVertex)
                if len(cycles) == 0:
                    # This vertex is no longer in a ring: remove all of its
                    # edges, then the vertex itself, and pick a new root
                    neighbours = graph.edges[rootVertex].keys()[:]
                    for vertex2 in neighbours:
                        graph.removeEdge(rootVertex, vertex2)
                    graph.removeVertex(rootVertex)
                    continue
                # Keep the smallest of the cycles found above
                cycle = cycles[0]
                for c in cycles[1:]:
                    if len(c) < len(cycle):
                        cycle = c
                cycleList.append(cycle)
                # Remove from the graph all vertices in the cycle that have only two edges
                verticesToRemove = []
                for vertex in cycle:
                    if len(graph.edges[vertex]) <= 2:
                        verticesToRemove.append(vertex)
                if len(verticesToRemove) == 0:
                    # There are no vertices in this cycle with only two edges,
                    # so break the cycle by removing the edge between the root
                    # vertex and one of its neighbors instead.
                    # Bug fix: this used to read ``graph[rootVertex]``, which
                    # raises TypeError because Graph defines no __getitem__;
                    # the adjacency dict must be accessed via ``graph.edges``.
                    graph.removeEdge(rootVertex, graph.edges[rootVertex].keys()[0])
                else:
                    for vertex in verticesToRemove:
                        graph.removeVertex(vertex)
        return cycleList
################################################################################
def VF2_isomorphism(graph1, graph2, subgraph=False, findAll=False, initialMap=None):
    """
    Determines if two :class:`Graph` objects `graph1` and `graph2` are
    isomorphic. A number of options affect how the isomorphism check is
    performed:

    * If `subgraph` is ``True``, the isomorphism function will treat `graph2`
      as a subgraph of `graph1`. In this instance a subgraph can either mean a
      smaller graph (i.e. fewer vertices and/or edges) or a less specific graph.

    * If `findAll` is ``True``, all valid isomorphisms will be found and
      returned; otherwise only the first valid isomorphism will be returned.

    * The `initialMap` parameter can be used to pass a previously-established
      mapping. This mapping will be preserved in all returned valid
      isomorphisms.

    The isomorphism algorithm used is the VF2 algorithm of Vento and Foggia.
    The function returns a boolean `isMatch` indicating whether or not one or
    more valid isomorphisms have been found. When `findAll` is ``True`` the
    second return value is a list of mapping dictionaries (graph1 vertex ->
    graph2 vertex); when `findAll` is ``False`` it is a single mapping dict
    (possibly partial if no match was found).

    NOTE(review): this function mutates its inputs: `initialMap` is used
    directly as the working mapping, and both graphs are re-sorted via
    ``sortVertices()``.
    """
    cython.declare(isMatch=cython.bint, map12List=list, map21List=list)
    cython.declare(terminals1=list, terminals2=list, callDepth=cython.int)
    cython.declare(vert=Vertex)
    map21List = list()
    # Some quick initial checks to avoid using the full algorithm if the
    # graphs are obviously not isomorphic (based on graph size)
    if not subgraph:
        if len(graph2.vertices) != len(graph1.vertices):
            # The two graphs don't have the same number of vertices, so they
            # cannot be isomorphic
            return False, map21List
        elif len(graph1.vertices) == len(graph2.vertices) == 0:
            logging.warning("Tried matching empty graphs (returning True)")
            # The two graphs don't have any vertices; this means they are
            # trivially isomorphic
            return True, map21List
    else:
        if len(graph2.vertices) > len(graph1.vertices):
            # The second graph has more vertices than the first, so it cannot be
            # a subgraph of the first
            return False, map21List
    if initialMap is None: initialMap = {}
    map12List = list()
    # Initialize callDepth with the size of the smallest graph
    # Each recursive call to __VF2_match will decrease it by one;
    # when the whole graph has been explored, it should reach 0
    # It should never go below zero!
    callDepth = min(len(graph1.vertices), len(graph2.vertices)) - len(initialMap)
    # Sort the vertices in each graph to make the isomorphism more efficient
    graph1.sortVertices()
    graph2.sortVertices()
    # Generate initial mapping pairs
    # map21 = map to 2 from 1
    # map12 = map to 1 from 2
    map21 = initialMap
    map12 = dict([(v,k) for k,v in initialMap.iteritems()])
    # Generate an initial set of terminals (unmapped vertices adjacent to
    # already-mapped ones)
    terminals1 = __VF2_terminals(graph1, map21)
    terminals2 = __VF2_terminals(graph2, map12)
    isMatch = __VF2_match(graph1, graph2, map21, map12, \
        terminals1, terminals2, subgraph, findAll, map21List, map12List, callDepth)
    if findAll:
        return len(map21List) > 0, map21List
    else:
        # Note: returns the (possibly partial) mapping dict, not a list
        return isMatch, map21
def __VF2_feasible(graph1, graph2, vertex1, vertex2, map21, map12, terminals1,
    terminals2, subgraph):
    """
    Returns :data:`True` if two vertices `vertex1` and `vertex2` from graphs
    `graph1` and `graph2`, respectively, are feasible matches. `map21` and
    `map12` are the current state of the mapping from `graph1` to `graph2`
    and vice versa, respectively. `terminals1` and `terminals2` are lists of
    the vertices that are directly connected to the already-mapped vertices.
    `subgraph` is :data:`True` if graph2 is to be treated as a potential
    subgraph of graph1, i.e. graph1 is a specific case of graph2.

    Uses the VF2 algorithm of Vento and Foggia. The feasibility is assessed
    through a series of semantic and structural checks. Only the combination
    of the semantic checks and the level 0 structural check are both
    necessary and sufficient to ensure feasibility. (This does *not* mean that
    vertex1 and vertex2 are always a match, although the level 1 and level 2
    checks preemptively eliminate a number of false positives.)
    """
    cython.declare(vert1=Vertex, vert2=Vertex, edge1=Edge, edge2=Edge, edges1=dict, edges2=dict)
    cython.declare(i=cython.int)
    cython.declare(term1Count=cython.int, term2Count=cython.int, neither1Count=cython.int, neither2Count=cython.int)
    if not subgraph:
        # To be feasible the connectivity values must be an exact match
        # (for subgraph matching they may legitimately differ)
        if vertex1.connectivity1 != vertex2.connectivity1: return False
        if vertex1.connectivity2 != vertex2.connectivity2: return False
        if vertex1.connectivity3 != vertex2.connectivity3: return False
    # Semantic check #1: vertex1 and vertex2 must be equivalent
    if subgraph:
        if not vertex1.isSpecificCaseOf(vertex2): return False
    else:
        if not vertex1.equivalent(vertex2): return False
    # Get edges adjacent to each vertex
    edges1 = graph1.edges[vertex1]
    edges2 = graph2.edges[vertex2]
    # Semantic check #2: adjacent vertices to vertex1 and vertex2 that are
    # already mapped should be connected by equivalent edges
    for vert2 in edges2:
        if vert2 in map12:
            vert1 = map12[vert2]
            if not vert1 in edges1: # atoms not joined in graph1
                return False
            edge1 = edges1[vert1]
            edge2 = edges2[vert2]
            if subgraph:
                if not edge1.isSpecificCaseOf(edge2): return False
            else: # exact match required
                if not edge1.equivalent(edge2): return False
    # there could still be edges in graph1 that aren't in graph2.
    # this is ok for subgraph matching, but not for exact matching
    if not subgraph:
        for vert1 in edges1:
            if vert1 in map21:
                vert2 = map21[vert1]
                if not vert2 in edges2: return False
    # Count number of terminals adjacent to vertex1 and vertex2
    term1Count = 0; term2Count = 0; neither1Count = 0; neither2Count = 0
    for vert1 in edges1:
        if vert1 in terminals1: term1Count += 1
        elif vert1 not in map21: neither1Count += 1
    for vert2 in edges2:
        if vert2 in terminals2: term2Count += 1
        elif vert2 not in map12: neither2Count += 1
    # Level 2 look-ahead: the number of adjacent vertices of vertex1 and
    # vertex2 that are non-terminals must be equal (or >= for subgraph)
    if subgraph:
        if neither1Count < neither2Count: return False
    else:
        if neither1Count != neither2Count: return False
    # Level 1 look-ahead: the number of adjacent vertices of vertex1 and
    # vertex2 that are terminals must be equal (or >= for subgraph)
    if subgraph:
        if term1Count < term2Count: return False
    else:
        if term1Count != term2Count: return False
    # Level 0 look-ahead: all adjacent vertices of vertex2 already in the
    # mapping must map to adjacent vertices of vertex1
    for vert2 in edges2:
        if vert2 in map12:
            vert1 = map12[vert2]
            if vert1 not in edges1: return False
    # Also, all adjacent vertices of vertex1 already in the mapping must map to
    # adjacent vertices of vertex2, unless we are subgraph matching
    if not subgraph:
        for vert1 in edges1:
            if vert1 in map21:
                vert2 = map21[vert1]
                if vert2 not in edges2: return False
    # All of our tests have been passed, so the two vertices are a feasible
    # pair
    return True
def __VF2_match(graph1, graph2, map21, map12, terminals1, terminals2, subgraph,
    findAll, map21List, map12List, callDepth):
    """
    A recursive function used to explore two graphs `graph1` and `graph2` for
    isomorphism by attempting to map them to one another. `map21` and
    `map12` are the current state of the mapping from `graph1` to `graph2`
    and vice versa, respectively. `terminals1` and `terminals2` are lists of
    the vertices that are directly connected to the already-mapped vertices.
    `subgraph` is :data:`True` if graph2 is to be treated as a potential
    subgraph of graph1, i.e. graph1 is a specific case of graph2.

    If findAll=True then it adds valid mappings to map21List and
    map12List, but returns False when done (or True if the initial mapping is
    complete). The mappings `map21`/`map12` are mutated in place and restored
    on backtracking.

    Uses the VF2 algorithm of Vento and Foggia, which is O(N) in spatial
    complexity and O(N**2) (best-case) to O(N! * N) (worst-case) in temporal
    complexity.
    """
    cython.declare(vertices1=list, new_terminals1=list, new_terminals2=list)
    cython.declare(vertex1=Vertex, vertex2=Vertex)
    cython.declare(ismatch=cython.bint)
    # Make sure we don't get caught in an infinite recursive loop
    if callDepth < 0:
        logging.error("Recursing too deep. Now %d" % callDepth)
        if callDepth < -100:
            raise Exception("Recursing infinitely deep!")
    # Done if we have mapped to all vertices in graph
    if callDepth == 0:
        if not subgraph:
            assert len(map21) == len(graph1.vertices), \
                "Calldepth mismatch: callDepth = %g, len(map21) = %g, len(map12) = %g, len(graph1.vertices) = %g, len(graph2.vertices) = %g" % (callDepth, len(map21), len(map12), len(graph1.vertices), len(graph2.vertices))
            if findAll:
                # Record a snapshot: the dicts are mutated on backtracking
                map21List.append(map21.copy())
                map12List.append(map12.copy())
            return True
        else:
            assert len(map12) == len(graph2.vertices), \
                "Calldepth mismatch: callDepth = %g, len(map21) = %g, len(map12) = %g, len(graph1.vertices) = %g, len(graph2.vertices) = %g" % (callDepth, len(map21), len(map12), len(graph1.vertices), len(graph2.vertices))
            if findAll:
                map21List.append(map21.copy())
                map12List.append(map12.copy())
            return True
    # Create list of pairs of candidates for inclusion in mapping
    # Note that the extra Python overhead is not worth making this a standalone
    # method, so we simply put it inline here
    # If we have terminals for both graphs, then use those as a basis for the
    # pairs
    if len(terminals1) > 0 and len(terminals2) > 0:
        vertices1 = terminals1
        vertex2 = terminals2[0]
    # Otherwise construct list from all *remaining* vertices (not matched)
    else:
        # vertex2 is the lowest-labelled un-mapped vertex from graph2
        # Note that this assumes that graph2.vertices is properly sorted
        vertices1 = []
        for vertex1 in graph1.vertices:
            if vertex1 not in map21:
                vertices1.append(vertex1)
        for vertex2 in graph2.vertices:
            if vertex2 not in map12:
                break
        else: # for-else: no unmapped vertex remains in graph2
            raise Exception("Could not find a pair to propose!")
    for vertex1 in vertices1:
        # propose a pairing
        if __VF2_feasible(graph1, graph2, vertex1, vertex2, map21, map12, \
            terminals1, terminals2, subgraph):
            # Update mapping accordingly
            map21[vertex1] = vertex2
            map12[vertex2] = vertex1
            # update terminals (returns fresh copies; originals untouched)
            new_terminals1 = __VF2_updateTerminals(graph1, map21, terminals1, vertex1)
            new_terminals2 = __VF2_updateTerminals(graph2, map12, terminals2, vertex2)
            # Recurse
            ismatch = __VF2_match(graph1, graph2, \
                map21, map12, new_terminals1, new_terminals2, subgraph, findAll, \
                map21List, map12List, callDepth-1)
            if ismatch:
                if not findAll:
                    return True
            # Undo proposed match
            del map21[vertex1]
            del map12[vertex2]
            # changes to 'new_terminals' will be discarded and 'terminals' is unchanged
    return False
def __VF2_terminals(graph, mapping):
    """
    For a given graph `graph` and associated partial mapping `mapping`,
    build the list of terminals: vertices that are not yet mapped but are
    directly connected to at least one already-mapped vertex. The list
    preserves the order of ``graph.vertices`` (so it is sorted whenever the
    graph's vertices are).
    """
    cython.declare(terminals=list)
    terminals = []
    for candidate in graph.vertices:
        if candidate in mapping:
            continue
        # A terminal is adjacent to at least one mapped vertex.
        for mapped in mapping:
            if candidate in graph.edges[mapped]:
                terminals.append(candidate)
                break
    return terminals
def __VF2_updateTerminals(graph, mapping, old_terminals, new_vertex):
    """
    For a given graph `graph` and associated partial mapping `mapping`,
    *updates* a list of terminals, vertices that are directly connected to
    vertices that have already been mapped. You have to pass it the previous
    list of terminals `old_terminals` and the vertex `new_vertex` that has
    been added to the mapping. Returns a new *copy* of the terminals; the
    input list is not modified. The list is kept ordered by sorting label.
    """
    cython.declare(terminals=list, vertex1=Vertex, vertex2=Vertex, edges=dict)
    cython.declare(i=cython.int, sorting_label=cython.short, sorting_label2=cython.short)
    # Copy the old terminals, leaving out the new_vertex (it is now mapped)
    terminals = old_terminals[:]
    if new_vertex in terminals: terminals.remove(new_vertex)
    # Add the terminals of new_vertex (its unmapped neighbors)
    edges = graph.edges[new_vertex]
    for vertex1 in edges:
        if vertex1 not in mapping: # only add if not already mapped
            # find spot in the sorted terminals list where we should put this vertex
            sorting_label = vertex1.sortingLabel
            i=0; sorting_label2=-1 # in case terminals list empty
            for i in range(len(terminals)):
                vertex2 = terminals[i]
                sorting_label2 = vertex2.sortingLabel
                if sorting_label2 >= sorting_label:
                    break
                # else continue going through the list of terminals
            else: # got to end of list without breaking,
                # so add one to index to make sure vertex goes at end
                i+=1
            if sorting_label2 == sorting_label: # this vertex already in terminals.
                continue # try next vertex in graph[new_vertex]
            # insert vertex in right spot in terminals
            terminals.insert(i,vertex1)
    return terminals
################################################################################
| jwallen/ChemPy | chempy/graph.py | Python | mit | 37,438 | [
"ChemPy"
] | e50c16bcbfeef23ea110806071a5087ba26c20fed45589f5ac37ce104051713c |
import numpy as np
import scipy.linalg as la
from dora.regressors.gp import linalg
from dora.regressors.gp import types
from time import time
def alpha(Y, L):
    """
    Compute the GP weight vector ``alpha`` satisfying ``(L L^T) alpha = Y``
    via two triangular solves, where `L` is the lower Cholesky factor.
    """
    forward = la.solve_triangular(L, Y, lower=True, check_finite=False)
    return la.solve_triangular(L.T, forward, check_finite=False)
# Mux and demux are mainly used for multi-task problems
# They convert lists to tagged vectors and vice versa
def mux(X_list, y_list=None):
    """
    Stack a list of per-task input arrays into a single array, appending a
    final column tagging each row with the index of the task it came from.
    If `y_list` is given, the per-task target vectors are concatenated and
    returned as well.
    """
    counts = [x.shape[0] for x in X_list]
    stacked = np.vstack(X_list)
    labels = np.concatenate([task * np.ones(counts[task])
                             for task in range(len(X_list))])
    stacked = np.hstack((stacked, labels[:, np.newaxis]))
    if y_list is None:
        return stacked
    return stacked, np.concatenate(y_list)
def demux(y, X):
    """
    Split a concatenated target vector `y` back into per-task arrays using
    the task-label column appended by :func:`mux` (the last column of `X`).
    """
    # Labels run 0..n_tasks-1 and the rows are grouped by task, so the last
    # row carries the highest task index.
    n_tasks = int(X[-1, -1]) + 1
    labels = X[:, -1]
    return [y[labels == task] for task in range(n_tasks)]
def noise_vector(X, noise_params):
if type(X) is list:
# multi-task
result = np.concatenate([noise_params[i]*np.ones(X[i].shape[0])
for i in range(len(X))])
else:
# single task
result = np.ones(X.shape[0])*noise_params[0]
assert(result.ndim == 1)
return result
def mean(regressor, query):
return np.dot(query.K_xxs.T, regressor.alpha)
def covariance(regressor, query):
K_xs = regressor.kernel(query.Xs, query.Xs) # matrix
v = la.solve_triangular(regressor.L, query.K_xxs,
lower=True, check_finite=False)
return K_xs - np.dot(v.T, v)
def variance(regressor, query):
K_xs = regressor.kernel(query.Xs, None) # vector
v = la.solve_triangular(regressor.L, query.K_xxs,
lower=True, check_finite=False)
result = K_xs - np.sum(v**2, axis=0)
return result
def query(Xs, p):
K_xxs = p.kernel(p.X, Xs)
return types.QueryParams(Xs, K_xxs)
# # TODO: These are actually draws from a Gaussian and more general than a GP
# def draws(ndraws, mean, cov):
# L_s = linalg.jitchol(cov)
# draws = []
# for i in range(ndraws):
# norms = np.random.normal(loc=0.0,scale=1.0,size=(mean.shape[0],1))
# c = np.dot(L_s, norms).ravel()
# d = c + mean
# draws.append(d)
# return draws
# Proposed change to the above 'draws' function -Kelvin
def draws(ndraws, exp, cov):
# S: Standard Draw
# C: Transform to include covariance
# D: Transform to include expectance
L = linalg.jitchol(cov)
S = np.random.normal(loc = 0.0, scale = 1.0, size = (exp.shape[0], ndraws))
C = np.dot(L, S)
D = (C.T + exp)
return D
| NICTA/dora | dora/regressors/gp/predict.py | Python | apache-2.0 | 2,721 | [
"Gaussian"
] | 419e7e860a071c5d32a590d76279bfb5152de4b2f39a38c04cde8779a2c0e25f |
"""
System class for biomolecules using AMBER ff.
Set up using prmtop and inpcrd files used in Amber GMIN and Optim.
Potential parameters (e.g. non-bonded cut-offs are set in
TODO:
Parameters
----------
prmtopFname : str
prmtop file name
inpcrdFname : str
inpcrd file name
See Also
--------
BaseSystem
"""
# utils
import numpy as np
import tempfile
import os
import shutil
# pygmin
from pygmin.systems import BaseSystem
from pygmin.mindist import ExactMatchAtomicCluster, MinPermDistAtomicCluster
from pygmin.transition_states import orthogopt
from pygmin.transition_states import InterpolatedPathDensity, NEB, create_NEB
from pygmin.landscape import smoothPath
from pygmin.systems import BaseParameters
from pygmin.utils.elements import elements
from pygmin.systems.spawn_OPTIM import SpawnOPTIM
# OpenMM related
import openmm_potential
from simtk.unit import angstrom as openmm_angstrom
__all__ = ["AMBERSystem"]
class AMBERSystem(BaseSystem):
def __init__(self, prmtopFname, inpcrdFname):
super(AMBERSystem, self).__init__()
if os.path.exists('min.in') and os.path.exists('data') :
# print '\nFiles min.in and data found. trying to import ambgmin_ now ..'
try:
import ambgmin_
import gmin_potential
self.potential = gmin_potential.GMINAmberPotential(prmtopFname, inpcrdFname)
print '\namberSystem> Using GMIN Amber potential ..'
except ImportError:
# using OpenMM because ambgmin_ could not be imported
print '\namberSystem> Using OpenMM amber potential because ambgmin_ not imported ..'
self.potential = openmm_potential.OpenMMAmberPotential(prmtopFname, inpcrdFname)
else:
# using OpenMM because min.in and data files not found
print '\namberSystem> Using OpenMM amber potential ..'
self.potential = openmm_potential.OpenMMAmberPotential(prmtopFname, inpcrdFname)
# check for openmm version
# data structures changed between openmm4 and 5
# crude check - todo
if hasattr(self.potential.prmtop.topology._bonds,'index'):
self.OpenMMVer = 5
else:
self.OpenMMVer = 4
self.set_params(self.params)
self.natoms = self.potential.prmtop.topology._numAtoms
self.params.database.accuracy = 1e-3
self.params.basinhopping["temperature"] = 1.
self.params.takestep_random_displacement = BaseParameters()
self.params.takestep_random_displacement.stepsize = 2.
self.prmtopFname = prmtopFname
self.inpcrdFname = inpcrdFname
# atom numbers of peptide bonds
self.populate_peptideAtomList()
# atom numbers of CA neighbors
self.populate_CAneighborList()
self.params.basinhopping.insert_rejected = False
self.sanitycheck = True # todo: this should be part of params and show up in GUI
if self.sanitycheck:
# self.params.basinhopping.confCheck = [self.check_cistrans_wrapper, self.check_CAchirality_wrapper]
self.params.basinhopping.confCheck = [self.check_CAchirality_wrapper]
self.params.double_ended_connect.conf_checks = [self.check_cistrans_wrapper_kwargs, self.check_CAchirality_wrapper_kwargs]
def set_params(self, params):
"""set default parameters for the system"""
#set NEBparams
NEBparams = params.double_ended_connect.local_connect_params.NEBparams
NEBparams.NEBquenchParams = BaseParameters()
# NEBquenchParams = NEBparams.NEBquenchParams
NEBparams.iter_density = 15.
NEBparams.image_density = 3.5
NEBparams.max_images = 50
NEBparams.k = 100.
NEBparams.adjustk_freq = 5
if False: #use fire
from pygmin.optimize import fire
NEBparams.quenchRoutine = fire
else: #use lbfgs
NEBparams.NEBquenchParams.maxErise = 100.5
NEBparams.NEBquenchParams.maxstep = .1
NEBparams.NEBquenchParams.tol = 1e-2
NEBparams.reinterpolate = 50
NEBparams.adaptive_niter = True
NEBparams.adaptive_nimages = True
NEBparams.adjustk_freq = 50
#set transition state search params
tsSearchParams = params.double_ended_connect.local_connect_params.tsSearchParams
tsSearchParams.nsteps = 200
tsSearchParams.lowestEigenvectorQuenchParams["nsteps"] = 100
tsSearchParams.lowestEigenvectorQuenchParams["tol"] = 0.001
tsSearchParams.tangentSpaceQuenchParams["maxstep"] = .1
tsSearchParams.nfail_max = 1000
tsSearchParams.nsteps_tangent1 = 5
tsSearchParams.nsteps_tangent2 = 100
tsSearchParams.max_uphill_step = .3
#control the output
tsSearchParams.verbosity = 0
NEBparams.NEBquenchParams.iprint = 50
tsSearchParams.lowestEigenvectorQuenchParams["iprint"] = -50
tsSearchParams.tangentSpaceQuenchParams["iprint"] = -5
tsSearchParams["iprint"] = 10
# self.params.double_ended_connect.local_connect_params.pushoff_params.verbose = True
# self.params.double_ended_connect.local_connect_params.pushoff_params.stepmin = 1e-3
# self.params.double_ended_connect.local_connect_params.pushoff_params.gdiff = 100.
# #self.params.double_ended_connect.local_connect_params.pushoff_params.quenchRoutine = fire
def __call__(self):
return self
def get_potential(self):
return self.potential
def get_random_configuration(self):
"""a starting point for basinhopping, etc."""
from simtk.openmm.app import pdbfile as openmmpdbReader
pdb = openmmpdbReader.PDBFile('coords.pdb') # todo: coords.pdb is hardcoded
coords = pdb.getPositions() / openmm_angstrom
coords = np.reshape(np.transpose(coords), 3*len(coords), 1)
return coords
def get_metric_tensor(self, coords):
"""metric tensor for all masses m_i=1.0 """
print 'amberSystem> setting up mass matrix for normal modes'
# return np.identity(coords.size)
massMatrix_tmp = np.identity(coords.size)
# get masses from 'elements' file
for i in self.potential.prmtop.topology.atoms():
atomNum = i.index
atomElem = i.name[0] # assuming elements corresponding to first character of atom name
m = elements[atomElem]['mass']
massMatrix_tmp[atomNum][atomNum] = 1/m
return massMatrix_tmp
def get_permlist(self):
import pdb2permlist
#return [[0, 2, 3], [11, 12, 13], [19, 20, 21] ] # aladipep
#return [[0, 2, 3], [11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43], [49,50,51]] # tetraala
if os.path.exists('coordsModTerm.pdb'):
print '\namberSystem> constructing perm list from coordsModTerm.pdb'
print ' (see comments in amberPDB_to_permList.py)'
plist = pdb2permlist.pdb2permList('coordsModTerm.pdb')
print '\namberSystem> Groups of permutable atoms (atom numbers start at 0) = '
for i in plist:
print i
return plist
else:
print 'amberSystem> coordsModTerm.pdb not found. permlist could not be created.'
return []
def get_mindist(self):
permlist = self.get_permlist()
return MinPermDistAtomicCluster(permlist=permlist, niter=10, can_invert=False)
# def createNEB(self, coords1, coords2):
# pot = self.get_potential()
# NEBparams = self.params.double_ended_connect.local_connect_params.NEBparams
# return create_NEB(pot, coords1, coords2, verbose=True, **NEBparams)
def get_orthogonalize_to_zero_eigenvectors(self):
return orthogopt
def get_compare_exact(self, **kwargs):
permlist = self.get_permlist()
return ExactMatchAtomicCluster(permlist=permlist, **kwargs)
def smooth_path(self, path, **kwargs):
mindist = self.get_mindist()
return smoothPath(path, mindist, **kwargs)
def drawCylinder(self, X1, X2):
from OpenGL import GL,GLUT, GLU
z = np.array([0.,0.,1.]) #default cylinder orientation
p = X2-X1 #desired cylinder orientation
r = np.linalg.norm(p)
t = np.cross(z,p) #angle about which to rotate
a = np.arccos( np.dot( z,p) / r ) #rotation angle
a *= (180. / np.pi) #change units to angles
GL.glPushMatrix()
GL.glTranslate( X1[0], X1[1], X1[2] )
GL.glRotate( a, t[0], t[1], t[2] )
g=GLU.gluNewQuadric()
GLU.gluCylinder(g, .1,0.1,r,30,30) #I can't seem to draw a cylinder
GL.glPopMatrix()
def draw(self, coordsl, index):
from OpenGL import GL,GLUT
coords=coordsl.reshape(coordsl.size/3,3)
com=np.mean(coords, axis=0)
# draw atoms as spheres
for i in self.potential.prmtop.topology.atoms():
atomElem = i.name[0]
atomNum = i.index
x = coords[atomNum] - com
GL.glPushMatrix()
GL.glTranslate(x[0],x[1],x[2])
col = elements[atomElem]['color']
if index == 2:
col = [0.5, 1.0, .5]
# scaling down the radius by factor of 5, else the spheres fuse into one another
rad = elements[atomElem]['radius']/5
GL.glMaterialfv(GL.GL_FRONT_AND_BACK, GL.GL_DIFFUSE, col)
GLUT.glutSolidSphere(rad,30,30)
GL.glPopMatrix()
# draw bonds
for atomPairs in self.potential.prmtop.topology.bonds():
# note that atom numbers in topology start at 0
if self.OpenMMVer == 5:
xyz1 = coords[atomPairs[0].index] - com
xyz2 = coords[atomPairs[1].index] - com
else:
xyz1 = coords[atomPairs[0]] - com
xyz2 = coords[atomPairs[1]] - com
self.drawCylinder(xyz1, xyz2)
def load_coords_pymol(self, coordslist, oname, index=1):
"""load the coords into pymol
the new object must be named oname so we can manipulate it later
Parameters
----------
coordslist : list of arrays
oname : str
the new pymol object must be named oname so it can be manipulated
later
index : int
we can have more than one molecule on the screen at one time. index tells
which one to draw. They are viewed at the same time, so should be
visually distinct, e.g. different colors. accepted values are 1 or 2
Notes
-----
the implementation here is a bit hacky. we create a temporary xyz file from coords
and load the molecule in pymol from this file.
"""
#pymol is imported here so you can do, e.g. basinhopping without installing pymol
import pymol
#create the temporary file
suffix = ".pdb"
f = tempfile.NamedTemporaryFile(mode="w", suffix=suffix)
fname = f.name
from simtk.openmm.app import pdbfile as openmmpdb
#write the coords into pdb file
from pygmin.mindist import CoMToOrigin
ct = 0
for coords in coordslist:
ct = ct + 1
coords = CoMToOrigin(coords.copy())
self.potential.copyToLocalCoords(coords)
# openmmpdb.PDBFile.writeFile(self.potential.prmtop.topology , self.potential.localCoords * openmm_angstrom , file=sys.stdout, modelIndex=1)
openmmpdb.PDBFile.writeModel(self.potential.prmtop.topology , self.potential.localCoords * openmm_angstrom , file=f, modelIndex=ct)
print "closing file"
f.flush()
#load the molecule from the temporary file
pymol.cmd.load(fname)
#get name of the object just created and change it to oname
objects = pymol.cmd.get_object_list()
objectname = objects[-1]
pymol.cmd.set_name(objectname, oname)
#set the representation
pymol.cmd.hide("everything", oname)
pymol.cmd.show("lines", oname)
# #set the color according to index
# if index == 1:
# pymol.cmd.color("red", oname)
# else:
# pymol.cmd.color("blue", oname)
def get_optim_spawner(self, coords1, coords2):
import os
from pygmin.config import config
optim = config.get("exec", "AMBOPTIM")
optim = os.path.expandvars(os.path.expanduser(optim))
print "optim executable", optim
return AmberSpawnOPTIM(coords1, coords2, self, OPTIM=optim, tempdir=False)
def populate_peptideAtomList(self):
listofC = []
listofO = []
listofN = []
listofH = []
for i in self.potential.prmtop.topology.atoms():
if i.name == 'C':
listofC.append(i.index)
if i.name == 'O':
listofO.append(i.index)
if i.name == 'N':
listofN.append(i.index)
if i.name == 'H':
listofH.append(i.index)
#print listofC
#print listofO
#print listofN
#print listofH
# atom numbers of peptide bond
self.peptideBondAtoms = []
for i in listofC:
if listofO.__contains__(i+1) and listofN.__contains__(i+2) and listofH.__contains__(i+3):
self.peptideBondAtoms.append([i,i+1,i+2,i+3])
print '\namberSystem> Peptide bond atom numbers (C,O,N,H, in order): '
for i in self.peptideBondAtoms:
print i
def populate_CAneighborList(self):
listofCA = []
listofC = []
listofN = []
listofCB = []
for i in self.potential.prmtop.topology.atoms():
if i.name == 'CA':
listofCA.append(i.index)
if i.name == 'C':
listofC.append(i.index)
if i.name == 'N':
listofN.append(i.index)
if i.name == 'CB':
listofCB.append(i.index)
#print '---amberSystem> list of CA = ' , listofCA
#print '---amberSystem> list of C = ' , listofC
#print '---amberSystem> list of N = ' , listofN
#print '---amberSystem> list of CB = ' , listofCB
# atom numbers of peptide bond
self.CAneighborList = []
for i in listofCA:
# find atoms bonded to CA
neighborlist = []
for b in self.potential.prmtop.topology.bonds():
print b
if b[0] == i:
neighborlist.append(b[1])
if b[1] == i:
neighborlist.append(b[0])
# Commented, since this stuff doesn't seem to work at the moment...
# if self.OpenMMVer == 5 :
# # openmm5
# if b[0].index == i:
# neighborlist.append(b[1].index)
# if b[1].index == i:
# neighborlist.append(b[0].index)
# else: # openmm4
# if b[0].index == i:
# neighborlist.append(b[1].index)
# if b[1].index == i:
# neighborlist.append(b[0].index)
#print '---bonds = ', b[0].index , b[1].index
#print '---amberSystem> atoms bonded to CA ',i, ' = ', neighborlist
nn = [i]
# append C (=O)
for n in neighborlist:
if listofC.__contains__(n):
nn.append(n)
# append CB
for n in neighborlist:
if listofCB.__contains__(n):
nn.append(n)
# append N
for n in neighborlist:
if listofN.__contains__(n):
nn.append(n)
self.CAneighborList.append(nn)
# atoms numbers start at 0
print '\namberSystem> CA neighbors atom numbers (CA,C(=O),CB, N, in order): '
for i in self.CAneighborList:
print i
def check_cistrans_wrapper_kwargs(self, coords=None, **kwargs):
print 'in check_cistrans_wrapper_kwargs'
return self.check_cistrans(coords)
def check_cistrans_wrapper(self, energy, coords, **kwargs):
return self.check_cistrans(coords)
def check_cistrans(self, coords):
"""
Sanity check on the isomer state of peptide bonds
Returns False if the check fails i.e. if any of the peptide bond is CIS
"""
import measure
m = measure.Measure()
isTrans = True
for i in self.peptideBondAtoms:
atNum = i[0]
rC = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[1]
rO = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[2]
rN = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[3]
rH = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
# compute O-C-N-H torsion angle
rad, deg = m.torsion(rO,rC,rN,rH)
# print 'peptide torsion (deg) ', i, ' = ', deg
# check cis
if deg < 90 or deg > 270:
isTrans = False
print 'CIS peptide bond between atoms ', i, ' torsion (deg) = ', deg
return isTrans
def check_CAchirality_wrapper_kwargs(self, coords=None, **kwargs):
return self.check_CAchirality(coords)
def check_CAchirality_wrapper(self, energy, coords, **kwargs):
return self.check_CAchirality(coords)
def check_CAchirality(self, coords):
"""
Sanity check on the CA to check if it is L of D
Returns False if the check fails i.e. if any D-amino acid is present
"""
# print 'in check CA chirality'
import measure
m = measure.Measure()
isL = True
for i in self.CAneighborList:
atNum = i[0]
rCA = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[1]
rC = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[2]
rCB = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
atNum = i[3]
rN = np.array( [ coords[3*atNum] , coords[3*atNum+1] , coords[3*atNum+2] ])
# compute improper torsion angle between C-CA-CB and CA-CB-N
rad, deg = m.torsion(rC,rCA,rCB,rN)
# check cis
if deg < 180 :
# this condition was found by inspection of structures todo
isL = False
print 'chiral state of CA atom ', i[0], ' is D'
print 'CA improper torsion (deg) ', i, ' = ', deg
return isL
def test_potential(self, pdbfname ):
""" tests amber potential for pdbfname
Input
-----
pdbfname = full path to pdb file
"""
# read a conformation from pdb file
print 'reading conformation from coords.pdb'
from simtk.openmm.app import pdbfile as openmmpdb
pdb = openmmpdb.PDBFile(pdbfname)
coords = pdb.getPositions() / openmm_angstrom
coords = np.reshape(np.transpose(coords), 3*len(coords), 1)
e = self.potential.getEnergy(coords)
print 'Energy (kJ/mol) = '
print e
e, g = self.potential.getEnergyGradient(coords)
gnum = self.potential.NumericalDerivative(coords, eps=1e-6)
print 'Energy (kJ/mol) = '
print e
print 'Analytic Gradient = '
print g[1:3]
print 'Numerical Gradient = '
print gnum[1:3]
print 'Num vs Analytic Gradient ='
print np.max(np.abs(gnum-g)), np.max(np.abs(gnum))
print np.max(np.abs(gnum-g)) / np.max(np.abs(gnum))
def test_connect(self, database):
#connect the all minima to the lowest minimum
minima = database.minima()
min1 = minima[0]
for min2 in minima[1:]:
connect = self.get_double_ended_connect(min1, min2, database)
connect.connect
def test_disconn_graph(self,database):
from pygmin.utils.disconnectivity_graph import DisconnectivityGraph
from pygmin.landscape import TSGraph
import matplotlib.pyplot as plt
graph = TSGraph(database).graph
dg = DisconnectivityGraph(graph, nlevels=3, center_gmin=True)
dg.calculate()
dg.plot()
plt.show()
def test_BH(self,db,nsteps):
from pygmin.takestep import RandomDisplacement, AdaptiveStepsizeTemperature
takeStepRnd = RandomDisplacement( stepsize=2 )
tsAdaptive = AdaptiveStepsizeTemperature(takeStepRnd, interval=10, verbose=True)
self.params.basinhopping["temperature"] = 10.0
# todo - how do you save N lowest?
bh = self.get_basinhopping(database=db, takestep = takeStepRnd)
bh = self.get_basinhopping(database=db, takestep = tsAdaptive)
print 'Running BH .. '
bh.run(nsteps)
print "Number of minima found = ", len(db.minima())
min0 = db.minima()[0]
print "lowest minimum found has energy = ", min0.energy
def test_mindist(self, db):
m1, m2 = db.minima()[:2]
mindist = sys.get_mindist()
dist, c1, c2 = mindist(m1.coords, m2.coords)
print "distance", dist
class AmberSpawnOPTIM(SpawnOPTIM):
def __init__(self, coords1, coords2, sys, **kwargs):
super(AmberSpawnOPTIM, self).__init__(coords1, coords2, **kwargs)
self.sys = sys
def write_odata_coords(self, coords, fout):
pass
def write_perm_allow(self, fname):
permallow = self.make_permallow_from_permlist(self.sys.get_permlist())
with open(fname, "w") as fout:
fout.write(permallow)
def write_additional_input_files(self, rundir, coords1, coords2):
#write start
with open(rundir + "/start", "w") as fout:
for xyz in coords1.reshape(-1,3):
fout.write( "%f %f %f\n" % tuple(xyz))
#write coords.prmtop and coords.inpcrd
shutil.copyfile(self.sys.prmtopFname, rundir + "/coords.prmtop")
shutil.copyfile(self.sys.inpcrdFname, rundir + "/coords.inpcrd")
min_in = """
STOP
&cntrl
imin = 1,
ncyc = 1,
maxcyc = 1,
igb = 0,
ntb = 0,
cut = 999.99,
rgbmax = 25.0,
ifswitch = 1
/
"""
with open(rundir + "/min.in", "w") as fout:
fout.write(min_in)
def write_odata(self, fout):
odatastr = """
DUMPALLPATHS
UPDATES 6000
NEWCONNECT 15 3 2.0 20.0 30 0.5
CHECKCHIRALITY
comment PATH dumps intermediate conformations along the path
PATH 100 1.0D-2
COMMENT NEWNEB 30 500 0.01
NEBK 10.0
comment DUMPNEBXYZ
AMBERIC
comment AMBERSTEP
DIJKSTRA EXP
DUMPALLPATHS
REOPTIMISEENDPOINTS
COMMENT MAXTSENERGY -4770.0
EDIFFTOL 1.0D-4
MAXERISE 1.0D-4 1.0D0
GEOMDIFFTOL 0.05D0
BFGSTS 500 10 100 0.01 100
NOIT
BFGSMIN 1.0D-6
PERMDIST
MAXSTEP 0.1
TRAD 0.2
MAXMAX 0.3
BFGSCONV 1.0D-6
PUSHOFF 0.1
STEPS 800
BFGSSTEPS 2000
MAXBFGS 0.1
NAB start
"""
fout.write(odatastr)
fout.write("\n")
# ============================ MAIN ================================
if __name__ == "__main__":
# create new amber system
sysAmb = AMBERSystem('/home/ss2029/WORK/PyGMIN/examples/amber/coords.prmtop', '/home/ss2029/WORK/PyGMIN/examples/amber/coords.inpcrd')
# load existing database
from pygmin.storage import Database
dbcurr = Database(db="/home/ss2029/WORK/PyGMIN/examples/amber/aladipep.db")
coords = sysAmb.get_random_configuration()
aa = sysAmb.get_metric_tensor(coords)
exit()
# ------- TEST gui
from pygmin.gui import run as gr
gr.run_gui(sysAmb)
# ------ Test potential
sysAmb.test_potential('coords.pdb')
# ------ BH
sysAmb.test_BH(dbcurr)
# ------- Connect runs
sysAmb.test_connect(dbcurr)
# ------- Disconn graph
sysAmb.test_disconn_graph(dbcurr)
# ------- Test mindist
sysAmb.test_mindist( dbcurr)
| js850/PyGMIN | pygmin/amber/amberSystem.py | Python | gpl-3.0 | 26,533 | [
"Amber",
"OpenMM",
"PyMOL"
] | 17c52d9372132c0eae83ac74a52234ece7cc3d111c91087144e369e5f4111fdd |
# Kristen Laguana
# CS294 Project
# Galaxy Sky Spaxel Logger
import argparse
def create_new_file():
print("Creating new file...\n")
pass
def begin_working_file():
print("Retrieving file...\n")
pass
def classify():
# goes through 15x15 grid
for column in range(1, 16):
for row in range(1,16):
# prints working spaxel, prompts user for classification
print("Currently at {},{}:\n".format(row,column))
ans = raw_input("Is this a sky spaxel? (Y/N/?): ")
# if user cannot classify spaxel
if ans == "?":
print("Diagnostics coming soon, sorry!\n")
# if the spaxel is NOT a sky spaxel
elif ans == "N":
print("{},{} will be considered a galaxy spaxel.\n\
It will not be saved to the file.\n".format(row,column))
# if the spaxel is a sky spaxel
elif ans == "Y":
# prompts user for confirmation
out_file.write("{},{}".format(row,column))
conf = raw_input("{},{} will be saved to file. \
Are you sure? (Y/N): ".format(row, column))
# temp, replace w/whatever makes this return to line 17
if conf == "N":
classify()
# print("WELP. TOO LATE 4 U\n")
# saves spaxel to file
elif conf == "Y":
out_file.write("{},{}\n".format(row,column))
print("{},{} will be saved to file as a sky spaxel.\n"\
.format(row, column))
else:
# same problem as line 17
classify()
def main():
global out_file
parser = argparse.ArgumentParser()
parser.add_argument("--new", "-n", help="start on a new file",\
action="store_true")
parser.add_argument("--file", "-f", help="choose filename")
parser.add_argument("--working", "-w", help="work on existing file",\
action="store_true")
args = parser.parse_args()
# Welcome text
print('\n// Galaxy Sky Spaxel Logger v1.0 by Kristen Laguana //\n\n')
# if starting on a new file:
if args.new:
create_new_file()
out_file = open(args.file, 'w')
print("File {} created. Let's begin.\n".format(args.file))
# if continuing from a previous working file:
elif args.working:
begin_working_file()
out_file = open(args.file, 'a')
print("Working file {} retrieved. Let's begin.\n".format(args.file))
classify()
return
if __name__ == "__main__":
main()
| klaguana/project | logger.py | Python | mit | 2,729 | [
"Galaxy"
] | 2a1db58e87686955cd81e9de811c1d13ad7d6fbec49b6cb6038a603a1cdc7255 |
########################################################################
# $HeadURL$
# File: RequestValidator.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/09/18 07:55:16
########################################################################
""" :mod: RequestValidator
======================
.. module: RequestValidator
:synopsis: request validator
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
A general and simple request validator checking for required attributes and logic.
It checks if required attributes are set/unset but not for their values.
There is a global singleton validator for general use defined in this module: gRequestValidator.
If you need to extend this one with your own specific checks consider:
* for adding Operation or Files required attributes use :addReqAttrsCheck: function::
gRequestValidator.addReqAttrsCheck( "FooOperation", operationAttrs = [ "Bar", "Buzz"], filesAttrs = [ "LFN" ] )
* for adding generic check define a new callable object ( function or functor ) which takes only one argument,
say for functor::
class MyValidator( RequestValidator ):
@staticmethod
def hasFoo( request ):
if not request.Foo:
return S_ERROR("Foo not set")
return S_OK()
or function::
def hasBar( request ):
if not request.Bar:
return S_ERROR("Bar not set")
return S_OK()
and add this one to the validators set by calling gRequestValidator.addValidator, i.e.::
gRequestValidator.addValidator( MyValidator.hasFoo )
gRequestValidator.addValidator( hasFoo )
Notice that all validators should always return S_ERROR/S_OK, no exceptions from that whatsoever!
"""
__RCSID__ = "$Id$"
# #
# @file RequestValidator.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/09/18 07:55:37
# @brief Definition of RequestValidator class.
# # import
import inspect
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
########################################################################
class RequestValidator( object ):
"""
.. class:: RequestValidator
This class validates newly created requests (before saving them in RequestDB) for
required attributes.
"""
# # one to rule them all
__metaclass__ = DIRACSingleton
# # dict with required attrs
reqAttrs = { "ForwardDISET" : { "Operation": [ "Arguments" ], "Files" : [] },
"PutAndRegister" : { "Operation" : [ "TargetSE" ], "Files" : [ "LFN", "PFN" ] },
"ReplicateAndRegister" : { "Operation" : [ "TargetSE" ], "Files" : [ "LFN" ] },
"PhysicalRemoval" : { "Operation" : ["TargetSE" ], "Files" : [ "PFN" ] },
"RemoveFile" : { "Operation" : [], "Files" : [ "LFN" ] },
"RemoveReplica" : { "Operation" : [ "TargetSE" ], "Files" : [ "LFN" ] },
"ReTransfer" : { "Operation" : [ "TargetSE" ], "Files" : [ "LFN", "PFN" ] },
"RegisterFile" : { "Operation" : [ ], "Files" : [ "LFN", "PFN", "ChecksumType", "Checksum", "GUID" ] },
"RegisterReplica" : { "Operation" : [ "TargetSE" ], "Files" : [ "LFN", "PFN" ] } }
def __init__( self ):
""" c'tor
just setting validation order
"""
self.validator = ( self._hasRequestName,
self._hasOwner,
self._hasOperations,
self._hasType,
self._hasFiles,
self._hasRequiredAttrs,
self._hasChecksumAndChecksumType )
@classmethod
def addReqAttrsCheck( cls, operationType, operationAttrs = None, filesAttrs = None ):
""" add required attributes of Operation of type :operationType:
:param str operationType: Operation.Type
:param list operationAttrs: required Operation attributes
:param list filesAttrs: required Files attributes
"""
toUpdate = { "Operation" : operationAttrs if operationAttrs else [],
"Files" : filesAttrs if filesAttrs else [] }
if operationType not in cls.reqAttrs:
cls.reqAttrs[operationType] = { "Operation" : [], "Files" : [] }
for key, attrList in cls.reqAttrs[operationType].items():
cls.reqAttrs[operationType][key] = list( set( attrList + toUpdate[key] ) )
@classmethod
def addValidator( cls, fcnObj ):
""" add :fcnObj: validator """
if not callable( fcnObj ):
return S_ERROR( "supplied argument is not callable" )
args = inspect.getargspec( fcnObj ).args
if len( args ) not in ( 1, 2 ):
return S_ERROR( "wrong number of arguments for supplied function object" )
cls.validator = cls.validator + tuple( fcnObj, )
return S_OK()
def validate( self, request ):
""" validation of a given :request:
:param Request request: Request instance
"""
for validator in self.validator:
isValid = validator( request )
if not isValid["OK"]:
return isValid
# # if we're here request is more or less valid
return S_OK()
@staticmethod
def _hasDIRACSetup( request ):
""" required attribute - DIRACSetup """
if not request.DIRACSetup:
return S_ERROR( "DIRACSetup not set" )
return S_OK()
@staticmethod
def _hasOwner( request ):
""" required attributes OwnerDn and OwnerGroup """
if not request.OwnerDN:
return S_ERROR( "Request '%s' is missing OwnerDN value" % request.RequestName )
if not request.OwnerGroup:
return S_ERROR( "Request '%s' is missing OwnerGroup value" % request.RequestName )
return S_OK()
@staticmethod
def _hasRequestName( request ):
""" required attribute: RequestName """
if not request.RequestName:
return S_ERROR( "RequestName not set" )
return S_OK()
@staticmethod
def _hasOperations( request ):
""" at least one operation is in """
if not len( request ):
return S_ERROR( "Operations not present in request '%s'" % request.RequestName )
return S_OK()
@staticmethod
def _hasType( request ):
""" operation type is set """
for operation in request:
if not operation.Type:
return S_ERROR( "Operation #%d in request '%s' hasn't got Type set" % ( request.indexOf( operation ),
request.RequestName ) )
return S_OK()
@classmethod
def _hasFiles( cls, request ):
""" check for files presence """
for operation in request:
if operation.Type not in cls.reqAttrs:
return S_OK()
if cls.reqAttrs[operation.Type]["Files"] and not len( operation ):
return S_ERROR( "Operation #%d of type '%s' hasn't got files to process." % ( request.indexOf( operation ),
operation.Type ) )
if not cls.reqAttrs[operation.Type]["Files"] and len( operation ):
return S_ERROR( "Operation #%d of type '%s' has got files to process." % ( request.indexOf( operation ),
operation.Type ) )
return S_OK()
@classmethod
def _hasRequiredAttrs( cls, request ):
""" check required attributes for operations and files """
for operation in request:
if operation.Type in cls.reqAttrs:
opAttrs = cls.reqAttrs[operation.Type]["Operation"]
for opAttr in opAttrs:
if not getattr( operation, opAttr ):
return S_ERROR( "Operation #%d of type '%s' is missing %s attribute." % \
( request.indexOf( operation ), operation.Type, opAttr ) )
fileAttrs = cls.reqAttrs[operation.Type]["Files"]
for opFile in operation:
for fileAttr in fileAttrs:
if not getattr( opFile, fileAttr ):
return S_ERROR( "Operation #%d of type '%s' is missing %s attribute for file." % \
( request.indexOf( operation ), operation.Type, fileAttr ) )
return S_OK()
@classmethod
def _hasChecksumAndChecksumType( cls, request ):
""" Checksum and ChecksumType should be specified """
for operation in request:
for opFile in operation:
if any( [ opFile.Checksum, opFile.ChecksumType ] ) and not all( [opFile.Checksum, opFile.ChecksumType ] ):
return S_ERROR( "File in operation #%d is missing Checksum (%s) or ChecksumType (%s)" % \
( request.indexOf( operation ), opFile.Checksum, opFile.ChecksumType ) )
return S_OK()
# # global instance
gRequestValidator = RequestValidator()
| avedaee/DIRAC | RequestManagementSystem/private/RequestValidator.py | Python | gpl-3.0 | 8,649 | [
"DIRAC"
] | e31f72b553d3f7ac0f3d52273f96aceaa589c5dd12a659c25f353c616ff246ea |
# -*- coding: utf-8 -*-
# Messier Catalog
# number.
# FIXME: Need to convert this into a dictionary such that we can more efficiently obtain
# the astronomical data (including common name) when looking up the catalog
# number.
# Key to Type representation:
# SNR = supernova remnant
# GCl = globular cluster
# OCl = open cluster
# C/N = cluster and nebula
# PlN = planetary nebula
# Gal = Galaxy
# Dbl = double star
# DfN = Diffuse Nebula
# ??? = unknown or unclassified object
#
# Note: Objects smaller than 10' or dimmer than magnitude 9.0 have been removed from this
# catalog, as they are not going to be visible even in good binoculars.
#
from gettext import gettext as _
# Messier No. Constellation Right Ascension Declination Magnitude Maj Axis Min Axis PosAngle Type Common Name
# nM strCon dRA dDec dMag dMajA dMinA dPosAng strTyp strName
# NOTE(review): dRA appears to be right ascension in hours and dDec declination
# in degrees; axis sizes appear to be in arcminutes — confirm against the
# source catalog before relying on units.
data = [ \
('M002', 'Aqr', 21.558333, -0.816667, 7.50, 12.900000, 12.900000, 0.0, 'GCl', ''), \
('M003', 'CVn', 13.703333, 28.383333, 7.00, 16.200000, 16.200000, 0.0, 'GCl', ''), \
('M004', 'Sco', 16.393333, -26.533333, 7.50, 26.300000, 26.300000, 0.0, 'GCl', ''), \
('M005', 'Ser', 15.310000, 2.083333, 7.00, 17.400000, 17.400000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Butterfly_Cluster
('M006', 'Sco', 17.668333, -32.216667, 4.50, 15.000000, 15.000000, 0.0, 'OCl', _('Butterfly Cluster')), \
# TRANS: http://en.wikipedia.org/wiki/Ptolemy's_Cluster
('M007', 'Sco', 17.898333, -34.816667, 3.50, 80.000000, 80.000000, 0.0, 'OCl', _("Ptolemy's Cluster")), \
# TRANS: http://en.wikipedia.org/wiki/Lagoon_Nebula
('M008', 'Sgr', 18.063333, -24.383333, 5.00, 60.000000, 35.000000, 0.0, 'C/N', _('Lagoon Nebula')), \
('M010', 'Oph', 16.951667, -4.100000, 7.50, 15.100000, 15.100000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Wild_Duck_Cluster
('M011', 'Sct', 18.851667, -6.266667, 7.00, 14.000000, 14.000000, 0.0, 'OCl', _('Wild Duck Cluster')), \
('M012', 'Oph', 16.786667, -1.950000, 8.00, 14.500000, 14.500000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Hercules_Cluster
('M013', 'Her', 16.695000, 36.466667, 7.00, 16.600000, 16.600000, 0.0, 'GCl', _('Hercules Cluster')), \
('M015', 'Peg', 21.500000, 12.166667, 7.50, 12.300000, 12.300000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Omega_Nebula
('M017', 'Sgr', 18.346667, -16.183333, 7.00, 11.000000, 11.000000, 0.0, 'C/N', _('Omega Nebula')), \
('M019', 'Oph', 17.043333, -26.266667, 8.50, 13.500000, 13.500000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Trifid_Nebula
('M020', 'Sgr', 18.043333, -23.033333, 5.00, 28.000000, 28.000000, 0.0, 'C/N', _('Trifid Nebula')), \
('M021', 'Sgr', 18.076667, -22.500000, 7.00, 13.000000, 13.000000, 0.0, 'OCl', ''), \
('M022', 'Sgr', 18.606667, -23.900000, 6.50, 24.000000, 24.000000, 0.0, 'GCl', ''), \
('M023', 'Sgr', 17.946667, -19.016667, 6.00, 27.000000, 27.000000, 0.0, 'OCl', ''), \
('M025', 'Sgr', 18.526667, -19.250000, 4.90, 40.000000, 40.000000, 0.0, 'OCl', ''), \
('M028', 'Sgr', 18.408333, -24.866667, 8.50, 11.200000, 11.200000, 0.0, 'GCl', ''), \
('M030', 'Cap', 21.673333, -23.183333, 8.50, 11.000000, 11.000000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Andromeda_Galaxy
('M031', 'And', 0.711667, 41.266667, 4.50, 178.000000, 53.000000, 34.0, 'Gal', _('Andromeda Galaxy')), \
# TRANS: http://en.wikipedia.org/wiki/Triangulum_Galaxy
('M033', 'Tri', 1.565000, 30.650000, 7.00, 73.000000, 45.000000, 22.0, 'Gal', _('Triangulum Galaxy')), \
('M034', 'Per', 2.700000, 42.783333, 6.00, 35.000000, 35.000000, 0.0, 'OCl', ''), \
('M035', 'Gem', 6.148333, 24.333333, 5.50, 28.000000, 28.000000, 0.0, 'OCl', ''), \
('M036', 'Aur', 5.601667, 34.133333, 6.50, 12.000000, 12.000000, 0.0, 'OCl', ''), \
('M037', 'Aur', 5.873333, 32.550000, 6.00, 24.000000, 24.000000, 0.0, 'OCl', ''), \
('M038', 'Aur', 5.478333, 35.833333, 7.00, 21.000000, 21.000000, 0.0, 'OCl', ''), \
('M039', 'Cyg', 21.536667, 48.433333, 5.50, 32.000000, 32.000000, 0.0, 'OCl', ''), \
('M041', 'CMa', 6.783333, -20.733333, 5.00, 38.000000, 38.000000, 0.0, 'OCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Orion_Nebula
('M042', 'Ori', 5.590000, -5.450000, 5.00, 85.000000, 60.000000, 0.0, 'DfN', _('Orion Nebula')), \
# TRANS: http://en.wikipedia.org/wiki/De_Mairan's_Nebula
('M043', 'Ori', 5.593333, -5.266667, 7.00, 20.000000, 15.000000, 0.0, 'DfN', _("de Mairan's Nebula")), \
# TRANS: http://en.wikipedia.org/wiki/Beehive_Cluster
('M044', 'Cnc', 8.668333, 19.983333, 4.00, 95.000000, 95.000000, 0.0, 'OCl', _('Beehive Cluster')), \
# TRANS: http://en.wikipedia.org/wiki/Pleiades
('M045', 'Tau', 3.783333, 24.116667, 1.40, 110.000000, 110.000000, 0.0, 'OCl', _('Pleiades')), \
('M046', 'Pup', 7.696667, -14.816667, 6.50, 27.000000, 27.000000, 0.0, 'OCl', ''), \
('M047', 'Pup', 7.610000, -14.500000, 4.50, 30.000000, 30.000000, 0.0, 'OCl', ''), \
('M048', 'Hya', 8.230000, -5.800000, 5.50, 54.000000, 54.000000, 0.0, 'OCl', ''), \
('M050', 'Mon', 7.053333, -8.333333, 7.00, 16.000000, 16.000000, 0.0, 'OCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Whirlpool_Galaxy
('M051', 'CVn', 13.498333, 47.200000, 8.00, 11.000000, 7.000000, 0.0, 'Gal', _('Whirlpool Galaxy')), \
('M052', 'Cas', 23.403333, 61.583333, 8.00, 13.000000, 13.000000, 0.0, 'OCl', ''), \
('M053', 'Com', 13.215000, 18.166667, 8.50, 12.600000, 12.600000, 0.0, 'GCl', ''), \
('M055', 'Sgr', 19.666667, -30.966667, 7.00, 19.000000, 19.000000, 0.0, 'GCl', ''), \
('M062', 'Oph', 17.020000, -30.116667, 8.00, 14.100000, 14.100000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Sunflower_Galaxy
('M063', 'CVn', 13.263333, 42.033333, 8.50, 10.000000, 6.000000, 104.0, 'Gal', _('Sunflower Galaxy')), \
('M067', 'Cnc', 8.840000, 11.816667, 7.50, 30.000000, 30.000000, 0.0, 'OCl', ''), \
('M068', 'Hya', 12.658333, -26.750000, 9.00, 12.000000, 12.000000, 0.0, 'GCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Bodes_Galaxy
('M081', 'UMa', 9.926667, 69.066667, 8.50, 21.000000, 10.000000, 156.0, 'Gal', _('Bodes Galaxy')), \
# TRANS: http://en.wikipedia.org/wiki/Southern_Pinwheel_Galaxy
('M083', 'Hya', 13.616667, -29.866667, 8.50, 11.000000, 10.000000, 0.0, 'Gal', _('Southern Pinwheel Galaxy')), \
('M092', 'Her', 17.285000, 43.133333, 7.50, 11.200000, 11.200000, 0.0, 'GCl', ''), \
('M093', 'Pup', 7.743333, -23.866667, 6.50, 22.000000, 22.000000, 0.0, 'OCl', ''), \
# TRANS: http://en.wikipedia.org/wiki/Pinwheel_Galaxy
('M101', 'UMa', 14.055000, 54.350000, 8.50, 22.000000, 22.000000, 0.0, 'Gal', _('Pinwheel Galaxy')), \
]
| walterbender/starchart | dso1.py | Python | gpl-2.0 | 8,531 | [
"Galaxy"
] | 88ed2ea27dddbc33e0fb70a240e2952808173d98c4a97d493fd8da191164c2b3 |
"""
Process an input dataset into a format suitable for machine learning.
"""
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
import time
import sys
import logging
import warnings
from typing import List, Optional, Dict, Tuple, Any, Sequence, Union
from deepchem.utils.typing import OneOrMany
from deepchem.utils.save import load_csv_files, load_json_files
from deepchem.utils.save import load_sdf_files
from deepchem.utils.genomics_utils import encode_bio_sequence
from deepchem.feat import UserDefinedFeaturizer, Featurizer
from deepchem.data import Dataset, DiskDataset, NumpyDataset, ImageDataset
import zipfile
logger = logging.getLogger(__name__)
def _convert_df_to_numpy(df, tasks):
"""Transforms a dataframe containing deepchem input into numpy arrays
This is a private helper method intended to help parse labels and
weights arrays from a pandas dataframe. Here `df` is a dataframe
which has columns for each task in `tasks`. These labels are
extracted into a labels array `y`. Weights `w` are initialized to
all ones, but weights for any missing labels are set to 0.
Parameters
----------
df: pd.DataFrame
Pandas dataframe with columns for all tasks
tasks: list
List of tasks
"""
n_samples = df.shape[0]
n_tasks = len(tasks)
y = np.hstack(
[np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
w = np.ones((n_samples, n_tasks))
if y.dtype.kind in ['O', 'U']:
missing = (y == '')
y[missing] = 0
w[missing] = 0
return y.astype(float), w.astype(float)
def _featurize_smiles_df(df, featurizer, field, log_every_n=1000):
  """Featurize individual compounds in dataframe.
  Private helper that given a featurizer that operates on individual
  chemical compounds or macromolecules, compute & add features for
  that compound to the features dataframe
  Parameters
  ----------
  df: pd.DataFrame
    DataFrame that holds SMILES strings
  featurizer: Featurizer
    A featurizer object
  field: str
    The name of a column in `df` that holds SMILES strings
  log_every_n: int, optional (default 1000)
    Emit a logging statement every `log_every_n` rows.
  Note
  ----
  This function requires RDKit to be installed
  """
  from rdkit import Chem
  from rdkit.Chem import rdmolfiles
  from rdkit.Chem import rdmolops
  smiles_list = df[field].tolist()
  features = []
  for ind, smiles in enumerate(smiles_list):
    mol = Chem.MolFromSmiles(smiles)
    # TODO (ytz) this is a bandage solution to reorder the atoms so
    # that they're always in the same canonical order. Presumably this
    # should be correctly implemented in the future for graph mols.
    if mol:
      canonical_order = rdmolfiles.CanonicalRankAtoms(mol)
      mol = rdmolops.RenumberAtoms(mol, canonical_order)
    if ind % log_every_n == 0:
      logger.info("Featurizing sample %d" % ind)
    features.append(featurizer.featurize([mol]))
  # A zero-size feature array marks a failed featurization.
  valid_inds = np.array([feat.size > 0 for feat in features], dtype=bool)
  features = [feat for feat, ok in zip(features, valid_inds) if ok]
  return np.squeeze(np.array(features), axis=1), valid_inds
def _get_user_specified_features(df, featurizer):
  """Extract and merge user specified features.
  Private helper methods that merges features included in dataset
  provided by user into final features dataframe
  Three types of featurization here:
  1) Molecule featurization
  -) Smiles string featurization
  -) Rdkit MOL featurization
  2) Complex featurization
  -) PDB files for interacting molecules.
  3) User specified featurizations.
  Parameters
  ----------
  df: pd.DataFrame
    DataFrame that holds SMILES strings
  featurizer: Featurizer
    A featurizer object
  """
  start = time.time()
  fields = featurizer.feature_fields
  # Feature columns may arrive as strings; coerce them to numeric first.
  df[fields] = df[fields].apply(pd.to_numeric)
  X_shard = df[fields].to_numpy()
  elapsed = time.time() - start
  logger.info("TIMING: user specified processing took %0.3f s" % elapsed)
  return X_shard
def _featurize_mol_df(df, featurizer, field, log_every_n=1000):
  """Featurize individual compounds in dataframe.
  Used when processing .sdf files, so the 3-D structure should be
  preserved. We use the rdkit "mol" object created from .sdf
  instead of smiles string. Some featurizers such as
  CoulombMatrix also require a 3-D structure. Featurizing from
  .sdf is currently the only way to perform CM feautization.
  Parameters
  ----------
  df: Pandas Dataframe
    Should be created by dc.utils.save.load_sdf_files.
  featurizer: dc.feat.MolecularFeaturizer
    Featurizer for molecules.
  log_every_n: int, optional
    Controls how often logging statements are emitted.
  """
  mols = df[field].tolist()
  features = []
  for ind, mol in enumerate(mols):
    if ind % log_every_n == 0:
      logger.info("Featurizing sample %d" % ind)
    features.append(featurizer.featurize([mol]))
  # Empty feature arrays flag molecules that failed to featurize.
  valid_inds = np.array([feat.size > 0 for feat in features], dtype=bool)
  features = [feat for feat, ok in zip(features, valid_inds) if ok]
  return np.squeeze(np.array(features)), valid_inds
class DataLoader(object):
  """Handles loading/featurizing of data from disk.
  The main use of `DataLoader` and its child classes is to make it
  easier to load large datasets into `Dataset` objects.`
  `DataLoader` is an abstract superclass that provides a
  general framework for loading data into DeepChem. This class should
  never be instantiated directly. To load your own type of data, make
  a subclass of `DataLoader` and provide your own implementation for
  the `create_dataset()` method.
  To construct a `Dataset` from input data, first instantiate a
  concrete data loader (that is, an object which is an instance of a
  subclass of `DataLoader`) with a given `Featurizer` object. Then
  call the data loader's `create_dataset()` method on a list of input
  files that hold the source data to process. Note that each subclass
  of `DataLoader` is specialized to handle one type of input data so
  you will have to pick the loader class suitable for your input data
  type.
  Note that it isn't necessary to use a data loader to process input
  data. You can directly use `Featurizer` objects to featurize
  provided input into numpy arrays, but note that this calculation
  will be performed in memory, so you will have to write generators
  that walk the source files and write featurized data to disk
  yourself. `DataLoader` and its subclasses make this process easier
  for you by performing this work under the hood.
  """
  def __init__(self, tasks, id_field=None, featurizer=None, log_every_n=1000):
    """Construct a DataLoader object.
    This constructor is provided as a template mainly. You
    shouldn't ever call this constructor directly as a user.
    Parameters
    ----------
    tasks: list[str]
      List of task names
    id_field: str, optional
      Name of field that holds sample identifier. Note that the
      meaning of "field" depends on the input data type and can have a
      different meaning in different subclasses. For example, a CSV
      file could have a field as a column, and an SDF file could have
      a field as molecular property.
    featurizer: dc.feat.Featurizer, optional
      Featurizer to use to process data
    log_every_n: int, optional
      Writes a logging statement this often.
    """
    # Guard against direct instantiation of this abstract base class.
    if self.__class__ is DataLoader:
      raise ValueError(
          "DataLoader should never be instantiated directly. Use a subclass instead."
      )
    if not isinstance(tasks, list):
      raise ValueError("tasks must be a list.")
    self.tasks = tasks
    self.id_field = id_field
    self.user_specified_features = None
    # A UserDefinedFeaturizer carries the names of pre-computed feature columns.
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n
  def featurize(self,
                inputs: Sequence[Any],
                data_dir: Optional[str] = None,
                shard_size: Optional[int] = 8192) -> Dataset:
    """Featurize provided files and write to specified location.
    DEPRECATED: This method is now a wrapper for `create_dataset()`
    and calls that method under the hood.
    For large datasets, automatically shards into smaller chunks
    for convenience. This implementation assumes that the helper
    methods `_get_shards` and `_featurize_shard` are implemented and
    that each shard returned by `_get_shards` is a pandas dataframe.
    You may choose to reuse or override this method in your subclass
    implementations.
    Parameters
    ----------
    inputs: list
      List of inputs to process. Entries can be filenames or arbitrary objects.
    data_dir: str, optional
      Directory to store featurized dataset.
    shard_size: int, optional
      Number of examples stored in each shard.
    Returns
    -------
    A `Dataset` object containing a featurized representation of data
    from `input`.
    """
    warnings.warn(
        "featurize() is deprecated and has been renamed to create_dataset(). featurize() will be removed in DeepChem 3.0",
        FutureWarning)
    return self.create_dataset(inputs, data_dir, shard_size)
  def create_dataset(self,
                     inputs: Sequence[Any],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192) -> Dataset:
    """Creates and returns a `Dataset` object by featurizing provided files.
    Reads in `inputs` and uses `self.featurizer` to featurize the
    data in these inputs. For large files, automatically shards
    into smaller chunks of `shard_size` datapoints for convenience.
    Returns a `Dataset` object that contains the featurized dataset.
    This implementation assumes that the helper methods `_get_shards`
    and `_featurize_shard` are implemented and that each shard
    returned by `_get_shards` is a pandas dataframe. You may choose
    to reuse or override this method in your subclass implementations.
    Parameters
    ----------
    inputs: list
      List of inputs to process. Entries can be filenames or arbitrary objects.
    data_dir: str, optional
      Directory to store featurized dataset.
    shard_size: int, optional
      Number of examples stored in each shard.
    Returns
    -------
    A `Dataset` object containing a featurized representation of data
    from `inputs`.
    """
    logger.info("Loading raw samples now.")
    logger.info("shard_size: %s" % str(shard_size))
    # Allow a single input to be passed without wrapping it in a list.
    if not isinstance(inputs, list):
      inputs = [inputs]
    def shard_generator():
      # Lazily featurize one shard at a time so only a single shard's worth
      # of features is ever held in memory.
      for shard_num, shard in enumerate(self._get_shards(inputs, shard_size)):
        time1 = time.time()
        X, valid_inds = self._featurize_shard(shard)
        ids = shard[self.id_field].values
        # Drop identifiers of rows whose featurization failed.
        ids = ids[valid_inds]
        if len(self.tasks) > 0:
          # Featurize task results iff they exist.
          y, w = _convert_df_to_numpy(shard, self.tasks)
          # Filter out examples where featurization failed.
          y, w = (y[valid_inds], w[valid_inds])
          assert len(X) == len(ids) == len(y) == len(w)
        else:
          # For prospective data where results are unknown, it
          # makes no sense to have y values or weights.
          y, w = (None, None)
          assert len(X) == len(ids)
        time2 = time.time()
        logger.info("TIMING: featurizing shard %d took %0.3f s" %
                    (shard_num, time2 - time1))
        yield X, y, w, ids
    return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)
  def _get_shards(self, inputs, shard_size):
    """Stub for children classes.
    Should implement a generator that walks over the source data in
    `inputs` and returns a "shard" at a time. Here a shard is a
    chunk of input data that can reasonably be handled in memory. For
    example, this may be a set of rows from a CSV file or a set of
    molecules from a SDF file. To re-use the
    `DataLoader.create_dataset()` method, each shard must be a pandas
    dataframe.
    If you chose to override `create_dataset()` directly you don't
    need to override this helper method.
    Parameters
    ----------
    inputs: list
      List of inputs to process. Entries can be filenames or arbitrary objects.
    shard_size: int, optional
      Number of examples stored in each shard.
    """
    raise NotImplementedError
  def _featurize_shard(self, shard):
    """Featurizes a shard of input data.
    Recall a shard is a chunk of input data that can reasonably be
    handled in memory. For example, this may be a set of rows from a
    CSV file or a set of molecules from a SDF file. Featurize this
    shard in memory and return the results.
    """
    raise NotImplementedError
class CSVLoader(DataLoader):
  """
  Creates `Dataset` objects from input CSV files.
  This class provides conveniences to load data from CSV files.
  It's possible to directly featurize data from CSV files using
  pandas, but this class may prove useful if you're processing
  large CSV files that you don't want to manipulate directly in
  memory.
  """
  def __init__(self,
               tasks,
               smiles_field=None,
               id_field=None,
               featurizer=None,
               log_every_n=1000):
    """Initializes CSVLoader.
    Parameters
    ----------
    tasks: list[str]
      List of task names
    smiles_field: str, optional
      Name of field that holds smiles string
    id_field: str, optional
      Name of field that holds sample identifier
    featurizer: dc.feat.Featurizer, optional
      Featurizer to use to process data
    log_every_n: int, optional
      Writes a logging statement this often.
    """
    if not isinstance(tasks, list):
      raise ValueError("tasks must be a list.")
    self.tasks = tasks
    self.smiles_field = smiles_field
    # Fall back on the SMILES column as the identifier when none is given.
    self.id_field = smiles_field if id_field is None else id_field
    self.user_specified_features = None
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n
  def _get_shards(self, input_files, shard_size):
    """Yield dataframe shards of `shard_size` rows read from `input_files`.
    Parameters
    ----------
    input_files: list[str]
      List of filenames to process
    shard_size: int
      The size of a shard of data to process at a time.
    """
    return load_csv_files(input_files, shard_size)
  def _featurize_shard(self, shard):
    """Featurize a shard of an input dataframe via its SMILES column."""
    return _featurize_smiles_df(
        shard,
        self.featurizer,
        field=self.smiles_field,
        log_every_n=self.log_every_n)
class UserCSVLoader(CSVLoader):
  """
  Handles loading of CSV files with user-defined featurizers.
  """
  def _get_shards(self, input_files, shard_size):
    """Yield dataframe shards read from `input_files`."""
    return load_csv_files(input_files, shard_size)
  def _featurize_shard(self, shard):
    """Extract pre-computed user features from a shard of an input dataframe."""
    assert isinstance(self.featurizer, UserDefinedFeaturizer)
    feats = _get_user_specified_features(shard, self.featurizer)
    # User-provided feature columns are always present, so every row is valid.
    return (feats, np.ones(len(feats), dtype=bool))
class JsonLoader(DataLoader):
  """
  Creates `Dataset` objects from input json files.
  This class provides conveniences to load data from json files.
  It's possible to directly featurize data from json files using
  pandas, but this class may prove useful if you're processing
  large json files that you don't want to manipulate directly in
  memory.
  It is meant to load JSON files formatted as "records" in line
  delimited format, which allows for sharding.
  ``list like [{column -> value}, ... , {column -> value}]``.
  Examples
  --------
  >> import pandas as pd
  >> df = pd.DataFrame(some_data)
  >> df.columns.tolist()
  .. ['sample_data', 'sample_name', 'weight', 'task']
  >> df.to_json('file.json', orient='records', lines=True)
  >> loader = JsonLoader(tasks=['task'], feature_field='sample_data',
  label_field='task', weight_field='weight', id_field='sample_name')
  >> dataset = loader.create_dataset('file.json')
  """
  def __init__(self,
               tasks: OneOrMany[str],
               feature_field: str,
               label_field: Optional[str] = None,
               weight_field: Optional[str] = None,
               id_field: Optional[str] = None,
               featurizer: Optional[Featurizer] = None,
               log_every_n: int = 1000):
    """Initializes JsonLoader.
    Parameters
    ----------
    tasks : List[str]
      List of task names
    feature_field : str
      JSON field with data to be featurized.
    label_field : str, default None
      Field with target variables.
    weight_field : str, default None
      Field with weights.
    id_field : str, default None
      Field for identifying samples.
    featurizer : dc.feat.Featurizer, optional
      Featurizer to use to process data
    log_every_n : int, optional
      Writes a logging statement this often.
    """
    if not isinstance(tasks, list):
      raise ValueError("Tasks must be a list.")
    self.tasks = tasks
    self.feature_field = feature_field
    self.label_field = label_field
    self.weight_field = weight_field
    self.id_field = id_field
    self.user_specified_features = None
    # A UserDefinedFeaturizer carries the names of pre-computed feature columns.
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n
  def create_dataset(self,
                     input_files: OneOrMany[str],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192) -> DiskDataset:
    """Creates a `Dataset` from input JSON files.
    Parameters
    ----------
    input_files: OneOrMany[str]
      List of JSON filenames.
    data_dir: Optional[str], default None
      Name of directory where featurized data is stored.
    shard_size: Optional[int], default 8192
      Shard size when loading data.
    Returns
    -------
    dataset: dc.data.Dataset
      A `Dataset` object containing a featurized representation of data
      from `input_files`.
    """
    # Normalize a single filename or other iterable into a list of filenames.
    if not isinstance(input_files, list):
      try:
        if isinstance(input_files, str):
          input_files = [input_files]
        else:
          input_files = list(input_files)
      except TypeError:
        raise ValueError(
            "input_files is of an unrecognized form. Must be one filename or a list of filenames."
        )
    def shard_generator():
      """Yield X, y, w, and ids for shards."""
      for shard_num, shard in enumerate(
          self._get_shards(input_files, shard_size)):
        time1 = time.time()
        X, valid_inds = self._featurize_shard(shard)
        if self.id_field:
          ids = shard[self.id_field].values
        else:
          # No identifier column configured; fall back on placeholder ids.
          ids = np.ones(len(X))
        ids = ids[valid_inds]
        if len(self.tasks) > 0:
          # Featurize task results if they exist.
          y, w = _convert_df_to_numpy(shard, self.tasks)
          # NOTE(review): label_field/weight_field override the task-derived
          # y/w computed just above, so that computation is wasted work when
          # both fields are set — candidate for a short-circuit.
          if self.label_field:
            y = shard[self.label_field]
          if self.weight_field:
            w = shard[self.weight_field]
          # Filter out examples where featurization failed.
          y, w = (y[valid_inds], w[valid_inds])
          assert len(X) == len(ids) == len(y) == len(w)
        else:
          # For prospective data where results are unknown, it
          # makes no sense to have y values or weights.
          y, w = (None, None)
          assert len(X) == len(ids)
        time2 = time.time()
        logger.info("TIMING: featurizing shard %d took %0.3f s" %
                    (shard_num, time2 - time1))
        yield X, y, w, ids
    return DiskDataset.create_dataset(shard_generator(), data_dir)
  def _get_shards(self, input_files, shard_size):
    """Defines a generator which returns data for each shard"""
    return load_json_files(input_files, shard_size)
  def _featurize_shard(self, shard):
    """Featurizes a shard of an input dataframe."""
    return self._featurize_df(
        shard, self.featurizer, log_every_n=self.log_every_n)
  def _featurize_df(self,
                    shard,
                    featurizer: Featurizer,
                    log_every_n: int = 1000) -> Tuple[np.ndarray, np.ndarray]:
    """Featurize individual samples in dataframe.
    Helper that given a featurizer that operates on individual
    samples, computes & adds features for that sample to the
    features dataframe.
    Parameters
    ----------
    shard: pd.DataFrame
      DataFrame that holds data to be featurized.
    featurizer: Featurizer
      An instance of `dc.feat.Featurizer`.
    log_every_n: int, optional (default 1000)
      Emit a logging statement every `log_every_n` rows.
    Returns
    -------
    features : np.ndarray
      Array of feature vectors.
    valid_inds : np.ndarray
      Boolean values indicating successful featurization.
    """
    features = []
    valid_inds = []
    field = self.feature_field
    data = shard[field].tolist()
    for idx, datapoint in enumerate(data):
      feat = featurizer.featurize([datapoint])
      # A zero-size feature array marks a failed featurization.
      is_valid = True if feat.size > 0 else False
      valid_inds.append(is_valid)
      if is_valid:
        features.append(feat)
    # NOTE(review): despite the annotated return type, valid_inds is returned
    # as a plain list of bools (callers use it for boolean indexing, which
    # accepts both list and ndarray).
    return np.squeeze(np.array(features), axis=1), valid_inds
class SDFLoader(DataLoader):
  """
  Creates `Dataset` from SDF input files.
  This class provides conveniences to load data from SDF files.
  """
  def __init__(self, tasks, sanitize=False, featurizer=None, log_every_n=1000):
    """Initialize SDF Loader
    Parameters
    ----------
    tasks: list[str]
      List of tasknames. These will be loaded from the SDF file.
    sanitize: bool, optional
      Whether to sanitize molecules.
    featurizer: dc.feat.Featurizer, optional
      Featurizer to use to process data
    log_every_n: int, optional
      Writes a logging statement this often.
    """
    self.tasks = tasks
    self.sanitize = sanitize
    self.featurizer = featurizer
    self.log_every_n = log_every_n
    # dc.utils.save.load_sdf_files stores RDKit mol objects under "mol"
    # and the SMILES identifier under "smiles".
    self.mol_field = "mol"
    self.id_field = "smiles"
  def _get_shards(self, input_files, shard_size):
    """Yield dataframe shards parsed from the given SDF files."""
    return load_sdf_files(input_files, self.sanitize, tasks=self.tasks)
  def _featurize_shard(self, shard):
    """Featurize a shard of an input dataframe via its RDKit mol column."""
    feat_name = self.featurizer.__class__.__name__
    logger.info("Currently featurizing feature_type: %s" % feat_name)
    return _featurize_mol_df(
        shard,
        self.featurizer,
        field=self.mol_field,
        log_every_n=self.log_every_n)
class FASTALoader(DataLoader):
  """Handles loading of FASTA files.
  FASTA files are commonly used to hold sequence data. This
  class provides convenience files to lead FASTA data and
  one-hot encode the genomic sequences for use in downstream
  learning tasks.
  """
  def __init__(self):
    """Initialize loader."""
    pass
  def create_dataset(self,
                     input_files: OneOrMany[str],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = None) -> DiskDataset:
    """Creates a `Dataset` from input FASTA files.
    At present, FASTA support is limited and only allows for one-hot
    featurization, and doesn't allow for sharding.
    Parameters
    ----------
    input_files: list
      List of fasta files.
    data_dir: str, optional
      Name of directory where featurized data is stored.
    shard_size: int, optional
      For now, this argument is ignored and each FASTA file gets its
      own shard.
    Returns
    -------
    A `Dataset` object containing a featurized representation of data
    from `input_files`.
    """
    if isinstance(input_files, str):
      input_files = [input_files]
    def shard_generator():
      # One shard per FASTA file: one-hot encode its sequences and attach
      # placeholder ids; labels and weights are unknown at load time.
      for fasta_file in input_files:
        X = encode_bio_sequence(fasta_file)
        ids = np.ones(len(X))
        yield X, None, None, ids  # (X, y, w, ids)
    return DiskDataset.create_dataset(shard_generator(), data_dir)
class ImageLoader(DataLoader):
  """Handles loading of image files.
  This class allows for loading of images in various formats.
  For user convenience, also accepts zip-files and directories
  of images and uses some limited intelligence to attempt to
  traverse subdirectories which contain images.
  """
  def __init__(self, tasks: OneOrMany[str] = None):
    """Initialize image loader.
    At present, custom image featurizers aren't supported by this
    loader class.
    Parameters
    ----------
    tasks: list[str]
      List of task names for image labels.
    """
    # Normalize None to a fresh list (avoids a shared mutable default).
    if tasks is None:
      tasks = []
    self.tasks = tasks
  def create_dataset(self,
                     inputs: Union[OneOrMany[str], Tuple[Any]],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192,
                     in_memory: bool = False) -> Dataset:
    """Creates and returns a `Dataset` object by featurizing provided image files and labels/weights.
    Parameters
    ----------
    inputs: `Union[OneOrMany[str], Tuple[Any]]`
      The inputs provided should be one of the following
      - filename
      - list of filenames
      - Tuple (list of filenames, labels)
      - Tuple (list of filenames, labels, weights)
      Each file in a given list of filenames should either be of a supported
      image format (.png, .tif only for now) or of a compressed folder of
      image files (only .zip for now). If `labels` or `weights` are provided,
      they must correspond to the sorted order of all filenames provided, with
      one label/weight per file.
    data_dir: str, optional
      Directory to store featurized dataset.
    shard_size: int, optional
      Shard size used when writing a `DiskDataset` (ignored otherwise).
    in_memory: bool
      If true, return in-memory NumpyDataset. Else return ImageDataset.
    Returns
    -------
    A `Dataset` object containing a featurized representation of data
    from `input_files`, `labels`, and `weights`.
    """
    labels, weights = None, None
    if isinstance(inputs, tuple):
      if len(inputs) == 1:
        input_files = inputs[0]
      elif len(inputs) == 2:
        input_files, labels = inputs
      elif len(inputs) == 3:
        input_files, labels, weights = inputs
      else:
        raise ValueError("Input must be a tuple of length 1, 2, or 3")
    else:
      input_files = inputs
    # BUGFIX: the original only wrapped bare strings in the non-tuple branch
    # and, inside the 1-tuple branch, mistakenly tested `inputs` (the tuple)
    # instead of the unpacked filename — so a single filename inside a tuple
    # was iterated character-by-character below. Normalize once, for all
    # branches, here.
    if isinstance(input_files, str):
      input_files = [input_files]
    image_files = []
    # Sometimes zip files contain directories within. Traverse directories
    while len(input_files) > 0:
      remainder = []
      for input_file in input_files:
        filename, extension = os.path.splitext(input_file)
        extension = extension.lower()
        # TODO(rbharath): Add support for more extensions
        if os.path.isdir(input_file):
          # Defer directory contents to the next pass of the while-loop.
          dirfiles = [
              os.path.join(input_file, subfile)
              for subfile in os.listdir(input_file)
          ]
          remainder += dirfiles
        elif extension == ".zip":
          # Extract the archive into a temp dir and collect supported images.
          zip_dir = tempfile.mkdtemp()
          zip_ref = zipfile.ZipFile(input_file, 'r')
          zip_ref.extractall(path=zip_dir)
          zip_ref.close()
          zip_files = [
              os.path.join(zip_dir, name) for name in zip_ref.namelist()
          ]
          for zip_file in zip_files:
            _, extension = os.path.splitext(zip_file)
            extension = extension.lower()
            if extension in [".png", ".tif"]:
              image_files.append(zip_file)
        elif extension in [".png", ".tif"]:
          image_files.append(input_file)
        else:
          raise ValueError("Unsupported file format")
      input_files = remainder
    # Sort image files so labels/weights line up with sorted filenames.
    image_files = sorted(image_files)
    if in_memory:
      if data_dir is None:
        return NumpyDataset(
            self.load_img(image_files), y=labels, w=weights, ids=image_files)
      else:
        dataset = DiskDataset.from_numpy(
            self.load_img(image_files),
            y=labels,
            w=weights,
            ids=image_files,
            tasks=self.tasks,
            data_dir=data_dir)
        if shard_size is not None:
          dataset.reshard(shard_size)
        return dataset
    else:
      # Lazy-loading dataset: images stay on disk until accessed.
      return ImageDataset(image_files, y=labels, w=weights, ids=image_files)
  @staticmethod
  def load_img(image_files) -> np.ndarray:
    """Loads a set of images from disk.
    Parameters
    ----------
    image_files: list[str]
      List of image filenames to load
    Returns
    -------
    np.ndarray that contains loaded images. Of shape `(N,...)`.
    Note
    ----
    This method requires PIL to be installed.
    """
    from PIL import Image
    images = []
    for image_file in image_files:
      _, extension = os.path.splitext(image_file)
      extension = extension.lower()
      if extension == ".png":
        image = np.array(Image.open(image_file))
        images.append(image)
      elif extension == ".tif":
        im = Image.open(image_file)
        imarray = np.array(im)
        images.append(imarray)
      else:
        raise ValueError("Unsupported image filetype for %s" % image_file)
    return np.array(images)
class InMemoryLoader(DataLoader):
  """Facilitate Featurization of In-memory objects.

  When featurizing a dataset, it's often the case that the initial set of
  data (pre-featurization) fits handily within memory. (For example, perhaps
  it fits within a column of a pandas DataFrame.) In this case, it would be
  convenient to directly be able to featurize this column of data. However,
  the process of featurization often generates large arrays which quickly eat
  up available memory. This class provides convenient capabilities to process
  such in-memory data by checkpointing generated features periodically to
  disk.

  Example
  -------
  Here's an example with only datapoints and no labels or weights.

  >>> import deepchem as dc
  >>> smiles = ["C", "CC", "CCC", "CCCC"]
  >>> featurizer = dc.feat.CircularFingerprint()
  >>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  >>> dataset = loader.create_dataset(smiles, shard_size=2)
  >>> len(dataset)
  4

  Here's an example with both datapoints and labels

  >>> import deepchem as dc
  >>> smiles = ["C", "CC", "CCC", "CCCC"]
  >>> labels = [1, 0, 1, 0]
  >>> featurizer = dc.feat.CircularFingerprint()
  >>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  >>> dataset = loader.create_dataset(zip(smiles, labels), shard_size=2)
  >>> len(dataset)
  4

  Here's an example with datapoints, labels, weights and ids all provided.

  >>> import deepchem as dc
  >>> smiles = ["C", "CC", "CCC", "CCCC"]
  >>> labels = [1, 0, 1, 0]
  >>> weights = [1.5, 0, 1.5, 0]
  >>> ids = ["C", "CC", "CCC", "CCCC"]
  >>> featurizer = dc.feat.CircularFingerprint()
  >>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  >>> dataset = loader.create_dataset(zip(smiles, labels, weights, ids), shard_size=2)
  >>> len(dataset)
  4
  """

  def create_dataset(self,
                     inputs: Sequence[Any],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192) -> DiskDataset:
    """Creates and returns a `Dataset` object by featurizing provided files.

    Reads in `inputs` and uses `self.featurizer` to featurize the
    data in these input files. For large files, automatically shards
    into smaller chunks of `shard_size` datapoints for convenience.
    Returns a `Dataset` object that contains the featurized dataset.

    This implementation assumes that the helper methods `_get_shards`
    and `_featurize_shard` are implemented and that each shard
    returned by `_get_shards` is a pandas dataframe. You may choose
    to reuse or override this method in your subclass implementations.

    Parameters
    ----------
    inputs: Sequence[Any]
      List of inputs to process. Entries can be arbitrary objects so long as
      they are understood by `self.featurizer`
    data_dir: str, optional
      Directory to store featurized dataset.
    shard_size: int, optional
      Number of examples stored in each shard.

    Returns
    -------
    A `Dataset` object containing a featurized representation of data
    from `inputs`.
    """
    logger.info("Loading raw samples now.")
    logger.info("shard_size: %s" % str(shard_size))
    # Normalize arbitrary iterables (e.g. zip objects from the doctest
    # examples) to a list; wrap a single bare datapoint in a list.
    if not isinstance(inputs, list):
      try:
        inputs = list(inputs)
      except TypeError:
        inputs = [inputs]

    def shard_generator():
      # Featurize one shard at a time so only `shard_size` datapoints'
      # worth of features is ever held in memory before being written out.
      global_index = 0
      for shard_num, shard in enumerate(self._get_shards(inputs, shard_size)):
        time1 = time.time()
        X, y, w, ids = self._featurize_shard(shard, global_index)
        global_index += len(shard)
        time2 = time.time()
        logger.info("TIMING: featurizing shard %d took %0.3f s" %
                    (shard_num, time2 - time1))
        yield X, y, w, ids

    return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)

  def _get_shards(self, inputs, shard_size):
    """Break up input into shards.

    Parameters
    ----------
    inputs: list[object]
      Each entry in this list must be of the form `(featurization_input,
      label, weight, id)` or `(featurization_input, label, weight)` or
      `(featurization_input, label)` or `featurization_input` for one
      datapoint, where `featurization_input` is any input that is recognized
      by `self.featurizer`.
    shard_size: int
      The size of shard to generate.

    Returns
    -------
    Iterator which iterates over shards of data.
    """
    current_shard = []
    for i, datapoint in enumerate(inputs):
      # Once `shard_size` datapoints have accumulated, emit them before
      # starting the next shard.
      if i != 0 and i % shard_size == 0:
        shard_data = current_shard
        current_shard = []
        yield shard_data
      current_shard.append(datapoint)
    # Guard against yielding an empty trailing shard when `inputs` is empty;
    # an empty shard would make np.concatenate in _featurize_shard fail.
    if current_shard:
      yield current_shard

  def _featurize_shard(self, shard, global_index):
    """Featurizes a shard of an input data.

    Parameters
    ----------
    shard: list
      List each entry of which must be of the form `(featurization_input,
      label, weight, id)` or `(featurization_input, label, weight)` or
      `(featurization_input, label)` or `featurization_input` for one
      datapoint, where `featurization_input` is any input that is recognized
      by `self.featurizer`.
    global_index: int
      The starting index for this shard in the full set of provided inputs

    Returns
    -------
    Tuple `(X, y, w, ids)` of numpy arrays for this shard.
    """
    features = []
    labels = []
    weights = []
    ids = []
    n_tasks = len(self.tasks)
    for i, entry in enumerate(shard):
      # Bare datapoints are wrapped so every entry is a tuple below.
      if not isinstance(entry, tuple):
        entry = (entry,)
      # Validate length up front; this also rejects zero-length tuples,
      # which previously fell through all branches and raised an obscure
      # UnboundLocalError.
      if not 1 <= len(entry) <= 4:
        raise ValueError(
            "Entry is malformed and must be of length 1-4 containing featurization_input and optionally label, weight, and id."
        )
      if len(entry) == 4:
        featurization_input, label, weight, entry_id = entry
      elif len(entry) == 3:
        featurization_input, label, weight = entry
        entry_id = global_index + i
      elif len(entry) == 2:
        featurization_input, label = entry
        # No weights supplied: weight every task equally.
        weight = np.ones((n_tasks), np.float32)
        entry_id = global_index + i
      else:  # len(entry) == 1
        # Unwrap the 1-tuple created above so the featurizer receives the
        # raw datapoint, consistent with the other branches (previously the
        # tuple itself was passed through).
        featurization_input = entry[0]
        # No labels supplied: zero labels with zero weights so these
        # datapoints do not contribute to any loss.
        label = np.zeros((n_tasks), np.float32)
        weight = np.zeros((n_tasks), np.float32)
        entry_id = global_index + i
      feature = self.featurizer(featurization_input)
      features.append(feature)
      weights.append(weight)
      labels.append(label)
      ids.append(entry_id)
    X = np.concatenate(features, axis=0)
    y = np.array(labels)
    w = np.array(weights)
    ids = np.array(ids)
    return X, y, w, ids
| miaecle/deepchem | deepchem/data/data_loader.py | Python | mit | 36,370 | [
"RDKit"
] | ddb0f80016c6bc3cf54355e6bcfec6079c99b681c28f861e1e86508885c65d4b |
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
def PtAcceptInviteInGame(friendName,inviteKey):
"""Sends a VaultTask to the server to perform the invite"""
pass
def PtAmCCR():
"""Returns true if local player is a CCR"""
pass
def PtAtTimeCallback(selfkey,time,id):
"""This will create a timer callback that will call OnTimer when complete
- 'selfkey' is the ptKey of the PythonFile component
- 'time' is how much time from now (in seconds) to call back
- 'id' is an integer id that will be returned in the OnTimer call"""
pass
def PtAttachObject(child,parent):
"""Attach child to parent based on ptKey or ptSceneobject
- childKey is the ptKey or ptSceneobject of the one being attached
- parentKey is the ptKey or ptSceneobject of the one being attached to
(both arguments must be ptKeys or ptSceneobjects, you cannot mix types)"""
pass
def PtAvatarEnterAFK():
"""Tells the local avatar to enter AwayFromKeyboard idle loop (netpropagated)"""
pass
def PtAvatarEnterAnimMode(animName):
"""Enter a custom anim loop (netpropagated)"""
pass
def PtAvatarEnterLookingAtKI():
"""Tells the local avatar to enter looking at KI idle loop (netpropagated)"""
pass
def PtAvatarEnterUsePersBook():
"""Tells the local avatar to enter using their personal book idle loop (netpropagated)"""
pass
def PtAvatarExitAFK():
"""Tells the local avatar to exit AwayFromKeyboard idle loop (netpropagated)"""
pass
def PtAvatarExitAnimMode(animName):
"""Exit custom anim loop (netpropagated)"""
pass
def PtAvatarExitLookingAtKI():
"""Tells the local avatar to exit looking at KI idle loop (netpropagated)"""
pass
def PtAvatarExitUsePersBook():
"""Tells the local avatar to exit using their personal book idle loop (netpropagated)"""
pass
def PtAvatarSitOnGround():
"""Tells the local avatar to sit on ground and enter sit idle loop (netpropagated)"""
pass
def PtAvatarSpawnNext():
"""Send the avatar to the next spawn point"""
pass
def PtCanShadowCast():
"""Can we cast shadows?"""
pass
def PtChangeAvatar(gender):
"""Change the local avatar's gender (or clothing type)"""
pass
def PtChangePassword(password):
"""Changes the current account's password"""
pass
def PtChangePlayerName(name):
"""Change the local avatar's name"""
pass
def PtCheckVisLOS(startPoint,endPoint):
"""Does LOS check from start to end"""
pass
def PtCheckVisLOSFromCursor():
"""Does LOS check from where the mouse cursor is, into the screen"""
pass
def PtClearCameraStack():
"""clears all cameras"""
pass
def PtClearOfferBookMode():
"""Cancel the offer book interface"""
pass
def PtClearPrivateChatList(memberKey):
"""Remove the local avatar from private vox messaging, and / or clear members from his chat list"""
pass
def PtClearTimerCallbacks(key):
"""This will remove timer callbacks to the specified key"""
pass
def PtConsole(command):
"""This will execute 'command' as if it were typed into the Plasma console."""
pass
def PtConsoleNet(command,netForce):
"""This will execute 'command' on the console, over the network, on all clients.
If 'netForce' is true then force command to be sent over the network."""
pass
def PtCreateDir(directory):
"""Creates the directory and all parent folders. Returns false on failure"""
pass
def PtCreatePlayer(playerName, avatarShape, invitation):
"""Creates a new player"""
pass
def PtCreatePlayerW(playerName, avatarShape, invitation):
"""Unicode version of PtCreatePlayer"""
pass
def PtCreatePublicAge(ageInfo, cbObject=None):
"""Create a public instance of the given age.
cbObject, if supplied should have a member called publicAgeCreated(self,ageInfo)"""
pass
def PtDebugAssert(cond, msg):
"""Debug only: Assert if condition is false."""
pass
def PtDebugPrint(*msgs, **kwargs):
"""Prints msgs to the Python log given the message's level"""
pass
def PtDeletePlayer(playerInt):
"""Deletes a player associated with the current account"""
pass
def PtDetachObject(child,parent):
"""Detach child from parent based on ptKey or ptSceneobject
- child is the ptKey or ptSceneobject of the one being detached
- parent is the ptKey or ptSceneobject of the one being detached from
(both arguments must be ptKeys or ptSceneobjects, you cannot mix types)"""
pass
def PtDirtySynchClients(selfKey,SDLStateName,flags):
"""DO NOT USE - handled by ptSDL"""
pass
def PtDirtySynchState(selfKey,SDLStateName,flags):
"""DO NOT USE - handled by ptSDL"""
pass
def PtDisableAvatarCursorFade():
"""Disable the avatar cursor fade"""
pass
def PtDisableAvatarJump():
"""Disable the ability of the avatar to jump"""
pass
def PtDisableControlKeyEvents(selfKey):
"""Disable the control key events from calling OnControlKeyEvent"""
pass
def PtDisableForwardMovement():
"""Disable the ability of the avatar to move forward"""
pass
def PtDisableMouseMovement():
"""Disable avatar mouse movement input"""
pass
def PtDisableMovementKeys():
"""Disable avatar movement input"""
pass
def PtDisableRenderScene():
"""UNKNOWN"""
pass
def PtDisableShadows():
"""Turns shadows off"""
pass
def PtDumpLogs(folder):
"""Dumps all current log files to the specified folder (a sub-folder to the log folder)"""
pass
def PtEmoteAvatar(emote):
"""Play an emote on the local avatar (netpropagated)"""
pass
def PtEnableAvatarCursorFade():
"""Enable the avatar cursor fade"""
pass
def PtEnableAvatarJump():
"""Enable the ability of the avatar to jump"""
pass
def PtEnableControlKeyEvents(selfKey):
"""Enable control key events to call OnControlKeyEvent(controlKey,activateFlag)"""
pass
def PtEnableForwardMovement():
"""Enable the ability of the avatar to move forward"""
pass
def PtEnableMouseMovement():
"""Enable avatar mouse movement input"""
pass
def PtEnableMovementKeys():
"""Enable avatar movement input"""
pass
def PtEnablePlanarReflections(on):
"""Enables/disables planar reflections"""
pass
def PtEnableRenderScene():
"""UNKNOWN"""
pass
def PtEnableShadows():
"""Turns shadows on"""
pass
def PtExcludeRegionSet(senderKey,regionKey,state):
"""This will set the state of an exclude region
- 'senderKey' is a ptKey of the PythonFile component
- 'regionKey' is a ptKey of the exclude region
- 'state' is either kExRegRelease or kExRegClear"""
pass
def PtExcludeRegionSetNow(senderKey,regionKey,state):
"""This will set the state of an exclude region immediately on the server
- 'senderKey' is a ptKey of the PythonFile component
- 'regionKey' is a ptKey of the exclude region
- 'state' is either kExRegRelease or kExRegClear"""
pass
def PtFadeIn(lenTime, holdFlag, noSound=0):
"""Fades screen in for lenTime seconds"""
pass
def PtFadeLocalAvatar(fade):
"""Fade (or unfade) the local avatar"""
pass
def PtFadeOut(lenTime, holdFlag, noSound=0):
"""Fades screen out for lenTime seconds"""
pass
def PtFakeLinkAvatarToObject(avatar,object):
"""Pseudo-links avatar to object within the same age
"""
pass
def PtFileExists(filename):
"""Returns true if the specified file exists"""
pass
def PtFindSceneobject(name,ageName):
"""This will try to find a sceneobject based on its name and what age its in
- it will return a ptSceneObject if found- if not found then a NameError exception will happen"""
pass
def PtFirstPerson():
"""is the local avatar in first person mode"""
pass
def PtFlashWindow():
"""Flashes the client window if it is not focused"""
pass
def PtFogSetDefColor(color):
"""Sets default fog color"""
pass
def PtFogSetDefExp(end,density):
"""Set exp fog values"""
pass
def PtFogSetDefExp2(end,density):
"""Set exp2 fog values"""
pass
def PtFogSetDefLinear(start,end,density):
"""Set linear fog values"""
pass
def PtForceCursorHidden():
"""Forces the cursor to hide, overriding everything.
Only call if other methods won't work. The only way to show the cursor after this call is PtForceMouseShown()"""
pass
def PtForceCursorShown():
"""Forces the cursor to show, overriding everything.
Only call if other methods won't work. This is the only way to show the cursor after a call to PtForceMouseHidden()"""
pass
def PtGMTtoDniTime(gtime):
"""Converts GMT time (passed in) to D'Ni time"""
pass
def PtGUICursorDimmed():
    """Dims the GUI cursor"""
    pass
def PtGUICursorOff():
"""Turns the GUI cursor off"""
pass
def PtGUICursorOn():
"""Turns the GUI cursor on"""
pass
def PtGetAccountName():
"""Returns the account name for the current account"""
pass
def PtGetAccountPlayerList():
"""Returns list of players associated with the current account"""
pass
def PtGetAgeInfo():
"""Returns ptAgeInfoStruct of the current Age"""
pass
def PtGetAgeName():
    """DEPRECATED - use ptDniInfoSource instead"""
    pass
def PtGetAgeSDL():
"""Returns the global ptSDL for the current Age"""
pass
def PtGetAgeTime():
    """DEPRECATED - use ptDniInfoSource instead"""
    pass
def PtGetAgeTimeOfDayPercent():
"""Returns the current age time of day as a percent (0 to 1)"""
pass
def PtGetAvatarKeyFromClientID(clientID):
"""From an integer that is the clientID, find the avatar and return its ptKey"""
pass
def PtGetCameraNumber(x):
"""Returns camera x's name from stack"""
pass
def PtGetClientIDFromAvatarKey(avatarKey):
"""From a ptKey that points at an avatar, return the players clientID (integer)"""
pass
def PtGetClientName(avatarKey=None):
"""This will return the name of the client that is owned by the avatar
- avatarKey is the ptKey of the avatar to get the client name of.
If avatarKey is omitted then the local avatar is used"""
pass
def PtGetControlEvents(on, key):
"""Registers or unregisters for control event messages"""
pass
def PtGetDefaultDisplayParams():
"""Returns the default resolution and display settings"""
pass
def PtGetDefaultSpawnPoint():
"""Returns the default spawnpoint definition (as a ptSpawnPointInfo)"""
pass
def PtGetDesktopColorDepth():
"""Returns desktop ColorDepth"""
pass
def PtGetDesktopHeight():
"""Returns desktop height"""
pass
def PtGetDesktopWidth():
"""Returns desktop width"""
pass
def PtGetDialogFromString(dialogName):
"""Get a ptGUIDialog from its name"""
pass
def PtGetDialogFromTagID(tagID):
"""Returns the dialog associated with the tagID"""
pass
def PtGetDniTime():
"""Returns current D'Ni time"""
pass
def PtGetFrameDeltaTime():
"""Returns the amount of time that has elapsed since last frame."""
pass
def PtGetGameTime():
"""Returns the system game time (frame based) in seconds."""
pass
def PtGetInitPath():
"""Returns the unicode path to the client's init directory. Do NOT convert to a standard string."""
pass
def PtGetLanguage():
"""Returns the current language as a PtLanguage enum"""
pass
def PtGetLocalAvatar():
"""This will return a ptSceneobject of the local avatar
- if there is no local avatar a NameError exception will happen."""
pass
def PtGetLocalClientID():
"""Returns our local client ID number"""
pass
def PtGetLocalKILevel():
"""returns local player's ki level"""
pass
def PtGetLocalPlayer():
"""Returns a ptPlayer object of the local player"""
pass
def PtGetLocalizedString(name, arguments=None):
"""Returns the localized string specified by name (format is Age.Set.Name) and substitutes the arguments in the list of strings passed in as arguments."""
pass
def PtGetMouseTurnSensitivity():
"""Returns the sensitivity"""
pass
def PtGetNPCCount():
"""This will return the number of NPCs in the current age"""
pass
def PtGetNPCByID(npcID):
"""This will return the NPC with a specific ID"""
pass
def PtGetNumCameras():
"""returns camera stack size"""
pass
def PtGetNumParticles(key):
"""Key is the key of scene object host to particle system"""
pass
def PtGetNumRemotePlayers():
"""Returns the number of remote players in this Age with you."""
pass
def PtGetPlayerList():
"""Returns a list of ptPlayer objects of all the remote players"""
pass
def PtGetPlayerListDistanceSorted():
"""Returns a list of ptPlayers, sorted by distance"""
pass
def PtGetPrevAgeInfo():
"""Returns ptAgeInfoStruct of previous age visited"""
pass
def PtGetPrevAgeName():
"""Returns filename of previous age visited"""
pass
def PtGetPublicAgeList(ageName, cbObject=None):
"""Get list of public ages for the given age name.
cbObject, if supplied should have a method called gotPublicAgeList(self,ageList). ageList is a list of tuple(ptAgeInfoStruct,nPlayersInAge)"""
pass
def PtGetPythonLoggingLevel():
"""Returns the current level of python logging"""
pass
def PtGetServerTime():
"""Returns the current time on the server (which is GMT)"""
pass
def PtGetShadowVisDistance():
"""Returns the maximum shadow visibility distance"""
pass
def PtGetSupportedDisplayModes():
"""Returns a list of supported resolutions"""
pass
def PtGetTime():
"""Returns the number of seconds since the game was started."""
pass
def PtGetUserPath():
"""Returns the unicode path to the client's root user directory. Do NOT convert to a standard string."""
pass
def PtHideDialog(dialogName):
"""Hide a GUI dialog by name (does not unload dialog)"""
pass
def PtIsActivePlayerSet():
"""Returns whether or not an active player is set"""
pass
def PtIsCCRAway():
"""Returns current status of CCR dept"""
pass
def PtIsClickToTurn():
"""Is click-to-turn on?"""
pass
def PtIsCurrentBrainHuman():
"""Returns whether the local avatar current brain is the human brain"""
pass
def PtIsDemoMode():
"""Returns whether the game is in Demo mode or not"""
pass
def PtIsDialogLoaded(dialogName):
"""Test to see if a GUI dialog is loaded, by name"""
pass
def PtIsEnterChatModeKeyBound():
"""Returns whether the EnterChatMode is bound to a key"""
pass
def PtIsGUIModal():
"""Returns true if the GUI is displaying a modal dialog and blocking input"""
pass
def PtIsInternalRelease():
"""Returns whether the client is an internal build or not"""
pass
def PtIsMouseInverted():
"""Is the mouse currently inverted?"""
pass
def PtIsShadowsEnabled():
"""Returns whether shadows are currently turned on"""
pass
def PtIsSinglePlayerMode():
"""Returns whether the game is in single player mode or not"""
pass
def PtIsSubscriptionActive():
"""Returns true if the current player is a paying subscriber"""
pass
def PtKillParticles(timeRemaining,pctToKill,particleSystem):
"""Tells particleSystem to kill pctToKill percent of its particles"""
pass
def PtLimitAvatarLOD(LODlimit):
"""Sets avatar's LOD limit"""
pass
def PtLoadAvatarModel(modelName, spawnPoint, userStr = ""):
"""Loads an avatar model at the given spawn point. Assigns the user specified string to it."""
pass
def PtLoadBookGUI(guiName):
"""Loads the gui specified, a gui must be loaded before it can be used. If the gui is already loaded, doesn't do anything"""
pass
def PtLoadDialog(dialogName,selfKey=None,ageName=""):
"""Loads a GUI dialog by name and optionally set the Notify proc key
If the dialog is already loaded then it won't load it again"""
pass
def PtLoadJPEGFromDisk(filename,width,height):
"""The image will be resized to fit the width and height arguments. Set to 0 if resizing is not desired.
Returns a pyImage of the specified file."""
pass
def PtLocalAvatarIsMoving():
"""Returns true if the local avatar is moving (a movement key is held down)"""
pass
def PtLocalAvatarRunKeyDown():
"""Returns true if the run key is being held down for the local avatar"""
pass
def PtMaxListenDistSq():
"""Returns the maximum distance (squared) of the listen range"""
pass
def PtMaxListenListSize():
"""Returns the maximum listen number of players"""
pass
def PtNotifyOffererLinkAccepted(offerer):
"""Tell the offerer that we accepted the link offer"""
pass
def PtNotifyOffererLinkCompleted(offerer):
"""Tell the offerer that we completed the link"""
pass
def PtNotifyOffererLinkRejected(offerer):
"""Tell the offerer that we rejected the link offer"""
pass
def PtPageInNode(nodeName, ageName=""):
"""Pages in node, or a list of nodes"""
pass
def PtPageOutNode(nodeName):
"""Pages out a node"""
pass
def PtPrintToScreen(message):
"""Prints 'message' to the status log, for debug only."""
pass
def PtRateIt(chronicleName,dialogPrompt,onceFlag):
"""Shows a dialog with dialogPrompt and stores user input rating into chronicleName"""
pass
def PtRebuildCameraStack(name,ageName):
"""Push camera with this name on the stack"""
pass
def PtRecenterCamera():
"""re-centers the camera"""
pass
def PtRemovePublicAge(ageInstanceGuid, cbObject=None):
"""Remove a public instance of the given age.
cbObject, if supplied should have a member called publicAgeRemoved(self,ageInstanceGuid)"""
pass
def PtRequestLOSScreen(selfKey,ID,xPos,yPos,distance,what,reportType):
"""Request a LOS check from a point on the screen"""
pass
def PtSaveScreenShot(fileName,width=640,height=480,quality=75):
"""Takes a screenshot with the specified filename, size, and quality"""
pass
def PtSendChatToCCR(message,CCRPlayerID):
"""Sends a chat message to a CCR that has contacted this player"""
pass
def PtSendKIGZMarkerMsg(markerNumber,sender):
"""Same as PtSendKIMessageInt except 'sender' could get a notify message back
"""
pass
def PtSendKIMessage(command,value):
"""Sends a command message to the KI frontend.
See PlasmaKITypes.py for list of commands"""
pass
def PtSendKIMessageInt(command,value):
"""Same as PtSendKIMessage except the value is guaranteed to be a UInt32
(for things like player IDs)"""
pass
def PtSendPetitionToCCR(message,reason=0,title=""):
"""Sends a petition with a message to the CCR group"""
pass
def PtSendPrivateChatList(chatList):
"""Lock the local avatar into private vox messaging, and / or add new members to his chat list"""
pass
def PtSendRTChat(fromPlayer,toPlayerList,message,flags):
"""Sends a realtime chat message to the list of ptPlayers
If toPlayerList is an empty list, it is a broadcast message"""
pass
def PtSetActivePlayer(playerInt):
"""Sets the active player associated with the current account"""
pass
def PtSetAlarm(secs, cbObject, cbContext):
"""secs is the amount of time before your alarm goes off.
cbObject is a python object with the method onAlarm(int context)
cbContext is an integer."""
pass
def PtSetBehaviorLoopCount(behaviorKey,stage,loopCount,netForce):
"""This will set the loop count for a particular stage in a multistage behavior"""
pass
def PtSetBehaviorNetFlags(behKey, netForce, netProp):
"""Sets net flags on the associated behavior"""
pass
def PtSetClearColor(red,green,blue):
"""Set the clear color"""
pass
def PtSetClickToTurn(state):
"""Turns on click-to-turn"""
pass
def PtSetGamma2(gamma):
"""Set the gamma with gamma2 rules"""
pass
def PtSetGlobalClickability(enable):
"""Enable or disable all clickables on the local client"""
pass
def PtSetGraphicsOptions(width, height, colordepth, windowed, numAAsamples, numAnisoSamples, VSync):
"""Set the graphics options"""
pass
def PtSetLightAnimStart(key,name,start):
""" Key is the key of scene object host to light, start is a bool. Name is the name of the light to manipulate"""
pass
def PtSetLightValue(key,name,r,g,b,a):
""" Key is the key of scene object host to light. Name is the name of the light to manipulate"""
pass
def PtSetMouseInverted():
"""Inverts the mouse"""
pass
def PtSetMouseTurnSensitivity(sensitivity):
"""Set the mouse sensitivity"""
pass
def PtSetMouseUninverted():
"""Uninverts the mouse"""
pass
def PtSetOfferBookMode(selfkey,ageFilename,ageInstanceName):
"""Put us into the offer book interface"""
pass
def PtSetParticleDissentPoint(x, y, z, particlesys):
"""Sets the dissent point of the particlesys to x,y,z"""
pass
def PtSetParticleOffset(x,y,z,particlesys):
"""Sets the particlesys particle system's offset"""
pass
def PtSetPythonLoggingLevel(level):
"""Sets the current level of python logging"""
pass
def PtSetShadowVisDistance(distance):
"""Set the maximum shadow visibility distance"""
pass
def PtSetShareSpawnPoint(spawnPoint):
"""This sets the desired spawn point for the receiver to link to"""
pass
def PtShootBulletFromObject(selfkey, gunObj, radius, range):
"""Shoots a bullet from an object"""
pass
def PtShootBulletFromScreen(selfkey, xPos, yPos, radius, range):
"""Shoots a bullet from a position on the screen"""
pass
def PtShowDialog(dialogName):
"""Show a GUI dialog by name (does not load dialog)"""
pass
def PtStartScreenCapture(selfKey,width=800,height=600):
"""Starts a capture of the screen"""
pass
def PtToggleAvatarClickability(on):
"""Turns on and off our avatar's clickability"""
pass
def PtTransferParticlesToObject(objFrom, objTo, num):
"""Transfers num particles from objFrom to objTo"""
pass
def PtUnLoadAvatarModel(avatarKey):
"""Unloads the specified avatar model"""
pass
def PtUnloadAllBookGUIs():
"""Unloads all loaded guis except for the default one"""
pass
def PtUnloadBookGUI(guiName):
"""Unloads the gui specified. If the gui isn't loaded, doesn't do anything"""
pass
def PtUnloadDialog(dialogName):
"""This will unload the GUI dialog by name. If not loaded then nothing will happen"""
pass
def PtUpgradeVisitorToExplorer(playerInt):
"""Upgrades the player to explorer status"""
pass
def PtUsingUnicode():
"""Returns true if the current language is a unicode language (like Japanese)"""
pass
def PtValidateKey(key):
"""Returns true(1) if 'key' is valid and loaded,
otherwise returns false(0)"""
pass
def PtWasLocallyNotified(selfKey):
"""Returns 1 if the last notify was local or 0 if the notify originated on the network"""
pass
def PtWearDefaultClothing(key):
"""Forces the avatar to wear the default clothing set"""
pass
def PtWearDefaultClothingType(key,type):
"""Forces the avatar to wear the default clothing of the specified type"""
pass
def PtWearMaintainerSuit(key,wearOrNot):
"""Wears or removes the maintainer suit of clothes"""
pass
def PtWhatGUIControlType(guiKey):
"""Returns the control type of the key passed in"""
pass
def PtYesNoDialog(selfkey,dialogMessage):
"""This will display a Yes/No dialog to the user with the text dialogMessage
This dialog _has_ to be answered by the user.
And their answer will be returned in a Notify message."""
pass
# NOTE: pure documentation stub — every method body is `pass`; the real
# implementation is provided natively by the engine (presumably via the C++
# glue layer — confirm against the engine build).
class ptAgeInfoStruct:
    """Class to hold AgeInfo struct data"""
    def __init__(self):
        """None"""
        pass
    def copyFrom(self,other):
        """Copies data from one ptAgeInfoStruct or ptAgeInfoStructRef to this one"""
        pass
    def getAgeFilename(self):
        """Gets the Age's filename"""
        pass
    def getAgeInstanceGuid(self):
        """Gets the Age's instance GUID"""
        pass
    def getAgeInstanceName(self):
        """Gets the instance name of the Age"""
        pass
    def getAgeLanguage(self):
        """Gets the age's language (integer)"""
        pass
    def getAgeSequenceNumber(self):
        """Gets the unique sequence number"""
        pass
    def getAgeUserDefinedName(self):
        """Gets the user defined part of the Age name"""
        pass
    def getDisplayName(self):
        """Returns a string that is the displayable name of the age instance"""
        pass
    def setAgeFilename(self,filename):
        """Sets the filename of the Age"""
        pass
    def setAgeInstanceGuid(self,guid):
        """Sets the Age instance's GUID"""
        pass
    def setAgeInstanceName(self,instanceName):
        """Sets the instance name of the Age"""
        pass
    def setAgeLanguage(self,lang):
        """Sets the age's language (integer)"""
        pass
    def setAgeSequenceNumber(self,seqNumber):
        """Sets the unique sequence number"""
        pass
    def setAgeUserDefinedName(self,udName):
        """Sets the user defined part of the Age name"""
        pass
# NOTE: pure documentation stub — every method body is `pass`; the real
# implementation is provided natively by the engine.
class ptAgeInfoStructRef:
    """Class to hold AgeInfo struct data"""
    def __init__(self):
        """None"""
        pass
    def copyFrom(self,other):
        """Copies data from one ptAgeInfoStruct or ptAgeInfoStructRef to this one"""
        pass
    def getAgeFilename(self):
        """Gets the Age's filename"""
        pass
    def getAgeInstanceGuid(self):
        """Gets the Age's instance GUID"""
        pass
    def getAgeInstanceName(self):
        """Gets the instance name of the Age"""
        pass
    def getAgeSequenceNumber(self):
        """Gets the unique sequence number"""
        pass
    def getAgeUserDefinedName(self):
        """Gets the user defined part of the Age name"""
        pass
    def getDisplayName(self):
        """Returns a string that is the displayable name of the age instance"""
        pass
    def setAgeFilename(self,filename):
        """Sets the filename of the Age"""
        pass
    def setAgeInstanceGuid(self,guid):
        """Sets the Age instance's GUID"""
        pass
    def setAgeInstanceName(self,instanceName):
        """Sets the instance name of the Age"""
        pass
    def setAgeSequenceNumber(self,seqNumber):
        """Sets the unique sequence number"""
        pass
    def setAgeUserDefinedName(self,udName):
        """Sets the user defined part of the Age name"""
        pass
# NOTE: pure documentation stub — every method body is `pass`; the real
# implementation is provided natively by the engine.
class ptAgeLinkStruct:
    """Class to hold the data of the AgeLink structure"""
    def __init__(self):
        """None"""
        pass
    def copyFrom(self,other):
        """Copies data from one ptAgeLinkStruct or ptAgeLinkStructRef to this one"""
        pass
    def getAgeInfo(self):
        """Returns a ptAgeInfoStructRef of the AgeInfo for this link"""
        pass
    def getLinkingRules(self):
        """Returns the linking rules of this link"""
        pass
    def getParentAgeFilename(self):
        """Returns a string of the parent age filename"""
        pass
    def getSpawnPoint(self):
        """Gets the spawn point ptSpawnPointInfoRef of this link"""
        pass
    def setAgeInfo(self,ageInfo):
        """Sets the AgeInfoStruct from the data in ageInfo (a ptAgeInfoStruct)"""
        pass
    def setLinkingRules(self,rule):
        """Sets the linking rules for this link"""
        pass
    def setParentAgeFilename(self,filename):
        """Sets the parent age filename for child age links"""
        pass
    def setSpawnPoint(self,spawnPtInfo):
        """Sets the spawn point of this link (a ptSpawnPointInfo or ptSpawnPointInfoRef)"""
        pass
class ptAgeLinkStructRef:
    """Class to hold the data of the AgeLink structure"""
    def __init__(self):
        """Constructor — takes no arguments; the underlying struct is engine-supplied."""
        pass
    def copyFrom(self,other):
        """Copies data from one ptAgeLinkStruct or ptAgeLinkStructRef to this one"""
        pass
    def getAgeInfo(self):
        """Returns a ptAgeInfoStructRef of the AgeInfo for this link"""
        pass
    def getLinkingRules(self):
        """Returns the linking rules of this link"""
        pass
    def getSpawnPoint(self):
        """Gets the spawn point ptSpawnPointInfoRef of this link"""
        pass
    def setAgeInfo(self,ageInfo):
        """Sets the AgeInfoStruct from the data in ageInfo (a ptAgeInfoStruct)"""
        pass
    def setLinkingRules(self,rule):
        """Sets the linking rules for this link"""
        pass
    def setSpawnPoint(self,spawnPtInfo):
        """Sets the spawn point of this link (a ptSpawnPointInfo or ptSpawnPointInfoRef)"""
        pass
class ptAgeVault:
    """Accessor class to the Age's vault"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def addChronicleEntry(self,name,type,value):
        """Adds a chronicle entry with the specified type and value"""
        pass
    def addDevice(self,deviceName,cb=None,cbContext=0):
        """Adds a device to the age"""
        pass
    def findChronicleEntry(self,entryName):
        """Returns the named ptVaultChronicleNode"""
        pass
    def getAgeDevicesFolder(self):
        """Returns a ptVaultFolderNode of the inboxes for the devices in this Age."""
        pass
    def getAgeGuid(self):
        """Returns the current Age's guid as a string."""
        pass
    def getAgeInfo(self):
        """Returns a ptVaultAgeInfoNode of this Age"""
        pass
    def getAgeSDL(self):
        """Returns the age's SDL (ptSDLStateDataRecord)"""
        pass
    def getAgesIOwnFolder(self):
        """(deprecated, use getBookshelfFolder) Returns a ptVaultFolderNode that contains the Ages I own"""
        pass
    def getBookshelfFolder(self):
        """Personal age only: Returns a ptVaultFolderNode that contains the owning player's AgesIOwn age list"""
        pass
    def getChronicleFolder(self):
        """Returns a ptVaultFolderNode"""
        pass
    def getDevice(self,deviceName):
        """Returns the specified device (ptVaultTextNoteNode)"""
        pass
    def getDeviceInbox(self,deviceName):
        """Returns a ptVaultFolderNode of the inbox for the named device in this age."""
        pass
    def getPeopleIKnowAboutFolder(self):
        """Returns a ptVaultPlayerInfoListNode of the players the Age knows about(?)."""
        pass
    def getPublicAgesFolder(self):
        """Returns a ptVaultFolderNode that contains all the public Ages"""
        pass
    def getSubAgeLink(self,ageInfo):
        """Returns a ptVaultAgeLinkNode to 'ageInfo' (a ptAgeInfoStruct) for this Age."""
        pass
    def getSubAgesFolder(self):
        """Returns a ptVaultFolderNode of sub Age's folder."""
        pass
    def hasDevice(self,deviceName):
        """Does a device with this name exist?"""
        pass
    def removeDevice(self,deviceName):
        """Removes a device from the age"""
        pass
    def setDeviceInbox(self,deviceName,inboxName,cb=None,cbContext=0):
        """Sets the device's inbox"""
        pass
    def updateAgeSDL(self,pyrec):
        """Updates the age's SDL"""
        pass
class ptAnimation:
    """Plasma animation class"""
    def __init__(self,key=None):
        """Constructor — optionally takes a receiver 'key' (see addKey)."""
        pass
    def addKey(self,key):
        """Adds an animation modifier to the list of receiver keys"""
        pass
    def backwards(self,backwardsFlag):
        """Turn on and off playing the animation backwards"""
        pass
    def getFirstKey(self):
        """This will return a ptKey object that is the first receiver (target)
        However, if the parent is not a modifier or not loaded, then None is returned."""
        pass
    def incrementBackward(self):
        """Step the animation backward a frame"""
        pass
    def incrementForward(self):
        """Step the animation forward a frame"""
        pass
    def looped(self,loopedFlag):
        """Turn on and off looping of the animation"""
        pass
    def netForce(self,forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
        pass
    def play(self):
        """Plays the animation"""
        pass
    def playRange(self,start,end):
        """Play the animation from start to end"""
        pass
    def playToPercentage(self,zeroToOne):
        """Play the animation to the specified percentage (0 to 1)"""
        pass
    def playToTime(self,time):
        """Play the animation to the specified time"""
        pass
    def resume(self):
        """Resumes the animation from where it was stopped last"""
        pass
    def sender(self,selfKey):
        """Sets the sender of the messages being sent to the animation modifier"""
        pass
    def setAnimName(self,name):
        """Sets the animation notetrack name (or (Entire Animation))"""
        pass
    def setLoopEnd(self,loopEnd):
        """Sets the loop ending position
        - 'loopEnd' is the number of seconds from the absolute beginning of the animation"""
        pass
    def setLoopStart(self,loopStart):
        """Sets the loop starting position
        - 'loopStart' is the number of seconds from the absolute beginning of the animation"""
        pass
    def skipToBegin(self):
        """Skip to the beginning of the animation (don't play)"""
        pass
    def skipToEnd(self):
        """Skip to the end of the animation (don't play)"""
        pass
    def skipToLoopBegin(self):
        """Skip to the beginning of the animation loop (don't play)"""
        pass
    def skipToLoopEnd(self):
        """Skip to the end of the animation loop (don't play)"""
        pass
    def skipToTime(self,time):
        """Skip the animation to time (don't play)"""
        pass
    def speed(self,speed):
        """Sets the animation playback speed"""
        pass
    def stop(self):
        """Stops the animation"""
        pass
class ptAudioControl:
    """Accessor class to the Audio controls"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def canSetMicLevel(self):
        """Can the microphone level be set? Returns 1 if true otherwise returns 0."""
        pass
    def disable(self):
        """Disables audio"""
        pass
    def enable(self):
        """Enables audio"""
        pass
    def enableVoiceChat(self,state):
        """Enables or disables voice chat."""
        pass
    def enableVoiceCompression(self,state):
        """Enables or disables voice compression."""
        pass
    def enableVoiceNetBroadcast(self,state):
        """Enables or disables voice over network broadcast."""
        pass
    def enableVoiceRecording(self,state):
        """Enables or disables voice recording."""
        pass
    def getAmbienceVolume(self):
        """Returns the volume (0.0 to 1.0) for the Ambience."""
        pass
    def getAudioDeviceName(self,index):
        """Gets the name of audio device for the given index"""
        pass
    def getDeviceName(self):
        """Gets the name for the device being used by the audio system"""
        pass
    def getGUIVolume(self):
        """Returns the volume (0.0 to 1.0) for the GUI dialogs."""
        pass
    def getHighestMode(self):
        """Gets the highest possible audio system mode"""
        pass
    def getMicLevel(self):
        """Returns the microphone recording level (0.0 to 1.0)."""
        pass
    def getMode(self):
        """Gets the audio system mode"""
        pass
    def getMusicVolume(self):
        """Returns the volume (0.0 to 1.0) for the Music."""
        pass
    def getNPCVoiceVolume(self):
        """Returns the volume (0.0 to 1.0) for the NPC's voice."""
        pass
    def getNumAudioDevices(self):
        """Returns the number of available audio devices."""
        pass
    def getPriorityCutoff(self):
        """Returns current sound priority"""
        pass
    def getSoundFXVolume(self):
        """Returns the volume (0.0 to 1.0) for the Sound FX."""
        pass
    def getVoiceVolume(self):
        """Returns the volume (0.0 to 1.0) for the Voices."""
        pass
    def hideIcons(self):
        """Hides (disables) the voice recording icons."""
        pass
    def isEnabled(self):
        """Is the audio enabled? Returns 1 if true otherwise returns 0."""
        pass
    def isHardwareAccelerated(self):
        """Is audio hardware acceleration enabled? Returns 1 if true otherwise returns 0."""
        pass
    def isMuted(self):
        """Are all sounds muted? Returns 1 if true otherwise returns 0."""
        pass
    def isUsingEAXAcceleration(self):
        """Is EAX sound acceleration enabled? Returns 1 if true otherwise returns 0."""
        pass
    def isVoiceCompressionEnabled(self):
        """Is voice compression enabled? Returns 1 if true otherwise returns 0."""
        pass
    def isVoiceNetBroadcastEnabled(self):
        """Is voice over net enabled? Returns 1 if true otherwise returns 0."""
        pass
    def isVoiceRecordingEnabled(self):
        """Is voice recording enabled? Returns 1 if true otherwise returns 0."""
        pass
    def muteAll(self):
        """Mutes all sounds."""
        pass
    def pushToTalk(self,state):
        """Enables or disables 'push-to-talk'."""
        pass
    def recordFrame(self,size):
        """Sets the voice packet frame size."""
        pass
    def recordSampleRate(self,sampleRate):
        """Sets the recording sample rate."""
        pass
    def setAmbienceVolume(self,volume):
        """Sets the Ambience volume (0.0 to 1.0) for the game.
        This only sets the volume for this game session."""
        pass
    def setDeviceName(self,devicename,restart):
        """Sets the device name for the audio system, and optionally restarts it"""
        pass
    def setGUIVolume(self,volume):
        """Sets the GUI dialog volume (0.0 to 1.0) for the game.
        This only sets the volume for this game session."""
        pass
    def setLoadOnDemand(self,state):
        """Enables or disables the load on demand for sounds."""
        pass
    def setMicLevel(self,level):
        """Sets the microphone recording level (0.0 to 1.0)."""
        pass
    def setMode(self,mode):
        """Sets the audio system mode"""
        pass
    def setMusicVolume(self,volume):
        """Sets the Music volume (0.0 to 1.0) for the game.
        This only sets the volume for this game session."""
        pass
    def setNPCVoiceVolume(self,volume):
        """Sets the NPC's voice volume (0.0 to 1.0) for the game.
        This only sets the volume for this game session."""
        pass
    def setPriorityCutoff(self,priority):
        """Sets the sound priority"""
        pass
    def setSoundFXVolume(self,volume):
        """Sets the SoundFX volume (0.0 to 1.0) for the game.
        This only sets the volume for this game session."""
        pass
    def setTwoStageLOD(self,state):
        """Enables or disables two-stage LOD, where sounds can be loaded into RAM but not into sound buffers.
        ...Less of a performance hit, harder on memory."""
        pass
    def setVoiceVolume(self,volume):
        """Sets the Voice volume (0.0 to 1.0) for the game.
        This only sets the volume for this game session."""
        pass
    def showIcons(self):
        """Shows (enables) the voice recording icons."""
        pass
    def squelchLevel(self,level):
        """Sets the squelch level."""
        pass
    def supportsEAX(self):
        """Returns true or false based on whether or not the specified device supports EAX"""
        pass
    def unmuteAll(self):
        """Unmutes all sounds."""
        pass
    def useEAXAcceleration(self,state):
        """Enables or disables EAX sound acceleration (requires hardware acceleration)."""
        pass
    def useHardwareAcceleration(self,state):
        """Enables or disables audio hardware acceleration."""
        pass
class ptAvatar:
    """Plasma avatar class"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def addWardrobeClothingItem(self,clothing_name,tint1,tint2):
        """To add a clothing item to the avatar's wardrobe (closet)"""
        pass
    def enterSubWorld(self,sceneobject):
        """Places the avatar into the subworld of the ptSceneObject specified"""
        pass
    def exitSubWorld(self):
        """Exits the avatar from the subWorld where it was"""
        pass
    def getAllWithSameMesh(self,clothing_name):
        """Returns a list of all clothing items that use the same mesh as the specified one"""
        pass
    def getAvatarClothingGroup(self):
        """Returns what clothing group the avatar belongs to.
        It is also a means to determine if avatar is male or female"""
        pass
    def getAvatarClothingList(self):
        """Returns a list of clothes that the avatar is currently wearing."""
        pass
    def getClosetClothingList(self,clothing_type):
        """Returns a list of clothes for the avatar that are in specified clothing group."""
        pass
    def getCurrentMode(self):
        """Returns current brain mode for avatar"""
        pass
    def getEntireClothingList(self,clothing_type):
        """Gets the entire list of clothing available. 'clothing_type' not used
        NOTE: should use getClosetClothingList"""
        pass
    def getMatchingClothingItem(self,clothingName):
        """Finds the matching clothing item that goes with 'clothingName'
        Used to find matching left and right gloves and shoes."""
        pass
    def getMorph(self,clothing_name,layer):
        """Get the current morph value"""
        pass
    def getSkinBlend(self,layer):
        """Get the current skin blend value"""
        pass
    def getTintClothingItem(self,clothing_name,layer=1):
        """Returns a ptColor of a particular item of clothing that the avatar is wearing.
        The color will be a ptColor object."""
        pass
    def getTintSkin(self):
        """Returns a ptColor of the current skin tint for the avatar"""
        pass
    def getUniqueMeshList(self,clothing_type):
        """Returns a list of unique clothing items of the desired type (different meshes)"""
        pass
    def getWardrobeClothingList(self):
        """Return a list of items that are in the avatar's closet"""
        pass
    def gotoStage(self,behaviorKey,stage,transitionTime,setTimeFlag,newTime,SetDirectionFlag,isForward,netForce):
        """Tells a multistage behavior to go to a particular stage"""
        pass
    def loadClothingFromFile(self,filename):
        """Load avatar clothing from a file"""
        pass
    def netForce(self,forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
        pass
    def nextStage(self,behaviorKey,transitionTime,setTimeFlag,newTime,SetDirectionFlag,isForward,netForce):
        """Tells a multistage behavior to go to the next stage (Why does Matt like so many parameters?)"""
        pass
    def oneShot(self,seekKey,duration,usePhysicsFlag,animationName,drivableFlag,reversibleFlag):
        """Plays a one-shot animation on the avatar"""
        pass
    def playSimpleAnimation(self,animName):
        """Play simple animation on avatar"""
        pass
    def previousStage(self,behaviorKey,transitionTime,setTimeFlag,newTime,SetDirectionFlag,isForward,netForce):
        """Tells a multistage behavior to go to the previous stage"""
        pass
    def registerForBehaviorNotify(self,selfKey):
        """This will register for behavior notifies from the avatar"""
        pass
    def removeClothingItem(self,clothing_name,update=1):
        """Tells the avatar to remove a particular item of clothing."""
        pass
    def runBehavior(self,behaviorKey,netForceFlag):
        """Runs a behavior on the avatar. Can be a single or multi-stage behavior."""
        pass
    def runBehaviorSetNotify(self,behaviorKey,replyKey,netForceFlag):
        """Same as runBehavior, except send notifications to specified keyed object"""
        pass
    def runCoopAnim(self,targetKey,activeAvatarAnim,targetAvatarAnim,range=6,dist=3,move=1):
        """Seek near another avatar and run animations on both."""
        pass
    def saveClothing(self):
        """Saves the current clothing options (including morphs) to the vault"""
        pass
    def saveClothingToFile(self,filename):
        """Save avatar clothing to a file"""
        pass
    def setMorph(self,clothing_name,layer,value):
        """Set the morph value (clipped between -1 and 1)"""
        pass
    def setReplyKey(self,key):
        """Sets the sender's key"""
        pass
    def setSkinBlend(self,layer,value):
        """Set the skin blend (value between 0 and 1)"""
        pass
    def tintClothingItem(self,clothing_name,tint,update=1):
        """Tells the avatar to tint(color) a particular item of clothing that they are already wearing.
        'tint' is a ptColor object"""
        pass
    def tintClothingItemLayer(self,clothing_name,tint,layer,update=1):
        """Tells the avatar to tint(color) a particular layer of a particular item of clothing."""
        pass
    def tintSkin(self,tint,update=1):
        """Tints all of the skin on the avatar, with the ptColor tint"""
        pass
    def unRegisterForBehaviorNotify(self,selfKey):
        """This will unregister behavior notifications"""
        pass
    def wearClothingItem(self,clothing_name,update=1):
        """Tells the avatar to wear a particular item of clothing.
        And optionally hold update until later (for applying tinting before wearing)."""
        pass
class ptBook:
    """Creates a new book"""
    def __init__(self,esHTMLSource,coverImage=None,callbackKey=None,guiName=''):
        """Constructor — 'esHTMLSource' is the book's esHTML source text;
        'coverImage', 'callbackKey' and 'guiName' are optional."""
        pass
    def allowPageTurning(self,allow):
        """Turns on and off the ability to flip the pages in a book"""
        pass
    def close(self):
        """Closes the book"""
        pass
    def closeAndHide(self):
        """Closes the book and hides it once it finishes animating"""
        pass
    def getCurrentPage(self):
        """Returns the currently shown page"""
        pass
    def getEditableText(self):
        """Returns the editable text currently contained in the book."""
        pass
    def getMovie(self,index):
        """Grabs a ptAnimation object representing the movie indexed by index. The index is the index of the movie in the source code"""
        pass
    def goToPage(self,page):
        """Flips the book to the specified page"""
        pass
    def hide(self):
        """Hides the book"""
        pass
    def nextPage(self):
        """Flips the book to the next page"""
        pass
    def open(self,startingPage):
        """Opens the book to the specified page"""
        pass
    def previousPage(self):
        """Flips the book to the previous page"""
        pass
    def setEditable(self,editable):
        """Turn book editing on or off. If the book GUI does not support editing, nothing will happen"""
        pass
    def setEditableText(self,text):
        """Sets the book's editable text."""
        pass
    def setGUI(self,guiName):
        """Sets the gui to be used by the book, if the requested gui is not loaded, it will use the default
        Do not call while the book is open!"""
        pass
    def setPageMargin(self,margin):
        """Sets the text margin for the book"""
        pass
    def setSize(self,width,height):
        """Sets the size of the book (width and height are floats from 0 to 1)"""
        pass
    def show(self,startOpened):
        """Shows the book closed, or open if the startOpened flag is true"""
        pass
class ptCCRAge:
    """CCR only: CCR age info struct"""
    def __init__(self):
        """Constructor — takes no arguments; the struct data is engine-supplied."""
        pass
class ptCCRMgr:
    """CCR only: accessor class to the CCR manager"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def banLinking(self,pid, banFlag):
        """Set the ban linking flag for a player"""
        pass
    def beginCommunication(self,pid, message):
        """Begin a CCR communication with a player"""
        pass
    def clippingOff(self):
        """Disables clipping for this player"""
        pass
    def clippingOn(self):
        """Enables clipping for this player"""
        pass
    def endCommunication(self,pid):
        """End CCR communications with a player"""
        pass
    def getClipping(self):
        """Is clipping on for this player? Returns 1 if true otherwise returns 0"""
        pass
    def getErrorString(self,errorNumber):
        """Returns the error string that corresponds to 'errorNumber'"""
        pass
    def getLevel(self):
        """Returns the current CCR level for this player"""
        pass
    def getPlayerInfo(self,player, cbObject, cbContext):
        """Finds a player that matches 'player' (which is an id or name)."""
        pass
    def linkPlayerHere(self,pid):
        """Links player to where I am"""
        pass
    def linkPlayerToAge(self,ageInfoStruct,pid):
        """Links player to a specified age"""
        pass
    def linkToAge(self,age,pid):
        """Links to player's version of age"""
        pass
    def linkToMyNeighborhoodAge(self,pid):
        """Links this player to their neighborhood"""
        pass
    def linkToMyPersonalAge(self,pid):
        """Links this player to their personal Age."""
        pass
    def linkToPlayersAge(self,pid):
        """Link to where the player is"""
        pass
    def logMessage(self,message):
        """Logs 'message' somewhere...?"""
        pass
    def makeInvisible(self,level):
        """Makes this player invisible to 'level'"""
        pass
    def sendCommunication(self,pid, message):
        """Send a CCR communication to a player"""
        pass
    def setAwayStatus(self,awayFlag):
        """Set the away flag for CCRs"""
        pass
    def silencePlayer(self,pid, silenceFlag):
        """Set the silence player flag for a player"""
        pass
    def systemMessage(self):
        """Send a system wide CCR message
        NOTE(review): the generated signature takes no 'message' parameter even though
        the original doc mentioned one ('Params message') — confirm against the engine binding."""
        pass
    def toggleClipping(self):
        """Toggles clipping for this player"""
        pass
    def warpPlayerHere(self,pid):
        """warp the player to here"""
        pass
    def warpToPlayer(self,pid):
        """warp to where the player is"""
        pass
class ptCCRPlayerInfo:
    """CCR only: CCR player info struct"""
    def __init__(self):
        """Constructor — takes no arguments; the struct data is engine-supplied."""
        pass
class ptCamera:
    """Plasma camera class"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def controlKey(self,controlKey,activateFlag):
        """Send a control key to the camera as if it was hit by the user.
        This is for sending things like pan-up, pan-down, zoom-in, etc."""
        pass
    def disableFirstPersonOverride(self):
        """Does _not_ allow the user to override the camera to go to first person camera."""
        pass
    def enableFirstPersonOverride(self):
        """Allows the user to override the camera and go to a first person camera."""
        pass
    def getFOV(self):
        """Returns the current camera's FOV(h)"""
        pass
    def isSmootherCam(self):
        """Returns true if we are using the faster cams thing"""
        pass
    def isStayInFirstPerson(self):
        """Are we staying in first person?"""
        pass
    def isWalkAndVerticalPan(self):
        """Returns true if we are walking and chewing gum"""
        pass
    def restore(self,cameraKey):
        """Restores camera to saved one"""
        pass
    def save(self,cameraKey):
        """Saves the current camera and sets the camera to cameraKey"""
        pass
    def set(self,cameraKey,time,save):
        """DO NOT USE"""
        pass
    def setFOV(self,fov, time):
        """Sets the current cameras FOV (based on h)"""
        pass
    def setSmootherCam(self,state):
        """Set the faster cams thing"""
        pass
    def setStayInFirstPerson(self,state):
        """Set Stay In First Person Always"""
        pass
    def setWalkAndVerticalPan(self,state):
        """Set Walk and chew gum"""
        pass
    def undoFirstPerson(self):
        """If the user has overridden the camera to be in first person, this will take them out of first person.
        If the user didn't override the camera, then this will do nothing."""
        pass
class ptCluster:
    """Creates a new ptCluster

    NOTE(review): the generated stub had truncated parameter names
    ('ey' and 'isible'); restored to 'key' and 'visible' to match the
    sibling pt* classes (cf. ptDynamicMap.__init__(self,key=None)) and
    the setVisible docstring.
    """
    def __init__(self,key):
        """Constructor — 'key' references the cluster's scene object."""
        pass
    def setVisible(self,visible):
        """Shows or hides the cluster object"""
        pass
class ptColor:
    """Plasma color class"""
    def __init__(self,red=0, green=0, blue=0, alpha=0):
        """Constructor — component values default to 0."""
        pass
    def black(self):
        """Sets the color to be black
        Example: black = ptColor().black()"""
        pass
    def blue(self):
        """Sets the color to be blue
        Example: blue = ptColor().blue()"""
        pass
    def brown(self):
        """Sets the color to be brown
        Example: brown = ptColor().brown()"""
        pass
    def cyan(self):
        """Sets the color to be cyan
        Example: cyan = ptColor().cyan()"""
        pass
    def darkbrown(self):
        """Sets the color to be darkbrown
        Example: darkbrown = ptColor().darkbrown()"""
        pass
    def darkgreen(self):
        """Sets the color to be darkgreen
        Example: darkgreen = ptColor().darkgreen()"""
        pass
    def darkpurple(self):
        """Sets the color to be darkpurple
        Example: darkpurple = ptColor().darkpurple()"""
        pass
    def getAlpha(self):
        """Get the alpha blend component of the color"""
        pass
    def getBlue(self):
        """Get the blue component of the color"""
        pass
    def getGreen(self):
        """Get the green component of the color"""
        pass
    def getRed(self):
        """Get the red component of the color"""
        pass
    def gray(self):
        """Sets the color to be gray
        Example: gray = ptColor().gray()"""
        pass
    def green(self):
        """Sets the color to be green
        Example: green = ptColor().green()"""
        pass
    def magenta(self):
        """Sets the color to be magenta
        Example: magenta = ptColor().magenta()"""
        pass
    def maroon(self):
        """Sets the color to be maroon
        Example: maroon = ptColor().maroon()"""
        pass
    def navyblue(self):
        """Sets the color to be navyblue
        Example: navyblue = ptColor().navyblue()"""
        pass
    def orange(self):
        """Sets the color to be orange
        Example: orange = ptColor().orange()"""
        pass
    def pink(self):
        """Sets the color to be pink
        Example: pink = ptColor().pink()"""
        pass
    def red(self):
        """Sets the color to be red
        Example: red = ptColor().red()"""
        pass
    def setAlpha(self,alpha):
        """Set the alpha blend component of the color. 0.0 to 1.0"""
        pass
    def setBlue(self,blue):
        """Set the blue component of the color. 0.0 to 1.0"""
        pass
    def setGreen(self,green):
        """Set the green component of the color. 0.0 to 1.0"""
        pass
    def setRed(self,red):
        """Set the red component of the color. 0.0 to 1.0"""
        pass
    def slateblue(self):
        """Sets the color to be slateblue
        Example: slateblue = ptColor().slateblue()"""
        pass
    def steelblue(self):
        """Sets the color to be steelblue
        Example: steelblue = ptColor().steelblue()"""
        pass
    def tan(self):
        """Sets the color to be tan
        Example: tan = ptColor().tan()"""
        pass
    def white(self):
        """Sets the color to be white
        Example: white = ptColor().white()"""
        pass
    def yellow(self):
        """Sets the color to be yellow
        Example: yellow = ptColor().yellow()"""
        pass
class ptCritterBrain:
    """Object to manipulate critter brains"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def addBehavior(self,animName, behaviorName, loop = 1, randomStartPos = 1, fadeInLen = 2.0, fadeOutLen = 2.0):
        """Adds a new animation to the brain as a behavior with the specified name and parameters. If multiple animations are assigned to the same behavior, they will be randomly picked from when started."""
        pass
    def addReceiver(self,key):
        """Tells the brain that the specified key wants AI messages"""
        pass
    def animationName(self,behavior):
        """Returns the animation name associated with the specified integral behavior."""
        pass
    def atGoal(self):
        """Are we currently at our final destination?"""
        pass
    def avoidingAvatars(self):
        """Are we currently avoiding avatars while pathfinding?"""
        pass
    def behaviorName(self,behavior):
        """Returns the behavior name associated with the specified integral behavior."""
        pass
    def canHearAvatar(self,avatarID):
        """Returns whether this brain can hear the avatar with the specified id."""
        pass
    def canSeeAvatar(self,avatarID):
        """Returns whether this brain can see the avatar with the specified id."""
        pass
    def curBehavior(self):
        """Returns the current integral behavior the brain is running."""
        pass
    def currentGoal(self):
        """Returns the current ptPoint that the brain is running towards."""
        pass
    def getHearingDistance(self):
        """Returns how far away the brain can hear."""
        pass
    def getSceneObject(self):
        """Returns the ptSceneObject this brain controls."""
        pass
    def getSightCone(self):
        """Returns the width of the brain's field of view in radians."""
        pass
    def getSightDistance(self):
        """Returns how far the brain can see."""
        pass
    def getStopDistance(self):
        """Returns how far away from the goal we could be and still be considered there."""
        pass
    def goToGoal(self,newGoal, avoidingAvatars = 0):
        """Tells the brain to start running towards the specified location, avoiding avatars it can see or hear if told to."""
        pass
    def idleBehaviorName(self):
        """Returns the name of the brain's idle behavior."""
        pass
    def nextBehavior(self):
        """Returns the behavior the brain will be switching to next frame. (-1 if no change)"""
        pass
    def playersICanHear(self):
        """Returns a list of player ids which this brain can hear."""
        pass
    def playersICanSee(self):
        """Returns a list of player ids which this brain can see."""
        pass
    def removeReceiver(self,key):
        """Tells the brain that the specified key no longer wants AI messages"""
        pass
    def runBehaviorName(self):
        """Returns the name of the brain's run behavior."""
        pass
    def runningBehavior(self,behaviorName):
        """Returns true if the named behavior is running."""
        pass
    def setHearingDistance(self,dist):
        """Set how far away the brain can hear (360 degree field of hearing)."""
        pass
    def setSightCone(self,radians):
        """Set how wide the brain's field of view is in radians. Note that it is the total angle of the cone, half on one side of the brain's line of sight, half on the other."""
        pass
    def setSightDistance(self,dist):
        """Set how far away the brain can see."""
        pass
    def setStopDistance(self,dist):
        """Set how far away from the goal we should be when we are considered there and stop running."""
        pass
    def startBehavior(self,behaviorName, fade = 1):
        """Starts playing the named behavior. If fade is true, it will fade out the previous behavior and fade in the new one. If false, they will immediately switch."""
        pass
    def vectorToPlayer(self,avatarID):
        """Returns the vector between us and the specified player."""
        pass
class ptDniCoordinates:
    """Constructor for a D'Ni coordinate"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def fromPoint(self,pt):
        """Update these coordinates with the specified ptPoint3"""
        pass
    def getHSpans(self):
        """Returns the HSpans component of the coordinate"""
        pass
    def getTorans(self):
        """Returns the Torans component of the coordinate"""
        pass
    def getVSpans(self):
        """Returns the VSpans component of the coordinate"""
        pass
    def update(self):
        """Update these coordinates with the players current position"""
        pass
class ptDniInfoSource:
    """DO NOT USE"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def getAgeCoords(self):
        """Current coords of the player in current age as a ptDniCoordinates"""
        pass
    def getAgeGuid(self):
        """Unique identifier for this age instance"""
        pass
    def getAgeName(self):
        """Name of current age"""
        pass
    def getAgeTime(self):
        """Current time in current age (tbd)"""
        pass
class ptDraw:
    """Plasma Draw class"""
    def __init__(self):
        """Constructor — takes no arguments."""
        pass
    def disable(self):
        """Disables the draw on the sceneobject attached
        In other words, makes it invisible"""
        pass
    def enable(self,state=1):
        """Sets the draw enable for the sceneobject attached"""
        pass
    def netForce(self,forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
        pass
class ptDynamicMap:
"""Creates a ptDynamicMap object"""
def __init__(self,key=None):
"""None"""
pass
def addKey(self,key):
"""Add a receiver... in other words a DynamicMap"""
pass
def calcTextExtents(self,text):
"""Calculates the extent of the specified text, returns it as a (width, height) tuple"""
pass
def clearKeys(self):
"""Clears the receiver list"""
pass
def clearToColor(self,color):
"""Clear the DynamicMap to the specified color
- 'color' is a ptColor object"""
pass
def drawImage(self,x,y,image,respectAlphaFlag):
"""Draws a ptImage object on the dynamicTextmap starting at the location x,y"""
pass
def drawImageClipped(self,x,y,image,cx,cy,cw,ch,respectAlphaFlag):
"""Draws a ptImage object clipped to cx,cy with cw(width),ch(height)"""
pass
def drawText(self,x,y,text):
"""Draw text at a specified location
- x,y is the point to start drawing the text
- 'text' is a string of the text to be drawn"""
pass
def fillRect(self,left,top,right,bottom,color):
"""Fill in the specified rectangle with a color
- left,top,right,bottom define the rectangle
- 'color' is a ptColor object"""
pass
def flush(self):
"""Flush all the commands that were issued since the last flush()"""
pass
def frameRect(self,left,top,right,bottom,color):
"""Frame a rectangle with a specified color
- left,top,right,bottom define the rectangle
- 'color' is a ptColor object"""
pass
def getHeight(self):
"""Returns the height of the dynamicTextmap"""
pass
def getImage(self):
"""Returns a pyImage associated with the dynamicTextmap"""
pass
def getWidth(self):
"""Returns the width of the dynamicTextmap"""
pass
def netForce(self,forceFlag):
"""Specify whether this object needs to use messages that are forced to the network
- This is to be used if your Python program is running on only one client
Such as a game master, only running on the client that owns a particular object
This only applies when NetPropagate is set to true"""
pass
def netPropagate(self,propagateFlag):
"""Specify whether this object needs to use messages that are sent on the network
- The default is for this to be false."""
pass
def purgeImage(self):
"""Purge the DynamicTextMap images"""
pass
def sender(self,sender):
"""Set the sender of the message being sent to the DynamicMap"""
pass
def setClipping(self,clipLeft,clipTop,clipRight,clipBottom):
"""Sets the clipping rectangle
- All drawtext will be clipped to this until the
unsetClipping() is called"""
pass
def setFont(self,facename,size):
"""Set the font of the text to be written
- 'facename' is a string with the name of the font
- 'size' is the point size of the font to use"""
pass
def setJustify(self,justify):
"""Sets the justification of the text. (justify is a PtJustify)"""
pass
def setLineSpacing(self,spacing):
"""Sets the line spacing (in pixels)"""
pass
def setTextColor(self,color, blockRGB=0):
"""Set the color of the text to be written
- 'color' is a ptColor object
- 'blockRGB' must be true if you're trying to render onto a transparent or semi-transparent color"""
pass
    def setWrapping(self,wrapWidth,wrapHeight):
        """Set where text will be wrapped horizontally and vertically.
        - All drawtext commands will be wrapped until
        unsetWrapping() is called."""
        pass
    def unsetClipping(self):
        """Stop clipping text (undoes setClipping())."""
        pass
    def unsetWrapping(self):
        """Stop wrapping text (undoes setWrapping())."""
        pass
class ptGameScore:
    """A Plasma game score, managed on the game server."""
    def __init__(self):
        """Initialize the score object."""
        pass
    def addPoints(self, points, key=None):
        """Add 'points' to this score."""
        pass
    @staticmethod
    def createAgeScore(scoreName, type, points=0, key=None):
        """Create a new score associated with the current age."""
        pass
    @staticmethod
    def createGlobalScore(scoreName, type, points=0, key=None):
        """Create a new global score."""
        pass
    @staticmethod
    def createPlayerScore(scoreName, type, points=0, key=None):
        """Create a new score associated with the current player."""
        pass
    @staticmethod
    def createScore(ownerID, scoreName, type, points=0, key=None):
        """Create a new score for an arbitrary owner."""
        pass
    @staticmethod
    def findAgeScores(scoreName, key):
        """Find matching scores for the current age."""
        pass
    @staticmethod
    def findAgeHighScores(name, maxScores, key):
        """Find the highest matching scores among the current age's owners."""
        pass
    @staticmethod
    def findGlobalScores(scoreName, key):
        """Find matching global scores."""
        pass
    @staticmethod
    def findGlobalHighScores(name, maxScores, key):
        """Find the highest matching global scores."""
        pass
    @staticmethod
    def findPlayerScores(scoreName, key):
        """Find matching scores for the current player."""
        pass
    @staticmethod
    def findScores(ownerID, scoreName, key):
        """Find matching scores for an arbitrary owner."""
        pass
    def getGameType(self):
        """Return this score's game type."""
        pass
    def getName(self):
        """Return this score's name."""
        pass
    def getOwnerID(self):
        """Return the ID of this score's owner."""
        pass
    def getPoints(self):
        """Return the number of points held by this score."""
        pass
    def remove(self):
        """Delete this score from the server."""
        pass
    def setPoints(self):
        """Reset the point value of this score.
        Not for adding/removing points -- use only to reset the value!"""
        pass
    def transferPoints(self, dest, points=0, key=None):
        """Move points from this score to the score 'dest'."""
        pass
class ptGameScoreMsg:
    """Base class for game-score operation callback messages."""
    def __init__(self):
        """Initialize the message."""
        pass
class ptGameScoreListMsg(ptGameScoreMsg):
    """Game-score message carrying scores found on the server."""
    def __init__(self):
        """Initialize the message."""
        pass
    def getName(self):
        """Return the template score name."""
        pass
    def getOwnerID(self):
        """Return the template score owner ID."""
        pass
    def getScores(self):
        """Return the list of scores the server found."""
        pass
class ptGameScoreTransferMsg(ptGameScoreMsg):
    """Game-score message reporting a point transfer between scores."""
    def __init__(self):
        """Initialize the message."""
        pass
    def getDestination(self):
        """Return the score that the points were transferred to."""
        pass
    def getSource(self):
        """Return the score that the points were transferred from."""
        pass
class ptGameScoreUpdateMsg(ptGameScoreMsg):
    """Game-score message reporting a score update operation."""
    def __init__(self):
        """Initialize the message."""
        pass
    def getScore(self):
        """Return the updated game score."""
        pass
class ptGUIControl:
    """Base wrapper for all Plasma GUI controls."""
    def __init__(self, controlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlButton(ptGUIControl):
    """GUI control wrapper for a button."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getNotifyType(self):
        """Return this button's notify type (see PtButtonNotifyTypes)."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isButtonDown(self):
        """Return 1 if the button is currently down, otherwise 0."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setNotifyType(self, kind):
        """Set this button's notify type (see PtButtonNotifyTypes)."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlCheckBox(ptGUIControl):
    """GUI control wrapper for a checkbox."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isChecked(self):
        """Return 1 if this checkbox is checked, otherwise 0."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setChecked(self, checkedState):
        """Check or uncheck this checkbox according to 'checkedState'."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlClickMap(ptGUIControl):
    """GUI control wrapper for a click map."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getLastMouseDragPoint(self):
        """Return the last point the mouse was dragged to."""
        pass
    def getLastMousePoint(self):
        """Return the last point the mouse was at."""
        pass
    def getLastMouseUpPoint(self):
        """Return the last point the mouse button was released at."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlDragBar(ptGUIControl):
    """GUI control wrapper for a drag bar."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def anchor(self):
        """Prevent the user from moving this dragbar. Drop anchor!"""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isAnchored(self):
        """Return 1 if this dragbar is anchored, otherwise 0."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
    def unanchor(self):
        """Allow the user to drag this control around the screen. Raise anchor."""
        pass
class ptGUIControlDraggable(ptGUIControl):
    """GUI control wrapper for a draggable element."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getLastMousePoint(self):
        """Return the last point this control was dragged to."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def stopDragging(self, cancelFlag):
        """UNKNOWN"""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlDynamicText(ptGUIControl):
    """GUI control wrapper for dynamic text."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getMap(self, index):
        """Return the ptDynamicText map at 'index' attached to this control.
        Raises KeyError if there is no map at 'index'."""
        pass
    def getNumMaps(self):
        """Return the number of attached ptDynamicText maps."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlEditBox(ptGUIControl):
    """GUI control wrapper for an edit box."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def clearString(self):
        """Empty the edit box."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def end(self):
        """Move the cursor to just after the last character."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getLastKeyCaptured(self):
        """Return the last captured key."""
        pass
    def getLastModifiersCaptured(self):
        """Return the last captured modifier flags."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getString(self):
        """Return the string that the user typed in."""
        pass
    def getStringW(self):
        """Unicode version of getString()."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def home(self):
        """Move the cursor to just before the first character."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setChatMode(self, state):
        """Turn chat mode on or off for this control."""
        pass
    def setColor(self, foreColor, backColor):
        """Set the foreground and background colors of the edit box."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setLastKeyCapture(self, key, modifiers):
        """Record the last captured key and modifiers."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setSelectionColor(self, foreColor, backColor):
        """Set the selection colors of the edit box."""
        pass
    def setSpecialCaptureKeyMode(self, state):
        """Turn key-capture mode on or off for this control."""
        pass
    def setString(self, text):
        """Pre-load the edit box with a string."""
        pass
    def setStringSize(self, size):
        """Set the maximum length of the string the user can input."""
        pass
    def setStringW(self, text):
        """Unicode version of setString()."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
    def wasEscaped(self):
        """Return 1 if the edit box was escaped, otherwise 0."""
        pass
class ptGUIControlValue(ptGUIControl):
    """GUI control wrapper for value controls (knobs, spinners)."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getMax(self):
        """Return the maximum value of the control."""
        pass
    def getMin(self):
        """Return the minimum value of the control."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getStep(self):
        """Return the step increment of the control."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def getValue(self):
        """Return the current value of the control."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setRange(self, minimum, maximum):
        """Set the minimum and maximum range of the control."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setStep(self, step):
        """Set the step increment of the control."""
        pass
    def setValue(self, value):
        """Set the current value of the control."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlKnob(ptGUIControlValue):
    """GUI control wrapper for a knob."""
    def __init__(self, ctrlKey):
        """Construct the wrapper from the control's key."""
        pass
    def disable(self):
        """Make this GUI control non-interactive."""
        pass
    def enable(self, flag=1):
        """Enable this GUI control."""
        pass
    def focus(self):
        """Give input focus to this GUI control."""
        pass
    def getBackColor(self):
        """Return the background color."""
        pass
    def getBackSelectColor(self):
        """Return the background selection color."""
        pass
    def getFontSize(self):
        """Return the font size."""
        pass
    def getForeColor(self):
        """Return the foreground color."""
        pass
    def getKey(self):
        """Return this GUI control's ptKey."""
        pass
    def getMax(self):
        """Return the maximum value of the control."""
        pass
    def getMin(self):
        """Return the minimum value of the control."""
        pass
    def getObjectCenter(self):
        """Return the center of the control object as a ptPoint3."""
        pass
    def getOwnerDialog(self):
        """Return the owning dialog as a ptGUIDialog."""
        pass
    def getSelectColor(self):
        """Return the selection color."""
        pass
    def getStep(self):
        """Return the step increment of the control."""
        pass
    def getTagID(self):
        """Return this GUI control's tag ID."""
        pass
    def getValue(self):
        """Return the current value of the control."""
        pass
    def hide(self):
        """Make this GUI control invisible."""
        pass
    def isEnabled(self):
        """Report whether this GUI control is enabled."""
        pass
    def isFocused(self):
        """Report whether this GUI control has input focus."""
        pass
    def isInteresting(self):
        """Report whether this GUI control is currently interesting."""
        pass
    def isVisible(self):
        """Report whether this GUI control is visible."""
        pass
    def refresh(self):
        """UNKNOWN"""
        pass
    def setBackColor(self, r, g, b, a):
        """Set the background color from r, g, b, a components."""
        pass
    def setBackSelectColor(self, r, g, b, a):
        """Set the background selection color from r, g, b, a components."""
        pass
    def setFocus(self, state):
        """Set the focus state of this GUI control."""
        pass
    def setFontSize(self, fontSize):
        """Set the font size."""
        pass
    def setForeColor(self, r, g, b, a):
        """Set the foreground color from r, g, b, a components."""
        pass
    def setNotifyOnInteresting(self, state):
        """Set whether this control sends 'interesting' events."""
        pass
    def setObjectCenter(self, point):
        """Move the control object's center to 'point'."""
        pass
    def setRange(self, minimum, maximum):
        """Set the minimum and maximum range of the control."""
        pass
    def setSelectColor(self, r, g, b, a):
        """Set the selection color from r, g, b, a components."""
        pass
    def setStep(self, step):
        """Set the step increment of the control."""
        pass
    def setValue(self, value):
        """Set the current value of the control."""
        pass
    def setVisible(self, state):
        """Set the visibility state of this GUI control."""
        pass
    def show(self):
        """Make this GUI control visible."""
        pass
    def unFocus(self):
        """Release input focus from this GUI control."""
        pass
class ptGUIControlListBox(ptGUIControl):
"""Plasma GUI Control List Box class"""
def __init__(self,ctrlKey):
"""None"""
pass
def add2StringsWithColors(self,text1,color1,text2,color2,respectAlpha):
"""Doesn't work right - DONT USE"""
pass
def addBranch(self,name,initiallyOpen):
"""UNKNOWN"""
pass
def addBranchW(self,name,initiallyOpen):
"""Unicode version of addBranch"""
pass
def addImage(self,image,respectAlphaFlag):
"""Appends an image item to the listbox"""
pass
def addImageAndSwatchesInBox(self,image,x,y,width,height,respectAlpha,primary,secondary):
"""Add the image and color swatches to the list"""
pass
def addImageInBox(self,image,x,y,width,height,respectAlpha):
"""Appends an image item to the listbox, centering within the box dimension."""
pass
def addSelection(self,item):
"""Adds item to selection list"""
pass
def addString(self,text):
"""Appends a list item 'text' to the listbox."""
pass
def addStringInBox(self,text,min_width,min_height):
"""Adds a text list item that has a minimum width and height"""
pass
def addStringW(self,text):
"""Unicode version of addString."""
pass
def addStringWithColor(self,text,color,inheritAlpha):
"""Adds a colored string to the list box"""
pass
def addStringWithColorWithSize(self,text,color,inheritAlpha,fontsize):
"""Adds a text list item with a color and different font size"""
pass
def allowNoSelect(self):
"""Allows the listbox to have no selection"""
pass
def clearAllElements(self):
"""Removes all the items from the listbox, making it empty."""
pass
def clickable(self):
"""Sets this listbox to be clickable by the user."""
pass
def closeBranch(self):
"""UNKNOWN"""
pass
def disable(self):
"""Disables this GUI control"""
pass
def disallowNoSelect(self):
"""The listbox must always have a selection"""
pass
def enable(self,flag=1):
"""Enables this GUI control"""
pass
def findString(self,text):
"""Finds and returns the index of the item that matches 'text' in the listbox."""
pass
def findStringW(self,text):
"""Unicode version of findString."""
pass
def focus(self):
"""Gets focus for this GUI control"""
pass
def getBackColor(self):
"""Returns the background color"""
pass
def getBackSelectColor(self):
"""Returns the background selection color"""
pass
def getBranchList(self):
"""get a list of branches in this list (index,isShowingChildren)"""
pass
def getElement(self,index):
"""Get the string of the item at 'index' in the listbox."""
pass
def getElementW(self,index):
"""Unicode version of getElement."""
pass
def getFontSize(self):
"""Returns the font size"""
pass
def getForeColor(self):
"""Returns the foreground color"""
pass
def getKey(self):
"""Returns the ptKey for this GUI control"""
pass
def getNumElements(self):
"""Return the number of items in the listbox."""
pass
def getObjectCenter(self):
"""Returns ptPoint3 of the center of the GUI control object"""
pass
def getOwnerDialog(self):
"""Returns a ptGUIDialog of the dialog that owns this GUI control"""
pass
def getScrollPos(self):
"""Returns the current scroll position in the listbox."""
pass
def getScrollRange(self):
"""Returns the max scroll position"""
pass
def getSelectColor(self):
"""Returns the selection color"""
pass
def getSelection(self):
"""Returns the currently selected list item in the listbox."""
pass
def getSelectionList(self):
"""Returns the current selection list"""
pass
def getTagID(self):
"""Returns the Tag ID for this GUI control"""
pass
def hide(self):
"""Hides this GUI control"""
pass
def isEnabled(self):
"""Returns whether this GUI control is enabled"""
pass
def isFocused(self):
"""Returns whether this GUI control has focus"""
pass
def isInteresting(self):
"""Returns whether this GUI control is interesting at the moment"""
pass
def isVisible(self):
"""Returns whether this GUI control is visible"""
pass
def lock(self):
"""Locks the updates to a listbox, so a number of operations can be performed
NOTE: an unlock() call must be made before the next lock() can be."""
pass
def refresh(self):
"""Refresh the display of the listbox (after updating contents)."""
pass
def removeElement(self,index):
"""Removes element at 'index' in the listbox."""
pass
def removeSelection(self,item):
"""Removes item from selection list"""
pass
def scrollToBegin(self):
"""Scrolls the listbox to the beginning of the list"""
pass
def scrollToEnd(self):
"""Scrolls the listbox to the end of the list"""
pass
def setBackColor(self,r,g,b,a):
"""Sets the background color"""
pass
def setBackSelectColor(self,r,g,b,a):
"""Sets the selection background color"""
pass
def setElement(self,index,text):
"""Set a particular item in the listbox to a string."""
pass
def setElementW(self,index,text):
"""Unicode version of setElement."""
pass
def setFocus(self,state):
"""Sets the state of the focus of this GUI control"""
pass
def setFontSize(self,fontSize):
"""Sets the font size"""
pass
def setForeColor(self,r,g,b,a):
"""Sets the foreground color"""
pass
def setGlobalSwatchEdgeOffset(self,offset):
"""Sets the edge offset of the color swatches"""
pass
def setGlobalSwatchSize(self,size):
"""Sets the size of the color swatches"""
pass
def setNotifyOnInteresting(self,state):
"""Sets whether this control should send interesting events or not"""
pass
def setObjectCenter(self,point):
"""Sets the GUI controls object center to 'point'"""
pass
def setScrollPos(self,pos):
"""Sets the scroll position of the listbox to 'pos'"""
pass
def setSelectColor(self,r,g,b,a):
"""Sets the selection color"""
pass
def setSelection(self,selectionIndex):
"""Sets the current selection in the listbox."""
pass
def setStringJustify(self,index,justify):
"""Sets the text justification"""
pass
def setVisible(self,state):
"""Sets the state of visibility of this GUI control"""
pass
def show(self):
"""Shows this GUI control"""
pass
def unFocus(self):
"""Releases focus for this GUI control"""
pass
def unclickable(self):
"""Makes this listbox not clickable by the user.
Useful when just displaying a list that is not really selectable."""
pass
def unlock(self):
"""Unlocks updates to a listbox and does any saved up changes"""
pass
class ptGUIControlMultiLineEdit(ptGUIControl):
    """Plasma GUI Control Multi-line edit class"""
    def __init__(self,ctrlKey):
        """None"""
        pass
    def beginUpdate(self):
        """Signifies that the control will be updated heavily starting now, so suppress all redraws"""
        # FIX: this stub was missing its 'pass' body, inconsistent with every
        # other glue stub in this file (the bare docstring was still valid Python,
        # but the omission looks like a generation error).
        pass
    def clearBuffer(self):
        """Clears all text from the multi-line edit control."""
        pass
    def clickable(self):
        """Sets this multi-line edit control to be clickable by the user."""
        pass
    def deleteChar(self):
        """Deletes a character at the current cursor position."""
        pass
    def deleteLinesFromTop(self,numLines):
        """Deletes the specified number of lines from the top of the text buffer"""
        pass
    def disable(self):
        """Disables this GUI control"""
        pass
    def disableScrollControl(self):
        """Disables the scroll control if there is one"""
        pass
    def enable(self,flag=1):
        """Enables this GUI control"""
        pass
    def enableScrollControl(self):
        """Enables the scroll control if there is one"""
        pass
    def endUpdate(self, redraw=True):
        """Signifies that the massive updates are over. We can now redraw."""
        pass
    def focus(self):
        """Gets focus for this GUI control"""
        pass
    def getBackColor(self):
        """Returns the background color"""
        pass
    def getBackSelectColor(self):
        """Returns the background selection color"""
        pass
    def getBufferLimit(self):
        """Returns the current buffer limit"""
        pass
    def getBufferSize(self):
        """Returns the size of the buffer"""
        pass
    def getEncodedBuffer(self):
        """Returns the encoded buffer in a python buffer object. Do NOT use result with setEncodedBufferW."""
        pass
    def getEncodedBufferW(self):
        """Unicode version of getEncodedBuffer. Do NOT use result with setEncodedBuffer."""
        pass
    def getFontSize(self):
        """Returns the current default font size"""
        pass
    def getForeColor(self):
        """Returns the foreground color"""
        pass
    def getKey(self):
        """Returns the ptKey for this GUI control"""
        pass
    def getObjectCenter(self):
        """Returns ptPoint3 of the center of the GUI control object"""
        pass
    def getOwnerDialog(self):
        """Returns a ptGUIDialog of the dialog that owns this GUI control"""
        pass
    def getScrollPosition(self):
        """Returns what line is the top line."""
        pass
    def getSelectColor(self):
        """Returns the selection color"""
        pass
    def getString(self):
        """Gets the string of the edit control."""
        pass
    def getStringW(self):
        """Unicode version of getString."""
        pass
    def getTagID(self):
        """Returns the Tag ID for this GUI control"""
        pass
    def hide(self):
        """Hides this GUI control"""
        pass
    def insertChar(self,c):
        """Inserts a character at the current cursor position."""
        pass
    def insertCharW(self,c):
        """Unicode version of insertChar."""
        pass
    def insertColor(self,color):
        """Inserts an encoded color object at the current cursor position.
        'color' is a ptColor object."""
        pass
    def insertString(self,string):
        """Inserts a string at the current cursor position."""
        pass
    def insertStringW(self,string):
        """Unicode version of insertString"""
        pass
    def insertStyle(self,style):
        """Inserts an encoded font style at the current cursor position."""
        pass
    def isAtEnd(self):
        """Returns whether the cursor is at the end."""
        pass
    def isEnabled(self):
        """Returns whether this GUI control is enabled"""
        pass
    def isFocused(self):
        """Returns whether this GUI control has focus"""
        pass
    def isInteresting(self):
        """Returns whether this GUI control is interesting at the moment"""
        pass
    def isLocked(self):
        """Is the multi-line edit control locked? Returns 1 if true otherwise returns 0"""
        pass
    def isVisible(self):
        """Returns whether this GUI control is visible"""
        pass
    def lock(self):
        """Locks the multi-line edit control so the user cannot make changes."""
        pass
    def moveCursor(self,direction):
        """Move the cursor in the specified direction (see PtGUIMultiLineDirection)"""
        pass
    def refresh(self):
        """UNKNOWN (engine-implemented; presumably redraws the control — confirm against engine source)"""
        pass
    def setBackColor(self,r,g,b,a):
        """Sets the background color"""
        pass
    def setBackSelectColor(self,r,g,b,a):
        """Sets the selection background color"""
        pass
    def setBufferLimit(self,bufferLimit):
        """Sets the buffer max for the editbox"""
        pass
    def setEncodedBuffer(self,bufferObject):
        """Sets the edit control to the encoded buffer in the python buffer object. Do NOT use with a result from getEncodedBufferW."""
        pass
    def setEncodedBufferW(self,bufferObject):
        """Unicode version of setEncodedBuffer. Do NOT use with a result from getEncodedBuffer."""
        pass
    def setFocus(self,state):
        """Sets the state of the focus of this GUI control"""
        pass
    def setFontSize(self,fontSize):
        """Sets the default font size for the edit control"""
        pass
    def setForeColor(self,r,g,b,a):
        """Sets the foreground color"""
        pass
    def setNotifyOnInteresting(self,state):
        """Sets whether this control should send interesting events or not"""
        pass
    def setObjectCenter(self,point):
        """Sets the GUI controls object center to 'point'"""
        pass
    def setScrollPosition(self,topLine):
        """Sets which line is the top line."""
        pass
    def setSelectColor(self,r,g,b,a):
        """Sets the selection color"""
        pass
    def setString(self,asciiText):
        """Sets the multi-line edit control string."""
        pass
    def setStringW(self,unicodeText):
        """Unicode version of setString."""
        pass
    def setVisible(self,state):
        """Sets the state of visibility of this GUI control"""
        pass
    def show(self):
        """Shows this GUI control"""
        pass
    def unFocus(self):
        """Releases focus for this GUI control"""
        pass
    def unclickable(self):
        """Makes this multi-line edit control not clickable by the user.
        Useful when just displaying text that is not really editable."""
        pass
    def unlock(self):
        """Unlocks the multi-line edit control so that the user can make changes."""
        pass
class ptGUIControlProgress(ptGUIControlValue):
"""Plasma GUI control for progress bar"""
def __init__(self,ctrlKey):
"""None"""
pass
def animateToPercent(self,percent):
"""Sets the value of the control and animates to that point."""
pass
def disable(self):
"""Disables this GUI control"""
pass
def enable(self,flag=1):
"""Enables this GUI control"""
pass
def focus(self):
"""Gets focus for this GUI control"""
pass
def getBackColor(self):
"""Returns the background color"""
pass
def getBackSelectColor(self):
"""Returns the background selection color"""
pass
def getFontSize(self):
"""Returns the font size"""
pass
def getForeColor(self):
"""Returns the foreground color"""
pass
def getKey(self):
"""Returns the ptKey for this GUI control"""
pass
def getMax(self):
"""Returns the maximum of the control."""
pass
def getMin(self):
"""Returns the minimum of the control."""
pass
def getObjectCenter(self):
"""Returns ptPoint3 of the center of the GUI control object"""
pass
def getOwnerDialog(self):
"""Returns a ptGUIDialog of the dialog that owns this GUI control"""
pass
def getSelectColor(self):
"""Returns the selection color"""
pass
def getStep(self):
"""Returns the step increment of the control."""
pass
def getTagID(self):
"""Returns the Tag ID for this GUI control"""
pass
def getValue(self):
"""Returns the current value of the control."""
pass
def hide(self):
"""Hides this GUI control"""
pass
def isEnabled(self):
"""Returns whether this GUI control is enabled"""
pass
def isFocused(self):
"""Returns whether this GUI control has focus"""
pass
def isInteresting(self):
"""Returns whether this GUI control is interesting at the moment"""
pass
def isVisible(self):
"""Returns whether this GUI control is visible"""
pass
def refresh(self):
"""UNKNOWN"""
pass
def setBackColor(self,r,g,b,a):
"""Sets the background color"""
pass
def setBackSelectColor(self,r,g,b,a):
"""Sets the selection background color"""
pass
def setFocus(self,state):
"""Sets the state of the focus of this GUI control"""
pass
def setFontSize(self,fontSize):
"""Sets the font size"""
pass
def setForeColor(self,r,g,b,a):
"""Sets the foreground color"""
pass
def setNotifyOnInteresting(self,state):
"""Sets whether this control should send interesting events or not"""
pass
def setObjectCenter(self,point):
"""Sets the GUI controls object center to 'point'"""
pass
def setRange(self,minimum,maximum):
"""Sets the minimum and maximum range of the control."""
pass
def setSelectColor(self,r,g,b,a):
"""Sets the selection color"""
pass
def setStep(self,step):
"""Sets the step increment of the control."""
pass
def setValue(self,value):
"""Sets the current value of the control."""
pass
def setVisible(self,state):
"""Sets the state of visibility of this GUI control"""
pass
def show(self):
"""Shows this GUI control"""
pass
def unFocus(self):
"""Releases focus for this GUI control"""
pass
class ptGUIControlRadioGroup(ptGUIControl):
"""Plasma GUI Control Radio Group class"""
def __init__(self,ctrlKey):
"""None"""
pass
def disable(self):
"""Disables this GUI control"""
pass
def enable(self,flag=1):
"""Enables this GUI control"""
pass
def focus(self):
"""Gets focus for this GUI control"""
pass
def getBackColor(self):
"""Returns the background color"""
pass
def getBackSelectColor(self):
"""Returns the background selection color"""
pass
def getFontSize(self):
"""Returns the font size"""
pass
def getForeColor(self):
"""Returns the foreground color"""
pass
def getKey(self):
"""Returns the ptKey for this GUI control"""
pass
def getObjectCenter(self):
"""Returns ptPoint3 of the center of the GUI control object"""
pass
def getOwnerDialog(self):
"""Returns a ptGUIDialog of the dialog that owns this GUI control"""
pass
def getSelectColor(self):
"""Returns the selection color"""
pass
def getTagID(self):
"""Returns the Tag ID for this GUI control"""
pass
def getValue(self):
"""Returns the current selection of the radio group."""
pass
def hide(self):
"""Hides this GUI control"""
pass
def isEnabled(self):
"""Returns whether this GUI control is enabled"""
pass
def isFocused(self):
"""Returns whether this GUI control has focus"""
pass
def isInteresting(self):
"""Returns whether this GUI control is interesting at the moment"""
pass
def isVisible(self):
"""Returns whether this GUI control is visible"""
pass
def refresh(self):
"""UNKNOWN"""
pass
def setBackColor(self,r,g,b,a):
"""Sets the background color"""
pass
def setBackSelectColor(self,r,g,b,a):
"""Sets the selection background color"""
pass
def setFocus(self,state):
"""Sets the state of the focus of this GUI control"""
pass
def setFontSize(self,fontSize):
"""Sets the font size"""
pass
def setForeColor(self,r,g,b,a):
"""Sets the foreground color"""
pass
def setNotifyOnInteresting(self,state):
"""Sets whether this control should send interesting events or not"""
pass
def setObjectCenter(self,point):
"""Sets the GUI controls object center to 'point'"""
pass
def setSelectColor(self,r,g,b,a):
"""Sets the selection color"""
pass
def setValue(self,value):
"""Sets the current selection to 'value'"""
pass
def setVisible(self,state):
"""Sets the state of visibility of this GUI control"""
pass
def show(self):
"""Shows this GUI control"""
pass
def unFocus(self):
"""Releases focus for this GUI control"""
pass
class ptGUIControlTextBox(ptGUIControl):
"""Plasma GUI Control Textbox class"""
def __init__(self,ctrlKey):
"""None"""
pass
def disable(self):
"""Disables this GUI control"""
pass
def enable(self,flag=1):
"""Enables this GUI control"""
pass
def focus(self):
"""Gets focus for this GUI control"""
pass
def getBackColor(self):
"""Returns the background color"""
pass
def getBackSelectColor(self):
"""Returns the background selection color"""
pass
def getFontSize(self):
"""Returns the font size"""
pass
def getForeColor(self):
"""Returns the current forecolor"""
pass
def getKey(self):
"""Returns the ptKey for this GUI control"""
pass
def getObjectCenter(self):
"""Returns ptPoint3 of the center of the GUI control object"""
pass
def getOwnerDialog(self):
"""Returns a ptGUIDialog of the dialog that owns this GUI control"""
pass
def getSelectColor(self):
"""Returns the selection color"""
pass
def getString(self):
"""Returns the string that the TextBox is set to (in case you forgot)"""
pass
def getStringJustify(self):
"""Returns current justify"""
pass
def getStringW(self):
"""Unicode version of getString"""
pass
def getTagID(self):
"""Returns the Tag ID for this GUI control"""
pass
def hide(self):
"""Hides this GUI control"""
pass
def isEnabled(self):
"""Returns whether this GUI control is enabled"""
pass
def isFocused(self):
"""Returns whether this GUI control has focus"""
pass
def isInteresting(self):
"""Returns whether this GUI control is interesting at the moment"""
pass
def isVisible(self):
"""Returns whether this GUI control is visible"""
pass
def refresh(self):
"""UNKNOWN"""
pass
def setBackColor(self,color):
"""Sets the text backcolor to 'color', which is a ptColor object."""
pass
def setBackSelectColor(self,r,g,b,a):
"""Sets the selection background color"""
pass
def setFocus(self,state):
"""Sets the state of the focus of this GUI control"""
pass
def setFontSize(self,size):
"""Don't use"""
pass
def setForeColor(self,color):
"""Sets the text forecolor to 'color', which is a ptColor object."""
pass
def setNotifyOnInteresting(self,state):
"""Sets whether this control should send interesting events or not"""
pass
def setObjectCenter(self,point):
"""Sets the GUI controls object center to 'point'"""
pass
def setSelectColor(self,r,g,b,a):
"""Sets the selection color"""
pass
def setString(self,text):
"""Sets the textbox string to 'text'"""
pass
def setStringJustify(self,justify):
"""Sets current justify"""
pass
def setStringW(self,text):
"""Unicode version of setString"""
pass
def setVisible(self,state):
"""Sets the state of visibility of this GUI control"""
pass
def show(self):
"""Shows this GUI control"""
pass
def unFocus(self):
"""Releases focus for this GUI control"""
pass
class ptGUIControlUpDownPair(ptGUIControlValue):
"""Plasma GUI control for up/down pair"""
def __init__(self,ctrlKey):
"""None"""
pass
def disable(self):
"""Disables this GUI control"""
pass
def enable(self,flag=1):
"""Enables this GUI control"""
pass
def focus(self):
"""Gets focus for this GUI control"""
pass
def getBackColor(self):
"""Returns the background color"""
pass
def getBackSelectColor(self):
"""Returns the background selection color"""
pass
def getFontSize(self):
"""Returns the font size"""
pass
def getForeColor(self):
"""Returns the foreground color"""
pass
def getKey(self):
"""Returns the ptKey for this GUI control"""
pass
def getMax(self):
"""Returns the maximum of the control."""
pass
def getMin(self):
"""Returns the minimum of the control."""
pass
def getObjectCenter(self):
"""Returns ptPoint3 of the center of the GUI control object"""
pass
def getOwnerDialog(self):
"""Returns a ptGUIDialog of the dialog that owns this GUI control"""
pass
def getSelectColor(self):
"""Returns the selection color"""
pass
def getStep(self):
"""Returns the step increment of the control."""
pass
def getTagID(self):
"""Returns the Tag ID for this GUI control"""
pass
def getValue(self):
"""Returns the current value of the control."""
pass
def hide(self):
"""Hides this GUI control"""
pass
def isEnabled(self):
"""Returns whether this GUI control is enabled"""
pass
def isFocused(self):
"""Returns whether this GUI control has focus"""
pass
def isInteresting(self):
"""Returns whether this GUI control is interesting at the moment"""
pass
def isVisible(self):
"""Returns whether this GUI control is visible"""
pass
def refresh(self):
"""UNKNOWN"""
pass
def setBackColor(self,r,g,b,a):
"""Sets the background color"""
pass
def setBackSelectColor(self,r,g,b,a):
"""Sets the selection background color"""
pass
def setFocus(self,state):
"""Sets the state of the focus of this GUI control"""
pass
def setFontSize(self,fontSize):
"""Sets the font size"""
pass
def setForeColor(self,r,g,b,a):
"""Sets the foreground color"""
pass
def setNotifyOnInteresting(self,state):
"""Sets whether this control should send interesting events or not"""
pass
def setObjectCenter(self,point):
"""Sets the GUI controls object center to 'point'"""
pass
def setRange(self,minimum,maximum):
"""Sets the minimum and maximum range of the control."""
pass
def setSelectColor(self,r,g,b,a):
"""Sets the selection color"""
pass
def setStep(self,step):
"""Sets the step increment of the control."""
pass
def setValue(self,value):
"""Sets the current value of the control."""
pass
def setVisible(self,state):
"""Sets the state of visibility of this GUI control"""
pass
def show(self):
"""Shows this GUI control"""
pass
def unFocus(self):
"""Releases focus for this GUI control"""
pass
class ptGUIDialog:
"""Plasma GUI dialog class"""
def __init__(self,dialogKey):
"""None"""
pass
def disable(self):
"""Disables this dialog"""
pass
def enable(self,enableFlag=1):
"""Enable this dialog"""
pass
def getBackColor(self):
"""Returns the back color as a ptColor object"""
pass
def getBackSelectColor(self):
"""Returns the select back color as a ptColor object"""
pass
def getControlFromIndex(self,index):
"""Returns the ptKey of the control with the specified index (not tag ID!)"""
pass
def getControlFromTag(self,tagID):
"""Returns the ptKey of the control with the specified tag ID"""
pass
def getFontSize(self):
"""Returns the font size"""
pass
def getForeColor(self):
"""Returns the fore color as a ptColor object"""
pass
def getKey(self):
"""Returns this dialog's ptKey"""
pass
def getName(self):
"""Returns the dialog's name"""
pass
def getNumControls(self):
"""Returns the number of controls in this dialog"""
pass
def getSelectColor(self):
"""Returns the select color as a ptColor object"""
pass
def getTagID(self):
"""Returns this dialog's tag ID"""
pass
def getVersion(self):
"""UNKNOWN"""
pass
def hide(self):
"""Hides the dialog"""
pass
def isEnabled(self):
"""Is this dialog currently enabled?"""
pass
def noFocus(self):
"""Makes sure no control has input focus"""
pass
def refreshAllControls(self):
"""Tells the dialog to redraw all its controls"""
pass
def setBackColor(self,red,green,blue,alpha):
"""Sets the back color, -1 means don't change"""
pass
def setBackSelectColor(self,red,green,blue,alpha):
"""Sets the select back color, -1 means don't change"""
pass
def setFocus(self,ctrlKey):
"""Sets the control that has input focus"""
pass
def setFontSize(self,fontSize):
"""Sets the font size"""
pass
def setForeColor(self,red,green,blue,alpha):
"""Sets the fore color, -1 means don't change"""
pass
def setSelectColor(self,red,green,blue,alpha):
"""Sets the select color, -1 means don't change"""
pass
def show(self):
"""Shows the dialog"""
pass
def showNoReset(self):
"""Show dialog without resetting clickables"""
pass
def updateAllBounds(self):
"""Tells the dialog to recompute all the bounds for its controls"""
pass
class ptGUIPopUpMenu:
    """Takes three different argument lists:
    gckey
    name,screenOriginX,screenOriginY
    name,parent,screenOriginX,screenOriginY"""
    def __init__(self,arg1,arg2=None,arg3=None,arg4=None):
        """None"""
        pass
    def addConsoleCmdItem(self,name,consoleCmd):
        """Adds a new item to the menu that fires a console command"""
        pass
    def addConsoleCmdItemW(self,name,consoleCmd):
        """Unicode version of addConsoleCmdItem"""
        pass
    def addNotifyItem(self,name):
        """Adds a new item to the menu"""
        pass
    def addNotifyItemW(self,name):
        """Unicode version of addNotifyItem"""
        pass
    def addSubMenuItem(self,name,subMenu):
        """Adds a submenu to this menu"""
        pass
    def addSubMenuItemW(self,name,subMenu):
        """Unicode version of addSubMenuItem"""
        pass
    def disable(self):
        """Disables this menu"""
        pass
    def enable(self,state=1):
        """Enables/disables this menu"""
        pass
    def getBackColor(self):
        """Returns the background color"""
        pass
    def getBackSelectColor(self):
        """Returns the background selection color"""
        pass
    def getForeColor(self):
        """Returns the foreground color"""
        pass
    def getKey(self):
        """Returns this menu's key"""
        pass
    def getName(self):
        """Returns this menu's name"""
        pass
    def getSelectColor(self):
        """Returns the selection color"""
        pass
    def getTagID(self):
        """Returns this menu's tag id"""
        pass
    def getVersion(self):
        """UNKNOWN (engine-implemented; presumably returns this menu's version — confirm against engine source)"""
        pass
    def hide(self):
        """Hides this menu"""
        pass
    def isEnabled(self):
        """Returns whether this menu is enabled or not"""
        pass
    def setBackColor(self,r,g,b,a):
        """Sets the background color"""
        pass
    def setBackSelectColor(self,r,g,b,a):
        """Sets the selection background color"""
        pass
    def setForeColor(self,r,g,b,a):
        """Sets the foreground color"""
        pass
    def setSelectColor(self,r,g,b,a):
        """Sets the selection color"""
        pass
    def show(self):
        """Shows this menu"""
        pass
class ptGUISkin:
    """Plasma GUI skin object (engine-implemented glue stub)."""

    def __init__(self, key):
        """Stub constructor; the engine supplies the real binding.
        'key' is the skin's engine key."""
        pass

    def getKey(self):
        """Returns this object's ptKey."""
        pass
class ptGrassShader:
    """Plasma Grass Shader class"""
    def __init__(self,key):
        """None"""
        pass
    def getWaveDirection(self,waveNum):
        """Gets the wave waveNum's direction as a tuple of x,y.
        waveNum must be between 0 and plGrassShaderMod::kNumWaves-1 (currently 3) inclusive"""
        pass
    def getWaveDistortion(self,waveNum):
        """Gets the wave waveNum's distortion as a tuple of x,y,z.
        waveNum must be between 0 and plGrassShaderMod::kNumWaves-1 (currently 3) inclusive"""
        pass
    def getWaveSpeed(self,waveNum):
        """Gets the wave waveNum's speed as a float.
        waveNum must be between 0 and plGrassShaderMod::kNumWaves-1 (currently 3) inclusive"""
        pass
    def resetWaves(self):
        """Resets wave data to 0"""
        pass
    def setWaveDirection(self,waveNum, direction):
        """Sets the wave waveNum's direction as a tuple of x,y.
        waveNum must be between 0 and plGrassShaderMod::kNumWaves-1 (currently 3) inclusive"""
        pass
    def setWaveDistortion(self,waveNum, distortion):
        """Sets the wave waveNum's distortion as a tuple of x,y,z.
        waveNum must be between 0 and plGrassShaderMod::kNumWaves-1 (currently 3) inclusive"""
        pass
    def setWaveSpeed(self,waveNum, speed):
        """Sets the wave waveNum's speed as a float.
        waveNum must be between 0 and plGrassShaderMod::kNumWaves-1 (currently 3) inclusive"""
        pass
class ptImage:
    """Plasma image class (engine-implemented glue stub)."""

    def __init__(self, imgKey):
        """Stub constructor; the engine supplies the real binding."""
        pass

    def getColorLoc(self, color):
        """Returns the ptPoint3 where the specified color is located."""
        pass

    def getHeight(self):
        """Returns the height of the image."""
        pass

    def getPixelColor(self, x, y):
        """Returns the ptColor at the specified location (floats from 0 to 1)."""
        pass

    def getWidth(self):
        """Returns the width of the image."""
        pass

    def saveAsJPEG(self, filename, quality=75):
        """Saves this image to disk as a JPEG file."""
        pass
class ptInputInterface:
    """Plasma input interface class"""
    def __init__(self):
        """None"""
        pass
    def popTelescope(self):
        """Pops off the telescope interface and goes back to the previous interface"""
        pass
    def pushTelescope(self):
        """Pushes on the telescope interface"""
        pass
class ptKey:
    """Plasma Key class"""
    def __init__(self):
        """None"""
        pass
    def disable(self):
        """Sends a disable message to whatever this ptKey is pointing to"""
        pass
    def enable(self):
        """Sends an enable message to whatever this ptKey is pointing to"""
        pass
    def getName(self):
        """Get the name of the object that this ptKey is pointing to"""
        pass
    def getParentKey(self):
        """This will return a ptKey object that is the parent of this modifier
        However, if the parent is not a modifier or not loaded, then None is returned."""
        pass
    def getSceneObject(self):
        """This will return a ptSceneobject object that is associated with this ptKey
        However, if this ptKey is _not_ a sceneobject, then unpredictable results will ensue"""
        pass
    def isAttachedToClone(self):
        """Returns whether the python file mod is attached to a clone"""
        pass
    def netForce(self,forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
        pass
class ptKeyMap:
    """Accessor class to the Key Mapping functions"""
    def __init__(self):
        """None"""
        pass
    def bindKey(self):
        """Params key1,key2,action
        Bind keys to an action
        NOTE(review): the glue signature takes no parameters although the docs
        list key1,key2,action — the engine binding presumably accepts them; confirm."""
        pass
    def bindKeyToConsoleCommand(self,keyStr1, command):
        """Binds key to console command"""
        pass
    def convertCharToControlCode(self,controlCodeString):
        """Convert string version of control code to number"""
        pass
    def convertCharToFlags(self,charString):
        """Convert char string to flags"""
        pass
    def convertCharToVKey(self,charString):
        """Convert char string to virtual key"""
        pass
    def convertControlCodeToString(self):
        """Params controlCode
        Convert control code to character string
        NOTE(review): glue signature omits the documented parameter — confirm against engine binding."""
        pass
    def convertVKeyToChar(self,virtualKey,flags):
        """Convert virtual key and shift flags to string"""
        pass
    def getBindingFlags1(self):
        """Params controlCode
        Returns modifier flags for controlCode
        NOTE(review): glue signature omits the documented parameter — confirm against engine binding."""
        pass
    def getBindingFlags2(self):
        """Params controlCode
        Returns modifier flags for controlCode
        NOTE(review): glue signature omits the documented parameter — confirm against engine binding."""
        pass
    def getBindingFlagsConsole(self,command):
        """Returns modifier flags for the console command mapping"""
        pass
    def getBindingKey1(self):
        """Params controlCode
        Returns key code for controlCode
        NOTE(review): glue signature omits the documented parameter — confirm against engine binding."""
        pass
    def getBindingKey2(self):
        """Params controlCode
        Returns key code for controlCode
        NOTE(review): glue signature omits the documented parameter — confirm against engine binding."""
        pass
    def getBindingKeyConsole(self,command):
        """Returns key for console command mapping"""
        pass
    def writeKeyMap(self):
        """Forces write of the keymap file"""
        pass
class ptMarkerMgr:
"""Marker manager accessor class"""
def __init__(self):
"""None"""
pass
def addMarker(self,x, y, z, id, justCreated):
"""Add a marker in the specified location with the specified id"""
pass
def areLocalMarkersShowing(self):
"""Returns true if we are showing the markers on this local machine"""
pass
def captureQuestMarker(self,id, captured):
"""Sets a marker as captured or not"""
pass
def captureTeamMarker(self,id, team):
"""Sets a marker as captured by the specified team (0 = not captured)"""
pass
def clearSelectedMarker(self):
"""Unselects the selected marker"""
pass
def getMarkersRespawn(self):
"""Returns whether markers respawn after being captured, or not"""
pass
def getSelectedMarker(self):
"""Returns the id of the selected marker"""
pass
def hideMarkersLocal(self):
"""Hides the markers on your machine, so you can no longer see where they are"""
pass
def removeAllMarkers(self):
"""Removes all markers"""
pass
def removeMarker(self,id):
"""Removes the specified marker from the game"""
pass
def setMarkersRespawn(self,respawn):
"""Sets whether markers respawn after being captured, or not"""
pass
def setSelectedMarker(self,id):
"""Sets the selected marker to the one with the specified id"""
pass
def showMarkersLocal(self):
"""Shows the markers on your machine, so you can see where they are"""
pass
class ptMatrix44:
    """Plasma Matrix44 class"""
    def __init__(self):
        """None"""
        pass
    def copy(self):
        """Copies the matrix and returns the copy"""
        pass
    def getAdjoint(self,adjointMat):
        """Returns the adjoint of the matrix
        (presumably also written into 'adjointMat' — confirm against engine source)"""
        pass
    def getData(self):
        """Returns the matrix in tuple form"""
        pass
    def getDeterminant(self):
        """Get the matrix's determinant"""
        pass
    def getInverse(self,inverseMat):
        """Returns the inverse of the matrix
        (presumably also written into 'inverseMat' — confirm against engine source)"""
        pass
    def getParity(self):
        """Get the parity of the matrix"""
        pass
    def getTranslate(self,vector):
        """Returns the translate vector of the matrix (and sets vector to it as well)"""
        pass
    def getTranspose(self,transposeMat):
        """Returns the transpose of the matrix
        (presumably also written into 'transposeMat' — confirm against engine source)"""
        pass
    def make(self,fromPt, atPt, upVec):
        """Creates the matrix from the 'from' and 'at' points, and the up vector"""
        pass
    def makeRotateMat(self,axis,radians):
        """Makes the matrix a rotation matrix"""
        pass
    def makeScaleMat(self,scale):
        """Makes the matrix a scaling matrix"""
        pass
    def makeTranslateMat(self,trans):
        """Makes the matrix a translation matrix"""
        pass
    def makeUpPreserving(self,fromPt, atPt, upVec):
        """Creates the matrix from the 'from' and 'at' points, and the up vector (preserving the up vector)"""
        pass
    def reset(self):
        """Reset the matrix to identity"""
        pass
    def right(self):
        """Returns the right vector of the matrix"""
        pass
    def rotate(self,axis,radians):
        """Rotates the matrix by radians around the axis"""
        pass
    def scale(self,scale):
        """Scales the matrix by the vector"""
        pass
    def setData(self,mat):
        """Sets the matrix using tuples"""
        pass
    def translate(self,vector):
        """Translates the matrix by the vector"""
        pass
    def up(self):
        """Returns the up vector of the matrix"""
        pass
    def view(self):
        """Returns the view vector of the matrix"""
        pass
class ptMoviePlayer:
    """Accessor class to play a movie in the MoviePlayer (engine-implemented glue stub)."""

    def __init__(self, movieName, selfKey):
        """Stub constructor; the engine supplies the real binding."""
        pass

    def pause(self):
        """Pauses the movie."""
        pass

    def play(self):
        """Plays the movie."""
        pass

    def playPaused(self):
        """Plays the movie, but pauses at the first frame."""
        pass

    def resume(self):
        """Resumes the movie after pausing."""
        pass

    def setCenter(self, x, y):
        """Sets the center of the movie."""
        pass

    def setColor(self, color):
        """Sets the color of the movie."""
        pass

    def setOpacity(self, opacity):
        """Sets the opacity of the movie."""
        pass

    def setScale(self, width, height):
        """Sets the width and height scale of the movie."""
        pass

    def setVolume(self, volume):
        """Sets the volume of the movie."""
        pass

    def stop(self):
        """Stops the movie."""
        pass
class ptNetLinkingMgr:
    """Constructor to get access to the net link manager"""
    def __init__(self):
        """None"""
    def getCurrAgeLink(self):
        """Get the ptAgeLinkStruct for the current age"""
    def getPrevAgeLink(self):
        """Get the ptAgeLinkStruct for the previous age"""
    def isEnabled(self):
        """True if linking is enabled."""
    def linkPlayerHere(self, pid):
        """link player(pid) to where I am"""
    def linkPlayerToAge(self, ageLink, pid):
        """Link player(pid) to ageLink"""
    def linkToAge(self, ageLink):
        """Links to ageLink (ptAgeLinkStruct)"""
    def linkToMyNeighborhoodAge(self):
        """Link to my Neighborhood Age"""
    def linkToMyPersonalAge(self):
        """Link to my Personal Age"""
    def linkToMyPersonalAgeWithYeeshaBook(self):
        """Link to my Personal Age with the YeeshaBook"""
    def linkToPlayersAge(self, pid):
        """Link me to where player(pid) is"""
    def setEnabled(self, enable):
        """Enable/Disable linking."""
class ptNotify:
    """Creates a Notify message
    - selfKey is ptKey of your PythonFile modifier"""
    def __init__(self, selfKey):
        """None"""
    def addActivateEvent(self, activeFlag, activateFlag):
        """Add an activate event record to the notify message"""
    def addCallbackEvent(self, eventNumber):
        """Add a callback event record to the notify message"""
    def addCollisionEvent(self, enterFlag, hitterKey, hitteeKey):
        """Add a collision event record to the Notify message"""
    def addContainerEvent(self, enteringFlag, containerKey, containedKey):
        """Add a container event record to the notify message"""
    def addControlKeyEvent(self, keynumber, downFlag):
        """Add a keyboard event record to the Notify message"""
    def addFacingEvent(self, enabledFlag, facerKey, faceeKey, dotProduct):
        """Add a facing event record to the Notify message"""
    def addPickEvent(self, enabledFlag, pickerKey, pickeeKey, hitPoint):
        """Add a pick event record to the Notify message"""
    def addReceiver(self, key):
        """Add a receivers key to receive this Notify message"""
    def addResponderState(self, state):
        """Add a responder state event record to the notify message"""
    def addVarKey(self, name, key):
        """Add a ptKey variable event record to the Notify message
        This event record is used to pass a ptKey variable to another python program"""
    def addVarNumber(self, name, number):
        """Add a number variable event record to the Notify message
        Method will try to pick appropriate variable type
        This event record is used to pass a number variable to another python program"""
    def addVarFloat(self, name, number):
        """Add a float variable event record to the Notify message
        This event record is used to pass a number variable to another python program"""
    def addVarInt(self, name, number):
        """Add a integer variable event record to the Notify message
        This event record is used to pass a number variable to another python program"""
    def addVarNull(self, name):
        """Add a null (no data) variable event record to the Notify message
        This event record is used to pass a number variable to another python program"""
    def clearReceivers(self):
        """Remove all the receivers that this Notify message has
        - receivers are automatically added if from a ptAttribActivator"""
    def netForce(self, forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
    def netPropagate(self, netFlag):
        """Sets the net propagate flag - default to set"""
    def send(self):
        """Send the notify message"""
    def setActivate(self, state):
        """Set the activate state to true(1.0) or false(0.0)"""
    def setType(self, type):
        """Sets the message type"""
class ptParticle:
    """Plasma particle system class"""
    def __init__(self):
        """None"""
    def netForce(self, forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
    def setGeneratorLife(self, value):
        """NEEDS DOCSTRING"""
    def setHeightSize(self, value):
        """NEEDS DOCSTRING"""
    def setInitPitchRange(self, value):
        """NEEDS DOCSTRING"""
    def setInitYawRange(self, value):
        """NEEDS DOCSTRING"""
    def setParticleLifeMaximum(self, value):
        """NEEDS DOCSTRING"""
    def setParticleLifeMinimum(self, value):
        """NEEDS DOCSTRING"""
    def setParticlesPerSecond(self, value):
        """NEEDS DOCSTRING"""
    def setScaleMaximum(self, value):
        """NEEDS DOCSTRING"""
    def setScaleMinimum(self, value):
        """NEEDS DOCSTRING"""
    def setVelocityMaximum(self, value):
        """NEEDS DOCSTRING"""
    def setVelocityMinimum(self, value):
        """NEEDS DOCSTRING"""
    def setWidthSize(self, value):
        """NEEDS DOCSTRING"""
class ptPhysics:
    """Plasma physics class"""
    def __init__(self):
        """None"""
    def angularImpulse(self, impulseVector):
        """Add the given vector (representing a rotation axis and magnitude) to
        the attached sceneobject's velocity"""
    def damp(self, damp):
        """Reduce all velocities on the object (0 = all stop, 1 = no effect)"""
    def disable(self):
        """Disables physics on the sceneobject attached"""
    def disableCollision(self):
        """Disables collision detection on the attached sceneobject"""
    def enable(self, state=1):
        """Sets the physics enable state for the sceneobject attached"""
    def enableCollision(self):
        """Enables collision detection on the attached sceneobject"""
    def force(self, forceVector):
        """Applies the specified force to the attached sceneobject"""
    def forceWithOffset(self, forceVector, offsetPt):
        """Applies the specified offsetted force to the attached sceneobject"""
    def impulse(self, impulseVector):
        """Adds the given vector to the attached sceneobject's velocity"""
    def impulseWithOffset(self, impulseVector, offsetPt):
        """Adds the given vector to the attached sceneobject's velocity
        with the specified offset"""
    def move(self, direction, distance):
        """Moves the attached sceneobject the specified distance in the specified direction"""
    def netForce(self, forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object"""
    def rotate(self, radians, axis):
        """Rotates the attached sceneobject the specified radians around the specified axis"""
    def shiftMass(self, offsetVector):
        """Shifts the attached sceneobject's center of mass in the specified direction and distance"""
    def suppress(self, doSuppress):
        """Completely remove the physical, but keep it around so it
        can be added back later."""
    def torque(self, torqueVector):
        """Applies the specified torque to the attached sceneobject"""
    def warp(self, position):
        """Warps the sceneobject to a specified location.
        'position' can be a ptPoint3 or a ptMatrix44"""
    def warpObj(self, objkey):
        """Warps the sceneobject to match the location and orientation of the specified object"""
class ptPlayer:
    """Plasma player class - may optionally be constructed as __init__(name, playerID)"""
    def __init__(self, avkey, name, playerID, distanceSq):
        """None"""
    def getDistanceSq(self):
        """Returns the distance to remote player from local player"""
    def getPlayerID(self):
        """Returns the unique player ID"""
    def getPlayerName(self):
        """Returns the name of the player"""
    def getPlayerNameW(self):
        """Returns the name of the player as Unicode"""
    def isCCR(self):
        """Is this player a CCR?"""
    def isServer(self):
        """Is this player a server?"""
class ptPoint3:
    """Plasma Point class"""
    def __init__(self, x=0, y=0, z=0):
        """None"""
    def copy(self):
        """Returns a copy of the point in another ptPoint3 object"""
    def distance(self, other):
        """Computes the distance from this point to 'other' point"""
    def distanceSq(self, other):
        """Computes the distance squared from this point to 'other' point
        - this function is faster than distance(other)"""
    def getX(self):
        """Returns the 'x' component of the point"""
    def getY(self):
        """Returns the 'y' component of the point"""
    def getZ(self):
        """Returns the 'z' component of the point"""
    def setX(self, x):
        """Sets the 'x' component of the point"""
    def setY(self, y):
        """Sets the 'y' component of the point"""
    def setZ(self, z):
        """Sets the 'z' component of the point"""
    def zero(self):
        """Sets the 'x','y' and the 'z' component to zero"""
class ptSDL:
    """SDL accessor"""
    def __init__(self):
        """None"""
    def sendToClients(self, key):
        """Sets it so changes to this key are sent to the
        server AND the clients. (Normally it just goes
        to the server.)"""
    def setDefault(self, key, value):
        """Like setitem, but doesn't broadcast over the net.
        Only use for setting defaults that everyone will
        already know (from reading it off disk)"""
    def setFlags(self, name, sendImmediate, skipOwnershipCheck):
        """Sets the flags for a variable in this SDL"""
    def setIndex(self, key, idx, value):
        """Sets the value at a specific index in the tuple,
        so you don't have to pass the whole thing in"""
    def setIndexNow(self, key, idx, value):
        """Same as setIndex but sends immediately"""
    def setNotify(self, selfkey, key, tolerance):
        """Sets the OnSDLNotify to be called when 'key'
        SDL variable changes by 'tolerance' (if number)"""
    def setTagString(self, name, tag):
        """Sets the tag string for a variable"""
class ptSDLStateDataRecord:
    """Basic SDL state data record class"""
    def __init__(self):
        """None"""
    def findVar(self, name):
        """Finds and returns the specified ptSimpleStateVariable"""
    def getName(self):
        """Returns our record's name"""
    def getVarList(self):
        """Returns the names of the vars we hold as a list of strings"""
    def setFromDefaults(self, timeStampNow):
        """Sets all our vars to their defaults"""
class ptSceneobject:
    """Plasma Sceneobject class"""
    def __init__(self, objKey, selfKey):
        """None"""
    def addKey(self, key):
        """Mostly used internally.
        Add another sceneobject ptKey"""
    def animate(self):
        """If we can animate, start animating"""
    def avatarVelocity(self):
        """Returns the velocity of the first attached avatar scene object"""
    def fastForwardAttachedResponder(self, state):
        """Fast forward the attached responder to the specified state"""
    def findObject(self, name):
        """Find a particular object in just the sceneobjects that are attached"""
    def getKey(self):
        """Get the ptKey of this sceneobject
        If there are more than one attached, get the first one"""
    def getLocalToParent(self):
        """Returns ptMatrix44 of the local to parent transform for this sceneobject
        - If there is more than one sceneobject attached, returns just the first one"""
    def getLocalToWorld(self):
        """Returns ptMatrix44 of the local to world transform for this sceneobject
        - If there is more than one sceneobject attached, returns just the first one"""
    def getName(self):
        """Returns the name of the sceneobject (Max name)
        - If there are more than one sceneobject attached, return just the first one"""
    def getParentToLocal(self):
        """Returns ptMatrix44 of the parent to local transform for this sceneobject
        - If there is more than one sceneobject attached, returns just the first one"""
    def getPythonMods(self):
        """Returns list of ptKeys of the python modifiers attached to this sceneobject"""
    def getResponderState(self):
        """Return the responder state (if we are a responder)"""
    def getResponders(self):
        """Returns list of ptKeys of the responders attached to this sceneobject"""
    def getSoundIndex(self, sndComponentName):
        """Get the index of the requested sound component"""
    def getWorldToLocal(self):
        """Returns ptMatrix44 of the world to local transform for this sceneobject
        - If there is more than one sceneobject attached, returns just the first one"""
    def isAvatar(self):
        """Returns true if the scene object is an avatar"""
    def isHuman(self):
        """Returns true if the scene object is a human avatar"""
    def isLocallyOwned(self):
        """Returns true(1) if this object is locally owned by this client
        or returns false(0) if it is not or don't know"""
    def netForce(self, forceFlag):
        """Specify whether this object needs to use messages that are forced to the network
        - This is to be used if your Python program is running on only one client
        Such as a game master, only running on the client that owns a particular object
        - Setting the netForce flag on a sceneobject will also set the netForce flag on
        its draw, physics, avatar, particle objects"""
    def playAnimNamed(self, animName):
        """Play the attached named animation"""
    def popCamera(self, avKey):
        """Pop the camera stack and go back to the previous camera"""
    def popCutsceneCamera(self, avKey):
        """Pop the camera stack and go back to previous camera."""
    def position(self):
        """Returns the scene object's current position"""
    def pushCamera(self, avKey):
        """Switch to this object (if it is a camera)"""
    def pushCameraCut(self, avKey):
        """Switch to this object, cutting the view (if it is a camera)"""
    def pushCutsceneCamera(self, cutFlag, avKey):
        """Switch to this object (assuming that it is actually a camera)"""
    def rewindAnimNamed(self, animName):
        """Rewind the attached named animation"""
    def right(self):
        """Returns the scene object's current right vector"""
    def runAttachedResponder(self, state):
        """Run the attached responder to the specified state"""
    def setSoundFilename(self, index, filename, isCompressed):
        """Sets the sound attached to this sceneobject to use the specified sound file."""
    def setTransform(self, local2world, world2local):
        """Set our current transforms"""
    def stopAnimNamed(self, animName):
        """Stop the attached named animation"""
    def up(self):
        """Returns the scene object's current up vector"""
    def view(self):
        """Returns the scene object's current view vector"""
    def volumeSensorIgnoreExtraEnters(self, ignore):
        """Tells the volume sensor attached to this object to ignore extra enters (default), or not (hack for garrison)."""
class ptSimpleStateVariable:
    """Basic SDL state data record class"""
    def __init__(self):
        """None"""
    def getBool(self, idx=0):
        """Returns a boolean variable's value"""
    def getByte(self, idx=0):
        """Returns a byte variable's value"""
    def getDefault(self):
        """Returns the variable's default"""
    def getDisplayOptions(self):
        """Returns the variable's display options"""
    def getDouble(self, idx=0):
        """Returns a double variable's value"""
    def getFloat(self, idx=0):
        """Returns a float variable's value"""
    def getInt(self, idx=0):
        """Returns an int variable's value"""
    def getShort(self, idx=0):
        """Returns a short variable's value"""
    def getString(self, idx=0):
        """Returns a string variable's value"""
    def getType(self):
        """Returns the variable's type"""
    def isAlwaysNew(self):
        """Is this variable always new?"""
    def isInternal(self):
        """Is this an internal variable?"""
    def isUsed(self):
        """Is this variable used?"""
    def setBool(self, val, idx=0):
        """Sets a boolean variable's value"""
    def setByte(self, val, idx=0):
        """Sets a byte variable's value"""
    def setDouble(self, val, idx=0):
        """Sets a double variable's value"""
    def setFloat(self, val, idx=0):
        """Sets a float variable's value"""
    def setInt(self, val, idx=0):
        """Sets an int variable's value"""
    def setShort(self, val, idx=0):
        """Sets a short variable's value"""
    def setString(self, val, idx=0):
        """Sets a string variable's value"""
class ptSpawnPointInfo:
    """Class to hold spawn point data"""
    def __init__(self, title=None, spawnPt=None):
        """None"""
    def getCameraStack(self):
        """Returns the camera stack for this spawnpoint as a string"""
    def getName(self):
        """Returns the spawnpoint's name"""
    def getTitle(self):
        """Returns the spawnpoint's title"""
    def setCameraStack(self, stack):
        """Sets the spawnpoint's camera stack (as a string)"""
    def setName(self, name):
        """Sets the spawnpoint's name"""
    def setTitle(self, title):
        """Sets the spawnpoint's title"""
class ptSpawnPointInfoRef:
    """Class to hold spawn point data"""
    def __init__(self):
        """None"""
    def getCameraStack(self):
        """Returns the camera stack for this spawnpoint as a string"""
    def getName(self):
        """Returns the spawnpoint's name"""
    def getTitle(self):
        """Returns the spawnpoint's title"""
    def setCameraStack(self, stack):
        """Sets the spawnpoint's camera stack (as a string)"""
    def setName(self, name):
        """Sets the spawnpoint's name"""
    def setTitle(self, title):
        """Sets the spawnpoint's title"""
class ptStatusLog:
    """A status log class"""
    def __init__(self):
        """None"""
    def close(self):
        """Close the status log file"""
    def isOpen(self):
        """Returns whether the status log is currently opened"""
    def open(self, logName, numLines, flags):
        """Open a status log for writing to
        'logname' is the name of the log file (example: special.log)
        'numLines' is the number of lines to display on debug screen
        'flags' is a PlasmaConstants.PtStatusLogFlags"""
    def write(self, text, color=None):
        """If the status log is open, write 'text' to log
        'color' is the display color in debug screen"""
class ptStream:
    """A basic stream class"""
    def __init__(self):
        """None"""
    def close(self):
        """Close the stream file"""
    def isOpen(self):
        """Returns whether the stream file is currently opened"""
    def open(self, fileName, flags):
        """Open a stream file for reading or writing"""
    def readlines(self):
        """Reads a list of strings from the file"""
    def writelines(self, lines):
        """Write a list of strings to the file"""
class ptSwimCurrentInterface:
    """Creates a new ptSwimCurrentInterface"""
    def __init__(self, key):
        """None"""
    def disable(self):
        """UNKNOWN"""
    def enable(self):
        """UNKNOWN"""
class ptVault:
    """Accessor class to the player's vault"""
    def __init__(self):
        """None"""
    def addChronicleEntry(self, entryName, type, string):
        """Adds an entry to the player's chronicle with a value of 'string'."""
    def amAgeCzar(self, ageInfo):
        """Are we the czar (WTH is this?) of the specified age?"""
    def amAgeOwner(self, ageInfo):
        """Are we the owner of the specified age?"""
    def amCzarOfCurrentAge(self):
        """Are we the czar (WTH is this?) of the current age?"""
    def amOwnerOfCurrentAge(self):
        """Are we the owner of the current age?"""
    def createNeighborhood(self):
        """Creates a new neighborhood"""
    def findChronicleEntry(self, entryName):
        """Returns a ptVaultNode of type kNodeTypeChronicle of the current player's chronicle entry by entryName."""
    def findNode(self, templateNode):
        """Find the node matching the template"""
    def getAgeJournalsFolder(self):
        """Returns a ptVaultFolderNode of the current player's age journals folder."""
    def getAgesICanVisitFolder(self):
        """Returns a ptVaultFolderNode of ages I can visit"""
    def getAgesIOwnFolder(self):
        """Returns a ptVaultFolderNode of ages that I own"""
    def getAllPlayersFolder(self):
        """Returns a ptVaultPlayerInfoListNode of the all players folder."""
    def getAvatarClosetFolder(self):
        """Do not use.
        Returns a ptVaultFolderNode of the avatars outfit in their closet."""
    def getAvatarOutfitFolder(self):
        """Do not use.
        Returns a ptVaultFolderNode of the avatars outfit."""
    def getBuddyListFolder(self):
        """Returns a ptVaultPlayerInfoListNode of the current player's buddy list folder."""
    def getChronicleFolder(self):
        """Returns a ptVaultFolderNode of the current player's chronicle folder."""
    def getGlobalInbox(self):
        """Returns a ptVaultFolderNode of the global inbox folder."""
    def getIgnoreListFolder(self):
        """Returns a ptVaultPlayerInfoListNode of the current player's ignore list folder."""
    def getInbox(self):
        """Returns a ptVaultFolderNode of the current player's inbox folder."""
    def getInviteFolder(self):
        """Returns a ptVaultFolderNode of invites"""
    def getKIUsage(self):
        """Returns a tuple with usage statistics of the KI (# of pics, # of text notes, # of marker games)"""
    def getLinkToCity(self):
        """Returns a ptVaultAgeLinkNode that will go to the city"""
    def getLinkToMyNeighborhood(self):
        """Returns a ptVaultAgeLinkNode that will go to my neighborhood"""
    def getOwnedAgeLink(self, ageInfo):
        """Returns a ptVaultAgeLinkNode to my owned age(ageInfo)"""
    def getPeopleIKnowAboutFolder(self):
        """Returns a ptVaultPlayerInfoListNode of the current player's people I know about (Recent) list folder."""
    def getPlayerInfo(self):
        """Returns a ptVaultNode of type kNodeTypePlayerInfo of the current player"""
    def getPsnlAgeSDL(self):
        """Returns the personal age SDL"""
    def getVisitAgeLink(self, ageInfo):
        """Returns a ptVaultAgeLinkNode for a visitor to age(ageInfo)"""
    def inMyNeighborhoodAge(self):
        """Are we in the player's neighborhood age?"""
    def inMyPersonalAge(self):
        """Are we in the player's personal age?"""
    def invitePlayerToAge(self, link, playerID):
        """Sends an invitation to visit the age to the specified player"""
    def offerLinkToPlayer(self, link, playerID):
        """Offer a one-time link to the specified player"""
    def registerMTStation(self, stationName, mtSpawnPoint):
        """Registers this player at the specified mass-transit point"""
    def registerOwnedAge(self, link):
        """Registers the specified age as owned by the player"""
    def registerVisitAge(self, link):
        """Register this age as visitable by this player"""
    def sendToDevice(self, node, deviceName):
        """Sends a ptVaultNode object to an Age's device by deviceName."""
    def setAgePublic(self, ageInfo, makePublic):
        """Makes the specified age public or private"""
    def unInvitePlayerToAge(self, guid, playerID):
        """Revokes the invitation to visit the age"""
    def unRegisterOwnedAge(self, ageFilename):
        """Unregisters the specified age so it's no longer owned by this player"""
    def unRegisterVisitAge(self, guid):
        """Unregisters the specified age so it can no longer be visited by this player"""
    def updatePsnlAgeSDL(self, pyrec):
        """Updates the personal age SDL to the specified data"""
class ptVaultNode:
    """Vault node class"""
    def __init__(self):
        """None"""
    def addNode(self, node, cb=None, cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
    def findNode(self, templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
    def getChildNodeCount(self):
        """Returns how many children this node has."""
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
    def getClientID(self):
        """Returns the client's ID."""
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
    def getCreateTime(self):
        """Returns when this node was created, in a form that is usable by python's time library."""
    def getCreatorNode(self):
        """Returns the creator's node"""
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
    def getModifyTime(self):
        """Returns the modified time of this node, in a form that is usable by python's time library."""
    def getNode(self, id):
        """Returns ptVaultNodeRef if is a child node, or None"""
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
    def hasNode(self, id):
        """Returns true if node is a child node"""
    def linkToNode(self, nodeID, cb=None, cbContext=0):
        """Adds a link to the node designated by nodeID"""
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
    def removeNode(self, node, cb=None, cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
    def save(self, cb=None, cbContext=0):
        """Save the changes made to this node."""
    def saveAll(self, cb=None, cbContext=0):
        """Saves this node and all its children nodes."""
    def sendTo(self, destID, cb=None, cbContext=0):
        """Send this node to inbox at 'destID'"""
    def setCreateAgeGuid(self, guid):
        """Set guid as a string of the Age where this node was created."""
    def setCreateAgeName(self, name):
        """Set name of the Age where this node was created."""
    def setCreatorNodeID(self, id):
        """Set creator's node ID"""
    def setID(self, id):
        """Sets ID of this ptVaultNode."""
    def setOwnerNodeID(self, id):
        """Set node ID of the owner of this node"""
    def setType(self, type):
        """Set the type of ptVaultNode this is."""
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
class ptVaultFolderNode(ptVaultNode):
    """Plasma vault folder node"""
    def __init__(self, n=0):
        """None"""
    def addNode(self, node, cb=None, cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
    def findNode(self, templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
    def folderGetName(self):
        """LEGACY
        Returns the folder's name"""
    def folderGetType(self):
        """LEGACY
        Returns the folder type (of the standard folder types)"""
    def folderSetName(self, name):
        """LEGACY
        Set the folder name"""
    def folderSetType(self, type):
        """LEGACY
        Set the folder type"""
    def getChildNodeCount(self):
        """Returns how many children this node has."""
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
    def getClientID(self):
        """Returns the client's ID."""
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
    def getCreateTime(self):
        """Returns when this node was created, in a form that is usable by python's time library."""
    def getCreatorNode(self):
        """Returns the creator's node"""
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
    def getFolderName(self):
        """Returns the folder's name"""
    def getFolderNameW(self):
        """Unicode version of getFolderName"""
    def getFolderType(self):
        """Returns the folder type (of the standard folder types)"""
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
    def getModifyTime(self):
        """Returns the modified time of this node, in a form that is usable by python's time library."""
    def getNode(self, id):
        """Returns ptVaultNodeRef if is a child node, or None"""
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
    def hasNode(self, id):
        """Returns true if node is a child node"""
    def linkToNode(self, nodeID, cb=None, cbContext=0):
        """Adds a link to the node designated by nodeID"""
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
    def removeNode(self, node, cb=None, cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
    def save(self, cb=None, cbContext=0):
        """Save the changes made to this node."""
    def saveAll(self, cb=None, cbContext=0):
        """Saves this node and all its children nodes."""
    def sendTo(self, destID, cb=None, cbContext=0):
        """Send this node to inbox at 'destID'"""
    def setCreateAgeGuid(self, guid):
        """Set guid as a string of the Age where this node was created."""
    def setCreateAgeName(self, name):
        """Set name of the Age where this node was created."""
    def setCreatorNodeID(self, id):
        """Set creator's node ID"""
    def setFolderName(self, name):
        """Set the folder name"""
    def setFolderNameW(self, name):
        """Unicode version of setFolderName"""
    def setFolderType(self, type):
        """Set the folder type"""
    def setID(self, id):
        """Sets ID of this ptVaultNode."""
    def setOwnerNodeID(self, id):
        """Set node ID of the owner of this node"""
    def setType(self, type):
        """Set the type of ptVaultNode this is."""
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
class ptVaultAgeInfoListNode(ptVaultFolderNode):
    """Plasma vault age info list node.

    API stub: every method body is `pass` (and so returns None as
    written); the docstrings describe the intended behavior.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addAge(self,ageID):
        """Adds ageID to list of ages"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def folderGetName(self):
        """LEGACY
        Returns the folder's name"""
        pass
    def folderGetType(self):
        """LEGACY
        Returns the folder type (of the standard folder types)"""
        pass
    def folderSetName(self,name):
        """LEGACY
        Set the folder name"""
        pass
    def folderSetType(self,type):
        """LEGACY
        Set the folder type"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by Python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getFolderName(self):
        """Returns the folder's name"""
        pass
    def getFolderNameW(self):
        """Unicode version of getFolderName"""
        pass
    def getFolderType(self):
        """Returns the folder type (of the standard folder types)"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by Python's time library."""
        pass
    def getNode(self,id):
        """Returns ptVaultNodeRef if is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasAge(self,ageID):
        """Returns whether ageID is in the list of ages"""
        pass
    def hasNode(self,id):
        """Returns true if node is a child node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAge(self,ageID):
        """Removes ageID from list of ages"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setFolderName(self,name):
        """Set the folder name"""
        pass
    def setFolderNameW(self,name):
        """Unicode version of setFolderName"""
        pass
    def setFolderType(self,type):
        """Set the folder type"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerGameNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultAgeInfoNode(ptVaultNode):
    """Plasma vault age info node.

    API stub: every method body is `pass` (and so returns None as
    written); the docstrings describe the intended behavior.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def asAgeInfoStruct(self):
        """Returns this ptVaultAgeInfoNode as a ptAgeInfoStruct"""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getAgeDescription(self):
        """Returns the description of the age"""
        pass
    def getAgeFilename(self):
        """Returns the age filename"""
        pass
    def getAgeID(self):
        """Returns the age ID"""
        pass
    def getAgeInstanceGuid(self):
        """Returns the age instance guid"""
        pass
    def getAgeInstanceName(self):
        """Returns the instance name of the age"""
        pass
    def getAgeLanguage(self):
        """Returns the age's language (integer)"""
        pass
    def getAgeOwnersFolder(self):
        """Returns a ptVaultPlayerInfoList of the players that own this age"""
        pass
    def getAgeSDL(self):
        """Returns a ptVaultSDLNode of the age's SDL"""
        pass
    def getAgeSequenceNumber(self):
        """Returns the sequence number of this instance of the age"""
        pass
    def getAgeUserDefinedName(self):
        """Returns the user defined part of the age name"""
        pass
    def getCanVisitFolder(self):
        """Returns a ptVaultPlayerInfoList of the players that can visit this age"""
        pass
    def getChildAgesFolder(self):
        """Returns a ptVaultFolderNode of the child ages of this age"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by Python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getCzar(self):
        """Returns ptVaultPlayerInfoNode of the player that is the Czar"""
        pass
    def getCzarID(self):
        """Returns the ID of the age's czar"""
        pass
    def getDisplayName(self):
        """Returns the displayable version of the age name"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by Python's time library."""
        pass
    def getNode(self,id):
        """Returns ptVaultNodeRef if is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getParentAgeLink(self):
        """Returns ptVaultAgeLinkNode of the age's parent age, or None if not a child age"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if node is a child node"""
        pass
    def isPublic(self):
        """Returns whether the age is Public or Not"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setAgeDescription(self,description):
        """Sets the description of the age"""
        pass
    def setAgeFilename(self,fileName):
        """Sets the filename"""
        pass
    def setAgeID(self,ageID):
        """Sets the age ID"""
        pass
    def setAgeInstanceGuid(self,guid):
        """Sets the age instance GUID"""
        pass
    def setAgeInstanceName(self,instanceName):
        """Sets the instance name"""
        pass
    def setAgeLanguage(self,lang):
        """Sets the age's language (integer)"""
        pass
    def setAgeSequenceNumber(self,seqNumber):
        """Sets the sequence number"""
        pass
    def setAgeUserDefinedName(self,udname):
        """Sets the user defined part of the name"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerGameNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultAgeLinkNode(ptVaultNode):
    """Plasma vault age link node.

    API stub: every method body is `pass` (and so returns None as
    written); the docstrings describe the intended behavior.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def addSpawnPoint(self,point):
        """Adds the specified ptSpawnPointInfo or ptSpawnPointInfoRef"""
        pass
    def asAgeLinkStruct(self):
        """Returns this ptVaultAgeLinkNode as a ptAgeLinkStruct"""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getAgeInfo(self):
        """Returns the ageInfo as a ptAgeInfoStruct"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by Python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getLocked(self):
        """Returns whether the link is locked or not"""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by Python's time library."""
        pass
    def getNode(self,id):
        """Returns ptVaultNodeRef if is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getSpawnPoints(self):
        """Returns a list of ptSpawnPointInfo objects"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def getVolatile(self):
        """Returns whether the link is volatile or not"""
        pass
    def hasNode(self,id):
        """Returns true if node is a child node"""
        pass
    def hasSpawnPoint(self,spawnPtName):
        """Returns true if this link has the specified spawn point"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def removeSpawnPoint(self,point):
        """Removes the specified spawn point based on a ptSpawnPointInfo, ptSpawnPointInfoRef, or string"""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setLocked(self,state):
        """Sets whether the link is locked or not"""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def setVolatile(self,state):
        """Sets the state of the volatility of the link"""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerGameNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultChronicleNode(ptVaultNode):
    """Plasma vault chronicle node.

    API stub: every method body is `pass` (and so returns None as
    written); the docstrings describe the intended behavior.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def chronicleGetName(self):
        """LEGACY: Returns the name of the chronicle node."""
        pass
    def chronicleGetType(self):
        """LEGACY: Returns the user defined type of the chronicle node."""
        pass
    def chronicleGetValue(self):
        """LEGACY: Returns the value as a string of this chronicle node."""
        pass
    def chronicleSetName(self,name):
        """LEGACY: Sets the name of the chronicle node."""
        pass
    def chronicleSetType(self,type):
        """LEGACY: Sets this chronicle node to a user defined type."""
        pass
    def chronicleSetValue(self,value):
        """LEGACY: Sets the chronicle to a value that is a string"""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by Python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getEntryType(self):
        """Returns the user defined type of the chronicle node."""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by Python's time library."""
        pass
    def getName(self):
        """Returns the name of the chronicle node."""
        pass
    def getNode(self,id):
        """Returns ptVaultNodeRef if is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def getValue(self):
        """Returns the value as a string of this chronicle node."""
        pass
    def hasNode(self,id):
        """Returns true if node is a child node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setEntryType(self,type):
        """Sets this chronicle node to a user defined type."""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setName(self,name):
        """Sets the name of the chronicle node."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def setValue(self,value):
        """Sets the chronicle to a value that is a string"""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerGameNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultImageNode(ptVaultNode):
    """Plasma vault image node.

    API stub: every method body is `pass` (and so returns None as
    written); the docstrings describe the intended behavior.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by Python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getImage(self):
        """Returns the image(ptImage) of this image node"""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by Python's time library."""
        pass
    def getNode(self,id):
        """Returns ptVaultNodeRef if is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getTitle(self):
        """Returns the title (caption) of this image node"""
        pass
    def getTitleW(self):
        """Unicode version of getTitle"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if node is a child node"""
        pass
    def imageGetImage(self):
        """LEGACY
        Returns the image(ptImage) of this image node"""
        pass
    def imageGetTitle(self):
        """LEGACY
        Returns the title (caption) of this image node"""
        pass
    def imageSetImage(self,image):
        """LEGACY
        Sets the image(ptImage) of this image node"""
        pass
    def imageSetTitle(self,title):
        """LEGACY
        Sets the title (caption) of this image node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setImage(self,image):
        """Sets the image(ptImage) of this image node"""
        pass
    def setImageFromBuf(self,buf):
        """Sets our image from a buffer"""
        pass
    def setImageFromScrShot(self):
        """Grabs a screenshot and stuffs it into this node"""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setTitle(self,title):
        """Sets the title (caption) of this image node"""
        pass
    def setTitleW(self,title):
        """Unicode version of setTitle"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerGameNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultMarkerGameNode(ptVaultNode):
    """Plasma vault marker game node.

    (Original docstring said "age info node" — a copy-paste error; this
    class exposes marker-game name/guid accessors.)

    API stub: every method body is `pass` (and so returns None as
    written); the docstrings describe the intended behavior.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by Python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getGameGuid(self):
        """Returns the marker game's guid"""
        pass
    def getGameName(self):
        """Returns the marker game's name"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by Python's time library."""
        pass
    def getNode(self,id):
        """Returns ptVaultNodeRef if is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if node is a child node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setGameGuid(self,guid):
        """Sets the marker game's guid"""
        pass
    def setGameName(self,name):
        """Sets marker game's name"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerGameNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultNodeRef:
    """Pseudo class describing a parent/child relationship between two
    vault nodes.

    API stub: every accessor is a placeholder body and returns None as
    written; docstrings describe the intended behavior.
    """
    def __init__(self):
        """None"""
        return None
    def beenSeen(self):
        """Returns true until we reimplement this"""
        return None
    def getChild(self):
        """Returns a ptVaultNode that is the child of this reference"""
        return None
    def getChildID(self):
        """Returns id of the child node"""
        return None
    def getParent(self):
        """Returns a ptVaultNode that is the parent of the reference"""
        return None
    def getParentID(self):
        """Returns id of the parent node"""
        return None
    def getSaver(self):
        """Returns a ptVaultPlayerInfoNode of player that created this relationship"""
        return None
    def getSaverID(self):
        """Returns id of player that created this relationship"""
        return None
    def setSeen(self):
        """Does nothing until we reimplement this"""
        return None
class ptVaultPlayerInfoListNode(ptVaultFolderNode):
    """Plasma vault player info list node.

    Stub class: all methods are implemented by the Plasma engine (C++ side);
    these definitions exist only for documentation and IDE support.
    """
    def __init__(self,n=0):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def addPlayer(self,playerID):
        """Adds playerID player to this player info list node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def folderGetName(self):
        """LEGACY
        Returns the folder's name"""
        pass
    def folderGetType(self):
        """LEGACY
        Returns the folder type (of the standard folder types)"""
        pass
    def folderSetName(self,name):
        """LEGACY
        Set the folder name"""
        pass
    def folderSetType(self,type):
        """LEGACY
        Set the folder type"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getFolderName(self):
        """Returns the folder's name"""
        pass
    def getFolderNameW(self):
        """Unicode version of getFolderName"""
        pass
    def getFolderType(self):
        """Returns the folder type (of the standard folder types)"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by python's time library."""
        pass
    def getNode(self,id):
        """Returns a ptVaultNodeRef if 'id' is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getPlayer(self,playerID):
        """Gets the player info node for the specified player."""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if the node with 'id' is a child node"""
        pass
    def hasPlayer(self,playerID):
        """Returns whether the 'playerID' is a member of this player info list node."""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def playerlistAddPlayer(self,playerID):
        """LEGACY: Adds playerID player to this player info list node."""
        pass
    def playerlistGetPlayer(self,playerID):
        """LEGACY: Gets the player info node for the specified player."""
        pass
    def playerlistHasPlayer(self,playerID):
        """LEGACY: Returns whether the 'playerID' is a member of this player info list node."""
        pass
    def playerlistRemovePlayer(self,playerID):
        """LEGACY: Removes playerID player from this player info list node."""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def removePlayer(self,playerID):
        """Removes playerID player from this player info list node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setFolderName(self,name):
        """Set the folder name"""
        pass
    def setFolderNameW(self,name):
        """Unicode version of setFolderName"""
        pass
    def setFolderType(self,type):
        """Set the folder type"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def sort(self):
        """Sorts the player list (the exact ordering is engine-defined — TODO confirm)."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultPlayerInfoNode(ptVaultNode):
    """Plasma vault player info node.

    Stub class: all methods are implemented by the Plasma engine (C++ side);
    these definitions exist only for documentation and IDE support.
    (Original docstring said "folder node", which was a copy-paste error.)
    """
    def __init__(self):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by python's time library."""
        pass
    def getNode(self,id):
        """Returns a ptVaultNodeRef if 'id' is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if the node with 'id' is a child node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def playerGetAgeGuid(self):
        """Returns the guid as a string of where the player is for this player info node."""
        pass
    def playerGetAgeInstanceName(self):
        """Returns the name of the Age where the player is for this player info node."""
        pass
    def playerGetCCRLevel(self):
        """Returns the ccr level of the player for this player info node."""
        pass
    def playerGetID(self):
        """Returns the player ID for this player info node."""
        pass
    def playerGetName(self):
        """Returns the player name of this player info node."""
        pass
    def playerIsOnline(self):
        """Returns the online status of the player for this player info node."""
        pass
    def playerSetAgeGuid(self,guidString):
        """Not sure this should be used. Sets the guid for this player info node."""
        pass
    def playerSetAgeInstanceName(self,name):
        """Not sure this should be used. Sets the name of the age where the player is for this player info node."""
        pass
    def playerSetID(self,playerID):
        """Not sure this should be used. Sets the playerID for this player info node."""
        pass
    def playerSetName(self,name):
        """Not sure this should be used. Sets the player name of this player info node."""
        pass
    def playerSetOnline(self,state):
        """Not sure this should be used. Sets the state of the player online status for this player info node."""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultSDLNode(ptVaultNode):
    """Plasma vault SDL node.

    Stub class: all methods are implemented by the Plasma engine (C++ side);
    these definitions exist only for documentation and IDE support.
    """
    def __init__(self):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getIdent(self):
        """UNKNOWN"""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by python's time library."""
        pass
    def getNode(self,id):
        """Returns a ptVaultNodeRef if 'id' is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getStateDataRecord(self):
        """Returns the ptSDLStateDataRecord associated with this node"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if the node with 'id' is a child node"""
        pass
    def initStateDataRecord(self,filename,flags):
        """Read the SDL Rec from File if needed"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setIdent(self,v):
        """UNKNOWN"""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setStateDataRecord(self,rec,writeOptions=0):
        """Sets the ptSDLStateDataRecord"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultSystemNode(ptVaultNode):
    """Plasma vault system node.

    Stub class: all methods are implemented by the Plasma engine (C++ side);
    these definitions exist only for documentation and IDE support.
    """
    def __init__(self):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by python's time library."""
        pass
    def getNode(self,id):
        """Returns a ptVaultNodeRef if 'id' is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getType(self):
        """Returns the type of ptVaultNode this is.
        See PlasmaVaultTypes.py"""
        pass
    def hasNode(self,id):
        """Returns true if the node with 'id' is a child node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setType(self,type):
        """Set the type of ptVaultNode this is."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVaultTextNoteNode(ptVaultNode):
    """Plasma vault text note node.

    Stub class: all methods are implemented by the Plasma engine (C++ side);
    these definitions exist only for documentation and IDE support.
    NOTE(review): in this class getType/setType refer to the text-note type,
    not the generic ptVaultNode type — confirm against the engine binding.
    """
    def __init__(self):
        """None"""
        pass
    def addNode(self,node,cb=None,cbContext=0):
        """Adds 'node'(ptVaultNode) as a child to this node."""
        pass
    def findNode(self,templateNode):
        """Returns ptVaultNode if child node found matching template, or None"""
        pass
    def getChildNodeCount(self):
        """Returns how many children this node has."""
        pass
    def getChildNodeRefList(self):
        """Returns a list of ptVaultNodeRef that are the children of this node."""
        pass
    def getClientID(self):
        """Returns the client's ID."""
        pass
    def getCreateAgeCoords(self):
        """Returns the location in the Age where this node was created."""
        pass
    def getCreateAgeGuid(self):
        """Returns the guid as a string of the Age where this node was created."""
        pass
    def getCreateAgeName(self):
        """Returns the name of the Age where this node was created."""
        pass
    def getCreateAgeTime(self):
        """Returns the time in the Age that the node was created...(?)"""
        pass
    def getCreateTime(self):
        """Returns when this node was created, in a form usable by python's time library."""
        pass
    def getCreatorNode(self):
        """Returns the creator's node"""
        pass
    def getCreatorNodeID(self):
        """Returns the creator's node ID"""
        pass
    def getDeviceInbox(self):
        """Returns a ptVaultFolderNode"""
        pass
    def getID(self):
        """Returns the unique ID of this ptVaultNode."""
        pass
    def getModifyTime(self):
        """Returns the modified time of this node, in a form usable by python's time library."""
        pass
    def getNode(self,id):
        """Returns a ptVaultNodeRef if 'id' is a child node, or None"""
        pass
    def getOwnerNode(self):
        """Returns a ptVaultNode of the owner of this node"""
        pass
    def getOwnerNodeID(self):
        """Returns the node ID of the owner of this node"""
        pass
    def getSubType(self):
        """Returns the subtype of this text note node."""
        pass
    def getText(self):
        """Returns the text of this text note node."""
        pass
    def getTextW(self):
        """Unicode version of getText."""
        pass
    def getTitle(self):
        """Returns the title of this text note node."""
        pass
    def getTitleW(self):
        """Unicode version of getTitle"""
        pass
    def getType(self):
        """Returns the type of text note for this text note node."""
        pass
    def hasNode(self,id):
        """Returns true if the node with 'id' is a child node"""
        pass
    def linkToNode(self,nodeID,cb=None,cbContext=0):
        """Adds a link to the node designated by nodeID"""
        pass
    def noteGetSubType(self):
        """LEGACY
        Returns the subtype of this text note node."""
        pass
    def noteGetText(self):
        """LEGACY
        Returns the text of this text note node."""
        pass
    def noteGetTitle(self):
        """LEGACY
        Returns the title of this text note node."""
        pass
    def noteGetType(self):
        """LEGACY
        Returns the type of text note for this text note node."""
        pass
    def noteSetSubType(self,subType):
        """LEGACY
        Sets the subtype of this text note node."""
        pass
    def noteSetText(self,text):
        """LEGACY
        Sets text of this text note node."""
        pass
    def noteSetTitle(self,title):
        """LEGACY
        Sets the title of this text note node."""
        pass
    def noteSetType(self,type):
        """LEGACY
        Sets the type of text note for this text note node."""
        pass
    def removeAllNodes(self):
        """Removes all the child nodes on this node."""
        pass
    def removeNode(self,node,cb=None,cbContext=0):
        """Removes the child 'node'(ptVaultNode) from this node."""
        pass
    def save(self,cb=None,cbContext=0):
        """Save the changes made to this node."""
        pass
    def saveAll(self,cb=None,cbContext=0):
        """Saves this node and all its children nodes."""
        pass
    def sendTo(self,destID,cb=None,cbContext=0):
        """Send this node to inbox at 'destID'"""
        pass
    def setCreateAgeGuid(self,guid):
        """Set guid as a string of the Age where this node was created."""
        pass
    def setCreateAgeName(self,name):
        """Set name of the Age where this node was created."""
        pass
    def setCreatorNodeID(self,id):
        """Set creator's node ID"""
        pass
    def setDeviceInbox(self,inboxName,cb=None,cbContext=0):
        """Sets the device inbox"""
        pass
    def setID(self,id):
        """Sets ID of this ptVaultNode."""
        pass
    def setOwnerNodeID(self,id):
        """Set node ID of the owner of this node"""
        pass
    def setSubType(self,subType):
        """Sets the subtype of this text note node."""
        pass
    def setText(self,text):
        """Sets text of this text note node."""
        pass
    def setTextW(self,text):
        """Unicode version of setText"""
        pass
    def setTitle(self,title):
        """Sets the title of this text note node."""
        pass
    def setTitleW(self,title):
        """Unicode version of setTitle"""
        pass
    def setType(self,type):
        """Sets the type of text note for this text note node."""
        pass
    def upcastToAgeInfoListNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoListNode"""
        pass
    def upcastToAgeInfoNode(self):
        """Returns this ptVaultNode as ptVaultAgeInfoNode"""
        pass
    def upcastToAgeLinkNode(self):
        """Returns this ptVaultNode as ptVaultAgeLinkNode"""
        pass
    def upcastToChronicleNode(self):
        """Returns this ptVaultNode as ptVaultChronicleNode"""
        pass
    def upcastToFolderNode(self):
        """Returns this ptVaultNode as ptVaultFolderNode"""
        pass
    def upcastToImageNode(self):
        """Returns this ptVaultNode as ptVaultImageNode"""
        pass
    def upcastToMarkerGameNode(self):
        """Returns this ptVaultNode as ptVaultMarkerNode"""
        pass
    def upcastToPlayerInfoListNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoListNode"""
        pass
    def upcastToPlayerInfoNode(self):
        """Returns this ptVaultNode as ptVaultPlayerInfoNode"""
        pass
    def upcastToPlayerNode(self):
        """Returns this ptVaultNode as a ptVaultPlayerNode"""
        pass
    def upcastToSDLNode(self):
        """Returns this ptVaultNode as a ptVaultSDLNode"""
        pass
    def upcastToSystemNode(self):
        """Returns this ptVaultNode as a ptVaultSystemNode"""
        pass
    def upcastToTextNoteNode(self):
        """Returns this ptVaultNode as ptVaultTextNoteNode"""
        pass
class ptVector3:
    """Plasma 3-component vector class (x, y, z).

    Stub class: all methods are implemented by the Plasma engine (C++ side).
    """
    def __init__(self,x=0, y=0, z=0):
        """None"""
        pass
    def add(self,other):
        """Adds other to the current vector"""
        pass
    def copy(self):
        """Copies the vector into another one (which it returns)"""
        pass
    def crossProduct(self,other):
        """Finds the cross product between other and this vector"""
        pass
    def dotProduct(self,other):
        """Finds the dot product between other and this vector"""
        pass
    def getX(self):
        """Returns the 'x' component of the vector"""
        pass
    def getY(self):
        """Returns the 'y' component of the vector"""
        pass
    def getZ(self):
        """Returns the 'z' component of the vector"""
        pass
    def length(self):
        """Returns the length of the vector"""
        pass
    def lengthSq(self):
        """Returns the length of the vector, squared
        - this function is faster than length(other)"""
        pass
    def normalize(self):
        """Normalizes the vector to length 1"""
        pass
    def scale(self,scale):
        """Scale the vector by scale"""
        pass
    def setX(self,x):
        """Sets the 'x' component of the vector"""
        pass
    def setY(self,y):
        """Sets the 'y' component of the vector"""
        pass
    def setZ(self,z):
        """Sets the 'z' component of the vector"""
        pass
    def subtract(self,other):
        """Subtracts other from the current vector"""
        pass
    def zero(self):
        """Zeros the vector's components"""
        pass
class ptWaveSet:
    """Creates a new ptWaveSet — scripted access to a water wave-set's attributes.

    Stub class: all methods are implemented by the Plasma engine (C++ side).
    """
    def __init__(self,ey):
        """None"""
        # NOTE(review): parameter name 'ey' looks like a truncation of 'key' in
        # the generated stub — confirm against the engine binding before relying on it.
        pass
    def getDepthFalloff(self):
        """Returns the attribute's value"""
        pass
    def getEnvCenter(self):
        """Returns the attribute's value"""
        pass
    def getEnvRadius(self):
        """Returns the attribute's value"""
        pass
    def getGeoAmpOverLen(self):
        """Returns the attribute's value"""
        pass
    def getGeoAngleDev(self):
        """Returns the attribute's value"""
        pass
    def getGeoChop(self):
        """Returns the attribute's value"""
        pass
    def getGeoMaxLength(self):
        """Returns the attribute's value"""
        pass
    def getGeoMinLength(self):
        """Returns the attribute's value"""
        pass
    def getMaxAtten(self):
        """Returns the attribute's value"""
        pass
    def getMinAtten(self):
        """Returns the attribute's value"""
        pass
    def getOpacFalloff(self):
        """Returns the attribute's value"""
        pass
    def getOpacOffset(self):
        """Returns the attribute's value"""
        pass
    def getReflFalloff(self):
        """Returns the attribute's value"""
        pass
    def getReflOffset(self):
        """Returns the attribute's value"""
        pass
    def getRippleScale(self):
        """Returns the attribute's value"""
        pass
    def getSpecularEnd(self):
        """Returns the attribute's value"""
        pass
    def getSpecularMute(self):
        """Returns the attribute's value"""
        pass
    def getSpecularNoise(self):
        """Returns the attribute's value"""
        pass
    def getSpecularStart(self):
        """Returns the attribute's value"""
        pass
    def getSpecularTint(self):
        """Returns the attribute's value"""
        pass
    def getTexAmpOverLen(self):
        """Returns the attribute's value"""
        pass
    def getTexAngleDev(self):
        """Returns the attribute's value"""
        pass
    def getTexChop(self):
        """Returns the attribute's value"""
        pass
    def getTexMaxLength(self):
        """Returns the attribute's value"""
        pass
    def getTexMinLength(self):
        """Returns the attribute's value"""
        pass
    def getWaterHeight(self):
        """Returns the attribute's value"""
        pass
    def getWaterOffset(self):
        """Returns the attribute's value"""
        pass
    def getWaterOpacity(self):
        """Returns the attribute's value"""
        pass
    def getWaterTint(self):
        """Returns the attribute's value"""
        pass
    def getWaveFalloff(self):
        """Returns the attribute's value"""
        pass
    def getWaveOffset(self):
        """Returns the attribute's value"""
        pass
    def getWindDir(self):
        """Returns the attribute's value"""
        pass
    def setDepthFalloff(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setEnvCenter(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setEnvRadius(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setGeoAmpOverLen(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setGeoAngleDev(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setGeoChop(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setGeoMaxLength(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setGeoMinLength(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setMaxAtten(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setMinAtten(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setOpacFalloff(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setOpacOffset(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setReflFalloff(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setReflOffset(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setRippleScale(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setSpecularEnd(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setSpecularMute(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setSpecularNoise(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setSpecularStart(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setSpecularTint(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setTexAmpOverLen(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setTexAngleDev(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setTexChop(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setTexMaxLength(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setTexMinLength(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWaterHeight(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWaterOffset(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWaterOpacity(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWaterTint(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWaveFalloff(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWaveOffset(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
    def setWindDir(self,s, secs = 0):
        """Sets the attribute to s over secs time"""
        pass
| TOC-Shard/moul-scripts | Python/plasma/Plasma.py | Python | gpl-3.0 | 263,729 | [
"VisIt"
] | 8b916d7c3f374ab365e1515349a2818013debfc9d2ea191eae7f6b5d12ef437e |
#!/usr/bin/env python
import sys
import subprocess as sb
import numpy as np
import argparse
from utilities import filesFromList, writeLog
from plotTools import userLabels, extractFromCSV, addToPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#======== Function definitions =============================#
def p2pMaxMin( r ):
  """Peak-to-peak evaluation of a 1D signal.

  Computes the largest cumulative rise over any run of consecutive
  non-negative increments, and the deepest (most negative) cumulative fall
  over any run of negative increments.

  Parameters
  ----------
  r : numpy.ndarray
      1D array of signal values (needs at least 2 entries).

  Returns
  -------
  (rp_max, rn_min) : tuple of floats
      rp_max >= 0 is the maximum cumulative rise; rn_min <= 0 the minimum
      cumulative fall.

  BUGFIX: the original version only updated rp_max/rn_min when a run was
  *broken* by an increment of the opposite sign, so the trailing run was
  never considered (a monotonically rising signal returned rp_max = 0).
  The final accumulators are now folded in after the loop.
  """
  dr = (r[1:] - r[:-1])           # successive increments
  rp_cum = 0.; rn_cum = 0.        # running rise / fall accumulators
  rp_max = 0.; rn_min = 0.        # best values seen so far
  for d in dr:
    if( d >= 0. ):
      rp_cum += d                 # extend the current rising run
      if( rn_cum < rn_min ): rn_min = rn_cum   # a falling run just ended
      rn_cum = 0.
    else:
      rn_cum += d                 # extend the current falling run
      if( rp_cum > rp_max ): rp_max = rp_cum   # a rising run just ended
      rp_cum = 0.
  # Include the trailing (unbroken) runs.
  rp_max = max( rp_max, rp_cum )
  rn_min = min( rn_min, rn_cum )
  return rp_max, rn_min
#==========================================================#
# Command-line interface. `strKey` is a positional search pattern used to
# collect the CSV files; the remaining options control plotting and the
# optional animation. writeLog records the invocation for reproducibility.
parser = argparse.ArgumentParser(prog='approachAnalysis.py')
parser.add_argument("strKey", help="Search string for collecting files.", nargs='?',
                    default=".csv")
parser.add_argument("--magy", help="Magnitude of all variables.", action="store_true",
                    default=False)
parser.add_argument("--yx", help="Reverse axes: plot(x,y) --> plot(y,x)", action="store_true",
                    default=False)
parser.add_argument("--labels", help="User specified labels.", action="store_true",
                    default=False)
parser.add_argument("--reuse", help="Reuse once specified variable selections.", action="store_true",
                    default=False)
parser.add_argument("-v", "--var", help="Variable Name in CSV-file", type=str, nargs='+',
                    default=['u','v','w'])
parser.add_argument("-yl", "--ylims", help="Y-axis limits: [min,max]. Default=[0,10]",
                    type=float, nargs=2, default=[0.,10.])
parser.add_argument("-fn", "--figName", help="Name of the (temporary) figures. (default=tmp)",
                    type=str, default="tmp")
parser.add_argument("-fa", "--fileAnim", help="Name of the animation file. (default=anim.gif)",
                    type=str, default="anim.gif")
parser.add_argument("-na", "--noAnim", help="Do not make an animation.",
                    action="store_true", default=False)
args = parser.parse_args()
writeLog(parser, args)
#==========================================================#
# Unpack parsed options into module-level names.
strKey      = args.strKey
figName     = args.figName
fileAnim    = args.fileAnim
noAnimation = args.noAnim
ylims       = args.ylims
varList     = args.var

# Interactive selection of the CSV files matching the search pattern.
fileNos, fileList = filesFromList("*"+strKey+"*")

print(' The varList [-v, --var] option is over ridden at this point. ')
print(' Reading coordinate values from file {} ...'.format(fileList[0]))
# Approach-line geometry: arc length plus x/y/z point coordinates, read
# once from the first file (assumed identical across files — TODO confirm).
coordList = ['arc_length', 'Points:0', 'Points:1', 'Points:2']
xv = extractFromCSV(fileList[0], coordList)
s = xv[0].copy()  # arc_length
x = xv[1].copy(); y = xv[2].copy(); z = xv[3].copy()
xv = None  # release the temporary
print(' Done.\n')
# -------------------------------------------------------- #
print(' Computing the mean velocity values ... ')
varList = ['u', 'v', 'w']

# Accumulate the ensemble (over files / time steps) mean of each velocity
# component. Means are initialized lazily from the first file's array shape.
Ux_mean = None; Uy_mean = None; Uz_mean = None
n = 0
for fn in fileNos:
    n += 1
    tv = extractFromCSV(fileList[fn], varList)
    u = tv[0].copy(); v = tv[1].copy(); w = tv[2].copy()
    tv = None
    # The velocity data may contain nan entries, which should be replaced by 0.
    u[np.isnan(u)] = 0.; v[np.isnan(v)] = 0.; w[np.isnan(w)] = 0.
    # Bug fix: use `is None`, not `== None`. Comparing a numpy array with
    # `==` is elementwise, which makes the `if` ambiguous (ValueError) on
    # every iteration after the first.
    if Ux_mean is None:
        Ux_mean = np.zeros(u.shape)  # Initialize on the first file
        Uy_mean = np.zeros(u.shape)
        Uz_mean = np.zeros(u.shape)
    Ux_mean += u; Uy_mean += v; Uz_mean += w

# Use the sums to compute the mean values.
# NOTE(review): raises ZeroDivisionError if no files matched the pattern.
Ux_mean /= float(n); Uy_mean /= float(n); Uz_mean /= float(n)
print(' Done.\n')
# -------------------------------------------------------- #
print(' Extract directional data from the approach line ... ')
#pfig = plotCSV( pfig, fileList[fn], args.yx, args.magy, args.reuse )
rad2deg = 180./np.pi
deg2rad = np.pi/180.

# Starting point: Rissala's approach line (from Paraview)
p1 = np.array([ x[0], y[0], z[0] ])    # np.array([6800., 1250., 0.])
p2 = np.array([ x[-1],y[-1],z[-1] ])   # np.array([7700., 650., 72.])
da = p2 - p1                           # approach-line vector, first -> last point
da_mag = np.sqrt( np.sum( da**2 ) )    # full 3D length
da_xy = np.sqrt( np.sum( da[0:2]**2))  # horizontal (x,y) length
# Approach direction (normal vector)
na = da/da_mag
# Sharp angle between the runway and the mean wind
theta = np.arccos( da[0]/da_xy )
print(' Sharp angle between the runway and the mean wind: theta = {} deg'.format( theta*rad2deg ))
print(' Done.\n')

# -------------------------------------------------------- #
# Hornet's approach speed and velocity (presumably m/s — TODO confirm units)
Uappr_mag = 69.
Ua = Uappr_mag*na
# Mean headwind: projection of the mean horizontal wind onto the runway axis.
Uhw_mean = Ux_mean * np.cos( theta ) - Uy_mean * np.sin( theta )
# Speed relative to the ground ... perhaps not needed.
U_grd = Uappr_mag - Uhw_mean
# Approach angle (glide slope from horizontal)
gamma = np.arctan( da[2]/da_xy )

# F18 Data:
rho = 1.2    # standard air
CL = 1.2     # at 7deg angle of attack
CLa = 2.86   # 1/rad (alpha in range [3deg, 10deg])
Aref = 18.*3. # reference wing area, span * chord (approx.)
K = 0.5*rho*Aref  # dynamic-pressure prefactor: L = K * U^2 * CL

# Extract deviations in the headwind and compute the changes in AoA [alpha].
Lift = K*Uappr_mag**2*CL  # baseline lift at the nominal approach speed
n = 0                      # reset file counter for the second pass
dL_max = 0.                # running max of |dL| [%]
dL_sum = 0.                # accumulator for the mean variance
dL_mxv = 0.                # Maximum variance.
dL_p2p_max = 0.            # running peak-to-peak rise
dL_p2p_min = 0.            # running peak-to-peak fall
# Second pass: per time step, compute lift perturbations caused by
# turbulence along the approach line and render a two-panel figure.
for fn in fileNos:
    n += 1
    #pfig = pl.figure(num=1, figsize=(18.,9.))
    tv = extractFromCSV( fileList[fn] , varList ) # NOTE: varList = ['u', 'v', 'w']
    # Velocity fluctuations about the ensemble mean.
    du = tv[0]-Ux_mean
    dv = tv[1]-Uy_mean
    dw = tv[2]-Uz_mean # Uz_mean could be replaced by 0.
    tv = None
    # The velocity data may contain nan entries, which should be replaced by 0.
    du[np.isnan(du)] = 0.; dv[np.isnan(dv)] = 0.; dw[np.isnan(dw)] = 0.
    # Headwind fluctuation: projection of (du, dv) onto the runway axis.
    dU_hw = du * np.cos( theta ) - dv * np.sin( theta )
    # Angle-of-attack perturbation from the vertical gust.
    dalpha = np.arctan( dw/Uappr_mag )
    # Change in lift due to changes in AoA:
    dL_a = K*Uappr_mag**2*CLa*dalpha
    # Change in lift due to changes in head wind.
    dL_u = 2.*K*CL*Uappr_mag*dU_hw
    dLp_a = dL_a/Lift * 100. # In percentage
    dLp_u = dL_u/Lift * 100.
    # Magnitude of the combined perturbation (sqrt of square == abs value).
    dLp_mag= np.sqrt( (dLp_a+dLp_u)**2 )
    #fig = plt.figure(num=1, figsize=(18,9))
    fig, (ax1, ax2) = plt.subplots(num=1, nrows=2, sharex=True, figsize=(18,11))
    lines11,=ax1.plot( s,dLp_a,'-o', linewidth=1.6 )
    lines12,=ax1.plot( s,dLp_u,'-o', linewidth=1.6 )
    ax1.legend( (lines11,lines12) , ('dL(alpha) [%]',' dL(u) [%]'), loc=1 )
    ax1.set_ylim([-8., 8.])
    ax1.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
    ax1.set_title(' Changes in Lift due to turbulence ', fontsize=22)
    ax1.set_ylabel(' dL [%] ', fontsize=22); ax1.grid(True)
    lines2,=ax2.plot(s,dLp_mag,'-ro', linewidth=1.6 )
    ax2.legend( (lines2,) , (' ABS(SUM(dL)) [%]',), loc=1 )
    ax2.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
    ax2.set_ylim([-1., 12.5]); ax2.set_xlim([ min(s) , max(s)])
    ax2.set_xlabel(' Distance along approach line [m] ', fontsize=22 )
    ax2.set_ylabel(' dL [%] ', fontsize=22 ); ax2.grid(True)
    # Maximum variance
    dL_ivar = np.var( dLp_mag[ du > 0 ] ) # Consider only nonzero values.
    if( dL_ivar > dL_mxv ): dL_mxv = dL_ivar
    # Mean variance
    dL_sum += dL_ivar
    dL_var = dL_sum/float(n)
    dL_imax = np.max(dLp_mag)
    if( dL_imax > dL_max ): dL_max = dL_imax
    # Peak-to-peak extrema of the signed perturbation along the line.
    dL_ip2p_mx, dL_ip2p_mn = p2pMaxMin( (dLp_a+dLp_u) )
    if( dL_ip2p_mx > dL_p2p_max ): dL_p2p_max = dL_ip2p_mx
    if( dL_ip2p_mn < dL_p2p_min ): dL_p2p_min = dL_ip2p_mn
    # Info box with running statistics.
    # NOTE(review): the time label assumes files are 2 s apart — confirm.
    infoStr =' Time = {:4d}s\n'.format((n-1)*2)
    infoStr +=' Current P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_ip2p_mx, dL_ip2p_mn)
    infoStr +=' Running P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_p2p_max, dL_p2p_min)
    #infoStr +=' Max(dL) = {:4.1f}%\n'.format(dL_imax)
    infoStr +=' Running Max(dL) = {:4.1f}%\n'.format(dL_max)
    #infoStr +=' Var(dL) = {:4.1f}%\n'.format(dL_ivar)
    infoStr +=' Running Mean(Var(dL)) = {:4.1f}%\n'.format(dL_var)
    infoStr +=' Running Max(Var(dL)) = {:4.1f}%\n'.format(dL_mxv)
    plt.text( 1. , 5.5, infoStr , fontsize=20)
    figStr = '{}_{:04d}.jpg'.format(figName,n)
    print(' Saving figure {} '.format(figStr))
    fig.savefig(figStr)
    ax1.cla(); ax2.cla(); fig.clf()
# Optionally assemble the per-timestep figures into an animation with
# ImageMagick's `convert` (must be available on PATH).
if not noAnimation:
    cmd = 'convert {}_* {} '.format(figName, fileAnim)
    # Bug fix: use the Python 3 print() function. The original line was a
    # Python 2 print *statement*, a SyntaxError under Python 3 (which the
    # rest of this script targets).
    print(' Executing command: ${}'.format(cmd))
    sb.call(cmd, shell=True)
print(' All Done! ')
| saskartt/P4UL | pyAnalyze/approachAnalysis.py | Python | mit | 8,459 | [
"ParaView"
] | 2bbc8ba22826622aabea1619a0f8053f35e7060327fa62ff1cf9571e0df7a5a7 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import itertools
import logging
from operator import mul
import functools
from monty.json import MSONable
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.analysis.structure_prediction.substitution_probability \
import SubstitutionProbability
from pymatgen.transformations.standard_transformations \
import SubstitutionTransformation
from pymatgen.alchemy.transmuters import StandardTransmuter
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.alchemy.filters import RemoveDuplicatesFilter, \
RemoveExistingFilter
"""
This module provides classes for predicting new structures from existing ones.
"""
# Module metadata (authorship / version bookkeeping).
__author__ = "Will Richards, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Will Richards"
__email__ = "wrichard@mit.edu"
__date__ = "Aug 31, 2012"
class Substitutor(MSONable):
    """
    This object uses a data mined ionic substitution approach to propose
    compounds likely to be stable. It relies on an algorithm presented in
    Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011).
    Data Mined Ionic Substitutions for the Discovery of New Compounds.
    Inorganic Chemistry, 50(2), 656-663. doi:10.1021/ic102031h
    """

    def __init__(self, threshold=1e-3, symprec=0.1, **kwargs):
        """
        This substitutor uses the substitution probability class to
        find good substitutions for a given chemistry or structure.

        Args:
            threshold: probability threshold for predictions.
            symprec: symmetry precision to determine if two structures
                are duplicates.
            **kwargs: kwargs for the SubstitutionProbability object
                (lambda_table, alpha).
        """
        self._kwargs = kwargs
        self._sp = SubstitutionProbability(**kwargs)
        self._threshold = threshold
        self._symprec = symprec

    def get_allowed_species(self):
        """
        Returns the species in the domain of the probability function;
        any other specie will not work.
        """
        return self._sp.species

    def pred_from_structures(self, target_species, structures_list,
                             remove_duplicates=True, remove_existing=False):
        """
        Performs a structure prediction targeting compounds containing all of
        the target_species, based on a list of structures (those structures
        can for instance come from a database like the ICSD). It will return
        all the structures formed by ionic substitutions with a probability
        higher than the threshold.

        Notes:
            If the default probability model is used, input structures must
            be oxidation state decorated.
            See AutoOxiStateDecorationTransformation.

            This method does not change the number of species in a structure,
            i.e. if the number of target species is 3, only input structures
            containing 3 species will be considered.

        Args:
            target_species: a list of species with oxidation states,
                e.g. [Specie('Li', 1), Specie('Ni', 2), Specie('O', -2)].
            structures_list: a list of dictionaries of the form
                {'structure': Structure object, 'id': some id where it comes
                from}; the id can for instance refer to an ICSD id.
            remove_duplicates: if True, the duplicates in the predicted
                structures will be removed.
            remove_existing: if True, the predicted structures that already
                exist in structures_list will be removed.

        Returns:
            a list of TransformedStructure objects.
        """
        target_species = get_el_sp(target_species)
        transmuter = StandardTransmuter([])
        allowed = set(self.get_allowed_species())
        if len(list(set(target_species) & allowed)) != len(target_species):
            raise ValueError("the species in target_species are not allowed "
                             "for the probability model you are using")
        for permut in itertools.permutations(target_species):
            for s in structures_list:
                els = s['structure'].composition.elements
                # Candidate structures must have the same number of species,
                # all of them inside the probability model's domain.
                if len(els) != len(permut):
                    continue
                if len(list(set(els) & allowed)) != len(els):
                    continue
                # Computed once and reused below (it was evaluated twice in
                # the original); cond_prob_list is a pure function.
                proba = self._sp.cond_prob_list(permut, els)
                if proba <= self._threshold:
                    continue
                # Drop identity substitutions; skip if nothing would change.
                clean_subst = {el: sp for el, sp in zip(els, permut)
                               if el != sp}
                if not clean_subst:
                    continue
                transf = SubstitutionTransformation(clean_subst)
                if Substitutor._is_charge_balanced(
                        transf.apply_transformation(s['structure'])):
                    ts = TransformedStructure(
                        s['structure'], [transf],
                        history=[{"source": s['id']}],
                        other_parameters={
                            'type': 'structure_prediction',
                            'proba': proba})
                    transmuter.append_transformed_structures([ts])
        if remove_duplicates:
            transmuter.apply_filter(RemoveDuplicatesFilter(
                symprec=self._symprec))
        if remove_existing:
            # Keep only the input structures that belong to the target
            # chemical system for the existence filter.
            chemsys = list(set([sp.symbol for sp in target_species]))
            structures_list_target = [
                st['structure'] for st in structures_list
                if Substitutor._is_from_chemical_system(chemsys,
                                                        st['structure'])]
            transmuter.apply_filter(RemoveExistingFilter(
                structures_list_target, symprec=self._symprec))
        return transmuter.transformed_structures

    @staticmethod
    def _is_charge_balanced(struct):
        """
        Checks if the structure object is charge balanced (site oxidation
        states sum to zero).
        """
        return sum(s.specie.oxi_state for s in struct.sites) == 0.0

    @staticmethod
    def _is_from_chemical_system(chemical_system, struct):
        """
        Checks if the structure object is from the given chemical system
        (same number of distinct elements, all contained in chemical_system).
        """
        chemsys = set(sp.symbol for sp in struct.composition)
        if len(chemsys) != len(chemical_system):
            return False
        return all(el in chemical_system for el in chemsys)

    def pred_from_list(self, species_list):
        """
        There are an exceptionally large number of substitutions to
        look at (260^n), where n is the number of species in the
        list. We need a more efficient than brute force way of going
        through these possibilities. The brute force method would be::

            output = []
            for p in itertools.product(self._sp.species_list,
                                       repeat=len(species_list)):
                if self._sp.conditional_probability_list(p, species_list) \
                        > self._threshold:
                    output.append(dict(zip(species_list, p)))
            return output

        Instead of that we do a branch and bound.

        Args:
            species_list: list of species in the starting structure.

        Returns:
            list of dictionaries, each including a substitutions
            dictionary, and a probability value.
        """
        species_list = get_el_sp(species_list)
        # Highest achievable probability for each slot; used as an
        # optimistic bound to prune the recursion below.
        max_probabilities = []
        for s2 in species_list:
            max_p = 0
            for s1 in self._sp.species:
                max_p = max([self._sp.cond_prob(s1, s2), max_p])
            max_probabilities.append(max_p)
        output = []

        def _recurse(output_prob, output_species):
            # Best case: realized probabilities for the filled slots,
            # optimistic bounds for the remaining ones. At full depth this
            # product is the actual probability of the substitution.
            best_case_prob = list(max_probabilities)
            best_case_prob[:len(output_prob)] = output_prob
            if functools.reduce(mul, best_case_prob) > self._threshold:
                if len(output_species) == len(species_list):
                    odict = {
                        'substitutions':
                            dict(zip(species_list, output_species)),
                        'probability': functools.reduce(mul, best_case_prob)}
                    output.append(odict)
                    return
                for sp in self._sp.species:
                    i = len(output_prob)
                    prob = self._sp.cond_prob(sp, species_list[i])
                    _recurse(output_prob + [prob], output_species + [sp])

        _recurse([], [])
        logging.info('%d substitutions found', len(output))
        return output

    def pred_from_comp(self, composition):
        """
        Similar to pred_from_list except this method returns a list after
        checking that compositions are charge balanced.
        """
        output = []
        predictions = self.pred_from_list(composition.elements)
        for p in predictions:
            subs = p['substitutions']
            charge = 0
            for i_el in composition.elements:
                f_el = subs[i_el]
                charge += f_el.oxi_state * composition[i_el]
            if charge == 0:
                output.append(p)
        logging.info('%d charge balanced compositions found', len(output))
        return output

    def as_dict(self):
        """MSONable serialization of the substitutor's configuration."""
        return {"name": self.__class__.__name__, "version": __version__,
                "kwargs": self._kwargs, "threshold": self._threshold,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        """Reconstruct a Substitutor from its as_dict() representation."""
        t = d['threshold']
        kwargs = d['kwargs']
        return cls(threshold=t, **kwargs)
| dongsenfo/pymatgen | pymatgen/analysis/structure_prediction/substitutor.py | Python | mit | 10,806 | [
"pymatgen"
] | 39db3adf3cc374290f1c468f3694315d24a13583d7a78454af43662ca9c7c826 |
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .model import Model
from .parameterization.variational import VariationalPosterior
from .mapping import Mapping
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation
from ..util.normalizer import Standardize
from paramz import ObsAr
import logging
import warnings
logger = logging.getLogger("GP")
class GP(Model):
"""
General purpose Gaussian process model
:param X: input observations
:param Y: output observations
:param kernel: a GPy kernel, defaults to rbf+white
:param likelihood: a GPy likelihood
:param inference_method: The :class:`~GPy.inference.latent_function_inference.LatentFunctionInference` inference method to use for this GP
:rtype: model object
:param Norm normalizer:
normalize the outputs Y.
Prediction will be un-normalized using this normalizer.
If normalizer is True, we will normalize using Standardize.
If normalizer is False, no normalization will be done.
.. Note:: Multiple independent outputs are allowed using columns of Y
"""
def __init__(self, X, Y, kernel, likelihood, mean_function=None, inference_method=None, name='gp', Y_metadata=None, normalizer=False):
    super(GP, self).__init__(name)
    # NOTE(review): asserts are used for input validation here; they are
    # stripped under `python -O`.
    assert X.ndim == 2
    # Wrap X as an observable array unless it already is one (or a
    # variational posterior, used by latent-variable models).
    if isinstance(X, (ObsAr, VariationalPosterior)):
        self.X = X.copy()
    else: self.X = ObsAr(X)
    self.num_data, self.input_dim = self.X.shape
    assert Y.ndim == 2
    logger.info("initializing Y")
    # Normalizer handling: True -> Standardize, False -> no normalization,
    # anything else -> a user-supplied normalizer object.
    if normalizer is True:
        self.normalizer = Standardize()
    elif normalizer is False:
        self.normalizer = None
    else:
        self.normalizer = normalizer
    if self.normalizer is not None:
        # Fit the normalizer to Y and keep both raw and normalized copies.
        self.normalizer.scale_by(Y)
        self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
        self.Y = Y
    elif isinstance(Y, np.ndarray):
        self.Y = ObsAr(Y)
        self.Y_normalized = self.Y
    else:
        self.Y = Y
        self.Y_normalized = self.Y
    if Y.shape[0] != self.num_data:
        #There can be cases where we want inputs than outputs, for example if we have multiple latent
        #function values
        warnings.warn("There are more rows in your input data X, \
            than in your output data Y, be VERY sure this is what you want")
    _, self.output_dim = self.Y.shape
    assert ((Y_metadata is None) or isinstance(Y_metadata, dict))
    self.Y_metadata = Y_metadata
    assert isinstance(kernel, kern.Kern)
    #assert self.input_dim == kernel.input_dim
    self.kern = kernel
    assert isinstance(likelihood, likelihoods.Likelihood)
    self.likelihood = likelihood
    if self.kern._effective_input_dim != self.X.shape[1]:
        warnings.warn("Your kernel has a different input dimension {} then the given X dimension {}. Be very sure this is what you want and you have not forgotten to set the right input dimenion in your kernel".format(self.kern._effective_input_dim, self.X.shape[1]))
    #handle the mean function
    self.mean_function = mean_function
    if mean_function is not None:
        assert isinstance(self.mean_function, Mapping)
        assert mean_function.input_dim == self.input_dim
        assert mean_function.output_dim == self.output_dim
        self.link_parameter(mean_function)
    #find a sensible inference method
    logger.info("initializing inference method")
    if inference_method is None:
        # Gaussian (or mixed-noise Gaussian) likelihoods admit exact
        # inference; everything else falls back to EP.
        if isinstance(likelihood, likelihoods.Gaussian) or isinstance(likelihood, likelihoods.MixedNoise):
            inference_method = exact_gaussian_inference.ExactGaussianInference()
        else:
            inference_method = expectation_propagation.EP()
        print("defaulting to " + str(inference_method) + " for latent function inference")
    self.inference_method = inference_method
    logger.info("adding kernel and likelihood as parameters")
    self.link_parameter(self.kern)
    self.link_parameter(self.likelihood)
    # Populated by parameters_changed() on the first inference pass.
    self.posterior = None
def to_dict(self, save_data=True):
    """
    Serialize the model into a JSON-compatible dictionary.

    :param bool save_data: when False, X and Y are stored as None and must
        be re-supplied to `_from_dict` via its `data` argument.
    :returns: dict containing the kernel, likelihood, inference method,
        optional mean function / Y_metadata / normalizer, and (optionally)
        the data.
    """
    input_dict = super(GP, self)._to_dict()
    input_dict["class"] = "GPy.core.GP"
    if not save_data:
        input_dict["X"] = None
        input_dict["Y"] = None
    else:
        # X/Y may be plain ndarray-like (no `.values`) or parameterized
        # objects exposing `.values`. AttributeError selects the accessor.
        # Bug fix: the original used bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            input_dict["X"] = self.X.values.tolist()
        except AttributeError:
            input_dict["X"] = self.X.tolist()
        try:
            input_dict["Y"] = self.Y.values.tolist()
        except AttributeError:
            input_dict["Y"] = self.Y.tolist()
    input_dict["kernel"] = self.kern.to_dict()
    input_dict["likelihood"] = self.likelihood.to_dict()
    if self.mean_function is not None:
        input_dict["mean_function"] = self.mean_function.to_dict()
    input_dict["inference_method"] = self.inference_method.to_dict()
    #FIXME: Assumes the Y_metadata is serializable. We should create a Metadata class
    if self.Y_metadata is not None:
        input_dict["Y_metadata"] = self.Y_metadata
    if self.normalizer is not None:
        input_dict["normalizer"] = self.normalizer.to_dict()
    return input_dict
@staticmethod
def _from_dict(input_dict, data=None):
    # Deserialize a GP from the dict produced by to_dict().
    # `data` is an optional (X, Y) pair; it is required when the model was
    # saved with save_data=False.
    import GPy
    import numpy as np
    if (input_dict['X'] is None) or (input_dict['Y'] is None):
        # Data was not saved with the model; the caller must supply it.
        assert(data is not None)
        input_dict["X"], input_dict["Y"] = np.array(data[0]), np.array(data[1])
    elif data is not None:
        print("WARNING: The model has been saved with X,Y! The original values are being overriden!")
        input_dict["X"], input_dict["Y"] = np.array(data[0]), np.array(data[1])
    else:
        input_dict["X"], input_dict["Y"] = np.array(input_dict['X']), np.array(input_dict['Y'])
    # Rebuild the component objects from their serialized forms.
    input_dict["kernel"] = GPy.kern.Kern.from_dict(input_dict["kernel"])
    input_dict["likelihood"] = GPy.likelihoods.likelihood.Likelihood.from_dict(input_dict["likelihood"])
    mean_function = input_dict.get("mean_function")
    if mean_function is not None:
        input_dict["mean_function"] = GPy.core.mapping.Mapping.from_dict(mean_function)
    else:
        input_dict["mean_function"] = mean_function
    input_dict["inference_method"] = GPy.inference.latent_function_inference.LatentFunctionInference.from_dict(input_dict["inference_method"])
    #FIXME: Assumes the Y_metadata is serializable. We should create a Metadata class
    Y_metadata = input_dict.get("Y_metadata")
    input_dict["Y_metadata"] = Y_metadata
    normalizer = input_dict.get("normalizer")
    if normalizer is not None:
        input_dict["normalizer"] = GPy.util.normalizer._Norm.from_dict(normalizer)
    else:
        input_dict["normalizer"] = normalizer
    # The remaining keys of input_dict line up with GP.__init__'s signature.
    return GP(**input_dict)
def save_model(self, output_filename, compress=True, save_data=True):
    """
    Save the model to `output_filename`.

    :param str output_filename: target file name.
    :param bool compress: whether to compress the saved file.
    :param bool save_data: whether to store X and Y alongside the model.
    """
    # Bug fix: forward the caller's arguments. The original hard-coded
    # compress=True, save_data=True, silently ignoring both parameters.
    self._save_model(output_filename, compress=compress, save_data=save_data)
# The predictive variable to be used to predict using the posterior object's
# woodbury_vector and woodbury_inv is defined as predictive_variable
# as long as the posterior has the right woodbury entries.
# It is the input variable used for the covariance between
# X_star and the posterior of the GP.
# This is usually just a link to self.X (full GP) or self.Z (sparse GP).
# Make sure to name this variable and the predict functions will "just work"
# In maths the predictive variable is:
# K_{xx} - K_{xp}W_{pp}^{-1}K_{px}
# W_{pp} := \texttt{Woodbury inv}
# p := _predictive_variable
@property
def _predictive_variable(self):
    """Input variable used for covariances with new points (the full GP
    uses self.X; sparse subclasses override this to return inducing inputs)."""
    return self.X
def set_XY(self, X=None, Y=None):
    """
    Set the input / output data of the model.
    This is useful if we wish to change our existing data but maintain the same model.

    :param X: input observations
    :type X: np.ndarray
    :param Y: output observations
    :type Y: np.ndarray
    """
    # Suspend recomputation while swapping data, then trigger a single
    # update at the end.
    self.update_model(False)
    if Y is not None:
        if self.normalizer is not None:
            # Refit the normalizer to the new outputs.
            self.normalizer.scale_by(Y)
            self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
            self.Y = Y
        else:
            self.Y = ObsAr(Y)
            self.Y_normalized = self.Y
    if X is not None:
        if self.X in self.parameters:
            # LVM models: X itself is an optimized parameter and must be
            # unlinked/relinked at the same index to preserve ordering.
            if isinstance(self.X, VariationalPosterior):
                assert isinstance(X, type(self.X)), "The given X must have the same type as the X in the model!"
                index = self.X._parent_index_
                self.unlink_parameter(self.X)
                self.X = X
                self.link_parameter(self.X, index=index)
            else:
                index = self.X._parent_index_
                self.unlink_parameter(self.X)
                from ..core import Param
                self.X = Param('latent mean', X)
                self.link_parameter(self.X, index=index)
        else:
            # Plain (non-parameter) inputs: just wrap as observable array.
            self.X = ObsAr(X)
    self.update_model(True)
def set_X(self,X):
    """
    Set the input data of the model.

    :param X: input observations
    :type X: np.ndarray
    """
    # Delegates to set_XY, which handles parameter relinking and updates.
    self.set_XY(X=X)
def set_Y(self,Y):
    """
    Set the output data of the model.

    :param Y: output observations
    :type Y: np.ndarray
    """
    # Delegates to set_XY, which handles normalization and model updates.
    self.set_XY(Y=Y)
def parameters_changed(self):
    """
    Method that is called upon any changes to :class:`~GPy.core.parameterization.param.Param` variables within the model.
    In particular in the GP class this method re-performs inference, recalculating the posterior and log marginal likelihood and gradients of the model.

    .. warning::
        This method is not designed to be called manually, the framework is set up to automatically call this method upon changes to parameters, if you call
        this method yourself, there may be unexpected consequences.
    """
    # Re-run inference with the current parameters, then propagate the
    # resulting gradients to the likelihood, kernel and mean function.
    self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.mean_function, self.Y_metadata)
    self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'])
    self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X)
    if self.mean_function is not None:
        self.mean_function.update_gradients(self.grad_dict['dL_dm'], self.X)
def log_likelihood(self):
    """
    The log marginal likelihood of the model, :math:`p(\mathbf{y})`, this is the objective function of the model being optimised.
    """
    # Cached value; recomputed by parameters_changed() after every change.
    return self._log_marginal_likelihood
def _raw_predict(self, Xnew, full_cov=False, kern=None):
    """
    For making predictions, does not account for normalization or likelihood.

    full_cov is a boolean which defines whether the full covariance matrix
    of the prediction is computed. If full_cov is False (default), only the
    diagonal of the covariance is returned.

    .. math::
        p(f*|X*, X, Y) = \int^{\inf}_{\inf} p(f*|f,X*)p(f|X,Y) df
                       = N(f*| K_{x*x}(K_{xx} + \Sigma)^{-1}Y, K_{x*x*} - K_{xx*}(K_{xx} + \Sigma)^{-1}K_{xx*}
        \Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
    """
    # Delegate to the posterior object; it holds the woodbury quantities
    # computed at inference time.
    mu, var = self.posterior._raw_predict(kern=self.kern if kern is None else kern, Xnew=Xnew, pred_var=self._predictive_variable, full_cov=full_cov)
    # Add the deterministic mean function contribution, if any.
    if self.mean_function is not None:
        mu += self.mean_function.f(Xnew)
    return mu, var
def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None,
            likelihood=None, include_likelihood=True):
    """
    Predict the function(s) at the new point(s) Xnew. This includes the
    likelihood variance added to the predicted underlying function
    (usually referred to as f).

    In order to predict without adding in the likelihood give
    `include_likelihood=False`, or refer to self.predict_noiseless().

    :param Xnew: The points at which to make a prediction
    :type Xnew: np.ndarray (Nnew x self.input_dim)
    :param full_cov: whether to return the full covariance matrix, or just
                     the diagonal
    :type full_cov: bool
    :param Y_metadata: metadata about the predicting point to pass to the
                       likelihood
    :param kern: The kernel to use for prediction (defaults to the model
                 kern). this is useful for examining e.g. subprocesses.
    :param include_likelihood: Whether or not to add likelihood noise to
                               the predicted underlying latent function f.
    :type include_likelihood: bool

    :returns: (mean, var):
        mean: posterior mean, a Numpy array, Nnew x self.input_dim
        var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False,
             Nnew x Nnew otherwise

        If full_cov and self.input_dim > 1, the return shape of var is
        Nnew x Nnew x self.input_dim. If self.input_dim == 1, the return
        shape is Nnew x Nnew. This is to allow for different normalizations
        of the output dimensions.

    Note: If you want the predictive quantiles (e.g. 95% confidence
    interval) use :py:func:"~GPy.core.gp.GP.predict_quantiles".
    """
    # Predict the latent function values
    mean, var = self._raw_predict(Xnew, full_cov=full_cov, kern=kern)

    if include_likelihood:
        # now push through likelihood
        if likelihood is None:
            likelihood = self.likelihood
        mean, var = likelihood.predictive_values(mean, var, full_cov,
                                                 Y_metadata=Y_metadata)

    if self.normalizer is not None:
        # Map predictions back to the original (un-normalized) output scale.
        mean = self.normalizer.inverse_mean(mean)
        # We need to create 3d array for the full covariance matrix with
        # multiple outputs.
        # NOTE: `&` is applied to two Python bools here, equivalent to `and`.
        if full_cov & (mean.shape[1] > 1):
            var = self.normalizer.inverse_covariance(var)
        else:
            var = self.normalizer.inverse_variance(var)

    return mean, var
def predict_noiseless(self, Xnew, full_cov=False, Y_metadata=None, kern=None):
    """
    Predict the underlying GP function f at the points Xnew, *without*
    adding the likelihood (noise) variance — cf. predict(), which adds it
    by default. This is most likely what you want to use for your
    predictions.

    :param Xnew: points at which to predict, shape (Nnew x self.input_dim)
    :type Xnew: np.ndarray
    :param full_cov: return the full covariance matrix instead of just the
        diagonal
    :type full_cov: bool
    :param Y_metadata: metadata about the prediction points, forwarded to
        the likelihood
    :param kern: kernel to use for prediction (defaults to the model kern)

    :returns: (mean, var) — posterior mean (Nnew x self.input_dim) and
        variance (Nnew x 1, or Nnew x Nnew when full_cov; with multiple
        outputs and full_cov the variance is Nnew x Nnew x self.input_dim,
        allowing per-output normalization).

    Note: for predictive quantiles (e.g. a 95% interval) use
    :py:func:"~GPy.core.gp.GP.predict_quantiles".
    """
    # Delegate to predict() with the likelihood contribution switched off;
    # keyword arguments spell out what the positional call used to mean.
    return self.predict(Xnew, full_cov=full_cov, Y_metadata=Y_metadata,
                        kern=kern, likelihood=None,
                        include_likelihood=False)
def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, kern=None, likelihood=None):
    """
    Get the predictive quantiles around the prediction at X.

    :param X: The points at which to make a prediction
    :type X: np.ndarray (Xnew x self.input_dim)
    :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval
    :type quantiles: tuple
    :param kern: optional kernel to use for prediction
    :type predict_kw: dict
    :returns: list of quantiles for each X and predictive quantiles for interval combination
    :rtype: [np.ndarray (Xnew x self.output_dim), np.ndarray (Xnew x self.output_dim)]
    """
    # Latent predictions; the likelihood turns them into output quantiles.
    m, v = self._raw_predict(X,  full_cov=False, kern=kern)
    if likelihood is None:
        likelihood = self.likelihood
    quantiles = likelihood.predictive_quantiles(m, v, quantiles, Y_metadata=Y_metadata)

    if self.normalizer is not None:
        # Map each quantile back to the original (un-normalized) scale.
        quantiles = [self.normalizer.inverse_mean(q) for q in quantiles]
    return quantiles
def predictive_gradients(self, Xnew, kern=None):
    """
    Compute the derivatives of the predicted latent function with respect
    to X*.

    Given a set of points at which to predict X* (size [N*,Q]), compute the
    derivatives of the mean and variance. Resulting arrays are sized:

        dmu_dX* -- [N*, Q ,D], where D is the number of output in this GP
        (usually one).

    Note that this is not the same as computing the mean and variance of
    the derivative of the function!

        dv_dX* -- [N*, Q], (since all outputs have the same variance)

    :param Xnew: The points at which to get the predictive gradients
    :type Xnew: np.ndarray (Xnew x self.input_dim)
    :returns: dmu_dX, dv_dX
    :rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q) ]
    """
    if kern is None:
        kern = self.kern
    # Mean gradient: one kernel-gradient pass per output dimension, using
    # the posterior's woodbury vector.
    mean_jac = np.empty((Xnew.shape[0], Xnew.shape[1], self.output_dim))
    for i in range(self.output_dim):
        mean_jac[:, :, i] = kern.gradients_X(
            self.posterior.woodbury_vector[:, i:i+1].T, Xnew,
            self._predictive_variable)

    # Gradients wrt the diagonal part k_{xx}
    dv_dX = kern.gradients_X_diag(np.ones(Xnew.shape[0]), Xnew)

    # Grads wrt 'Schur' part K_{xf}K_{ff}^{-1}K_{fx}
    if self.posterior.woodbury_inv.ndim == 3:
        # Per-output woodbury inverse (e.g. missing-data models): compute
        # the Schur correction separately for each output slice.
        var_jac = np.empty(dv_dX.shape +
                           (self.posterior.woodbury_inv.shape[2],))
        var_jac[:] = dv_dX[:, :, None]
        for i in range(self.posterior.woodbury_inv.shape[2]):
            alpha = -2.*np.dot(kern.K(Xnew, self._predictive_variable),
                               self.posterior.woodbury_inv[:, :, i])
            var_jac[:, :, i] += kern.gradients_X(alpha, Xnew,
                                                 self._predictive_variable)
    else:
        var_jac = dv_dX
        alpha = -2.*np.dot(kern.K(Xnew, self._predictive_variable),
                           self.posterior.woodbury_inv)
        var_jac += kern.gradients_X(alpha, Xnew, self._predictive_variable)
    return mean_jac, var_jac
    def predict_jacobian(self, Xnew, kern=None, full_cov=False):
        """
        Compute the derivatives of the posterior of the GP.

        Given a set of points at which to predict X* (size [N*,Q]), compute the
        mean and variance of the derivative. Resulting arrays are sized:

            dL_dX* -- [N*, Q, D], where D is the number of outputs in this GP
                      (usually one).
            dv_dX* -- [N*, Q], (since all outputs have the same variance).

        Note that this is the mean and variance of the derivative, not the
        derivative of the mean and variance! (See predictive_gradients for that.)
        If there is missing data, it is not implemented for now, but there will
        be one output variance per output dimension.

        :param Xnew: the points at which to get the predictive gradients
        :type Xnew: np.ndarray (N* x self.input_dim)
        :param kern: the kernel to compute the jacobian for (defaults to self.kern)
        :param boolean full_cov: whether to return the cross-covariance terms
            between the N* Jacobian vectors
        :returns: dmu_dX, dv_dX
        :rtype: [np.ndarray (N*, Q, D), np.ndarray (N*, Q, (D))]
        """
        if kern is None:
            kern = self.kern
        # Mean Jacobian: gradient of k(X*,X) alpha per output dimension.
        mean_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
        for i in range(self.output_dim):
            mean_jac[:,:,i] = kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1].T, Xnew, self._predictive_variable)
        # dK/dX* for each training point, one row at a time.
        dK_dXnew_full = np.empty((self._predictive_variable.shape[0], Xnew.shape[0], Xnew.shape[1]))
        one = np.ones((1,1))
        for i in range(self._predictive_variable.shape[0]):
            dK_dXnew_full[i] = kern.gradients_X(one, Xnew, self._predictive_variable[[i]])
        # Second derivative of the prior covariance (diag or full cross terms).
        if full_cov:
            dK2_dXdX = kern.gradients_XX(one, Xnew)
        else:
            dK2_dXdX = kern.gradients_XX_diag(one, Xnew)
        #dK2_dXdX = np.zeros((Xnew.shape[0], Xnew.shape[1], Xnew.shape[1]))
        #for i in range(Xnew.shape[0]):
        #    dK2_dXdX[i:i+1,:,:] = kern.gradients_XX(one, Xnew[i:i+1,:])
        def compute_cov_inner(wi):
            # Posterior Jacobian covariance: prior second derivative minus the
            # data correction dK.T W dK, with W a Woodbury inverse slice.
            if full_cov:
                var_jac = dK2_dXdX - np.einsum('qnm,msr->nsqr', dK_dXnew_full.T.dot(wi), dK_dXnew_full) # n,s = Xnew.shape[0], m = pred_var.shape[0]
            else:
                var_jac = dK2_dXdX - np.einsum('qnm,mnr->nqr', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
            return var_jac
        if self.posterior.woodbury_inv.ndim == 3: # Missing data:
            # One covariance per output dimension.
            if full_cov:
                var_jac = np.empty((Xnew.shape[0],Xnew.shape[0],Xnew.shape[1],Xnew.shape[1],self.output_dim))
                for d in range(self.posterior.woodbury_inv.shape[2]):
                    var_jac[:, :, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
            else:
                var_jac = np.empty((Xnew.shape[0],Xnew.shape[1],Xnew.shape[1],self.output_dim))
                for d in range(self.posterior.woodbury_inv.shape[2]):
                    var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
        else:
            var_jac = compute_cov_inner(self.posterior.woodbury_inv)
        return mean_jac, var_jac
def predict_wishart_embedding(self, Xnew, kern=None, mean=True, covariance=True):
"""
Predict the wishart embedding G of the GP. This is the density of the
input of the GP defined by the probabilistic function mapping f.
G = J_mean.T*J_mean + output_dim*J_cov.
:param array-like Xnew: The points at which to evaluate the magnification.
:param :py:class:`~GPy.kern.Kern` kern: The kernel to use for the magnification.
Supplying only a part of the learning kernel gives insights into the density
of the specific kernel part of the input function. E.g. one can see how dense the
linear part of a kernel is compared to the non-linear part etc.
"""
if kern is None:
kern = self.kern
mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
Sigma = np.zeros(mumuT.shape)
if var_jac.ndim == 4: # Missing data
Sigma = var_jac.sum(-1)
else:
Sigma = self.output_dim*var_jac
G = 0.
if mean:
G += mumuT
if covariance:
G += Sigma
return G
    def predict_wishard_embedding(self, Xnew, kern=None, mean=True, covariance=True):
        """Deprecated misspelled alias; use :meth:`predict_wishart_embedding`."""
        warnings.warn("Wrong naming, use predict_wishart_embedding instead. Will be removed in future versions!", DeprecationWarning)
        return self.predict_wishart_embedding(Xnew, kern, mean, covariance)
def predict_magnification(self, Xnew, kern=None, mean=True, covariance=True, dimensions=None):
"""
Predict the magnification factor as
sqrt(det(G))
for each point N in Xnew.
:param bool mean: whether to include the mean of the wishart embedding.
:param bool covariance: whether to include the covariance of the wishart embedding.
:param array-like dimensions: which dimensions of the input space to use [defaults to self.get_most_significant_input_dimensions()[:2]]
"""
G = self.predict_wishart_embedding(Xnew, kern, mean, covariance)
if dimensions is None:
dimensions = self.get_most_significant_input_dimensions()[:2]
G = G[:, dimensions][:,:,dimensions]
from ..util.linalg import jitchol
mag = np.empty(Xnew.shape[0])
for n in range(Xnew.shape[0]):
try:
mag[n] = np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :]))))))
except:
mag[n] = np.sqrt(np.linalg.det(G[n, :, :]))
return mag
    def posterior_samples_f(self, X, size=10, full_cov=True, **predict_kwargs):
        """
        Samples the posterior GP at the points X (latent function values,
        before the likelihood is applied).

        :param X: the points at which to take the samples.
        :type X: np.ndarray (Nnew x self.input_dim)
        :param size: the number of a posteriori samples.
        :type size: int
        :param full_cov: whether to sample using the full covariance matrix,
            or just the diagonal.
        :type full_cov: bool
        :returns: fsim: set of simulations
        :rtype: np.ndarray (D x N x samples) (if D==1 we flatten out the first dimension)
        """
        m, v = self._raw_predict(X, full_cov=full_cov, **predict_kwargs)
        # Undo any output normalization before sampling.
        if self.normalizer is not None:
            m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
        def sim_one_dim(m, v):
            # Draw `size` joint samples for a single output dimension.
            if not full_cov:
                # Diagonal variance: embed into a diagonal covariance matrix.
                return np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
            else:
                return np.random.multivariate_normal(m.flatten(), v, size).T
        if self.output_dim == 1:
            return sim_one_dim(m, v)
        else:
            fsim = np.empty((self.output_dim, self.num_data, size))
            for d in range(self.output_dim):
                # v's shape depends on full_cov and whether each output has
                # its own variance (missing-data case).
                if full_cov and v.ndim == 3:
                    fsim[d] = sim_one_dim(m[:, d], v[:, :, d])
                elif (not full_cov) and v.ndim == 2:
                    fsim[d] = sim_one_dim(m[:, d], v[:, d])
                else:
                    fsim[d] = sim_one_dim(m[:, d], v)
        return fsim
    def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None, likelihood=None, **predict_kwargs):
        """
        Samples the posterior GP at the points X, pushing the latent samples
        through the likelihood to give samples in observation space.

        :param X: the points at which to take the samples.
        :type X: np.ndarray (Nnew x self.input_dim)
        :param size: the number of a posteriori samples.
        :type size: int
        :param full_cov: whether to sample using the full covariance matrix,
            or just the diagonal.
        :type full_cov: bool
        :param likelihood: the likelihood used to transform latent samples
            (defaults to self.likelihood).
        :returns: Ysim: set of simulations
        :rtype: np.ndarray (D x N x samples) (if D==1 we flatten out the first dimension)
        """
        fsim = self.posterior_samples_f(X, size, full_cov=full_cov, **predict_kwargs)
        if likelihood is None:
            likelihood = self.likelihood
        # fsim is (D, N, samples) for multi-output, (N, samples) otherwise.
        if fsim.ndim == 3:
            for d in range(fsim.shape[0]):
                fsim[d] = likelihood.samples(fsim[d], Y_metadata=Y_metadata)
        else:
            fsim = likelihood.samples(fsim, Y_metadata=Y_metadata)
        return fsim
    def input_sensitivity(self, summarize=True):
        """
        Returns the sensitivity for each dimension of this model.

        Delegates to the kernel's input_sensitivity.
        """
        return self.kern.input_sensitivity(summarize=summarize)
    def get_most_significant_input_dimensions(self, which_indices=None):
        """Return the input dimensions the kernel deems most significant
        (delegates to the kernel)."""
        return self.kern.get_most_significant_input_dimensions(which_indices)
    def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
        """
        Optimize the model using self.log_likelihood and
        self.log_likelihood_gradient, as well as self.priors.

        kwargs are passed to the optimizer. They can be:

        :param max_iters: maximum number of function evaluations
        :type max_iters: int
        :param messages: whether to display during optimisation
        :type messages: bool
        :param optimizer: which optimizer to use (defaults to self.preferred_optimizer),
            a range of optimisers can be found in :module:`~GPy.inference.optimization`,
            they include 'scg', 'lbfgs', 'tnc'.
        :type optimizer: string
        :param bool ipython_notebook: whether to use ipython notebook widgets or not.
        :param bool clear_after_finish: if in ipython notebook, we can clear the
            widgets after optimization.
        """
        # Let the inference method set up/tear down any optimization state.
        self.inference_method.on_optimization_start()
        try:
            ret = super(GP, self).optimize(optimizer, start, messages, max_iters, ipython_notebook, clear_after_finish, **kwargs)
        except KeyboardInterrupt:
            # Ensure the inference method is cleaned up before propagating.
            print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
            self.inference_method.on_optimization_end()
            raise
        return ret
    def infer_newX(self, Y_new, optimize=True):
        """
        Infer X for the new observed data *Y_new*.

        :param Y_new: the new observed data for inference
        :type Y_new: numpy.ndarray
        :param optimize: whether to optimize the location of new X (True by default)
        :type optimize: boolean
        :return: a tuple containing the posterior estimation of X and the model that optimized X
        :rtype: (:class:`~GPy.core.parameterization.variational.VariationalPosterior`
            and numpy.ndarray, :class:`~GPy.core.model.Model`)
        """
        # Local import avoids a circular dependency at module load time.
        from ..inference.latent_function_inference.inferenceX import infer_newX
        return infer_newX(self, Y_new, optimize=optimize)
    def log_predictive_density(self, x_test, y_test, Y_metadata=None):
        """
        Calculation of the log predictive density.

        .. math:
            p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})

        :param x_test: test locations (x_{*})
        :type x_test: (Nx1) array
        :param y_test: test observations (y_{*})
        :type y_test: (Nx1) array
        :param Y_metadata: metadata associated with the test points
        """
        # Latent posterior at the test points; the likelihood integrates it out.
        mu_star, var_star = self._raw_predict(x_test)
        return self.likelihood.log_predictive_density(y_test, mu_star, var_star, Y_metadata=Y_metadata)
    def log_predictive_density_sampling(self, x_test, y_test, Y_metadata=None, num_samples=1000):
        """
        Calculation of the log predictive density by sampling (Monte Carlo
        integration over the latent posterior).

        .. math:
            p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})

        :param x_test: test locations (x_{*})
        :type x_test: (Nx1) array
        :param y_test: test observations (y_{*})
        :type y_test: (Nx1) array
        :param Y_metadata: metadata associated with the test points
        :param num_samples: number of samples to use in monte carlo integration
        :type num_samples: int
        """
        mu_star, var_star = self._raw_predict(x_test)
        return self.likelihood.log_predictive_density_sampling(y_test, mu_star, var_star, Y_metadata=Y_metadata, num_samples=num_samples)
    def posterior_covariance_between_points(self, X1, X2):
        """
        Computes the posterior covariance between points.

        :param X1: some input observations
        :param X2: other input observations
        :returns: cov(f(X1), f(X2)) under the posterior (delegates to the
            posterior object).
        """
        return self.posterior.covariance_between_points(self.kern, self.X, X1, X2)
| befelix/GPy | GPy/core/gp.py | Python | bsd-3-clause | 32,440 | [
"Gaussian"
] | 512f4fd88ce8c3e544f1329c78e61c5c6644f668760bd08d0d220693398f9bb2 |
import math
from scipy.integrate import quad
from scipy.integrate import odeint
from scipy.interpolate import spline
import numpy as np
import matplotlib.pyplot as plt
# System Definitions
# ------------------
# Two-box (air/water) Level IV fugacity model parameters.

# Chemical Properties
MW = 100.0 # g / mol
MW /= 1000 # Convert to kg / mol.
T = 298.15 # K
R = 8.314 # J / mol K
Z_air = 1 / R / T # mol / m^3 Pa  (fugacity capacity of air, ideal gas)
logKaw = -2.13 # Henry's Law Constant or Air-Water Equilibrium Partition Coefficient
Kaw = 10 ** logKaw # non-log
Z_water = Z_air / Kaw # mol / m^3 Pa  (fugacity capacity of water)

# Compartment Properties
area = 1.0e6 # 1,000,000 sq. meters or 1 sq. km
#water
water_depth = 30. # meters
water_volume = area * water_depth # m^3
#air
air_height = 1000.0 # meters
air_volume = area * air_height # m^3

# Time step, in hours.
# NOTE(review): dt is defined but never used below — confirm intent.
dt = 24 # hours

# Chemical emission rate into the air
E_day = 1 # kg per day
E_hour = E_day / 24 # kg per hour.

# Mass Transfer Coefficients in m / h
MTC_air_water = 0.5
MTC_water_air = 1.00

# Degradation with OH
k_deg = 5.5e-13 # cm^3 / molecules s
k_deg = k_deg * 86400.0 # now in cm^3 / molecules day
# NOTE(review): typical tropospheric OH is ~1e6 molecules/cm^3; the 1.0e-6
# here looks ~12 orders of magnitude low — confirm against the source model.
OH = 1.0e-6 # molecules / cm^3
# In[3]:
# ODE
def f(y, t):
    """ODE right-hand side for the two-box fugacity model.

    y = [f_air, f_water] are the fugacities (Pa) of air and water; returns
    their time derivatives. Relies on the module-level constants defined
    above (areas, volumes, Z values, MTCs, degradation parameters).
    """
    f_air_i = y[0]
    f_water_i = y[1]

    ### AIR
    f_air = 0.0
    # Input
    # Pulse emission centered at t = 182.5 (mid-year); divided by MW to
    # convert a mass rate to a molar rate.
    f_air += gaussian(t, 182.5, 0.01) / MW
    # Diffusive transfer from water to air.
    f_air += MTC_water_air * area * Z_water * f_water_i
    # Loss
    f_air -= MTC_air_water * area * Z_air * f_air_i
    # Degradation by reaction with OH radicals.
    f_air -= air_volume * Z_air * k_deg * OH * f_air_i
    # Divide by VZx to convert a molar rate into a fugacity rate.
    f_air /= air_volume * Z_air

    ### WATER
    f_water = 0.0
    # Input
    f_water += MTC_air_water * area * Z_air * f_air_i
    # Loss
    f_water -= MTC_water_air * area * Z_water * f_water_i
    # Divide by VZ
    f_water /= water_volume * Z_water

    # ret
    return [f_air, f_water]
def gaussian(x, mu, sig):
    """Unnormalized Gaussian bell curve exp(-(x-mu)^2 / (2*sig^2)).

    Bug fix: the original wrote `np.power(x - mu, 2.) / 2 * np.power(sig, 2.)`,
    which — by operator precedence — *multiplies* by sig**2 instead of
    dividing by 2*sig**2, so the function did not compute a Gaussian at all.
    NOTE(review): with sig=0.01 the corrected pulse is extremely narrow;
    the ODE solver's step size may need tightening to resolve it.
    """
    return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
# In[5]:
# Integrate the two-box model over one year and plot the water-phase mass.
f_air_0 = 0.0 # initial contamination in air
f_water_0 = 0.0 # initial contamination in water
y0 = [f_air_0, f_water_0]
t = np.linspace(0, 365, 366)  # daily grid over one year
# NOTE(review): E is computed here but never used — f() calls gaussian()
# directly with a hard-coded center of 182.5. Confirm which was intended.
E = gaussian(t, max(t)/2, 0.01)
soln = odeint(f, y0, t)
f_air = soln[:, 0]
f_water = soln[:, 1]
# Convert fugacities -> concentrations (mol/m^3) -> total mass (kg).
C_air = f_air * Z_air
C_water = f_water * Z_water
M_air = C_air * air_volume * MW
M_water = C_water * water_volume * MW
plt.plot(t, M_water, '-', color = 'deepskyblue', linewidth = 4)
plt.show()
| StephenAWood/SWEnvironmentalFate | SWEnvironmentalFateLevel4.py | Python | mit | 2,301 | [
"Gaussian"
] | beefa03f725a7d4e1d6a568f6d53b0dbf8824b113bc775fef6169a248ffd6388 |
#!/usr/bin/env python
from optparse import OptionParser
import copy
import math
import os
import random
import string
import subprocess
import sys
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
from scipy.stats import spearmanr
import seaborn as sns
from sklearn.metrics import roc_auc_score, roc_curve
sns_colors = sns.color_palette('deep')
from dna_io import one_hot_set, vecs2dna
################################################################################
# basset_kmers.py
#
# Generate random sequences and study scores by k-mers.
#
# Draw as graph:
# -construct the graph w/ all single edits as edges.
# -perform a force-directed layout.
# -label the k-mers.
# -color by score.
# -http://networkx.github.io/documentation/latest/gallery.html
################################################################################
################################################################################
# main
################################################################################
def main():
    """Generate random sequences, score them with a Basset model, and study
    the model scores by central k-mer (table, score density, clustered
    heatmaps of the top k-mers per target).

    NOTE: this file is Python 2 (print statements, integer division).
    """
    usage = 'usage: %prog [options] <model_file>'
    parser = OptionParser(usage)
    parser.add_option('-a', dest='targets_file', default=None, help='File labelings targets in the second column [Default: %default]')
    # NOTE(review): center_nt lacks type='int'; a user-supplied -c value would
    # arrive as a string and break the range() arithmetic below — confirm.
    parser.add_option('-c', dest='center_nt', default=50, help='Center nt to consider kmers from [Default: %default]')
    parser.add_option('-d', dest='model_out_file', default=None, help='Pre-computed model output table.')
    parser.add_option('-k', dest='kmer', default=8, type='int', help='K-mer length [Default: %default]')
    parser.add_option('-l', dest='seq_len', default=1000, type='int', help='Input sequence length [Default: %default]')
    parser.add_option('-n', dest='num_seqs', default=100000, type='int', help='Number of sequences to predict [Default: %default]')
    parser.add_option('-o', dest='out_dir', default='.')
    parser.add_option('-r', dest='rc', default=False, action='store_true', help='Consider k-mers w/ their reverse complements [Default: %default]')
    parser.add_option('-t', dest='targets', default=None, help='Comma-separated list of targets to analyze in more depth [Default: %default]')
    parser.add_option('--top', dest='top_num', default=100, type='int', help='Number of sequences with which to make a multiple sequence alignment')
    (options,args) = parser.parse_args()

    if len(args) != 1:
        parser.error('Must provide Basset model file.')
    else:
        model_file = args[0]

    random.seed(2)

    if not os.path.isdir(options.out_dir):
        os.mkdir(options.out_dir)

    if options.model_out_file is not None:
        # Reuse a pre-computed prediction table; just reload the FASTA.
        seq_dna = []
        for line in open('%s/seqs.fa' % options.out_dir):
            if line[0] == '>':
                seq_dna.append('')
            else:
                seq_dna[-1] += line.rstrip()

    else:
        #################################################################
        # generate random sequences
        #################################################################
        # random sequences (one-hot encoded, one random base per position)
        seq_vecs = np.zeros((options.num_seqs,4,1,options.seq_len), dtype='float16')
        for si in range(options.num_seqs):
            for li in range(options.seq_len):
                ni = random.randint(0,3)
                seq_vecs[si,ni,0,li] = 1

        # create a new HDF5 file
        seq_hdf5_file = '%s/seqs.h5' % options.out_dir
        seq_hdf5_out = h5py.File(seq_hdf5_file, 'w')
        seq_hdf5_out.create_dataset('test_in', data=seq_vecs)
        seq_hdf5_out.close()

        # get fasta
        seq_dna = vecs2dna(seq_vecs)

        # print to file
        fasta_out = open('%s/seqs.fa' % options.out_dir, 'w')
        for i in range(len(seq_dna)):
            print >> fasta_out, '>%d\n%s' % (i,seq_dna[i])
        fasta_out.close()

        #################################################################
        # Torch predict
        #################################################################
        # Shell out to the Lua/Torch model for predictions.
        options.model_out_file = '%s/model_out.txt' % options.out_dir
        torch_cmd = 'basset_predict.lua -scores %s %s %s' % (model_file, seq_hdf5_file, options.model_out_file)
        print torch_cmd
        subprocess.call(torch_cmd, shell=True)

        # clean up sequence HDF5
        os.remove(seq_hdf5_file)

    # load scores
    seq_scores = np.loadtxt(options.model_out_file, dtype='float32')

    # read target labels
    if options.targets_file:
        target_labels = [line.split()[1] for line in open(options.targets_file)]
    else:
        target_labels = ['t%d'%(ti+1) for ti in range(seq_scores.shape[1])]

    if options.targets is None:
        options.targets = range(seq_scores.shape[1])
    else:
        options.targets = [int(ti) for ti in options.targets.split(',')]

    #################################################################
    # process and output
    #################################################################
    # Only k-mers in the central window contribute (Python 2 int division).
    kmers_start = (options.seq_len - options.center_nt) / 2

    for ti in options.targets:
        print 'Working on target %d' % ti

        ##############################################
        # hash scores by k-mer
        ##############################################
        kmer_scores_raw = {}

        for si in range(len(seq_dna)):
            # get score
            sscore = seq_scores[si,ti]

            # hash to each center kmer
            for ki in range(kmers_start, kmers_start + options.center_nt):
                kmer = seq_dna[si][ki:ki+options.kmer]
                if options.rc:
                    kmer = consider_rc(kmer)

                kmer_scores_raw.setdefault(kmer,[]).append(sscore)

        ##############################################
        # compute means and print table
        ##############################################
        table_out = open('%s/table%d.txt' % (options.out_dir,ti), 'w')

        kmer_means_raw = {}
        for kmer in kmer_scores_raw:
            kmer_means_raw[kmer] = np.mean(kmer_scores_raw[kmer])
            kmer_n = len(kmer_scores_raw[kmer])
            # columns: kmer, count, mean score, standard error of the mean
            cols = (kmer, kmer_n, kmer_means_raw[kmer], np.std(kmer_scores_raw[kmer])/math.sqrt(kmer_n))
            print >> table_out, '%s  %4d  %6.3f  %6.3f' % cols

        table_out.close()

        ##############################################
        # plot density
        ##############################################
        plt.figure()
        sns.distplot(kmer_means_raw.values(), kde=False)
        plt.savefig('%s/density%d.pdf' % (options.out_dir,ti))
        plt.close()

        ##############################################
        # top k-mers distance matrix
        ##############################################
        # Center the mean scores around zero before ranking.
        kmer_means = {}
        kmer_means_mean = np.mean(kmer_means_raw.values())
        for kmer in kmer_means_raw:
            kmer_means[kmer] = kmer_means_raw[kmer] - kmer_means_mean

        # score by score
        scores_kmers = [(kmer_means[kmer],kmer) for kmer in kmer_means]
        scores_kmers.sort(reverse=True)

        # take top k-mers
        top_kmers = []
        top_kmers_scores = []
        for score, kmer in scores_kmers[:options.top_num]:
            top_kmers.append(kmer)
            top_kmers_scores.append(score)
        top_kmers = np.array(top_kmers)
        top_kmers_scores = np.array(top_kmers_scores)

        # compute distance matrix (symmetric; only upper triangle computed)
        top_kmers_dists = np.zeros((options.top_num, options.top_num))
        for i in range(options.top_num):
            for j in range(i+1,options.top_num):
                if options.rc:
                    top_kmers_dists[i,j] = kmer_distance_rc(top_kmers[i], top_kmers[j])
                else:
                    top_kmers_dists[i,j] = kmer_distance(top_kmers[i], top_kmers[j])
                top_kmers_dists[j,i] = top_kmers_dists[i,j]

        # clip the distances
        np.clip(top_kmers_dists, 0, 3, out=top_kmers_dists)

        # plot
        plot_kmer_dists(top_kmers_dists, top_kmers_scores, top_kmers, '%s/top_kmers_heat%d.pdf'%(options.out_dir,ti))

        # cluster and plot
        cluster_kmer_dists(top_kmers_dists, top_kmers_scores, top_kmers, '%s/top_kmers_clust%d.pdf'%(options.out_dir,ti))
def consider_rc(kmer):
    ''' Canonicalize a k-mer: return the lexicographically smaller of the
    k-mer and its reverse complement (ties resolve to the k-mer itself). '''
    return min(kmer, rc(kmer))
def kmer_distance(x, y, max_shifts=1):
    ''' Compute the edit distance between two kmers: the minimum Hamming
    distance over relative shifts of up to max_shifts positions, where each
    shift costs 1.

    Might consider trying scikit-bio global_pairwise_align_nucleotide.
    '''
    # Hamming distance with no shift.
    best = sum(x[i] != y[i] for i in range(len(x)))

    for s in range(1, max_shifts + 1):
        # Shift x left by s (cost 1 for the shift itself).
        d_x = 1 + sum(x[s + i] != y[i] for i in range(len(x) - s))
        # Shift y left by s.
        d_y = 1 + sum(x[i] != y[s + i] for i in range(len(y) - s))
        best = min(best, d_x, d_y)

    return best
def kmer_distance_rc(x, y):
    ''' Compute the edit distance between two kmers, taking whichever is
    smaller of the forward distance and the distance to y's reverse
    complement. '''
    return min(kmer_distance(x, y), kmer_distance(x, rc(y)))
def plot_kmer_dists(kmers_dists, kmers_scores, kmers, out_pdf):
    ''' Plot a heatmap of k-mer distances with a one-column score strip on
    the right, saved to out_pdf. '''
    # shape scores into a column vector for the score strip
    kmers_scores = kmers_scores.reshape((-1,1))

    cols = 20
    plt.figure()
    # Distance heatmap spans all but the last grid column.
    ax_dist = plt.subplot2grid((1,cols), (0,0), colspan=cols-1)
    ax_score = plt.subplot2grid((1,cols), (0,cols-1), colspan=1)

    sns.heatmap(kmers_dists, cmap=sns.cubehelix_palette(n_colors=(1+kmers_dists.max()), reverse=True, as_cmap=True), ax=ax_dist, yticklabels=kmers, xticklabels=False)
    # Shrink tick labels as the number of k-mers grows so they stay legible.
    for tick in ax_dist.get_yticklabels():
        if kmers_dists.shape[0] <= 100:
            tick.set_fontsize(4)
        elif kmers_dists.shape[0] <= 250:
            tick.set_fontsize(2.5)
        else:
            tick.set_fontsize(2)

    # Symmetric diverging colormap centered at zero for the scores.
    score_max = kmers_scores.max()
    sns.heatmap(kmers_scores, cmap = 'RdBu_r', vmin=-score_max, vmax=score_max, ax=ax_score, yticklabels=False, xticklabels=False)

    plt.savefig(out_pdf)
    plt.close()
def cluster_kmer_dists(kmers_dists, kmers_scores, kmers, out_pdf):
    ''' Single-linkage cluster the k-mer distance matrix and plot the
    heatmap with rows/columns in leaf order. '''
    linkage = hierarchy.linkage(kmers_dists, method='single', metric='euclidean')
    leaf_order = hierarchy.leaves_list(linkage)

    # Apply the leaf ordering to both axes of the distance matrix.
    reordered = kmers_dists[leaf_order, :][:, leaf_order]

    plot_kmer_dists(reordered, kmers_scores[leaf_order], kmers[leaf_order], out_pdf)
def rc(seq):
    ''' Reverse complement a DNA sequence.

    NOTE: uses string.maketrans, which exists only in Python 2 (this whole
    script is Python 2); under Python 3 this would be str.maketrans. '''
    return seq.translate(string.maketrans("ATCGatcg","TAGCtagc"))[::-1]
################################################################################
# __main__
################################################################################
# Script entry point; keeps the module importable without side effects.
if __name__ == '__main__':
    main()
| davek44/Basset | src/dev/basset_kmers.py | Python | mit | 11,132 | [
"scikit-bio"
] | 62e24a2eaa3f30f1a46c752a7b884769e846ce73328a501713398effb3a96df7 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import unittest
import numpy
import scipy.misc
import scipy.special
try:
from scipy.special import factorial2
except ImportError:
from scipy.misc import factorial2
from pyscf import lib
from pyscf import gto
from pyscf.dft import radi
from pyscf.symm import sph
libecp = gto.moleintor.libcgto
mol = gto.M(atom='''
Na 0.5 0.5 0.
H 0. 1. 1.
''',
basis={'Na':'lanl2dz',
'H':[[0,[1.21,1.],[.521,1.]],
[1,[3.12,1.],[.512,1.]],
[2,[2.54,1.],[.554,1.]],
[3,[0.98,1.],[.598,1.]],
[4,[0.79,1.],[.579,1.]]]},
ecp = {'Na': gto.basis.parse_ecp('''
Na nelec 10
Na ul
0 2.0000000 6.0000000
1 175.5502590 -10.0000000
2 2.3365719 -6.0637782
2 0.7799867 -0.7299393
Na S
0 243.3605846 3.0000000
#1 41.5764759 36.2847626
#2 13.2649167 72.9304880
#2 0.9764209 6.0123861
#Na P
#0 1257.2650682 5.0000000
#1 189.6248810 117.4495683
#2 54.5247759 423.3986704
#2 0.9461106 7.1241813
''')})
# Slot indices into a libcint _atm row (see pyscf/gto conventions).
CHARGE_OF  = 0
PTR_COORD  = 1
NUC_MOD_OF = 2
PTR_ZETA   = 3
ATM_SLOTS  = 6

# Slot indices into a _ecpbas row.
# for _ecpbas
ATOM_OF    = 0
ANG_OF     = 1     # <0 means local function
NPRIM_OF   = 2
RADI_POWER = 3
SO_TYPE_OF = 4
PTR_EXP    = 5
PTR_COEFF  = 6
BAS_SLOTS  = 8
def type1_by_shell(mol, shls, ecpatm_id, ecpbas):
    """Reference evaluation of the type-1 (local) ECP integral block between
    shells (ish, jsh) for the ECP centered on atom ecpatm_id, via numerical
    radial quadrature. Returns a spherical-harmonic matrix of shape
    (nci*(2li+1), ncj*(2lj+1))."""
    ish, jsh = shls

    # Shell i data: angular momentum, primitives, contractions, exponents.
    li = mol.bas_angular(ish)
    npi = mol.bas_nprim(ish)
    nci = mol.bas_nctr(ish)
    ai = mol.bas_exp(ish)
    ci = mol._libcint_ctr_coeff(ish)
    icart = (li+1) * (li+2) // 2

    lj = mol.bas_angular(jsh)
    npj = mol.bas_nprim(jsh)
    ncj = mol.bas_nctr(jsh)
    aj = mol.bas_exp(jsh)
    cj = mol._libcint_ctr_coeff(jsh)
    jcart = (lj+1) * (lj+2) // 2

    # Displacements of each shell center from the ECP center.
    rc = mol.atom_coord(ecpatm_id)
    rca = rc - mol.bas_coord(ish)
    r2ca = numpy.dot(rca, rca)
    rcb = rc - mol.bas_coord(jsh)
    r2cb = numpy.dot(rcb, rcb)
    # Note the Mole._libcint_ctr_coeff are normalized to radial part
    cei = numpy.einsum('ij,i->ij', ci, numpy.exp(-ai * r2ca))
    cej = numpy.einsum('ij,i->ij', cj, numpy.exp(-aj * r2cb))

    # 99-point radial quadrature grid and the ECP radial function on it.
    #rs, ws = radi.treutler(99)
    rs, ws = radi.gauss_chebyshev(99)
    ur = rad_part(mol, ecpbas, rs) * ws

    # Accumulate the contracted radial*angular tensor over primitive pairs.
    rad_ang_all = numpy.zeros((nci,ncj,li+lj+1,li+lj+1,li+lj+1))
    for ip in range(npi):
        for jp in range(npj):
            rij = ai[ip] * rca + aj[jp] * rcb
            aij = ai[ip] + aj[jp]
            k = 2*numpy.linalg.norm(rij)
            rad_all = type1_rad_part(li+lj, k, aij, ur, rs)
            #ang_all = type1_ang_part(li+lj, -rij)
            #rad_ang = numpy.einsum('pl,lijk->pijk', rad_all, ang_all)
            rad_ang = type1_rad_ang(li+lj, rij, rad_all)
            for ic in range(nci):
                for jc in range(ncj):
                    rad_ang_all[ic,jc] += rad_ang * cei[ip,ic]*cej[jp,jc] * (4*numpy.pi)**2

    # Binomial-expansion factors of (r + R)^n about each shell center.
    ifac = type1_cache_fac(li, rca)
    jfac = type1_cache_fac(lj, rcb)

    # Contract the expansion factors with the radial*angular tensor for each
    # pair of Cartesian components.
    g1 = numpy.zeros((nci,ncj,icart,jcart))
    for ic in range(nci):
        for jc in range(ncj):
            for mi,(ix,iy,iz) in enumerate(loop_cart(li)):
                for mj,(jx,jy,jz) in enumerate(loop_cart(lj)):
                    tmp = 0
                    for i1, i2, i3 in loop_xyz(ix, iy, iz):
                        for j1, j2, j3 in loop_xyz(jx, jy, jz):
                            fac = ifac[mi,i1,i2,i3] * jfac[mj,j1,j2,j3]
                            tmp += fac * rad_ang_all[ic,jc,i1+j1,i2+j2,i3+j3]
                    g1[ic,jc,mi,mj] = tmp

    # Transform Cartesian -> spherical on both bra and ket sides.
    gsph = numpy.empty((nci,ncj,li*2+1,lj*2+1))
    for ic in range(nci):
        for jc in range(ncj):
            tmp = c2s_bra(lj, g1[ic,jc].T.copy())
            gsph[ic,jc] = c2s_bra(li, tmp.T.copy())
    return gsph.transpose(0,2,1,3).reshape(nci*(li*2+1),-1)
def type1_cache_fac(li, ri):
    """Expand the per-axis binomial factors from cache_fac into per-Cartesian
    -label products, indexed [cart_label, i1, i2, i3]. Entries outside the
    valid (i1<=ix, i2<=iy, i3<=iz) region stay zero."""
    axis_facs = cache_fac(li, ri)
    ncart = (li + 1) * (li + 2) // 2
    out = numpy.zeros((ncart, li + 1, li + 1, li + 1))
    for label, (ix, iy, iz) in enumerate(loop_cart(li)):
        for i1, i2, i3 in loop_xyz(ix, iy, iz):
            out[label, i1, i2, i3] = \
                    axis_facs[0, ix, i1] * axis_facs[1, iy, i2] * axis_facs[2, iz, i3]
    return out
def type1_rad_part(lmax, k, aij, ur, rs):
    """Type-1 radial integrals: quadrature of U(r) * r^lab * i_l(k r) with a
    Gaussian envelope, for all 0 <= lab, l <= lmax. Entries with lab+l odd
    vanish by parity and are left at zero."""
    bessel_val = sph_ine(lmax, k*rs)
    # Shift the Gaussian to be centered at k/(2*aij) for numerical stability;
    # the exp(k^2/(4*aij)) prefactor compensates.
    envelope = numpy.exp(k**2/(4*aij)) * ur * numpy.exp(-aij*(rs-k/(2*aij))**2)
    keep = abs(envelope) > 1e-80

    rad_all = numpy.zeros((lmax+1, lmax+1))
    for lab in range(lmax+1):
        base = envelope[keep] * rs[keep]**lab
        # Only l with the same parity as lab contributes.
        for l in range(lab % 2, lmax+1, 2):
            rad_all[lab,l] = (base * bessel_val[l,keep]).sum()
    return rad_all
def type1_rad_ang(lmax, rij, rad_all):
    """Combine the type-1 radial integrals with the angular factors for the
    direction -rij, producing a tensor indexed by Cartesian powers (i,j,k)."""
    norm_rij = numpy.linalg.norm(rij)
    # Unit vector along -rij; keep the zero vector if rij ~ 0.
    if norm_rij > 1e-18:
        unitr = -rij/norm_rij
    else:
        unitr = -rij

    # Real spherical harmonics at unitr, contracted to the spherical basis.
    omega_nuc = []
    for lmb in range(lmax+1):
        c2smat = c2s_bra(lmb, numpy.eye((lmb+1)*(lmb+2)//2))
        omega_nuc.append(numpy.dot(ang_nuc_part(lmb, unitr), c2smat))

    rad_ang = numpy.zeros((lmax+1,lmax+1,lmax+1))
    for i in range(lmax+1):
        for j in range(lmax+1-i):
            for k in range(lmax+1-i-j):
                for lmb in range(lmax+1):
                    # Parity: odd total power integrates to zero on the sphere.
                    if (i+j+k+lmb) % 2 == 0:
                        tmp = 0
                        for n, (i1, j1, k1) in enumerate(loop_cart(lmb)):
                            tmp += omega_nuc[lmb][n] * int_unit_xyz(i+i1, j+j1, k+k1)
                        rad_ang[i,j,k] += rad_all[i+j+k,lmb] * tmp
    return rad_ang
def type2_by_shell(mol, shls, ecpatm_id, ecpbas):
    """Reference evaluation of the type-2 (semi-local) ECP integral block
    between shells (ish, jsh) for the ECP on atom ecpatm_id, summed over the
    ECP angular channels lc. Returns a spherical matrix of shape
    (nci*(2li+1), ncj*(2lj+1))."""
    ish, jsh = shls

    li = mol.bas_angular(ish)
    npi = mol.bas_nprim(ish)
    nci = mol.bas_nctr(ish)
    ai = mol.bas_exp(ish)
    ci = mol._libcint_ctr_coeff(ish)
    icart = (li+1) * (li+2) // 2

    lj = mol.bas_angular(jsh)
    npj = mol.bas_nprim(jsh)
    ncj = mol.bas_nctr(jsh)
    aj = mol.bas_exp(jsh)
    cj = mol._libcint_ctr_coeff(jsh)
    jcart = (lj+1) * (lj+2) // 2

    # Distances of the shell centers from the ECP center.
    rc = mol.atom_coord(ecpatm_id)
    rcb = rc - mol.bas_coord(jsh)
    r_cb = numpy.linalg.norm(rcb)
    rca = rc - mol.bas_coord(ish)
    r_ca = numpy.linalg.norm(rca)

    #rs, ws = radi.treutler(99)
    rs, ws = radi.gauss_chebyshev(99)

    i_fac_cache = cache_fac(li, rca)
    j_fac_cache = cache_fac(lj, rcb)

    g1 = numpy.zeros((nci,ncj,icart,jcart))
    for lc in range(5): # up to g function
        # Radial part of this angular channel on the quadrature grid.
        ecpbasi = ecpbas[ecpbas[:,ANG_OF] == lc]
        if len(ecpbasi) == 0:
            continue
        ur = rad_part(mol, ecpbasi, rs) * ws
        idx = abs(ur) > 1e-80
        rur = numpy.array([ur[idx] * rs[idx]**lab for lab in range(li+lj+1)])

        # Bra/ket radial and angular projection factors.
        fi = facs_rad(mol, ish, lc, r_ca, rs)[:,:,idx].copy()
        fj = facs_rad(mol, jsh, lc, r_cb, rs)[:,:,idx].copy()
        angi = facs_ang(type2_ang_part(li, lc, -rca), li, lc, i_fac_cache)
        angj = facs_ang(type2_ang_part(lj, lc, -rcb), lj, lc, j_fac_cache)

        for ic in range(nci):
            for jc in range(ncj):
                rad_all = numpy.einsum('pr,ir,jr->pij', rur, fi[ic], fj[jc])
                for i1 in range(li+1):
                    for j1 in range(lj+1):
                        g1[ic,jc] += numpy.einsum('pq,imp,jmq->ij', rad_all[i1+j1],
                                                  angi[i1], angj[j1])
    g1 *= (numpy.pi*4)**2

    # Cartesian -> spherical on both sides.
    gsph = numpy.empty((nci,ncj,li*2+1,lj*2+1))
    for ic in range(nci):
        for jc in range(ncj):
            tmp = c2s_bra(lj, g1[ic,jc].T.copy())
            gsph[ic,jc] = c2s_bra(li, tmp.T.copy())
    return gsph.transpose(0,2,1,3).reshape(nci*(li*2+1),-1)
def so_by_shell(mol, shls, ecpatm_id, ecpbas):
    '''SO-ECP
    i/2 <Pauli_matrix dot l U(r)>

    Spin-orbit ECP integral block between shells (ish, jsh), returned in the
    spinor basis with shape (nci*(4li+2), ncj*(4lj+2)).
    '''
    ish, jsh = shls

    li = mol.bas_angular(ish)
    npi = mol.bas_nprim(ish)
    nci = mol.bas_nctr(ish)
    ai = mol.bas_exp(ish)
    ci = mol._libcint_ctr_coeff(ish)
    icart = (li+1) * (li+2) // 2

    lj = mol.bas_angular(jsh)
    npj = mol.bas_nprim(jsh)
    ncj = mol.bas_nctr(jsh)
    aj = mol.bas_exp(jsh)
    cj = mol._libcint_ctr_coeff(jsh)
    jcart = (lj+1) * (lj+2) // 2

    rc = mol.atom_coord(ecpatm_id)
    rcb = rc - mol.bas_coord(jsh)
    r_cb = numpy.linalg.norm(rcb)
    rca = rc - mol.bas_coord(ish)
    r_ca = numpy.linalg.norm(rca)

    #rs, ws = radi.treutler(99)
    rs, ws = radi.gauss_chebyshev(99)

    i_fac_cache = cache_fac(li, rca)
    j_fac_cache = cache_fac(lj, rcb)

    # Complex accumulator: one Cartesian block per Pauli component.
    g1 = numpy.zeros((nci,ncj,3,icart,jcart), dtype=numpy.complex128)
    for lc in range(5): # up to g function
        ecpbasi = ecpbas[ecpbas[:,ANG_OF] == lc]
        if len(ecpbasi) == 0:
            continue
        ur = rad_part(mol, ecpbasi, rs) * ws
        idx = abs(ur) > 1e-80
        rur = numpy.array([ur[idx] * rs[idx]**lab for lab in range(li+lj+1)])

        fi = facs_rad(mol, ish, lc, r_ca, rs)[:,:,idx].copy()
        fj = facs_rad(mol, jsh, lc, r_cb, rs)[:,:,idx].copy()
        angi = facs_ang(type2_ang_part(li, lc, -rca), li, lc, i_fac_cache)
        angj = facs_ang(type2_ang_part(lj, lc, -rcb), lj, lc, j_fac_cache)

        # Note the factor 2/(2l+1) in JCP 82, 2664 (1985); DOI:10.1063/1.448263 is not multiplied here
        # because the ECP parameter has been scaled by 2/(2l+1) in CRENBL
        jmm = angular_moment_matrix(lc)

        for ic in range(nci):
            for jc in range(ncj):
                rad_all = numpy.einsum('pr,ir,jr->pij', rur, fi[ic], fj[jc])
                for i1 in range(li+1):
                    for j1 in range(lj+1):
                        g1[ic,jc] += numpy.einsum('pq,imp,jnq,lmn->lij', rad_all[i1+j1],
                                                  angi[i1], angj[j1], jmm)
    g1 *= (numpy.pi*4)**2

    # Contract with the Pauli matrices and transform to the spinor basis.
    gspinor = numpy.empty((nci,ncj,li*4+2,lj*4+2), dtype=numpy.complex128)
    for ic in range(nci):
        for jc in range(ncj):
            ui = numpy.asarray(gto.cart2spinor_l(li))
            uj = numpy.asarray(gto.cart2spinor_l(lj))
            s = lib.PauliMatrices * .5j
            gspinor[ic,jc] = numpy.einsum('sxy,spq,xpi,yqj->ij', s,
                                          g1[ic,jc], ui.conj(), uj)
    return gspinor.transpose(0,2,1,3).reshape(nci*(li*4+2),-1)
def cache_fac(l, r):
    """Binomial-expansion factors binom(i, j) * r[axis]**(i-j) for each of
    the three Cartesian axes, shaped (3, l+1, l+1). Entries with j > i are
    left uninitialized (callers only read j <= i)."""
    facs = numpy.empty((3, l+1, l+1))
    for axis in range(3):
        component = r[axis]
        for i in range(l+1):
            for j in range(i+1):
                facs[axis, i, j] = scipy.special.binom(i, j) * component**(i-j)
    return facs
def sph_in(l, xs):
    '''Modified spherical Bessel function of the first kind, i_n(x) for
    n = 0..l, evaluated at each x in xs; shape (l+1, len(xs)).'''
    orders = numpy.arange(l + 1)
    vals = numpy.empty((l + 1, len(xs)))
    for col, x in enumerate(xs):
        vals[:, col] = scipy.special.spherical_in(orders, x)
    return vals
def sph_ine(l, xs):
    '''Exponentially scaled modified spherical Bessel function,
    i_n(x) * exp(-x), for n = 0..l.'''
    scale = numpy.exp(-numpy.asarray(xs))
    # Broadcasting (l+1, N) * (N,) scales every order by exp(-x).
    return sph_in(l, xs) * scale
def loop_xyz(nx, ny, nz):
    '''Yield every (ix, iy, iz) with 0 <= ix <= nx, 0 <= iy <= ny and
    0 <= iz <= nz, in lexicographic order.'''
    for a in range(nx + 1):
        for b in range(ny + 1):
            for c in range(nz + 1):
                yield a, b, c
def loop_cart(l):
    '''Yield the Cartesian exponent triples (ix, iy, iz) with ix+iy+iz == l,
    in the conventional x-major (descending) order.'''
    for ix in range(l, -1, -1):
        for iy in range(l - ix, -1, -1):
            yield ix, iy, l - ix - iy
def rad_part(mol, ecpbas, rs):
    '''Evaluate the radial ECP potential U(r) on the grid rs.

    Sums c_k * r**n * exp(-a_k * r**2) over all primitives of every ECP
    shell in ecpbas, reading exponents/coefficients from mol._env.
    '''
    rs2 = rs**2
    ur = numpy.zeros_like(rs)
    for shell in ecpbas:
        nprim = shell[NPRIM_OF]
        p_exp = shell[PTR_EXP]
        p_coef = shell[PTR_COEFF]
        exps = mol._env[p_exp:p_exp+nprim]
        coefs = mol._env[p_coef:p_coef+nprim]
        shell_u = sum(c * numpy.exp(-a * rs2) for a, c in zip(exps, coefs))
        ur += shell_u * rs**shell[RADI_POWER]
    return ur
def facs_rad(mol, ish, lc, r_ca, rs):
    '''Radial factors i_n(2*a*R*r) * exp(-a*(r-R)**2) for each primitive of
    shell ish, contracted with the shell's contraction coefficients.

    Returns an array of shape (nctr, li+lc+1, len(rs)).
    '''
    li = mol.bas_angular(ish)
    exps = mol.bas_exp(ish)
    coefs = mol._libcint_ctr_coeff(ish)
    prim_facs = []
    for a in exps[:mol.bas_nprim(ish)]:
        bessel = sph_ine(li+lc, 2*a*r_ca*rs)
        gauss = numpy.exp(-a*(rs-r_ca)**2)
        prim_facs.append(numpy.einsum('ij,j->ij', bessel, gauss))
    return numpy.einsum('pk,pij->kij', coefs, prim_facs)
# x**n*y**n*z**n * c2s * c2s.T, to project out 3s, 4p, ...
def type1_ang_part(lmax, rij):
    '''Angular integrals for the type-1 (local) ECP term.

    Builds omega[lmb, i, j, k]: unit-sphere monomial integrals
    (int_unit_xyz) of x**(i+i1) y**(j+j1) z**(k+k1) contracted with the
    real spherical harmonics of momentum lmb evaluated at the direction
    of rij.  Entries with odd parity (i+j+k+lmb) vanish and are set to 0.

    NOTE: omega is allocated with numpy.empty and only the triangular
    region j < lmax+1-i, k < lmax+1-i-j is written; the remaining entries
    are uninitialized and must not be read by callers.
    '''
    norm_rij = numpy.linalg.norm(rij)
    if norm_rij > 1e-18:
        unitr = rij/norm_rij
    else:
        # rij is (numerically) the origin: the direction is undefined,
        # keep the zero vector to avoid dividing by zero.
        unitr = rij
    omega_nuc = []
    for lmb in range(lmax+1):
        # Real spherical harmonic values at unitr, obtained by transforming
        # an identity block of Cartesian components of momentum lmb.
        c2smat = c2s_bra(lmb, numpy.eye((lmb+1)*(lmb+2)//2))
        omega_nuc.append(4*numpy.pi * numpy.dot(ang_nuc_part(lmb, unitr), c2smat))
    omega = numpy.empty((lmax+1,lmax+1,lmax+1,lmax+1))
    for lmb in range(lmax+1):
        # Scratch buffer for the Cartesian components of momentum lmb;
        # completely refilled for every even-parity (i, j, k) below.
        omega_elec = numpy.empty((lmb+1)*(lmb+2)//2)
        for i in range(lmax+1):
            for j in range(lmax+1-i):
                for k in range(lmax+1-i-j):
                    if (i+j+k+lmb) % 2 == 0:
                        for n, (i1, j1, k1) in enumerate(loop_cart(lmb)):
                            omega_elec[n] = int_unit_xyz(i+i1, j+j1, k+k1)
                        omega[lmb,i,j,k] = numpy.dot(omega_nuc[lmb], omega_elec)
                    else:
                        # Odd parity: the angular integral vanishes.
                        omega[lmb,i,j,k] = 0
    return omega
def type2_ang_part(li, lc, ri):
    # [lambda,m,a,b,c]
    '''Angular integrals for the type-2 (semi-local) ECP term.

    Returns omega[a, b, c, m, lmb] for Cartesian powers a+b+c <= li of the
    basis function, projector component m (2*lc+1 values) and auxiliary
    angular momentum lmb = 0..li+lc, built from unit-sphere monomial
    integrals (int_unit_xyz) contracted with spherical-harmonic values at
    the direction of ri.  Odd-parity (lc+a+b+c+lmb) entries are zero.

    NOTE: omega is allocated with numpy.empty and only the triangular
    region a+b+c <= li is written; other entries are uninitialized.
    '''
    norm_ri = numpy.linalg.norm(ri)
    if norm_ri > 1e-18:
        unitr = ri/norm_ri
    else:
        # ri at the origin: direction undefined, keep the zero vector.
        unitr = ri
    omega = numpy.empty((li+1,li+1,li+1,lc*2+1,li+lc+1))
    lcart = (lc+1)*(lc+2)//2
    omega_nuc = []
    for lmb in range(li+lc+1):
        # Real spherical harmonic values at unitr (times 4*pi).
        c2smat = c2s_bra(lmb, numpy.eye((lmb+1)*(lmb+2)//2))
        omega_nuc.append(4*numpy.pi * numpy.dot(ang_nuc_part(lmb, unitr), c2smat))
    # Scratch: one row per Cartesian component of lc, one column per lambda.
    tmp = numpy.empty((lcart,li+lc+1))
    for a in range(li+1):
        for b in range(li+1-a):
            for c in range(li+1-a-b):
                for lmb in range(li+lc+1):
                    if (lc+a+b+c+lmb) % 2 == 0:
                        omega_xyz = numpy.empty((lcart, (lmb+1)*(lmb+2)//2))
                        for m,(u,v,w) in enumerate(loop_cart(lc)):
                            for n, (i1, j1, k1) in enumerate(loop_cart(lmb)):
                                omega_xyz[m,n] = int_unit_xyz(a+u+i1, b+v+j1, c+w+k1)
                        tmp[:,lmb] = numpy.dot(omega_xyz, omega_nuc[lmb])
                    else:
                        # Odd parity: integral vanishes.
                        tmp[:,lmb] = 0
                # Transform the lc Cartesian rows to real spherical harmonics.
                omega[a,b,c,:,:] = c2s_bra(lc, tmp)
    return omega
def angular_moment_matrix(l):
    '''Matrix of the angular momentum operator l*1j on the real spherical
    harmonic basis; returned as a (3, 2l+1, 2l+1) complex array (lx, ly, lz).'''
    dim = 2*l + 1
    lz = numpy.diag(numpy.arange(-l, l+1, dtype=numpy.complex128))
    lx = numpy.zeros((dim, dim), dtype=numpy.complex128)
    ly = numpy.zeros((dim, dim), dtype=numpy.complex128)
    for m in range(-l, l+1):
        row = l + m
        if m + 1 <= l:
            # Ladder coefficient for m -> m+1: ((l+mj)*(l-mj+1))**.5 with mj=m+1.
            c_up = ((l+m+1)*(l-m))**.5
            lx[row, row+1] = .5 * c_up
            ly[row, row+1] = .5j * c_up
        if m - 1 >= -l:
            # Ladder coefficient for m -> m-1: ((l-mj)*(l+mj+1))**.5 with mj=m-1.
            c_dn = ((l-m+1)*(l+m))**.5
            lx[row, row-1] = .5 * c_dn
            ly[row, row-1] = -.5j * c_dn
    # Rotate from the pure (complex) to the real spherical harmonic basis.
    u = sph.sph_pure2real(l)
    uH = u.conj().T
    lx = uH.dot(lx).dot(u)
    ly = uH.dot(ly).dot(u)
    lz = uH.dot(lz).dot(u)
    return numpy.array((lx, ly, lz))
def facs_ang(omega, l, lc, fac_cache):
    '''Contract the angular integrals omega with the binomial/shift factors
    from cache_fac.  Output axes: (a+b+c, cart index, m, lambda).'''
    ncart = (l+1)*(l+2)//2
    facs = numpy.zeros((l+1, ncart, lc*2+1, l+lc+1))
    for mi, (ix, iy, iz) in enumerate(loop_cart(l)):
        for a, b, c in loop_xyz(ix, iy, iz):
            coeff = (fac_cache[0,ix,a]
                     * fac_cache[1,iy,b]
                     * fac_cache[2,iz,c])
            facs[a+b+c, mi] += coeff * omega[a,b,c]
    return facs
def ang_nuc_part(l, rij):
    '''Real spherical harmonics (2l+1 values) built from the Cartesian
    monomials of rij; l = 0 and l = 1 only need a normalization constant,
    higher l go through the C cart->sph transformation in libecp.'''
    monomials = []
    for ix in range(l, -1, -1):
        for iy in range(l - ix, -1, -1):
            iz = l - ix - iy
            monomials.append(rij[0]**ix * rij[1]**iy * rij[2]**iz)
    omega_xyz = numpy.asarray(monomials, dtype=float)
    if l == 0:
        return omega_xyz * 0.282094791773878143
    if l == 1:
        return omega_xyz * 0.488602511902919921
    omega = numpy.empty((2*l+1))
    fc2s = libecp.CINTc2s_ket_sph
    fc2s(omega.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(1),
         omega_xyz.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))
    return omega
def int_unit_xyz(i, j, k):
    '''Unit-sphere integral of the monomial x**i y**j z**k (up to the common
    normalization): zero if any exponent is odd, otherwise a ratio of
    double factorials taken from the _fac2 table.'''
    if i % 2 == 0 and j % 2 == 0 and k % 2 == 0:
        return (_fac2[i-1] * _fac2[j-1] * _fac2[k-1] / _fac2[i+j+k+1])
    return 0
# Double-factorial lookup table n!! for n = 0..79, used by int_unit_xyz.
_fac2 = factorial2(numpy.arange(80))
# Overwrite the last slot so that _fac2[-1] — reached through the i-1
# indexing in int_unit_xyz when an exponent is 0 — acts as (-1)!! == 1.
# This deliberately sacrifices the value of 79!!, which is never needed.
_fac2[-1] = 1
def c2s_bra(l, gcart):
    '''Transform rows of Cartesian components with angular momentum l to the
    real spherical harmonic basis; l = 0 and l = 1 reduce to a constant
    scaling, higher l use the C transformation in libecp.'''
    if l == 0:
        return gcart * 0.282094791773878143
    if l == 1:
        return gcart * 0.488602511902919921
    ncol = gcart.shape[1]
    gsph = numpy.empty((l*2+1, ncol))
    fc2s = libecp.CINTc2s_ket_sph
    fc2s(gsph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(ncol),
         gcart.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))
    return gsph
class KnownValues(unittest.TestCase):
    '''Cross-checks of the pure-python ECP reference implementation in this
    module against the C implementation in libecp and the production code
    in gto.ecp.'''

    def test_bessel(self):
        # ECPsph_ine must reproduce scipy's exponentially scaled modified
        # spherical Bessel functions at every radial grid point.
        rs = radi.gauss_chebyshev(99)[0]
        bessel1 = numpy.empty(8)
        for i,x in enumerate(rs):
            bessel0 = scipy.special.spherical_in(numpy.arange(7+1), x) * numpy.exp(-x)
            libecp.ECPsph_ine(bessel1.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_int(7), ctypes.c_double(x))
            self.assertTrue(numpy.allclose(bessel0, bessel1))

    def test_gauss_chebyshev(self):
        # The C radial grid must match radi.gauss_chebyshev (nodes and weights).
        rs0, ws0 = radi.gauss_chebyshev(99)
        rs = numpy.empty_like(rs0)
        ws = numpy.empty_like(ws0)
        libecp.ECPgauss_chebyshev(rs.ctypes.data_as(ctypes.c_void_p),
                                  ws.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_int(99))
        self.assertTrue(numpy.allclose(rs0, rs))
        self.assertTrue(numpy.allclose(ws0, ws))

    def test_rad_part(self):
        # Radial ECP potential U(r): python rad_part vs C ECPrad_part.
        rs, ws = radi.gauss_chebyshev(99)
        ur0 = rad_part(mol, mol._ecpbas, rs)
        ur1 = numpy.empty_like(ur0)
        cache = numpy.empty(100000)
        libecp.ECPrad_part(ur1.ctypes.data_as(ctypes.c_void_p),
                           rs.ctypes.data_as(ctypes.c_void_p),
                           ctypes.c_int(0), ctypes.c_int(len(rs)), ctypes.c_int(1),
                           (ctypes.c_int*2)(0, len(mol._ecpbas)),
                           mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
                           mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
                           mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
                           mol._env.ctypes.data_as(ctypes.c_void_p),
                           lib.c_null_ptr(), cache.ctypes.data_as(ctypes.c_void_p))
        self.assertTrue(numpy.allclose(ur0, ur1))

    def test_type2_ang_part(self):
        # Type-2 angular factors for all li (s..h) x lc (s..g) combinations.
        numpy.random.seed(3)
        rca = numpy.random.random(3)
        cache = numpy.empty(100000)
        def type2_facs_ang(li, lc):
            i_fac_cache = cache_fac(li, rca)
            facs0 = facs_ang(type2_ang_part(li, lc, -rca), li, lc, i_fac_cache)
            facs1 = numpy.empty_like(facs0)
            libecp.type2_facs_ang(facs1.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_int(li), ctypes.c_int(lc),
                                  rca.ctypes.data_as(ctypes.c_void_p),
                                  cache.ctypes.data_as(ctypes.c_void_p))
            self.assertTrue(numpy.allclose(facs0, facs1))
        for li in range(6):
            for lc in range(5):
                type2_facs_ang(li, lc)

    def test_type2_rad_part(self):
        # Type-2 radial factors for every basis shell and ECP angular momentum.
        rc = .8712
        rs, ws = radi.gauss_chebyshev(99)
        cache = numpy.empty(100000)
        def type2_facs_rad(ish, lc):
            facs0 = facs_rad(mol, ish, lc, rc, rs).transpose(0,2,1).copy()
            facs1 = numpy.empty_like(facs0)
            libecp.type2_facs_rad(facs1.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_int(ish), ctypes.c_int(lc),
                                  ctypes.c_double(rc),
                                  rs.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_int(len(rs)), ctypes.c_int(1),
                                  mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
                                  mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
                                  mol._env.ctypes.data_as(ctypes.c_void_p),
                                  cache.ctypes.data_as(ctypes.c_void_p))
            self.assertTrue(numpy.allclose(facs0, facs1))
        for ish in range(mol.nbas):
            for lc in range(5):
                type2_facs_rad(ish, lc)

    def test_type2(self):
        # Full type-2 matrix elements for every shell pair: python reference
        # vs ECPtype2_sph vs the production implementation in gto.ecp.
        cache = numpy.empty(100000)
        def gen_type2(shls):
            di = (mol.bas_angular(shls[0])*2+1) * mol.bas_nctr(shls[0])
            dj = (mol.bas_angular(shls[1])*2+1) * mol.bas_nctr(shls[1])
            mat0 = numpy.zeros((di,dj))
            for ia in range(mol.natm):
                ecpbas = mol._ecpbas[mol._ecpbas[:,ATOM_OF] == ia]
                if len(ecpbas) == 0:
                    continue
                mat0 += type2_by_shell(mol, shls, ia, ecpbas)
            mat1 = numpy.empty(mat0.shape, order='F')
            libecp.ECPtype2_sph(mat1.ctypes.data_as(ctypes.c_void_p),
                                (ctypes.c_int*2)(*shls),
                                mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(len(mol._ecpbas)),
                                mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
                                mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
                                mol._env.ctypes.data_as(ctypes.c_void_p),
                                lib.c_null_ptr(), cache.ctypes.data_as(ctypes.c_void_p))
            if not numpy.allclose(mat0, mat1, atol=1e-8):
                # i, j come from the enclosing loops below (closure access).
                print(i, j, 'error = ', numpy.linalg.norm(mat0-mat1))
            self.assertTrue(numpy.allclose(mat0, mat1, atol=1e-6))
            mat2 = gto.ecp.type2_by_shell(mol, shls)
            self.assertTrue(numpy.allclose(mat0, mat2, atol=1e-6))
        for i in range(mol.nbas):
            for j in range(mol.nbas):
                gen_type2((i,j))

    def test_type1_state_fac(self):
        # Static binomial/shift factor tables: python vs C type1_static_facs.
        numpy.random.seed(3)
        ri = numpy.random.random(3) - .5
        cache = numpy.empty(100000)
        def tfacs(li):
            facs0 = type1_cache_fac(li, ri)
            facs1 = numpy.zeros_like(facs0)
            libecp.type1_static_facs(facs1.ctypes.data_as(ctypes.c_void_p),
                                     ctypes.c_int(li),
                                     ri.ctypes.data_as(ctypes.c_void_p),
                                     cache.ctypes.data_as(ctypes.c_void_p))
            self.assertTrue(numpy.allclose(facs0, facs1))
        for l in range(6):
            tfacs(l)

    def test_type1_rad_ang(self):
        # Combined radial-angular type-1 factors up to lmax = 12.
        numpy.random.seed(4)
        ri = numpy.random.random(3) - .5
        def tfacs(lmax):
            rad_all = numpy.random.random((lmax+1,lmax+1))
            rad_ang0 = type1_rad_ang(lmax, ri, rad_all)
            rad_ang1 = numpy.empty_like(rad_ang0)
            libecp.type1_rad_ang(rad_ang1.ctypes.data_as(ctypes.c_void_p),
                                 ctypes.c_int(lmax),
                                 ri.ctypes.data_as(ctypes.c_void_p),
                                 rad_all.ctypes.data_as(ctypes.c_void_p))
            self.assertTrue(numpy.allclose(rad_ang0, rad_ang1))
        for l in range(13):
            tfacs(l)

    def test_type1_rad(self):
        # Type-1 radial integrals on the Gauss-Chebyshev grid.
        k = 1.621
        aij = .792
        rs, ws = radi.gauss_chebyshev(99)
        ur = rad_part(mol, mol._ecpbas, rs) * ws
        cache = numpy.empty(100000)
        def gen_type1_rad(li):
            rad_all0 = type1_rad_part(li, k, aij, ur, rs)
            rad_all1 = numpy.zeros_like(rad_all0)
            libecp.type1_rad_part(rad_all1.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_int(li),
                                  ctypes.c_double(k), ctypes.c_double(aij),
                                  ur.ctypes.data_as(ctypes.c_void_p),
                                  rs.ctypes.data_as(ctypes.c_void_p),
                                  ctypes.c_int(len(rs)), ctypes.c_int(1),
                                  cache.ctypes.data_as(ctypes.c_void_p))
            self.assertTrue(numpy.allclose(rad_all0, rad_all1))
        for l in range(13):
            gen_type1_rad(l)

    def test_type1(self):
        # Full type-1 matrix elements for every shell pair: python reference
        # vs ECPtype1_sph vs the production implementation in gto.ecp.
        def gen_type1(shls):
            di = (mol.bas_angular(shls[0])*2+1) * mol.bas_nctr(shls[0])
            dj = (mol.bas_angular(shls[1])*2+1) * mol.bas_nctr(shls[1])
            mat0 = numpy.zeros((di,dj))
            for ia in range(mol.natm):
                ecpbas = mol._ecpbas[mol._ecpbas[:,ATOM_OF] == ia]
                if len(ecpbas) == 0:
                    continue
                # Type-1 integrals only involve the local (ANG_OF < 0) part.
                ecpbas0 = ecpbas[ecpbas[:,ANG_OF] < 0]
                if len(ecpbas0) == 0:
                    continue
                mat0 += type1_by_shell(mol, shls, ia, ecpbas0)
            mat1 = numpy.empty(mat0.shape, order='F')
            cache = numpy.empty(100000)
            libecp.ECPtype1_sph(mat1.ctypes.data_as(ctypes.c_void_p),
                                (ctypes.c_int*2)(*shls),
                                mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(len(mol._ecpbas)),
                                mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
                                mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
                                mol._env.ctypes.data_as(ctypes.c_void_p),
                                lib.c_null_ptr(), cache.ctypes.data_as(ctypes.c_void_p))
            if not numpy.allclose(mat0, mat1, atol=1e-8):
                print(i, j, numpy.linalg.norm(mat0-mat1))
            self.assertTrue(numpy.allclose(mat0, mat1, atol=1e-6))
            mat2 = gto.ecp.type1_by_shell(mol, shls)
            self.assertTrue(numpy.allclose(mat0, mat2, atol=1e-6))
        for i in range(mol.nbas):
            for j in range(mol.nbas):
                gen_type1((i,j))

    def test_so_1atom(self):
        # Spin-orbit ECP on a single Na+ with an artificial ECP: so_by_shell
        # must agree with the int1e_inuc_rxp matrix element for this setup.
        mol = gto.M(atom='''
Na 0.5 0.5 0.
''',
                    charge=1,
                    basis={'Na': [(0, (1, 1)), (1, (4, 1)), (1, (1, 1)), (2, (1, 1))]},
                    ecp = {'Na': gto.basis.parse_ecp('''
Na nelec 8
Na ul
1 0. -3. -3.
Na S
1 0. -3. -3.
Na P
1 0. -3. -3.
Na D
1 0. -3. -3.
Na F
1 0. -3. -3.
''')})
        def gen_so(shls):
            mat0 = 0
            for ia in range(mol.natm):
                # Keep only the spin-orbit (SO_TYPE_OF == 1) shells of atom ia.
                ecpbas = mol._ecpbas[(mol._ecpbas[:,ATOM_OF]==ia) &
                                     (mol._ecpbas[:,SO_TYPE_OF]==1)]
                if len(ecpbas) == 0:
                    continue
                mat0 += so_by_shell(mol, shls, ia, ecpbas)
            s = lib.PauliMatrices * .5
            ui = numpy.asarray(gto.sph2spinor_l(mol.bas_angular(shls[0])))
            uj = numpy.asarray(gto.sph2spinor_l(mol.bas_angular(shls[1])))
            ref = numpy.einsum('sxy,spq,xpi,yqj->ij', s,
                               mol.intor_by_shell('int1e_inuc_rxp', shls),
                               ui.conj(), uj)
            self.assertAlmostEqual(abs(ref-mat0).max(), 0, 12)
            mat2 = .5 * gto.ecp.so_by_shell(mol, shls)
            self.assertTrue(numpy.allclose(ref, mat2, atol=1e-6))
        for i in range(mol.nbas):
            for j in range(mol.nbas):
                gen_so((i,j))
if __name__ == '__main__':
    # Run the full cross-validation suite against libecp.
    print('Full Tests for ecp')
    unittest.main()
| sunqm/pyscf | pyscf/lib/gto/test/test_ecp.py | Python | apache-2.0 | 28,317 | [
"PySCF"
] | 8205c2d0ece406abfd92879c1dde6e1d63f9daf8a9f6d0e208a8bc6156d8177b |
# -*- coding: utf-8 -*-
#
# brunel_alpha_evolution_strategies.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''Using evolution strategies to find parameters for a random
balanced network with alpha synapses
----------------------------------------------------------------
This script uses an optimization algorithm to find the appropriate
parameter values for the external drive "eta" and the relative ratio
of excitation and inhibition "g" for a balanced random network that
lead to particular population-averaged rates, coefficients of
variation and correlations.
From an initial Gaussian search distribution parameterized with mean
and standard deviation network parameters are sampled. Network
realizations of these parameters are simulated and evaluated according
to an objective function that measures how close the activity
statistics are to their desired values (~fitness). From these fitness
values the approximate natural gradient of the fitness landscape is
computed and used to update the parameters of the search
distribution. This procedure is repeated until the maximal number of
function evaluations is reached or the width of the search
distribution becomes extremely small. We use the following fitness
function:
f = - alpha(r - r*)^2 - beta(cv - cv*)^2 - gamma(corr - corr*)^2
where alpha, beta and gamma are weighting factors, and stars indicate
target values.
The network contains an excitatory and an inhibitory population on
the basis of the network used in
Brunel N (2000). Dynamics of Sparsely Connected Networks of Excitatory
and Inhibitory Spiking Neurons. Journal of Computational Neuroscience
8, 183-208.
The optimization algorithm (evolution strategies) is described in
Wierstra et al. (2014). Natural evolution strategies. Journal of
Machine Learning Research, 15(1), 949-980.
Author: Jakob Jordan
Year: 2018
See Also: brunel_alpha_nest.py
'''
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import nest
from numpy import exp
'''
Analysis
'''
def cut_warmup_time(spikes, warmup_time):
    '''Drop all events recorded at or before warmup_time.

    Filters the 'senders' and 'times' arrays in place and returns the
    (mutated) spikes dict.
    '''
    keep = spikes['times'] > warmup_time
    spikes['senders'] = spikes['senders'][keep]
    spikes['times'] = spikes['times'][keep]
    return spikes
def compute_rate(spikes, N_rec, sim_time):
    '''Average firing rate in spikes/s per recorded neuron
    (sim_time is in ms, hence the factor 1e3).'''
    rate = 1. * len(spikes['times'])
    rate /= N_rec
    rate /= sim_time
    return rate * 1e3
def sort_spikes(spikes):
    '''Group spike times by sender gid.

    Returns (sorted unique gids, list of per-gid spike-time arrays).
    '''
    gids = sorted(np.unique(spikes['senders']))
    trains = [spikes['times'][spikes['senders'] == g] for g in gids]
    return gids, trains
def compute_cv(spiketrains):
    '''Coefficient of variation (std/mean) of the pooled inter-spike
    intervals; 0. when there are no trains or fewer than two ISIs.'''
    if not spiketrains:
        return 0.
    isis = np.hstack([np.diff(st) for st in spiketrains])
    if len(isis) > 1:
        return np.std(isis) / np.mean(isis)
    return 0.
def bin_spiketrains(spiketrains, t_min, t_max, t_bin):
    '''Histogram each spiketrain on a common grid of t_bin-wide bins.

    Returns (bin edges, list of per-train count arrays).
    '''
    bins = np.arange(t_min, t_max, t_bin)
    binned = []
    for train in spiketrains:
        counts, _ = np.histogram(train, bins=bins)
        binned.append(counts)
    return bins, binned
def compute_correlations(binned_spiketrains):
    '''Mean pairwise Pearson correlation between the binned trains;
    0. when there are fewer than two trains.'''
    n = len(binned_spiketrains)
    if n <= 1:
        return 0.
    cc = np.corrcoef(binned_spiketrains)
    # Subtract the n diagonal entries (self-correlation == 1) and average
    # over the n*(n-1) off-diagonal pairs.
    return (np.sum(cc) - n) / (n * (n - 1.))
def compute_statistics(parameters, espikes, ispikes):
    '''Computes population-averaged rates, coefficients of variation and
    correlations from recorded spikes of excitatory and inhibitory
    populations.

    Parameters
    ----------
    parameters: dict
        Simulation parameters; 'warmup_time', 'N_rec' and 'sim_time' are used.
    espikes, ispikes: dict
        Recorded events ('senders', 'times') of the excitatory and
        inhibitory populations; filtered in place to drop the warmup period.

    Returns
    -------
    tuple of float
        (rate, cv, corr), each averaged over the two populations.
    '''
    espikes = cut_warmup_time(espikes, parameters['warmup_time'])
    ispikes = cut_warmup_time(ispikes, parameters['warmup_time'])
    erate = compute_rate(espikes, parameters['N_rec'], parameters['sim_time'])
    # BUG FIX: the inhibitory rate was previously computed from the
    # excitatory spikes (espikes); it must use ispikes.
    irate = compute_rate(ispikes, parameters['N_rec'], parameters['sim_time'])
    egids, espiketrains = sort_spikes(espikes)
    igids, ispiketrains = sort_spikes(ispikes)
    ecv = compute_cv(espiketrains)
    icv = compute_cv(ispiketrains)
    ecorr = compute_correlations(
        bin_spiketrains(espiketrains, 0., parameters['sim_time'], 1.)[1])
    icorr = compute_correlations(
        bin_spiketrains(ispiketrains, 0., parameters['sim_time'], 1.)[1])
    return (np.mean([erate, irate]),
            np.mean([ecv, icv]),
            np.mean([ecorr, icorr]))
'''
Network simulation
'''
def simulate(parameters):
    '''Simulates the network and returns recorded spikes for excitatory
    and inhibitory population

    Code taken from brunel_alpha_nest.py
    '''
    def LambertWm1(x):
        # Evaluate the Lambert W function (branch -1) via NEST's SLI
        # interpreter; needed for the analytic PSP maximum below.
        nest.sli_push(x)
        nest.sli_run('LambertWm1')
        y = nest.sli_pop()
        return y

    def ComputePSPnorm(tauMem, CMem, tauSyn):
        # Peak value of the PSP evoked by a current of unit amplitude;
        # used to convert the desired PSP amplitude J (mV) into a current.
        a = (tauMem / tauSyn)
        b = (1.0 / tauSyn - 1.0 / tauMem)
        # time of maximum
        t_max = 1.0 / b * (-LambertWm1(-exp(-1.0 / a) / a) - 1.0 / a)
        # maximum of PSP for current of unit amplitude
        return (exp(1.0) / (tauSyn * CMem * b) *
                ((exp(-t_max / tauMem) - exp(-t_max / tauSyn)) / b -
                 t_max * exp(-t_max / tauSyn)))

    # number of excitatory neurons
    NE = int(parameters['gamma'] * parameters['N'])
    # number of inhibitory neurons
    NI = parameters['N'] - NE

    # number of excitatory synapses per neuron
    CE = int(parameters['epsilon'] * NE)
    # number of inhibitory synapses per neuron
    CI = int(parameters['epsilon'] * NI)

    tauSyn = 0.5  # synaptic time constant in ms
    tauMem = 20.0  # time constant of membrane potential in ms
    CMem = 250.0  # capacitance of membrane in in pF
    theta = 20.0  # membrane threshold potential in mV
    neuron_parameters = {
        'C_m': CMem,
        'tau_m': tauMem,
        'tau_syn_ex': tauSyn,
        'tau_syn_in': tauSyn,
        't_ref': 2.0,
        'E_L': 0.0,
        'V_reset': 0.0,
        'V_m': 0.0,
        'V_th': theta
    }
    J = 0.1  # postsynaptic amplitude in mV
    J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
    J_ex = J / J_unit  # amplitude of excitatory postsynaptic current
    # amplitude of inhibitory postsynaptic current
    J_in = -parameters['g'] * J_ex

    # external Poisson rate scaled relative to the threshold rate nu_th
    nu_th = (theta * CMem) / (J_ex * CE * exp(1) * tauMem * tauSyn)
    nu_ex = parameters['eta'] * nu_th
    p_rate = 1000.0 * nu_ex * CE

    # reset and configure the NEST kernel; order matters, defaults must be
    # set before the nodes are created
    nest.ResetKernel()
    nest.set_verbosity('M_FATAL')

    nest.SetKernelStatus({'rng_seeds': [parameters['seed']],
                          'resolution': parameters['dt']})

    nest.SetDefaults('iaf_psc_alpha', neuron_parameters)
    nest.SetDefaults('poisson_generator', {'rate': p_rate})

    nodes_ex = nest.Create('iaf_psc_alpha', NE)
    nodes_in = nest.Create('iaf_psc_alpha', NI)
    noise = nest.Create('poisson_generator')
    espikes = nest.Create('spike_detector')
    ispikes = nest.Create('spike_detector')

    nest.SetStatus(espikes, [{'label': 'brunel-py-ex',
                              'withtime': True,
                              'withgid': True,
                              'to_file': False}])

    nest.SetStatus(ispikes, [{'label': 'brunel-py-in',
                              'withtime': True,
                              'withgid': True,
                              'to_file': False}])

    nest.CopyModel('static_synapse', 'excitatory',
                   {'weight': J_ex, 'delay': parameters['delay']})
    nest.CopyModel('static_synapse', 'inhibitory',
                   {'weight': J_in, 'delay': parameters['delay']})

    # the external drive projects to both populations
    nest.Connect(noise, nodes_ex, syn_spec='excitatory')
    nest.Connect(noise, nodes_in, syn_spec='excitatory')

    # only the first N_rec neurons of each population are recorded
    if parameters['N_rec'] > NE:
        raise ValueError(
            'Requested recording from {} neurons, \
            but only {} in excitatory population'.format(
                parameters['N_rec'], NE))
    if parameters['N_rec'] > NI:
        raise ValueError(
            'Requested recording from {} neurons, \
            but only {} in inhibitory population'.format(
                parameters['N_rec'], NI))
    nest.Connect(nodes_ex[:parameters['N_rec']], espikes)
    nest.Connect(nodes_in[:parameters['N_rec']], ispikes)

    # recurrent connectivity with fixed in-degree
    conn_parameters_ex = {'rule': 'fixed_indegree', 'indegree': CE}
    nest.Connect(
        nodes_ex, nodes_ex + nodes_in, conn_parameters_ex, 'excitatory')

    conn_parameters_in = {'rule': 'fixed_indegree', 'indegree': CI}
    nest.Connect(
        nodes_in, nodes_ex + nodes_in, conn_parameters_in, 'inhibitory')

    nest.Simulate(parameters['sim_time'])

    return (nest.GetStatus(espikes, 'events')[0],
            nest.GetStatus(ispikes, 'events')[0])
'''
Optimization
'''
def default_population_size(dimensions):
    '''Default number of individuals sampled per generation for a search
    space of the given dimensionality (Wierstra et al., 2014).'''
    return int(4 + np.floor(3 * np.log(dimensions)))
def default_learning_rate_mu():
    '''Default learning rate for the mean of the search distribution;
    constant 1 as recommended in Wierstra et al. (2014).'''
    return 1
def default_learning_rate_sigma(dimensions):
    '''Default learning rate for the standard deviation of the search
    distribution for the given dimensionality (Wierstra et al., 2014).'''
    d = float(dimensions)
    return (3 + np.log(d)) / (12. * np.sqrt(d))
def compute_utility(fitness):
    '''Rank-based utility values for fitness shaping (Wierstra et al., 2014).

    Returns (order, utility): `order` sorts individuals from best to worst
    fitness, `utility` holds the zero-sum weights assigned to those ranks.
    '''
    n = len(fitness)
    order = np.argsort(fitness)[::-1]
    fitness = fitness[order]
    base = np.max([0, np.log((n / 2) + 1)])
    raw = np.array([base - np.log(rank + 1) for rank in range(n)])
    # Normalize to sum 1, then shift so the utilities sum to zero.
    utility = raw / np.sum(raw) - 1. / n
    return order, utility
def optimize(func, mu, sigma, learning_rate_mu=None, learning_rate_sigma=None,
             population_size=None, fitness_shaping=True,
             mirrored_sampling=True, record_history=False,
             max_generations=2000, min_sigma=1e-8, verbosity=0):
    '''Optimizes an objective function via evolution strategies using
    the natural gradient of multinormal search distributions in
    natural coordinates. Does not consider covariances between
    parameters ("Separable natural evolution strategies").

    See Wierstra et al. (2014)

    Parameters
    ----------
    func: function
        The function to be maximized.
    mu: np.ndarray
        Initial mean of the search distribution.
    sigma: np.ndarray
        Initial standard deviation of the search distribution.
    learning_rate_mu: float
        Learning rate of mu.
    learning_rate_sigma: float
        Learning rate of sigma.
    population_size: int
        Number of individuals sampled in each generation.
    fitness_shaping: bool
        Whether to use fitness shaping, compensating for large
        deviations in fitness, see Wierstra et al. (2014).
    mirrored_sampling: bool
        Whether to use mirrored sampling, i.e., evaluating a mirrored
        sample for each sample, see Wierstra et al. (2014).
    record_history: bool
        Whether to record history of search distribution parameters,
        fitness values and individuals.
    max_generations: int
        Maximal number of generations.
    min_sigma: float
        Minimal value for standard deviation of search
        distribution. If any dimension has a value smaller than this,
        the search is stopped.
    verbosity: bool
        Whether to continuously print progress information.

    Returns
    -------
    dict
        Dictionary of final parameters of search distribution and
        history.
    '''
    if not isinstance(mu, np.ndarray):
        raise TypeError('mu needs to be of type np.ndarray')
    if not isinstance(sigma, np.ndarray):
        raise TypeError('sigma needs to be of type np.ndarray')
    if learning_rate_mu is None:
        learning_rate_mu = default_learning_rate_mu()
    if learning_rate_sigma is None:
        learning_rate_sigma = default_learning_rate_sigma(mu.size)
    if population_size is None:
        population_size = default_population_size(mu.size)

    generation = 0
    mu_history = []
    sigma_history = []
    pop_history = []
    fitness_history = []

    while True:
        # create new population using the search distribution
        s = np.random.normal(0, 1, size=(population_size,) + np.shape(mu))
        z = mu + sigma * s

        # add mirrored perturbations if enabled
        if mirrored_sampling:
            z = np.vstack([z, mu - sigma * s])
            s = np.vstack([s, -s])

        # evaluate fitness for every individual in population
        # BUG FIX: the dtype used to be the alias np.float, which was
        # deprecated in NumPy 1.20 and removed in 1.24; use the builtin.
        fitness = np.fromiter((func(*zi) for zi in z), float)

        # print status if enabled
        if verbosity > 0:
            print(
                '# Generation {:d} | fitness {:.3f} | mu {} | sigma {}'.format(
                    generation, np.mean(fitness),
                    ', '.join(str(np.round(mu_i, 3)) for mu_i in mu),
                    ', '.join(str(np.round(sigma_i, 3)) for sigma_i in sigma)
                ))

        # apply fitness shaping if enabled
        if fitness_shaping:
            order, utility = compute_utility(fitness)
            s = s[order]
            z = z[order]
        else:
            utility = fitness

        # bookkeeping
        if record_history:
            mu_history.append(mu.copy())
            sigma_history.append(sigma.copy())
            pop_history.append(z.copy())
            fitness_history.append(fitness)

        # exit if max generations reached or search distributions are
        # very narrow
        if generation == max_generations or np.all(sigma < min_sigma):
            break

        # update parameter of search distribution via natural gradient
        # descent in natural coordinates
        mu += learning_rate_mu * sigma * np.dot(utility, s)
        sigma *= np.exp(learning_rate_sigma / 2. * np.dot(utility, s**2 - 1))

        generation += 1

    return {
        'mu': mu,
        'sigma': sigma,
        'fitness_history': np.array(fitness_history),
        'mu_history': np.array(mu_history),
        'sigma_history': np.array(sigma_history),
        'pop_history': np.array(pop_history)
    }
def optimize_network(optimization_parameters, simulation_parameters):
    '''Searches for network parameters (g, eta) that bring the simulated
    activity statistics close to the targets in optimization_parameters.'''
    np.random.seed(simulation_parameters['seed'])

    def objective_function(g, eta):
        '''Fitness of one (g, eta) parametrization: negative weighted
        squared deviation of rate, cv and correlation from the targets.'''
        # Work on a copy so the caller's parameter dict keeps g/eta = None.
        params = simulation_parameters.copy()
        params['g'] = g
        params['eta'] = eta
        # Simulate this candidate network and measure its activity.
        espikes, ispikes = simulate(params)
        rate, cv, corr = compute_statistics(
            simulation_parameters, espikes, ispikes)
        fitness = \
            - optimization_parameters['fitness_weight_rate'] * (
                rate - optimization_parameters['target_rate']) ** 2 \
            - optimization_parameters['fitness_weight_cv'] * (
                cv - optimization_parameters['target_cv']) ** 2 \
            - optimization_parameters['fitness_weight_corr'] * (
                corr - optimization_parameters['target_corr']) ** 2
        return fitness

    return optimize(
        objective_function,
        np.array(optimization_parameters['mu']),
        np.array(optimization_parameters['sigma']),
        max_generations=optimization_parameters['max_generations'],
        record_history=True,
        verbosity=optimization_parameters['verbosity']
    )
'''
Main
'''
if __name__ == '__main__':
    simulation_parameters = {
        'seed': 123,
        'dt': 0.1,            # (ms) simulation resolution
        'sim_time': 1000.,    # (ms) simulation duration
        'warmup_time': 300.,  # (ms) duration ignored during analysis
        'delay': 1.5,         # (ms) synaptic delay
        'g': None,            # relative ratio of excitation and inhibition
        'eta': None,          # relative strength of external drive
        'epsilon': 0.1,       # average connectivity of network
        'N': 400,             # number of neurons in network
        'gamma': 0.8,         # relative size of excitatory and
                              # inhibitory population
        'N_rec': 40,          # number of neurons to record activity from
    }
    optimization_parameters = {
        'verbosity': 1,          # print progress over generations
        'max_generations': 20,   # maximal number of generations
        'target_rate': 1.89,     # (spikes/s) target rate
        'target_corr': 0.0,      # target correlation
        'target_cv': 1.,         # target coefficient of variation
        'mu': [1., 3.],          # initial mean for search distribution
                                 # (mu(g), mu(eta))
        'sigma': [0.15, 0.05],   # initial sigma for search
                                 # distribution (sigma(g), sigma(eta))

        # hyperparameters of the fitness function; these are used to
        # compensate for the different typical scales of the
        # individual measures, rate ~ O(1), cv ~ (0.1), corr ~ O(0.01)
        'fitness_weight_rate': 1.,    # relative weight of rate deviation
        'fitness_weight_cv': 10.,     # relative weight of cv deviation
        'fitness_weight_corr': 100.,  # relative weight of corr deviation
    }

    # optimize network parameters
    optimization_result = optimize_network(optimization_parameters,
                                           simulation_parameters)

    # re-simulate once with the optimized parameters and report the
    # resulting activity statistics
    simulation_parameters['g'] = optimization_result['mu'][0]
    simulation_parameters['eta'] = optimization_result['mu'][1]
    espikes, ispikes = simulate(simulation_parameters)
    rate, cv, corr = compute_statistics(
        simulation_parameters, espikes, ispikes)
    print('Statistics after optimization:', end=' ')
    print('Rate: {:.3f}, cv: {:.3f}, correlation: {:.3f}'.format(
        rate, cv, corr))

    # plot results
    fig = plt.figure(figsize=(10, 4))
    ax1 = fig.add_axes([0.06, 0.12, 0.25, 0.8])
    ax2 = fig.add_axes([0.4, 0.12, 0.25, 0.8])
    ax3 = fig.add_axes([0.74, 0.12, 0.25, 0.8])
    ax1.set_xlabel('Time (ms)')
    ax1.set_ylabel('Neuron id')
    ax2.set_xlabel(r'Relative strength of inhibition $g$')
    ax2.set_ylabel(r'Relative strength of external drive $\eta$')
    ax3.set_xlabel('Generation')
    ax3.set_ylabel('Fitness')

    # raster plot of the excitatory population
    ax1.plot(espikes['times'], espikes['senders'], ls='', marker='.')

    # search distributions (one ellipse per generation) and the sampled
    # individuals of every generation
    for mu, sigma in zip(optimization_result['mu_history'],
                         optimization_result['sigma_history']):
        ellipse = Ellipse(
            xy=mu, width=2 * sigma[0], height=2 * sigma[1], alpha=0.5, fc='k')
        ellipse.set_clip_box(ax2.bbox)
        ax2.add_artist(ellipse)
    ax2.plot(optimization_result['mu_history'][:, 0],
             optimization_result['mu_history'][:, 1],
             marker='.', color='k', alpha=0.5)
    for generation in optimization_result['pop_history']:
        ax2.scatter(generation[:, 0], generation[:, 1])

    # mean fitness (with std error bars) over generations
    ax3.errorbar(np.arange(len(optimization_result['fitness_history'])),
                 np.mean(optimization_result['fitness_history'], axis=1),
                 yerr=np.std(optimization_result['fitness_history'], axis=1))
    fig.savefig('brunel_alpha_evolution_strategies.pdf')
| apeyser/nest-simulator | pynest/examples/brunel_alpha_evolution_strategies.py | Python | gpl-2.0 | 20,542 | [
"Gaussian",
"NEURON"
] | 0cd8edf91811c9a102025e2fae85115c9b64d46865e17794bace13a9c1b865de |
#=== CONTROLS ========================================================================================
# Native GUI controls.
# Authors: Tom De Smedt, Frederik De Bleser
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
import os
from glob import glob
from time import time
from pyglet.text.layout import IncrementalTextLayout
from pyglet.text.caret import Caret
from nodebox.graphics.geometry import angle, distance, clamp, Bounds
from nodebox.graphics import \
Layer, Color, Image, image, crop, rect, \
Text, font, NORMAL, BOLD, CENTER, DEFAULT_FONT, install_font, \
translate, rotate, \
line, DASHED, DOTTED, \
DEFAULT, HAND, TEXT, \
LEFT, RIGHT, UP, DOWN, TAB, ENTER, BACKSPACE, CTRL, SHIFT, ALT
def popdefault(dict, key, default=None):
    """ Pops the given key from the dictionary and returns its value (or default).
    """
    # dict.pop() with a default performs the membership test and the
    # removal in a single lookup instead of two.
    # (The parameter name shadows the builtin dict; kept for API stability.)
    return dict.pop(key, default)
def find(match=lambda item: False, list=[]):
    """ Returns the first item in the list for which match(item)=True, or None.
    """
    # next() with a default replaces the manual loop-and-return pattern.
    # (The parameter name shadows the builtin list; kept for API stability.
    # The mutable default is safe because the list is never modified.)
    return next((item for item in list if match(item)), None)
#=====================================================================================================
#--- Theme -------------------------------------------------------------------------------------------
class Theme(dict):

    def __init__(self, path, **kwargs):
        """ A theme defines the source images for controls and font settings for labels.
            It is loaded from a folder path containing PNG images and TTF font files.
            The default theme is in nodebox/graphics/gui/theme/
            Copy this folder and modify it to create a custom theme.
        """
        def _basename(f):
            return os.path.basename(os.path.splitext(f)[0])
        # Map each image's base name to its file path.
        images = [(_basename(f), f) for f in glob(os.path.join(path, "*.png"))]
        # Install each TTF file; keep the names of those that loaded.
        fonts = []
        for f in glob(os.path.join(path, "*.ttf")):
            if install_font(f):
                fonts.append(_basename(f))  # Filename is assumed to be fontname.
        dict.__init__(self, images)
        self["fonts"] = fonts
        self["fontname"] = kwargs.get("fontname", fonts and fonts[-1] or DEFAULT_FONT)
        self["fontsize"] = kwargs.get("fontsize", 10)
        self["fontweight"] = kwargs.get("fontweight", NORMAL)
        self["text"] = kwargs.get("text", Color(1.0))
# The default theme, loaded from the "theme" folder next to this module.
theme = Theme(os.path.join(os.path.dirname(__file__), "theme"))
#=====================================================================================================
#--- Control -----------------------------------------------------------------------------------------
class Control(Layer):

    def __init__(self, x=0, y=0, id=None, **kwargs):
        """ Base class for GUI controls.
            The Control class inherits from Layer so it must be appended to the canvas (or a container)
            to receive events and get drawn.
            An id can be given to uniquely identify the control.
            If the control is part of a Panel, it can be retrieved with Panel.control_id.
        """
        Layer.__init__(self, x=x, y=y, **kwargs)
        self.id = id
        self.src = {}         # Collection of source images.
        self.enabled = True   # Enable event listener.
        self.duration = 0     # Disable tweening.
        self._controls = {}   # Lazy index of (id, control) children, see nested().
        self._press = None    # Last mouse press, used for double-click detection.

    # Control width and height can't be modified after creation.
    # Internally, use Layer._set_width() and Layer._set_height().
    @property
    def width(self):
        return self._get_width()

    @property
    def height(self):
        return self._get_height()

    def on_mouse_enter(self, mouse):
        # Show a hand cursor while hovering, hinting that the control is clickable.
        mouse.cursor = HAND

    def on_mouse_leave(self, mouse):
        mouse.cursor = DEFAULT

    def on_mouse_press(self, mouse):
        # Fire Control.on_mouse_doubleclick() when mouse is pressed twice in same location.
        # Subclasses need to call this method in their overridden on_mouse_press().
        # A double-click = same button and modifiers, within 2 pixels and
        # 0.4 seconds of the previous press.
        if self._press and \
           abs(self._press[0] - mouse.x) < 2 and \
           abs(self._press[1] - mouse.y) < 2 and \
           self._press[2] == mouse.button and \
           self._press[3] == mouse.modifiers and \
           self._press[4] - time() > -0.4:
            self._press = None
            self.on_mouse_doubleclick(mouse)
        # Record this press for the next double-click test.
        self._press = (mouse.x, mouse.y, mouse.button, mouse.modifiers, time())

    def on_mouse_doubleclick(self, mouse):
        pass

    def on_key_press(self, key):
        # Propagate keyboard events to all child controls.
        for control in self:
            control.on_key_press(key)

    def on_key_release(self, key):
        for control in self:
            control.on_key_release(key)

    def on_action(self):
        """ Override this method with a custom action.
        """
        pass

    def reset(self):
        pass

    def _draw(self):
        Layer._draw(self)

    # Control._pack() is called internally to layout child controls.
    # This should not happen in Control.update(), which is called every frame.
    def _pack(self):
        pass

    # With transformed=True, expensive matrix transformations are done.
    # Turn off, controls are not meant to be rotated or scaled.
    def layer_at(self, x, y, clipped=False, enabled=False, transformed=True, _covered=False):
        return Layer.layer_at(self, x, y, clipped, enabled, False, _covered)

    def origin(self, x=None, y=None, relative=False):
        return Layer.origin(self, x, y, relative)

    def rotate(self, angle):
        # Controls are not meant to be rotated; ignore the request.
        pass

    def scale(self, f):
        # Controls are not meant to be scaled; ignore the request.
        pass

    def __getattr__(self, k):
        # Yields the property with the given name, or
        # yields the child control with the given id.
        if k in self.__dict__:
            return self.__dict__[k]
        ctrl = nested(self, k)
        if ctrl is not None:
            return ctrl
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, k)

    def __repr__(self):
        return "%s(id=%s%s)" % (
            self.__class__.__name__,
            repr(self.id),
            hasattr(self, "value") and ", value="+repr(self.value) or ""
        )
def nested(control, id):
    """ Returns the child Control with the given id, or None.
        Also searches all child Layout containers.
    """
    # Try the cache first (roughly 10x faster than a full traversal).
    # A cached control whose id changed after caching (however unlikely)
    # is discarded so it can be re-resolved.
    cached = control._controls.get(id)
    if cached is not None:
        if cached.id == id:
            return cached
        del control._controls[id]
    # Cache miss: walk the direct children, recursing into Layout groups.
    found = None
    for child in control:
        if child.__dict__.get("id") == id:
            found = child
            break
        if isinstance(child, Layout):
            found = nested(child, id)
            if found is not None:
                break
    # Remember the hit for next time.
    if found is not None:
        control._controls[id] = found
    return found
#=====================================================================================================
#--- Label -------------------------------------------------------------------------------------------
class Label(Control):

    def __init__(self, caption, x=0, y=0, width=None, height=None, id=None, **kwargs):
        """ A label displaying the given caption, centered in the label's (width, height)-box.
            The label does not receive any events.
            Optional parameters can include fill, font, fontsize, fontweight.
        """
        txt = Text(caption, **{
               "fill" : popdefault(kwargs, "fill", theme["text"]),
               "font" : popdefault(kwargs, "font", theme["fontname"]),
           "fontsize" : popdefault(kwargs, "fontsize", theme["fontsize"]),
         "fontweight" : popdefault(kwargs, "fontweight", theme["fontweight"]),
         "lineheight" : 1,
              "align" : CENTER
        })
        # Bug fix: the declared width and height parameters were previously
        # ignored; route them into kwargs so an explicit size is honored.
        if width is not None:
            kwargs["width"] = width
        if height is not None:
            kwargs["height"] = height
        # By default the label adopts the rendered text's measured size.
        kwargs.setdefault("width", txt.metrics[0])
        kwargs.setdefault("height", txt.metrics[1])
        Control.__init__(self, x=x, y=y, id=id, **kwargs)
        self.enabled = False # Pass on events to the layers underneath.
        self._text = txt
        self._pack()

    def _get_caption(self):
        return self._text.text
    def _set_caption(self, string):
        self._text.text = string
        self._pack()
    caption = property(_get_caption, _set_caption)

    @property
    def fonts(self):
        # Note: despite the plural name, this yields the label's single font.
        return self._text.font

    @property
    def fontsize(self):
        return self._text.fontsize

    @property
    def fontweight(self):
        return self._text.fontweight

    def _pack(self):
        # Center the text inside the label.
        self._text.x = 0.5 * (self.width - self._text.metrics[0])
        self._text.y = 0.5 * (self.height - self._text.metrics[1])

    def draw(self):
        self._text.draw()
#=====================================================================================================
#--- BUTTON ------------------------------------------------------------------------------------------
class Button(Control):

    def __init__(self, caption="", action=None, x=0, y=0, width=125, id=None, **kwargs):
        """ A clickable button that will fire Button.on_action() when clicked.
            The action handler can be defined in a subclass, or given as a function.
        """
        Control.__init__(self, x=x, y=y, width=width, id=id, **kwargs)
        # Slice the theme image into a stretchable face and two fixed end caps.
        img = Image(theme["button"])
        cap = 20
        self.src = {
            "face" : crop(img, cap, 0, 1, img.height),
            "cap1" : crop(img, 0, 0, cap, img.height),
            "cap2" : crop(img, img.width - cap, 0, cap, img.height),
        }
        if action:
            # Replace Button.on_action() with the given callback.
            self.set_method(action, name="on_action")
        # The caption Label receives the remaining kwargs (fill, font, ...)
        # but never an explicit size.
        popdefault(kwargs, "width")
        popdefault(kwargs, "height")
        self.append(Label(caption, **kwargs))
        self._pack()

    def _get_caption(self):
        return self[0].caption
    def _set_caption(self, string):
        self[0].caption = string
        self._pack()
    caption = property(_get_caption, _set_caption)

    def _pack(self):
        # Button size can not be smaller than its caption.
        self._set_width(max(self.width, self[0].width + self[0].fontsize * 2))
        self._set_height(self.src["face"].height)

    def update(self):
        # Center the caption inside the button.
        # This happens each frame because the position shifts while pressed.
        label = self[0]
        label.x = 0.5 * (self.width - label.width)
        label.y = 0.5 * (self.height - label.height) - self.pressed

    def draw(self):
        # Darken the button while pressed.
        tint = (0.75, 0.75, 0.75) if self.pressed else (1.0, 1.0, 1.0)
        cap1, cap2, face = self.src["cap1"], self.src["cap2"], self.src["face"]
        image(cap1, 0, 0, height=self.height, color=tint)
        image(cap2, x=self.width - cap2.width, height=self.height, color=tint)
        image(face, x=cap1.width, width=self.width - cap1.width - cap2.width, height=self.height, color=tint)

    def on_mouse_release(self, mouse):
        Control.on_mouse_release(self, mouse)
        # Only fire the action if the mouse is actually released on the button.
        if self.contains(mouse.x, mouse.y, transformed=False):
            self.on_action()
#--- ACTION ------------------------------------------------------------------------------------------
class Action(Control):

    def __init__(self, action=None, x=0, y=0, id=None, **kwargs):
        """ A clickable button that will fire Action.on_action() when clicked.
            Actions display an icon instead of a text caption.
            Actions are meant to be used for interface management:
            e.g. closing or minimizing a panel, navigating to the next page, ...
        """
        Control.__init__(self, x=x, y=y, id=id, **kwargs)
        self.src = {"face": Image(theme["action"])}
        self._pack()
        if action:
            # Replace Action.on_action() with the given callback.
            self.set_method(action, name="on_action")

    def _pack(self):
        # The control adopts the icon's size.
        face = self.src["face"]
        self._set_width(face.width)
        self._set_height(face.height)

    def draw(self):
        # Darken the icon while pressed.
        tint = (0.75, 0.75, 0.75) if self.pressed else (1.0, 1.0, 1.0)
        image(self.src["face"], 0, 0, color=tint)

    def on_mouse_release(self, mouse):
        Control.on_mouse_release(self, mouse)
        # Only fire the action if the mouse is actually released on the icon.
        if self.contains(mouse.x, mouse.y, transformed=False):
            self.on_action()
class Close(Action):

    def __init__(self, action=None, x=0, y=0, id=None, **kwargs):
        """ An action that hides the parent control (e.g. a Panel) when pressed.
        """
        Action.__init__(self, action, x=x, y=y, id=id, **kwargs)
        # Swap in the "close" icon from the theme.
        self.src["face"] = Image(theme["action-close"])

    def on_action(self):
        # Hide (but do not destroy) the parent control.
        self.parent.hidden = True
#=====================================================================================================
#--- SLIDER ------------------------------------------------------------------------------------------
class Handle(Control):

    def __init__(self, parent):
        """ The draggable handle of a Slider.
            The handle can protrude from the slider bar,
            so it is a separate layer that fires its own events.
        """
        Control.__init__(self,
             width = parent.src["handle"].width,
            height = parent.src["handle"].height)
        self.parent = parent

    # All mouse events are delegated to the parent slider,
    # so dragging the handle behaves like dragging the slider itself.
    def on_mouse_press(self, mouse):
        self.parent.on_mouse_press(mouse)

    def on_mouse_drag(self, mouse):
        self.parent.on_mouse_drag(mouse)

    def on_mouse_release(self, mouse):
        self.parent.on_mouse_release(mouse)

    def draw(self):
        # Darken the handle while either the slider or the handle is pressed.
        # Fixed: the original "a | b and x or y" relied on bitwise "|" binding
        # tighter than "and"; spelled out as an explicit logical expression
        # (behavior unchanged).
        if self.parent.pressed or self.pressed:
            clr = (0.75, 0.75, 0.75)
        else:
            clr = (1.0, 1.0, 1.0)
        image(self.parent.src["handle"], 0, 0, color=clr)
class Slider(Control):

    def __init__(self, default=0.5, min=0.0, max=1.0, steps=100, x=0, y=0, width=125, id=None, **kwargs):
        """ A draggable slider that will fire Slider.on_action() when dragged.
            The slider's value can be retrieved with Slider.value.
        """
        Control.__init__(self, x=x, y=y, width=width, id=id, **kwargs)
        self.min = min         # Slider minimum value.
        self.max = max         # Slider maximum value.
        self.default = default # Slider default value.
        self.value = default   # Slider current value.
        self.steps = steps     # Number of steps from min to max.
        # Slice the theme image into two stretchable faces (left and right of
        # the handle) and two fixed end caps.
        img, w = Image(theme["slider"]), 5
        self.src = {
            "face1"  : crop(img, w, 0, 1, img.height),
            "face2"  : crop(img, img.width-w, 0, 1, img.height),
            "cap1"   : crop(img, 0, 0, w, img.height),
            "cap2"   : crop(img, img.width-w, 0, w, img.height),
            "handle" : Image(theme["slider-handle"])
        }
        # The handle is a separate layer.
        self.append(Handle(self))
        self._pack()

    def _get_value(self):
        # Map the internal relative position (0.0-1.0) onto the min-max range.
        return self.min + self._t * (self.max-self.min)
    def _set_value(self, value):
        # Store the value as a clamped relative position.
        # The "or -1" guards against division by zero when min == max.
        self._t = clamp(float(value-self.min) / (self.max-self.min or -1), 0.0, 1.0)
    value = property(_get_value, _set_value)

    @property
    def relative(self):
        """ Yields the slider position as a relative number (0.0-1.0).
        """
        return self._t

    def _pack(self):
        # The slider can not be smaller than its two end caps.
        w = max(self.width, self.src["cap1"].width + self.src["cap2"].width)
        self._set_width(w)
        self._set_height(self.src["face1"].height)

    def reset(self):
        Control.reset(self)
        self.value = self.default

    def update(self):
        # Update the handle's position, before Slider.draw() occurs (=smoother).
        self[0].x = self._t * self.width - 0.5 * self[0].width
        self[0].y = 0.5 * (self.height - self[0].height)

    def draw(self):
        # Draw the end caps, the filled part (face1) up to the handle position
        # and the remaining part (face2) after it.
        t = self._t * self.width
        im1, im2, im3, im4 = self.src["cap1"], self.src["cap2"], self.src["face1"], self.src["face2"]
        image(im1, x=0, y=0)
        image(im2, x=self.width-im2.width, y=0)
        image(im3, x=im1.width, y=0, width=t-im1.width)
        image(im4, x=t, y=0, width=self.width-t-im2.width+1)

    def on_mouse_press(self, mouse):
        x0, y0 = self.absolute_position() # Can be nested in other layers.
        step = 1.0 / max(self.steps, 1)
        # Calculate relative value from the slider handle position.
        # The inner width is a bit smaller to accomodate for the slider handle.
        # Clamp the relative value to the nearest step.
        self._t = (mouse.x-x0-self.height*0.5) / float(self.width-self.height)
        self._t = self._t - self._t % step + step
        self._t = clamp(self._t, 0.0, 1.0)
        self.on_action()

    def on_mouse_drag(self, mouse):
        # Dragging continuously re-evaluates the slider position.
        self.on_mouse_press(mouse)
#=====================================================================================================
#--- KNOB --------------------------------------------------------------------------------------------
class Knob(Control):

    def __init__(self, default=0, limit=True, x=0, y=0, id=None, **kwargs):
        """ A twistable knob that will fire Knob.on_action() when dragged.
            The knob's angle can be retrieved with Knob.value (in degrees, 0-360).
            With CTRL pressed, twists by a very small amount.
        """
        Control.__init__(self, x=x, y=y, id=id, **kwargs)
        self.default = default # Knob default angle.
        self.value = default   # Knob current angle.
        self._limit = limit    # Constrain between 0-360 or scroll endlessly?
        self.src = {
              "face" : Image(theme["knob"]),
            "socket" : Image(theme["knob-socket"]),
        }
        self._pack()

    @property
    def relative(self):
        """ Yields the knob's angle as a relative number (0.0-1.0).
        """
        return self.value % 360 / 360.0

    def _pack(self):
        # The control adopts the socket image's size.
        socket = self.src["socket"]
        self._set_width(socket.width)
        self._set_height(socket.height)

    def reset(self):
        Control.reset(self)
        self.value = self.default

    def draw(self):
        # Draw the socket, then the face rotated around the knob's center.
        translate(self.width/2, self.height/2)
        image(self.src["socket"], -self.width/2, -self.height/2)
        rotate(360 - self.value)
        tint = (0.85, 0.85, 0.85) if self.pressed else (1.0, 1.0, 1.0)
        image(self.src["face"], -self.width/2, -self.height/2, color=tint)

    def on_mouse_press(self, mouse):
        # Vertical mouse movement twists the knob;
        # with CTRL pressed, twist 1 degree per pixel instead of 5.
        self.value += mouse.dy * (1 if CTRL in mouse.modifiers else 5)
        if self._limit:
            self.value %= 360
        self.on_action()

    def on_mouse_drag(self, mouse):
        self.on_mouse_press(mouse)
#=====================================================================================================
#--- FLAG --------------------------------------------------------------------------------------------
class Flag(Control):

    def __init__(self, default=False, x=0, y=0, id=None, **kwargs):
        """ A checkbox control that fires Flag.on_action() when checked.
            The checkbox value can be retrieved with Flag.value.
        """
        Control.__init__(self, x=x, y=y, id=id, **kwargs)
        self.default = bool(default) # Flag default value.
        self.value = bool(default)   # Flag current value.
        self.src = {
               "face" : Image(theme["flag"]),
            "checked" : Image(theme["flag-checked"]),
        }
        self._pack()

    def _pack(self):
        # The control adopts the checkbox image's size.
        self._set_width(self.src["face"].width)
        self._set_height(self.src["face"].height)

    def reset(self):
        self.value = self.default

    def draw(self):
        # Show the checked or unchecked image depending on the current value.
        image(self.src["checked"] if self.value else self.src["face"])

    def on_mouse_release(self, mouse):
        Control.on_mouse_release(self, mouse)
        # Only toggle if the mouse is actually released on the checkbox.
        if self.contains(mouse.x, mouse.y, transformed=False):
            self.value = not self.value
            self.on_action()

# Common aliases.
Checkbox = CheckBox = Flag
#=====================================================================================================
#--- PANEL -------------------------------------------------------------------------------------------
class Panel(Control):

    def __init__(self, caption="", fixed=False, modal=True, x=0, y=0, width=175, height=250, **kwargs):
        """ A panel containing other controls that can be dragged when Panel.fixed=False.
            Controls or (Layout groups) can be added with Panel.append().
        """
        # Bug fix: x and y were previously discarded (Control.__init__ was
        # called with x=0, y=0), so a panel could not be positioned at creation.
        Control.__init__(self, x=x, y=y, width=max(width,60), height=max(height,60), **kwargs)
        # Slice the theme image into four corner caps, four stretchable edges
        # and a stretchable face.
        img, w = Image(theme["panel"]), 30
        self.src = {
            "cap1"   : crop(img, 0, img.height-w, w, w),
            "cap2"   : crop(img, img.width-w, img.height-w, w, w),
            "cap3"   : crop(img, 0, 0, w, w),
            "cap4"   : crop(img, img.width-w, 0, w, w),
            "top"    : crop(img, w+1, img.height-w, 1, w),
            "bottom" : crop(img, w+1, 0, 1, w),
            "left"   : crop(img, 0, w+1, w, 1),
            "right"  : crop(img, img.width-w, w+1, w, 1),
            "face"   : crop(img, w+1, w+1, 1, 1)
        }
        # The caption Label never receives an explicit size.
        popdefault(kwargs, "width")
        popdefault(kwargs, "height")
        self.append(Label(caption, **kwargs)) # self[0]
        self.append(Close())                  # self[1]
        self.fixed = fixed # Draggable?
        self.modal = modal # Closeable?
        self._pack()

    def _get_caption(self):
        # Bug fix: previously read self._caption, an attribute that was never
        # assigned (raising AttributeError); delegate to the caption Label
        # instead, consistent with Button.caption.
        return self[0].caption
    def _set_caption(self, str):
        self[0].caption = str
        self._pack()
    caption = property(_get_caption, _set_caption)

    @property
    def controls(self):
        return iter(self[2:]) # self[0] is the Label,
                              # self[1] is the Close action.

    def insert(self, i, control):
        """ Inserts the control, or inserts all controls in the given Layout.
        """
        if isinstance(control, Layout):
            # If the control is actually a Layout (e.g. ordered group of controls), apply it.
            control.apply()
        Layer.insert(self, i, control)

    def append(self, control):
        self.insert(len(self), control)

    def extend(self, controls):
        for control in controls:
            self.append(control)

    def _pack(self):
        # Center the caption in the panel's header.
        # Position the close button in the top right corner.
        self[0].x = 0.5 * (self.width - self[0].width)
        self[0].y = self.height - self.src["top"].height + 0.5 * (self.src["top"].height - self[0].height)
        self[1].x = self.width - self[1].width - 4
        self[1].y = self.height - self[1].height - 2

    def pack(self, padding=20):
        """ Resizes the panel to the most compact size,
            based on the position and size of the controls in the panel.
        """
        def _visit(control):
            # Accumulate the union of all child control bounds, skipping the
            # panel itself, the caption label and the close action.
            if control not in (self, self[0], self[1]):
                self._b = self._b and self._b.union(control.bounds) or control.bounds
        self._b = None
        self.traverse(_visit)
        # Shift the controls so the most compact bounds start at (padding, padding).
        for control in self.controls:
            control.x += padding - self._b.x
            control.y += padding - self._b.y
        self._set_width( padding + self._b.width - self._b.x + padding)
        self._set_height(padding + self._b.height - self._b.y + padding + self.src["top"].height)
        self._pack()

    def update(self):
        # The close button is hidden for modal panels.
        self[1].hidden = self.modal

    def draw(self):
        # Draw corners, edges and face as a stretchable nine-patch.
        im1, im2, im3 = self.src["cap1"], self.src["cap2"], self.src["top"]
        im4, im5, im6 = self.src["cap3"], self.src["cap4"], self.src["bottom"]
        im7, im8, im9 = self.src["left"], self.src["right"], self.src["face"]
        image(im1, 0, self.height-im1.height)
        image(im2, self.width-im2.width, self.height-im2.height)
        image(im3, im1.width, self.height-im3.height, width=self.width-im1.width-im2.width)
        image(im4, 0, 0)
        image(im5, self.width-im5.width, 0)
        image(im6, im4.width, 0, width=self.width-im4.width-im5.width)
        image(im7, 0, im4.height, height=self.height-im1.height-im4.height)
        image(im8, self.width-im8.width, im4.height, height=self.height-im2.height-im5.height)
        image(im9, im4.width, im6.height, width=self.width-im7.width-im8.width, height=self.height-im3.height-im6.height)

    def on_mouse_enter(self, mouse):
        mouse.cursor = DEFAULT

    def on_mouse_press(self, mouse):
        # Panels can only be dragged by their header bar (unless fixed).
        self.dragged = not self.fixed and mouse.y > self.y+self.height-self.src["top"].height

    def on_mouse_drag(self, mouse):
        if self.dragged and not self.fixed:
            self.x += mouse.dx
            self.y += mouse.dy

    def open(self):
        self.hidden = False

    def close(self):
        self.hidden = True
#=====================================================================================================
#--- Editable ----------------------------------------------------------------------------------------
# At most one Editable has the keyboard focus (blinking cursor) at a time;
# it is tracked in the module-level EDITING, queried with editing().
EDITING = None
editing = lambda: EDITING

class Editable(Control):

    def __init__(self, value="", x=0, y=0, width=125, height=30, padding=(0,0), wrap=True, id=None, **kwargs):
        """ An editable text box.
            When clicked, it has the focus and can receive keyboard events.
            With wrap=True, several lines of text will wrap around the width.
            Optional parameters can include fill, font, fontsize, fontweight.
        """
        txt = Text(value or " ", **{
               "fill" : popdefault(kwargs, "fill", Color(0,0.9)),
               "font" : popdefault(kwargs, "font", theme["fontname"]),
           "fontsize" : popdefault(kwargs, "fontsize", theme["fontsize"]),
         "fontweight" : popdefault(kwargs, "fontweight", theme["fontweight"]),
         "lineheight" : 1,
              "align" : LEFT
        })
        kwargs["width"] = width
        kwargs["height"] = height
        Control.__init__(self, x=x, y=y, id=id, **kwargs)
        self._padding = padding
        self._i = 0 # Index of character on which the mouse is pressed.
        self._empty = value == "" and True or False
        # NOTE(review): IncrementalTextLayout and Caret are presumably the
        # pyglet text-layout classes, imported elsewhere in this file — confirm
        # against the file's import block.
        self._editor = IncrementalTextLayout(txt._label.document, width, height, multiline=wrap)
        self._editor.content_valign = wrap and "top" or "center"
        self._editor.selection_background_color = (170, 200, 230, 255)
        self._editor.selection_color = txt._label.color
        self._editor.caret = Caret(self._editor)
        self._editor.caret.visible = False
        self._editing = False # When True, cursor is blinking and text can be edited.
        Editable._pack(self) # On init, call Editable._pack(), not the derived Field._pack().

    def _pack(self):
        # Inset the editor by the horizontal and vertical padding.
        self._editor.x = self._padding[0]
        self._editor.y = self._padding[1]
        self._editor.width = max(0, self.width - self._padding[0] * 2)
        self._editor.height = max(0, self.height - self._padding[1] * 2)

    def _get_value(self):
        # IncrementalTextLayout in Pyglet 1.1.4 has a bug with the empty string.
        # We keep track of empty strings with Editable._empty to avoid the bug.
        return not self._empty and self._editor.document.text or u""
    def _set_value(self, string):
        # The underlying document always holds at least a single space
        # (see the Pyglet bug note above).
        self._editor.begin_update()
        self._editor.document.text = string or " "
        self._editor.end_update()
        self._empty = string == "" and True or False
    value = property(_get_value, _set_value)

    def _get_editing(self):
        return self._editing
    def _set_editing(self, b):
        self._editing = b
        self._editor.caret.visible = b
        global EDITING
        if b is False and EDITING == self:
            EDITING = None
        if b is True:
            EDITING = self
            # Cursor is blinking and text can be edited.
            # Visit all layers on the canvas.
            # Remove the caret from all other Editable controls.
            for layer in (self.root.canvas and self.root.canvas.layers or []):
                layer.traverse(visit=lambda layer: \
                    isinstance(layer, Editable) and layer != self and \
                        setattr(layer, "editing", False))
    editing = property(_get_editing, _set_editing)

    @property
    def selection(self):
        # Yields a (start, stop)-tuple with the indices of the current selected text.
        return (self._editor.selection_start,
                self._editor.selection_end)

    @property
    def selected(self):
        # Yields True when text is currently selected.
        return self.selection[0] != self.selection[1]

    @property
    def cursor(self):
        # Yields the index at the text cursor (caret).
        return self._editor.caret.position

    def index(self, x, y):
        """ Returns the index of the character in the text at position x, y.
        """
        x0, y0 = self.absolute_position()
        i = self._editor.get_position_from_point(x-x0, y-y0)
        if self._editor.get_point_from_position(0)[0] > x-x0: # Pyglet bug?
            i = 0
        if self._empty:
            i = 0
        return i

    def on_mouse_enter(self, mouse):
        # Show the text cursor while hovering.
        mouse.cursor = TEXT

    def on_mouse_press(self, mouse):
        # Clicking gives the control focus and moves the caret to the clicked character.
        i = self._i = self.index(mouse.x, mouse.y)
        self._editor.set_selection(0, 0)
        self.editing = True
        self._editor.caret.position = i
        Control.on_mouse_press(self, mouse)

    def on_mouse_release(self, mouse):
        if not self.dragged:
            self._editor.caret.position = self.index(mouse.x, mouse.y)
        Control.on_mouse_release(self, mouse)

    def on_mouse_drag(self, mouse):
        # Dragging extends the selection from the press index to the drag index.
        i = self.index(mouse.x, mouse.y)
        self._editor.selection_start = max(min(self._i, i), 0)
        self._editor.selection_end = min(max(self._i, i), len(self.value))
        self._editor.caret.visible = False
        Control.on_mouse_drag(self, mouse)

    def on_mouse_doubleclick(self, mouse):
        # Select the word at the mouse position.
        # Words are delimited by non-alphanumeric characters.
        i = self.index(mouse.x, mouse.y)
        delimiter = lambda ch: not (ch.isalpha() or ch.isdigit())
        if i < len(self.value) and delimiter(self.value[i]):
            # Clicked on a delimiter: select just that character.
            self._editor.set_selection(i, i+1)
        if i == len(self.value) and self.value != "" and delimiter(self.value[i-1]):
            # Clicked past the end, after a delimiter: select that delimiter.
            self._editor.set_selection(i-1, i)
        # Find the nearest delimiter left (a) and right (b) of index i,
        # then select the word in between.
        a = find(lambda (i,ch): delimiter(ch), enumerate(reversed(self.value[:i])))
        b = find(lambda (i,ch): delimiter(ch), enumerate(self.value[i:]))
        a = a and i-a[0] or 0
        b = b and i+b[0] or len(self.value)
        self._editor.set_selection(a, b)

    def on_key_press(self, key):
        if self._editing:
            self._editor.caret.visible = True
            i = self._editor.caret.position
            if key.code == LEFT:
                # The left arrow moves the text cursor to the left.
                self._editor.caret.position = max(i-1, 0)
            elif key.code == RIGHT:
                # The right arrow moves the text cursor to the right.
                self._editor.caret.position = min(i+1, len(self.value))
            elif key.code in (UP, DOWN):
                # The up arrows moves the text cursor to the previous line.
                # The down arrows moves the text cursor to the next line.
                y = key.code == UP and -1 or +1
                # NOTE(review): n is computed but unused.
                n = self._editor.get_line_count()
                i = self._editor.get_position_on_line(
                    max(self._editor.get_line_from_position(i)+y, 0),
                        self._editor.get_point_from_position(i)[0])
                self._editor.caret.position = i
            elif key.code == TAB:
                # The tab key navigates away from the control.
                self._editor.caret.position = 0
                self.editing = False
            elif key.code == ENTER:
                # The enter key executes on_action() and navigates away from the control.
                self._editor.caret.position = 0
                self.editing = False
                self.on_action()
            elif key.code == BACKSPACE and self.selected:
                # The backspace key removes the current text selection.
                self.value = self.value[:self.selection[0]] + self.value[self.selection[1]:]
                self._editor.caret.position = max(self.selection[0], 0)
            elif key.code == BACKSPACE and i > 0:
                # The backspace key removes the character before the text cursor.
                self.value = self.value[:i-1] + self.value[i:]
                self._editor.caret.position = max(i-1, 0)
            elif key.char:
                if self.selected:
                    # Typing replaces any text currently selected.
                    self.value = self.value[:self.selection[0]] + self.value[self.selection[1]:]
                    self._editor.caret.position = i = max(self.selection[0], 0)
                # Character input is inserted at the text cursor.
                self.value = self.value[:i] + key.char + self.value[i:]
                self._editor.caret.position = min(i+1, len(self.value))
            self._editor.set_selection(0, 0)

    def draw(self):
        self._editor.draw()
#--- Field -------------------------------------------------------------------------------------------
class Field(Editable):

    def __init__(self, value="", hint="", action=None, x=0, y=0, width=125, padding=5, id=None, **kwargs):
        """ A single-line text input field.
            The string value can be retrieved with Field.value.
        """
        Editable.__init__(self, value, x=x, y=y, width=width, padding=(padding,0), wrap=False, id=id, **kwargs)
        # Slice the theme image into a stretchable face and two fixed end caps.
        img = Image(theme["field"])
        cap = 10
        self.src = {
            "face" : crop(img, cap, 0, 1, img.height),
            "cap1" : crop(img, 0, 0, cap, img.height),
            "cap2" : crop(img, img.width - cap, 0, cap, img.height),
        }
        if action:
            # Replace Field.on_action() with the given callback.
            self.set_method(action, name="on_action")
        self.default = value
        # The hint label is shown while the field is empty and not being edited.
        self.append(Label(hint, fill=Color(0, 0.4)))
        self._pack()

    def _get_hint(self):
        return self[0].caption
    def _set_hint(self, string):
        self[0].caption = string
    hint = property(_get_hint, _set_hint)

    def reset(self):
        self.value = self.default

    def _pack(self):
        Editable._pack(self)
        # The field can not be smaller than its two end caps.
        self._set_width(max(self.width, self.src["cap1"].width + self.src["cap2"].width))
        self._set_height(self.src["face"].height)
        # Position the hint text (if no other text is in the field):
        self[0].x = self._padding[0]
        self[0]._set_height(self.height)
        self[0]._pack()

    def on_action(self):
        pass

    def update(self):
        # Hide the hint while editing, or once the field contains text.
        self[0].hidden = self.editing or self.value != ""

    def draw(self):
        cap1, cap2, face = self.src["cap1"], self.src["cap2"], self.src["face"]
        image(cap1, 0, 0, height=self.height)
        image(cap2, x=self.width - cap2.width, height=self.height)
        image(face, x=cap1.width, width=self.width - cap1.width - cap2.width, height=self.height)
        Editable.draw(self)
#=====================================================================================================
#--- Rulers ------------------------------------------------------------------------------------------
class Rulers(Control):

    def __init__(self, step=10, interval=5, crosshair=False, fill=(0,0,0,1)):
        """ A horizontal and vertical ruler displaying the width/height of the parent at intervals.
            A measurement line is drawn at each step (e.g. at 10 20 30...)
            A label with the value is drawn at each interval (e.g. 50 | | | | 100 | | | | 150).
        """
        Control.__init__(self, x=0, y=0)
        self.enabled = False     # Rulers are purely visual; pass events through.
        self.step = step
        self.interval = interval
        self.crosshair = crosshair
        self._fill = fill
        self._dirty = False      # True when step/interval changed and markers must be rebuilt.
        self._markers = {}       # Cached Text objects, keyed by measurement value.
        self._pack()

    def _get_step(self):
        return self._step
    def _set_step(self, v):
        self._step = round(v)
        self._dirty = True
    step = property(_get_step, _set_step)

    def _get_interval(self):
        return self._interval
    def _set_interval(self, v):
        self._interval = round(v)
        self._dirty = True
    interval = property(_get_interval, _set_interval)

    def _pack(self):
        # Cache Text objects for the measurement markers.
        # This happens whenever the canvas resizes, or the step or interval changes.
        # This will raise an error if the parent's width or height is None (infinite).
        p = self.parent or self.canvas
        if p and (self._dirty or self.width != p.width or self.height != p.height):
            self._dirty = False
            self._set_width(p.width)
            self._set_height(p.height)
            for i in range(int(round(max(self.width, self.height) / self._step))):
                if i % self._interval == 0:
                    # Only multiples of the interval get a numeric label.
                    self._markers.setdefault(i*self._step,
                        Text(str(int(round(i*self._step))),
                            fontname = theme["fontname"],
                            fontsize = theme["fontsize"] * 0.6,
                                fill = self._fill))

    def update(self):
        # Rebuild the marker cache when needed (cheap no-op otherwise).
        self._pack()

    def draw(self):
        length = 5
        # Draw the horizontal ruler: a tick at each step,
        # a longer tick plus a label at each interval.
        for i in range(1, int(round(self.height / self._step))):
            v, mark = i*self._step, i%self.interval==0
            line(0, v, mark and length*3 or length, v,
                 stroke = self._fill,
            strokewidth = 0.5)
            if mark:
                self._markers[v].draw(length*3-self._markers[v].metrics[0], v+2)
        # Draw the vertical ruler.
        for i in range(1, int(round(self.width / self._step))):
            v, mark = i*self._step, i%self.interval==0
            line(v, 0, v, mark and length*3 or length,
                 stroke = self._fill,
            strokewidth = 0.5)
            if mark:
                self._markers[v].draw(v+2, length*3-self._markers[v].fontsize)
        # Draw the crosshair at the current mouse position.
        if self.crosshair:
            line(0, self.canvas.mouse.y, self.width, self.canvas.mouse.y,
                 stroke = self._fill,
            strokewidth = 0.5,
            strokestyle = DOTTED)
            line(self.canvas.mouse.x, 0, self.canvas.mouse.x, self.height,
                 stroke = self._fill,
            strokewidth = 0.5,
            strokestyle = DOTTED)
#=====================================================================================================
#--- Layout ------------------------------------------------------------------------------------------
class Layout(Layer):

    def __init__(self, **kwargs):
        """ A group of controls with a specific layout.
            Controls can be added with Layout.append().
            The layout will be applied when Layout.apply() is called.
            This happens automatically if a layout is appended to a Panel.
        """
        # A layout always starts at the origin with zero size;
        # apply() is responsible for positioning and sizing.
        kwargs["x"] = kwargs["y"] = kwargs["width"] = kwargs["height"] = 0
        Layer.__init__(self, **kwargs)
        self._controls = {} # Lazy cache of (id, control)-children, see nested().

    def insert(self, i, control):
        # Nested layouts are applied as soon as they are added.
        if isinstance(control, Layout):
            control.apply()
        Layer.insert(self, i, control)

    def append(self, control):
        self.insert(len(self), control)

    def extend(self, controls):
        for control in controls:
            self.append(control)

    # Keyboard events are propagated to all child controls.
    def on_key_press(self, key):
        for control in self:
            control.on_key_press(key)

    def on_key_release(self, key):
        for control in self:
            control.on_key_release(key)

    def __getattr__(self, k):
        # Yields the property with the given name, or
        # yields the child control with the given id.
        try:
            return self.__dict__[k]
        except KeyError:
            pass
        ctrl = nested(self, k)
        if ctrl is None:
            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, k))
        return ctrl

    def apply(self, spacing=0):
        """ Adjusts the position and size of the controls to match the layout.
        """
        pass

    def __repr__(self):
        return "Layout(type=%s)" % repr(self.__class__.__name__.lower())

    # Debug mode:
    #def draw(self):
    #    rect(0, 0, self.width, self.height, fill=None, stroke=(1,1,1,0.5), strokestyle="dotted")
#--- Layout: Labeled ----------------------------------------------------------------------------------
class Labeled(Layout):

    def __init__(self, controls=[]):
        """ A layout where each control has an associated text label.
        """
        Layout.__init__(self)
        self.controls = [] # Child controls, in insertion order.
        self.captions = [] # Caption Label for each control (same order).
        self.extend(controls)

    def insert(self, i, control, caption=""):
        """ Inserts a new control to the layout, with an associated caption.
            Each control will be drawn in a new row.
        """
        self.controls.insert(i, control)
        self.captions.insert(i, Label(caption.upper(),
            fontsize = theme["fontsize"] * 0.8,
                fill = theme["text"].rgb+(theme["text"].a * 0.8,)))
        Layout.insert(self, i, self.controls[i])
        Layout.insert(self, i, self.captions[i])

    def append(self, control, caption=""):
        # Each row adds two layers (caption + control), so the row count is
        # len(self) // 2. Explicit floor division keeps this an int index
        # under Python 3 as well (identical result under Python 2).
        self.insert(len(self)//2, control, caption)

    def extend(self, controls):
        for control in controls:
            # Items can be a plain control or a (caption, control)-tuple.
            caption, control = isinstance(control, tuple) and control or ("", control)
            self.append(control, caption)

    def remove(self, control):
        self.pop(self.controls.index(control))

    def pop(self, i):
        # Remove both the caption and the control for row i.
        self.captions.pop(i)
        return self.controls.pop(i)
#--- Layout: Rows ------------------------------------------------------------------------------------
class Rows(Labeled):

    def __init__(self, controls=None, width=125):
        """ A layout where each control appears on a new line.
            Each control has an associated text caption, displayed to the left of the control.
            The given width defines the desired width for each control.
        """
        # Fix: avoid a mutable default argument; None means "no controls".
        Labeled.__init__(self, controls or [])
        self._maxwidth = width

    def apply(self, spacing=10):
        """ Adjusts the position and width of all the controls in the layout:
            - each control is placed next to its caption, with spacing in between,
            - each caption is aligned to the right, and centered vertically,
            - the width of all Label, Button, Slider, Field controls is evened out.
        """
        mw = self._maxwidth
        for control in self.controls:
            if isinstance(control, Layout):
                # Child containers in the layout can be wider than the desired width.
                # Adjusting mw here makes the other controls line out with the total
                # width; adjusting it afterwards would only ensure that the layout
                # itself is wide enough.
                mw = max(self._maxwidth, control.width)
        w1 = max([caption.width for caption in self.captions])
        w2 = max([control.width for control in self.controls])
        dx = 0
        dy = 0
        # Fix: list() is required on Python 3, where zip() returns a one-shot
        # iterator that reversed() cannot consume (TypeError).
        for caption, control in reversed(list(zip(self.captions, self.controls))):
            caption.x = dx + w1 - caption.width                       # halign right.
            control.x = dx + w1 + (w1 > 0 and spacing)
            caption.y = dy + 0.5 * (control.height - caption.height)  # valign center.
            control.y = dy
            if isinstance(control, Layout) and control.height > caption.height * 2:
                caption.y = dy + control.height - caption.height      # valign top.
            if isinstance(control, (Label, Button, Slider, Field)):
                control._set_width(mw)
                control._pack()
            dy += max(caption.height, control.height, 10) + spacing
        self.width = w1 + w2 + (w1 > 0 and spacing)
        self.height = dy - spacing
TOP, CENTER = "top", "center"
class Row(Labeled):

    def __init__(self, controls=None, width=125, align=CENTER):
        """ A layout where each control appears in a new column.
            Each control has an associated text caption, displayed on top of the control.
            The given width defines the desired width for each control.
        """
        # Fix: avoid a mutable default argument; None means "no controls".
        Labeled.__init__(self, controls or [])
        self._maxwidth = width
        self._align = align

    def apply(self, spacing=10):
        """ Adjusts the position and width of all the controls in the layout:
            - each control is placed centrally below its caption, with spacing in between,
            - the width of all Label, Button, Slider, Field controls is evened out.
        """
        mw = self._maxwidth
        # Vertical anchor factor: 1.0 anchors controls to the top of the row,
        # 0.5 centers them (conditional expression replaces the and/or idiom).
        da = 1.0 if self._align == TOP else 0.5
        h1 = max([control.height for control in self.controls])
        h2 = max([caption.height for caption in self.captions])
        dx = 0
        dy = 0
        for caption, control in zip(self.captions, self.controls):
            caption.x = dx + 0.5 * max(control.width - caption.width, 0)  # halign center
            control.x = dx + 0.5 * max(caption.width - control.width, 0)  # halign center
            caption.y = dy + h1 + (h2 > 0 and spacing)
            control.y = dy + da * (h1 - control.height)                   # valign center
            if isinstance(control, (Label, Button, Slider, Field)):
                control._set_width(mw)
                control._pack()
            dx += max(caption.width, control.width, 10) + spacing
        self.width = dx - spacing
        self.height = h1 + h2 + (h2 > 0 and spacing)
| est/nodebox-gl | gui/controls.py | Python | bsd-3-clause | 47,034 | [
"VisIt"
] | ba45741034e992b2fa5a183ffa1528e70341a0a288773cd1b8401ff1f4ae017d |
from os import path
import sys
import traceback
import cStringIO
import webbrowser
import base64
import urllib
import gzip
from pprint import pformat
from PodSix.Resource import *
from PodSix.Game import Game
from engine.NetMonitor import NetErrorException, NetDisconnectionException, NetBadVersionException
from engine.BuildFile import buildfile
class ExceptionHandler(Game, EventMonitor):
    """
    A minimal fallback 'game' shown after the main game crashes.

    Must be instantiated from inside an except block, since it reads the
    active exception via sys.exc_info(). It displays the error message and,
    on mouse click, opens a browser window — either to submit a gzip-encoded
    crash report or (for NetBadVersionException) to the download page.
    """
    def __init__(self):
        # Before anything, fetch the exception that was thrown and its value.
        exctype, value = sys.exc_info()[:2]
        # Now print the traceback out as per usual.
        traceback.print_exc()
        #sfx.LoadSound('splash')
        #sfx.PlaySound('splash')
        self.bgColor = (255, 255, 255)
        gfx.Caption('Infinite 8-bit Platformer')
        gfx.SetSize([640, 200])
        gfx.LoadFont("freaky_fonts_ca", 16.0 / gfx.width, "default")
        # By default don't open a browser window.
        self.destination = None
        # If this is not a known exception then we want the crashdump.
        if not exctype in [NetErrorException, NetDisconnectionException, NetBadVersionException]:
            value = "Argh, Infinite 8-Bit Platformer crashed! Click here to send us a crash-report so we can fix the bug. Thank you!"
            # Collect the traceback into a file-like object,
            # gzip-compressed to keep the report URL short.
            catcherror = cStringIO.StringIO()
            # Prepare a gzip filter to push everything through.
            zipout = gzip.GzipFile(fileobj=catcherror, mode="w")
            # Write the traceback through the gzip wrapper.
            traceback.print_exc(file=zipout)
            # Append the buildfile information to the report we are sending.
            zipout.write("\n\nBuild info JSON:\n" + pformat(buildfile.GetInfo()))
            zipout.close()
            # The crash report travels as a base64-encoded gzip blob in the query string.
            ziptrace = catcherror.getvalue()
            self.destination = "http://infiniteplatformer.com/feedback?" + urllib.urlencode({"trace": base64.b64encode(ziptrace)})
        if exctype == NetBadVersionException:
            self.message = gfx.WrapText("A new version of the game is available! Click here to visit http://infiniteplatformer.com/download to get the latest version.", 0.8)
            self.destination = "http://infiniteplatformer.com/download"
            self.face = pygame.image.load(path.join(*["resources", "icons", "happy-invert.png"]))
        else:
            self.message = gfx.WrapText(str(value), 0.8)
            self.face = pygame.image.load(path.join(*["resources", "icons", "sad-invert.png"]))
        Game.__init__(self)
        EventMonitor.__init__(self)

    def Pump(self):
        # Forward event pumping to both base classes.
        Game.Pump(self)
        EventMonitor.Pump(self)

    def Run(self):
        # Draw the face icon and each word-wrapped message line, then flip the buffer.
        gfx.screen.blit(self.face, [16, 16])
        for l in range(len(self.message)):
            gfx.DrawText(self.message[l], pos={"left": 0.1, "top": 0.05 + 0.05 * l}, color=[255, 255, 255])
        Game.Run(self)
        gfx.Flip()

    def KeyDown(self, e):
        # Any key dismisses the error screen.
        self.Quit()

    def MouseDown(self, e):
        # Clicking opens the crash-report/download URL (if any), then quits.
        if self.destination:
            webbrowser.open(self.destination)
        self.Quit()
| chr15m/Infinite8BitPlatformer | engine/ExceptionHandler.py | Python | gpl-3.0 | 2,783 | [
"VisIt"
] | 373a906510d9d89612b2a85f84a541fba6e06eb2c8b133ed139a0b037f1859ac |
# -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(inlcuding Shibboleth support)
"""
import json
import logging
import re
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from organizations.models import Organization
from social_core.backends.base import BaseAuth
from social_core.backends.oauth import OAuthAuth
from social_core.backends.saml import SAMLAuth
from social_core.exceptions import SocialAuthBaseException
from social_core.utils import module_member
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers import get_current_request
from openedx.core.lib.hash_utils import create_hash256
from .lti import LTI_PARAMS_KEY, LTIAuthBackend
from .saml import STANDARD_SAML_PROVIDER_KEY, get_saml_idp_choices, get_saml_idp_class
log = logging.getLogger(__name__)
# Provider-supplied detail keys that must not be copied verbatim into the
# registration form data (they are populated separately in
# ProviderConfig.get_register_form_data).
REGISTRATION_FORM_FIELD_BLACKLIST = ['name', 'username']
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
    """ Load the list of python-social-auth backend classes from Django settings """
    # Generator: resolve each dotted path and yield only the classes that
    # derive from the requested base class.
    for dotted_path in settings.AUTHENTICATION_BACKENDS:
        backend = module_member(dotted_path)
        if issubclass(backend, base_class):
            yield backend
# Mapping of backend name -> backend class for every configured PSA backend.
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
# Names of the OAuth1/OAuth2-based backends only.
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
# Names of the SAML-based backends only.
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
# Names of the LTI backends only.
_LTI_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(LTIAuthBackend)]
def clean_json(value, of_type):
    """ Simple helper method to parse and clean JSON """
    stripped = value.strip()
    if not stripped:
        # Nothing entered: normalize to an empty instance of the expected type.
        return json.dumps(of_type())
    try:
        parsed = json.loads(value)
    except ValueError as err:
        raise ValidationError(u"Invalid JSON: {}".format(err))
    if not isinstance(parsed, of_type):
        raise ValidationError(u"Expected a JSON {}".format(of_type))
    # Re-serialize so stored JSON is consistently pretty-printed.
    return json.dumps(parsed, indent=4)
def clean_username(username=''):
    """ Simple helper method to ensure a username is compatible with our system requirements. """
    # Collapse every run of disallowed characters (anything that is not a
    # word character or a hyphen) into a single underscore, then truncate.
    sanitized = re.sub(r'[^-\w]+', '_', username)
    return sanitized[:30]
class AuthNotConfigured(SocialAuthBaseException):
    """ Exception when SAMLProviderData or other required info is missing """

    def __init__(self, provider_name):
        super(AuthNotConfigured, self).__init__()
        self.provider_name = provider_name

    def __str__(self):
        # Localized, user-facing message naming the unavailable provider.
        message = _('Authentication with {} is currently unavailable.')
        return message.format(self.provider_name)
class ProviderConfig(ConfigurationModel):
    """
    Abstract Base Class for configuring a third_party_auth provider

    .. no_pii:
    """
    # Configurations are keyed (and versioned by ConfigurationModel) on the slug.
    KEY_FIELDS = ('slug',)
    icon_class = models.CharField(
        max_length=50,
        blank=True,
        default=u'fa-sign-in',
        help_text=(
            u'The Font Awesome (or custom) icon class to use on the login button for this provider. '
            'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
        ),
    )
    # We use a FileField instead of an ImageField here because ImageField
    # doesn't support SVG. This means we don't get any image validation, but
    # that should be fine because only trusted users should be uploading these
    # anyway.
    icon_image = models.FileField(
        blank=True,
        help_text=(
            u'If there is no Font Awesome icon available for this provider, upload a custom image. '
            'SVG images are recommended as they can scale to any size.'
        ),
    )
    name = models.CharField(max_length=50, blank=False, help_text=u"Name of this provider (shown to users)")
    slug = models.SlugField(
        max_length=30, db_index=True, default=u'default',
        help_text=(
            u'A short string uniquely identifying this provider. '
            'Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"'
        ))
    secondary = models.BooleanField(
        default=False,
        help_text=_(
            'Secondary providers are displayed less prominently, '
            'in a separate list of "Institution" login providers.'
        ),
    )
    organization = models.ForeignKey(
        Organization,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        help_text=_(
            'optional. If this provider is an Organization, this attribute '
            'can be used reference users in that Organization'
        )
    )
    site = models.ForeignKey(
        Site,
        default=settings.SITE_ID,
        related_name='%(class)ss',
        help_text=_(
            'The Site that this provider configuration belongs to.'
        ),
        on_delete=models.CASCADE,
    )
    skip_hinted_login_dialog = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users that visit a \"TPA hinted\" URL for this provider "
            "(e.g. a URL ending with `?tpa_hint=[provider_name]`) will be forwarded directly to "
            "the login URL of the provider instead of being first prompted with a login dialog."
        ),
    )
    skip_registration_form = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users will not be asked to confirm their details "
            "(name, email, etc.) during the registration process. Only select this option "
            "for trusted providers that are known to provide accurate user information."
        ),
    )
    skip_email_verification = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will not be required to confirm their "
            "email, and their account will be activated immediately upon registration."
        ),
    )
    send_welcome_email = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will be sent a welcome email upon registration."
        ),
    )
    visible = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is not selected, users will not be presented with the provider "
            "as an option to authenticate with on the login screen, but manual "
            "authentication using the correct link is still possible."
        ),
    )
    max_session_length = models.PositiveIntegerField(
        null=True,
        blank=True,
        default=None,
        verbose_name=u'Max session length (seconds)',
        help_text=_(
            "If this option is set, then users logging in using this SSO provider will have "
            "their session length limited to no longer than this value. If set to 0 (zero), "
            "the session will expire upon the user closing their browser. If left blank, the "
            "Django platform session default length will be used."
        )
    )
    send_to_registration_first = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will be directed to the registration page "
            "immediately after authenticating with the third party instead of the login page."
        ),
    )
    sync_learner_profile_data = models.BooleanField(
        default=False,
        help_text=_(
            "Synchronize user profile data received from the identity provider with the edX user "
            "account on each SSO login. The user will be notified if the email address associated "
            "with their account is changed as a part of this synchronization."
        )
    )
    enable_sso_id_verification = models.BooleanField(
        default=False,
        help_text=u"Use the presence of a profile from a trusted third party as proof of identity verification.",
    )
    # Class-level configuration for concrete subclasses:
    prefix = None  # used for provider_id. Set to a string value in subclass
    backend_name = None  # Set to a field or fixed value in subclass
    accepts_logins = True  # Whether to display a sign-in button when the provider is enabled
    # "enabled" field is inherited from ConfigurationModel

    class Meta(object):
        app_label = "third_party_auth"
        abstract = True

    def clean(self):
        """ Ensure that either `icon_class` or `icon_image` is set """
        super(ProviderConfig, self).clean()
        # Exactly one of the two icon fields must be provided (XOR).
        if bool(self.icon_class) == bool(self.icon_image):
            raise ValidationError('Either an icon class or an icon image must be given (but not both)')

    @property
    def provider_id(self):
        """ Unique string key identifying this provider. Must be URL and css class friendly. """
        assert self.prefix is not None
        # e.g. "saml-default" or "oa2-google-oauth2": prefix plus the key field values.
        return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))

    @property
    def backend_class(self):
        """ Get the python-social-auth backend class used for this provider """
        return _PSA_BACKENDS[self.backend_name]

    @property
    def full_class_name(self):
        """ Get the fully qualified class name of this provider. """
        return '{}.{}'.format(self.__module__, self.__class__.__name__)

    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {}

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        return self.backend_name == pipeline['backend']

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        return self.backend_name == social_auth.provider

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        # This is generally the same thing as the UID, expect when one backend is used for multiple providers
        assert self.match_social_auth(social_auth)
        return social_auth.uid

    def get_social_auth_uid(self, remote_id):
        """
        Return the uid in social auth.

        This is default implementation. Subclass may override with a different one.
        """
        return remote_id

    @classmethod
    def get_register_form_data(cls, pipeline_kwargs):
        """Gets dict of data to display on the register form.

        register_user uses this to populate
        the new account creation form with values supplied by the user's chosen
        provider, preventing duplicate data entry.

        Args:
            pipeline_kwargs: dict of string -> object. Keyword arguments
                accumulated by the pipeline thus far.

        Returns:
            Dict of string -> string. Keys are names of form fields; values are
            values for that field. Where there is no value, the empty string
            must be used.
        """
        registration_form_data = {}
        # Details about the user sent back from the provider.
        details = pipeline_kwargs.get('details').copy()
        # Set the registration form to use the `fullname` detail for the `name` field.
        registration_form_data['name'] = details.get('fullname', '')
        # Get the username separately to take advantage of the de-duping logic
        # built into the pipeline. The provider cannot de-dupe because it can't
        # check the state of taken usernames in our system. Note that there is
        # technically a data race between the creation of this value and the
        # creation of the user object, so it is still possible for users to get
        # an error on submit.
        registration_form_data['username'] = clean_username(pipeline_kwargs.get('username') or '')
        # Any other values that are present in the details dict should be copied
        # into the registration form details. This may include details that do
        # not map to a value that exists in the registration form. However,
        # because the fields that are actually rendered are not based on this
        # list, only those values that map to a valid registration form field
        # will actually be sent to the form as default values.
        for blacklisted_field in REGISTRATION_FORM_FIELD_BLACKLIST:
            details.pop(blacklisted_field, None)
        registration_form_data.update(details)
        return registration_form_data

    def get_authentication_backend(self):
        """Gets associated Django settings.AUTHENTICATION_BACKEND string."""
        return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)

    @property
    def display_for_login(self):
        """
        Determines whether the provider ought to be shown as an option with
        which to authenticate on the login screen, registration screen, and elsewhere.
        """
        return bool(self.enabled_for_current_site and self.accepts_logins and self.visible)

    @property
    def enabled_for_current_site(self):
        """
        Determines if the provider is able to be used with the current site.

        Appsembler: We will skip same verification if the backend is Auth0.
        Auth0 will handle this for us.
        """
        if self.is_tahoe_auth0_backend():
            # Tahoe: Share one backend between all Tahoe sites.
            return self.is_auth0_enabled()
        # Tahoe: Normal upstream-logic for other backends.
        return self.enabled and self.site_id == Site.objects.get_current(get_current_request()).id

    def is_tahoe_auth0_backend(self):
        """
        Check if `tahoe-auth0` backend in use to enable spacial handling.
        """
        return self.backend_name == "tahoe-auth0"

    def is_auth0_enabled(self):
        """ Returns True when this is the tahoe-auth0 backend and the
            tahoe_auth0 site-configuration helper reports it enabled.
        """
        is_auth0_enabled = False
        if self.is_tahoe_auth0_backend():
            # Tahoe: Local imports to avoid circular import errors.
            from openedx.core.djangoapps.site_configuration import tahoe_auth0_helpers
            is_auth0_enabled = tahoe_auth0_helpers.is_tahoe_auth0_enabled()
        return is_auth0_enabled
class OAuth2ProviderConfig(ProviderConfig):
    """
    Configuration Entry for an OAuth2 based provider.
    Also works for OAuth1 providers.

    .. no_pii:
    """
    # We are keying the provider config by backend_name here as suggested in the python social
    # auth documentation. In order to reuse a backend for a second provider, a subclass can be
    # created with seperate name.
    # example:
    #     class SecondOpenIDProvider(OpenIDAuth):
    #         name = "second-openId-provider"
    KEY_FIELDS = ('backend_name',)
    prefix = 'oa2'
    backend_name = models.CharField(
        max_length=50, blank=False, db_index=True,
        help_text=(
            u"Which python-social-auth OAuth2 provider backend to use. "
            "The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
            # To be precise, it's set by AUTHENTICATION_BACKENDS
            # which production.py sets from THIRD_PARTY_AUTH_BACKENDS
        )
    )
    key = models.TextField(blank=True, verbose_name=u"Client ID")
    secret = models.TextField(
        blank=True,
        verbose_name=u"Client Secret",
        help_text=(
            u'For increased security, you can avoid storing this in your database by leaving '
            ' this field blank and setting '
            'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '
            'in your instance\'s Django settings (or lms.auth.json)'
        )
    )
    other_settings = models.TextField(blank=True, help_text=u"Optional JSON object with advanced settings, if any.")

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"Provider Configuration (OAuth)"
        verbose_name_plural = verbose_name

    def clean(self):
        """ Standardize and validate fields """
        super(OAuth2ProviderConfig, self).clean()
        # Normalize the advanced-settings JSON (pretty-print, must be an object).
        self.other_settings = clean_json(self.other_settings, dict)

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        if name == "KEY":
            return self.key
        if name == "SECRET":
            if self.secret:
                return self.secret
            # To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
            return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
        if self.other_settings:
            other_settings = json.loads(self.other_settings)
            assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
            return other_settings[name]
        raise KeyError
class SAMLConfiguration(ConfigurationModel):
    """
    General configuration required for this edX instance to act as a SAML
    Service Provider and allow users to authenticate via third party SAML
    Identity Providers (IdPs)

    .. no_pii:
    """
    # Configurations are versioned per (site, slug) pair.
    KEY_FIELDS = ('site_id', 'slug')
    site = models.ForeignKey(
        Site,
        default=settings.SITE_ID,
        related_name='%(class)ss',
        help_text=_(
            'The Site that this SAML configuration belongs to.'
        ),
        on_delete=models.CASCADE,
    )
    slug = models.SlugField(
        max_length=30,
        default=u'default',
        help_text=(
            u'A short string uniquely identifying this configuration. '
            'Cannot contain spaces. Examples: "ubc", "mit-staging"'
        ),
    )
    private_key = models.TextField(
        help_text=(
            u'To generate a key pair as two files, run '
            '"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
            'Paste the contents of saml.key here. '
            'For increased security, you can avoid storing this in your database by leaving '
            'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
            'in your instance\'s Django settings (or lms.auth.json).'
        ),
        blank=True,
    )
    public_key = models.TextField(
        help_text=(
            u'Public key certificate. '
            'For increased security, you can avoid storing this in your database by leaving '
            'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
            'in your instance\'s Django settings (or lms.auth.json).'
        ),
        blank=True,
    )
    entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name=u"Entity ID")
    org_info_str = models.TextField(
        verbose_name=u"Organization Info",
        default=u'{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
        help_text=u"JSON dictionary of 'url', 'displayname', and 'name' for each language",
    )
    other_config_str = models.TextField(
        default=u'{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
        help_text=(
            u"JSON object defining advanced settings that are passed on to python-saml. "
            "Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
        ),
    )

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"SAML Configuration"
        verbose_name_plural = verbose_name

    def __str__(self):
        """
        Return human-readable string representation.
        """
        return u"SAMLConfiguration {site}: {slug} on {date:%Y-%m-%d %H:%M:%S}".format(
            site=self.site.name,
            slug=self.slug,
            date=self.change_date,
        )

    def clean(self):
        """ Standardize and validate fields """
        super(SAMLConfiguration, self).clean()
        self.org_info_str = clean_json(self.org_info_str, dict)
        self.other_config_str = clean_json(self.other_config_str, dict)
        # Strip PEM armor lines so only the base64 key/certificate body is stored.
        self.private_key = (
            self.private_key
            .replace("-----BEGIN RSA PRIVATE KEY-----", "")
            .replace("-----BEGIN PRIVATE KEY-----", "")
            .replace("-----END RSA PRIVATE KEY-----", "")
            .replace("-----END PRIVATE KEY-----", "")
            .strip()
        )
        self.public_key = (
            self.public_key
            .replace("-----BEGIN CERTIFICATE-----", "")
            .replace("-----END CERTIFICATE-----", "")
            .strip()
        )

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        default_saml_contact = {
            # Default contact information to put into the SAML metadata that gets generated by python-saml.
            "givenName": _(u"{platform_name} Support").format(
                platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
            ),
            "emailAddress": configuration_helpers.get_value('TECH_SUPPORT_EMAIL', settings.TECH_SUPPORT_EMAIL),
        }
        if name == "ORG_INFO":
            return json.loads(self.org_info_str)
        if name == "SP_ENTITY_ID":
            return self.entity_id
        if name == "SP_PUBLIC_CERT":
            if self.public_key:
                return self.public_key
            # To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
            if self.slug == 'default':
                return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
            else:
                # Non-default configurations read their certs from a per-slug dict setting.
                public_certs = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT', {})
                return public_certs.get(self.slug, '')
        if name == "SP_PRIVATE_KEY":
            if self.private_key:
                return self.private_key
            # To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
            if self.slug == 'default':
                return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
            else:
                private_keys = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT', {})
                return private_keys.get(self.slug, '')
        other_config = {
            # These defaults can be overriden by self.other_config_str
            "GET_ALL_EXTRA_DATA": True,  # Save all attribute values the IdP sends into the UserSocialAuth table
            "TECHNICAL_CONTACT": default_saml_contact,
            "SUPPORT_CONTACT": default_saml_contact,
        }
        other_config.update(json.loads(self.other_config_str))
        return other_config[name]  # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
def active_saml_configurations_filter():
    """
    Returns a mapping to be used for the SAMLProviderConfig to limit the SAMLConfiguration choices to the current set.
    """
    # Restrict choices to the ids of the currently-active configuration rows.
    current_ids = SAMLConfiguration.objects.current_set().values_list('id', flat=True)
    return {'id__in': current_ids}
class SAMLProviderConfig(ProviderConfig):
"""
Configuration Entry for a SAML/Shibboleth provider.
.. no_pii:
"""
prefix = 'saml'
backend_name = models.CharField(
max_length=50, default=u'tpa-saml', blank=False,
help_text=u"Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
entity_id = models.CharField(
max_length=255, verbose_name=u"Entity ID", help_text=u"Example: https://idp.testshib.org/idp/shibboleth")
metadata_source = models.CharField(
max_length=255,
help_text=(
u"URL to this provider's XML metadata. Should be an HTTPS URL. "
"Example: https://www.testshib.org/metadata/testshib-providers.xml"
))
attr_user_permanent_id = models.CharField(
max_length=128, blank=True, verbose_name=u"User ID Attribute",
help_text=(
u"URN of the SAML attribute that we can use as a unique, "
"persistent user ID. Leave blank for default."
))
attr_full_name = models.CharField(
max_length=128, blank=True, verbose_name=u"Full Name Attribute",
help_text=u"URN of SAML attribute containing the user's full name. Leave blank for default.")
default_full_name = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Full Name",
help_text=u"Default value for full name to be used if not present in SAML response.")
attr_first_name = models.CharField(
max_length=128, blank=True, verbose_name=u"First Name Attribute",
help_text=u"URN of SAML attribute containing the user's first name. Leave blank for default.")
default_first_name = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for First Name",
help_text=u"Default value for first name to be used if not present in SAML response.")
attr_last_name = models.CharField(
max_length=128, blank=True, verbose_name=u"Last Name Attribute",
help_text=u"URN of SAML attribute containing the user's last name. Leave blank for default.")
default_last_name = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Last Name",
help_text=u"Default value for last name to be used if not present in SAML response.")
attr_username = models.CharField(
max_length=128, blank=True, verbose_name=u"Username Hint Attribute",
help_text=u"URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
default_username = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Username",
help_text=u"Default value for username to be used if not present in SAML response.")
attr_email = models.CharField(
max_length=128, blank=True, verbose_name=u"Email Attribute",
help_text=u"URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
default_email = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Email",
help_text=u"Default value for email to be used if not present in SAML response.")
automatic_refresh_enabled = models.BooleanField(
default=True, verbose_name=u"Enable automatic metadata refresh",
help_text=u"When checked, the SAML provider's metadata will be included "
"in the automatic refresh job, if configured."
)
identity_provider_type = models.CharField(
max_length=128, blank=False, verbose_name=u"Identity Provider Type", default=STANDARD_SAML_PROVIDER_KEY,
choices=get_saml_idp_choices(), help_text=(
u"Some SAML providers require special behavior. For example, SAP SuccessFactors SAML providers require an "
"additional API call to retrieve user metadata not provided in the SAML response. Select the provider type "
"which best matches your use case. If in doubt, choose the Standard SAML Provider type."
)
)
debug_mode = models.BooleanField(
default=False, verbose_name=u"Debug Mode",
help_text=(
u"In debug mode, all SAML XML requests and responses will be logged. "
"This is helpful for testing/setup but should always be disabled before users start using this provider."
),
)
other_settings = models.TextField(
verbose_name=u"Advanced settings", blank=True,
help_text=(
u'For advanced use cases, enter a JSON object with addtional configuration. '
'The tpa-saml backend supports {"requiredEntitlements": ["urn:..."]}, '
'which can be used to require the presence of a specific eduPersonEntitlement, '
'and {"extra_field_definitions": [{"name": "...", "urn": "..."},...]}, which can be '
'used to define registration form fields and the URNs that can be used to retrieve '
'the relevant values from the SAML response. Custom provider types, as selected '
'in the "Identity Provider Type" field, may make use of the information stored '
'in this field for additional configuration.'
))
archived = models.BooleanField(default=False)
saml_configuration = models.ForeignKey(
SAMLConfiguration,
on_delete=models.SET_NULL,
limit_choices_to=active_saml_configurations_filter,
null=True,
blank=True,
)
def clean(self):
""" Standardize and validate fields """
super(SAMLProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
class Meta(object):
app_label = "third_party_auth"
verbose_name = u"Provider Configuration (SAML IdP)"
verbose_name_plural = "Provider Configuration (SAML IdPs)"
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {'idp': self.slug}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend'] and self.slug == pipeline['kwargs']['response']['idp_name']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.slug + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_remote_id_from_social_auth(self, social_auth):
    """Given a UserSocialAuth object, return the remote ID used by this provider."""
    assert self.match_social_auth(social_auth)
    # The uid is "<slug>:<remote_id>"; drop the slug and the separating colon.
    prefix_length = len(self.slug) + 1
    return social_auth.uid[prefix_length:]
def get_social_auth_uid(self, remote_id):
    """Build the social-auth uid by prefixing *remote_id* with this provider's slug."""
    return '%s:%s' % (self.slug, remote_id)
def get_setting(self, name):
    """Return the value of setting *name* from ``other_settings``.

    Raises KeyError when no settings are stored or *name* is absent.
    """
    if self.other_settings:
        other_settings = json.loads(self.other_settings)
        return other_settings[name]
    # Include the missing key in the exception instead of a bare KeyError,
    # so callers/logs can see which setting was requested.
    raise KeyError(name)
def get_config(self):
    """
    Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.

    Essentially this just returns the values of this object and its
    associated 'SAMLProviderData' entry.

    Raises AuthNotConfigured when no valid SAMLProviderData has been
    fetched for this provider's entity_id.
    """
    # Start from the operator-supplied JSON blob, if any.
    if self.other_settings:
        conf = json.loads(self.other_settings)
    else:
        conf = {}
    attrs = (
        'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
        'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
    # Maps each overridable SAML attribute to the model field holding its
    # fallback value for when the attribute is missing from the response.
    attr_defaults = {
        'attr_full_name': 'default_full_name',
        'attr_first_name': 'default_first_name',
        'attr_last_name': 'default_last_name',
        'attr_username': 'default_username',
        'attr_email': 'default_email',
    }

    # Defaults for missing attributes in SAML Response
    conf['attr_defaults'] = {}
    for field in attrs:
        field_name = attr_defaults.get(field)
        val = getattr(self, field)
        if val:
            conf[field] = val
        # Default values for SAML attributes
        default = getattr(self, field_name) if field_name else None
        conf['attr_defaults'][field] = default

    # Now get the data fetched automatically from the metadata.xml:
    data = SAMLProviderData.current(self.entity_id)
    if not data or not data.is_valid():
        log.error(
            'No SAMLProviderData found for provider "%s" with entity id "%s" and IdP slug "%s". '
            'Run "manage.py saml pull" to fix or debug.',
            self.name, self.entity_id, self.slug
        )
        raise AuthNotConfigured(provider_name=self.name)
    conf['x509cert'] = data.public_key
    conf['url'] = data.sso_url

    # Add SAMLConfiguration appropriate for this IdP
    conf['saml_sp_configuration'] = (
        self.saml_configuration or
        SAMLConfiguration.current(self.site.id, 'default')
    )
    # The concrete SAMLIdentityProvider subclass depends on the configured
    # provider type (see get_saml_idp_class).
    idp_class = get_saml_idp_class(self.identity_provider_type)

    return idp_class(self.slug, **conf)
class SAMLProviderData(models.Model):
    """
    Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'

    This data is only required during the actual authentication process.

    .. no_pii:
    """
    # How long (seconds) results of current() stay in the Django cache.
    cache_timeout = 600

    fetched_at = models.DateTimeField(db_index=True, null=False)  # when this metadata was pulled
    expires_at = models.DateTimeField(db_index=True, null=True)  # optional expiry for this metadata

    entity_id = models.CharField(max_length=255, db_index=True)  # This is the key for lookups in this table
    sso_url = models.URLField(verbose_name=u"SSO URL")
    public_key = models.TextField()

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"SAML Provider Data"
        verbose_name_plural = verbose_name
        ordering = ('-fetched_at', )

    def is_valid(self):
        """ Is this data valid?

        Expired metadata is never valid; otherwise all three of entity_id,
        sso_url and public_key must be non-empty.
        """
        if self.expires_at and timezone.now() > self.expires_at:
            return False
        return bool(self.entity_id and self.sso_url and self.public_key)
    is_valid.boolean = True  # render as a boolean icon in the Django admin

    @classmethod
    def cache_key_name(cls, entity_id):
        """ Return the name of the key to use to cache the current data """
        return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)

    @classmethod
    def current(cls, entity_id):
        """
        Return the active data entry, if any, otherwise None

        The result is cached for ``cache_timeout`` seconds; the most
        recently fetched row for *entity_id* wins.
        """
        cached = cache.get(cls.cache_key_name(entity_id))
        if cached is not None:
            return cached

        try:
            current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
        except IndexError:
            current = None

        cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
        return current
class LTIProviderConfig(ProviderConfig):
    """
    Configuration required for this edX instance to act as a LTI
    Tool Provider and allow users to authenticate and be enrolled in a
    course via third party LTI Tool Consumers.

    .. no_pii:
    """
    prefix = 'lti'
    backend_name = 'lti'

    # This provider is not visible to users
    icon_class = None
    icon_image = None
    secondary = False

    # LTI login cannot be initiated by the tool provider
    accepts_logins = False

    KEY_FIELDS = ('lti_consumer_key', )

    lti_consumer_key = models.CharField(
        max_length=255,
        help_text=(
            u'The name that the LTI Tool Consumer will use to identify itself'
        )
    )

    lti_hostname = models.CharField(
        default=u'localhost',
        max_length=255,
        help_text=(
            u'The domain that will be acting as the LTI consumer.'
        ),
        db_index=True
    )

    lti_consumer_secret = models.CharField(
        default=create_hash256,
        max_length=255,
        help_text=(
            # Fixed typo: "setttigs" -> "settings".
            u'The shared secret that the LTI Tool Consumer will use to '
            'authenticate requests. Only this edX instance and this '
            'tool consumer instance should know this value. '
            'For increased security, you can avoid storing this in '
            'your database by leaving this field blank and setting '
            'SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} '
            'in your instance\'s Django settings (or lms.auth.json)'
        ),
        blank=True,
    )

    lti_max_timestamp_age = models.IntegerField(
        default=10,
        help_text=(
            u'The maximum age of oauth_timestamp values, in seconds.'
        )
    )

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        # UIDs for LTI are stored as "<consumer_key>:<remote_id>".
        prefix = self.lti_consumer_key + ":"
        return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        assert self.match_social_auth(social_auth)
        # Remove the prefix from the UID
        return social_auth.uid[len(self.lti_consumer_key) + 1:]

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        try:
            return (
                self.backend_name == pipeline['backend'] and
                self.lti_consumer_key == pipeline['kwargs']['response'][LTI_PARAMS_KEY]['oauth_consumer_key']
            )
        except KeyError:
            # Pipeline data without LTI params can never match this provider.
            return False

    def get_lti_consumer_secret(self):
        """ If the LTI consumer secret is not stored in the database, check Django settings instead """
        if self.lti_consumer_secret:
            return self.lti_consumer_secret
        return getattr(settings, 'SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {}).get(self.lti_consumer_key, '')

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"Provider Configuration (LTI)"
        verbose_name_plural = verbose_name
| appsembler/edx-platform | common/djangoapps/third_party_auth/models.py | Python | agpl-3.0 | 37,862 | [
"VisIt"
] | a6d5307072333f2c8b280cf09fe4c662eff5352a638e22f6355c2f19ac3c5a98 |
# SVI for a mixture of 3 Gaussians in 2d
# https://github.com/brendanhasz/svi-gaussian-mixture-model/blob/master/BayesianGaussianMixtureModel.ipynb

import superimport

import os

# FIX: numpy and matplotlib were imported twice; the duplicates are removed.
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

import svi_gmm_model_tfp as GMM

figdir = "../figures"


def save_fig(fname):
    """Save the current matplotlib figure under ``figdir``."""
    plt.savefig(os.path.join(figdir, fname))


# Random seed (reproducibility of both the synthetic data and inference)
np.random.seed(12345)
tf.random.set_seed(12345)

# Generate some data: 3000 standard-normal points shifted onto 3 cluster centers
N = 3000
X = np.random.randn(N, 2).astype('float32')
X[:1000, :] += [2, 0]
X[1000:2000, :] -= [2, 4]
X[2000:, :] += [-2, 4]

# Plot the data
plt.figure()
plt.plot(X[:, 0], X[:, 1], '.')
plt.axis('equal')
save_fig('svi_gmm_2d_data.pdf')
plt.show()

# Make a TensorFlow Dataset from that data
batch_size = 500
dataset = tf.data.Dataset.from_tensor_slices(
    (X)).shuffle(10000).batch(batch_size)

# A GMM with 3 components in 2 dimensions
model = GMM.GaussianMixtureModel(3, 2)
nepochs = 1000
model.fit(dataset, N, nepochs)

# Compute log likelihood at each point on a grid
Np = 100  # number of grid points
Xp, Yp = np.meshgrid(np.linspace(-6, 6, Np), np.linspace(-6, 6, Np))
Pp = np.column_stack([Xp.flatten(), Yp.flatten()]).astype('float32')
Z, _ = model(Pp, sampling=False)
Z = np.reshape(Z, (Np, Np))

# Show the fit mixture density
plt.figure()
plt.imshow(np.exp(Z),
           extent=(-6, 6, -6, 6),
           origin='lower')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Likelihood')
save_fig('svi_gmm_2d_fit.pdf')
plt.show()

# Sample from the mean variational posterior
means = tfd.Normal(model.locs, model.scales).sample(10000)

# Plot the mean samples for a single component (component 0)
plt.figure()
sns.kdeplot(means[:, 0, 0].numpy(),
            means[:, 0, 1].numpy(),
            n_levels=10)
save_fig('svi_gmm_2d_post_mean_comp0.pdf')
plt.show()
| probml/pyprobml | scripts/svi_gmm_demo_2d_tfp.py | Python | mit | 1,943 | [
"Gaussian"
] | 7975572afd49fd8e50fbe54e537396f352117ee509ac454894cf0e0c4ca17968 |
#!/usr/bin/env python
"""
@package coverage_model.persistence_helpers
@file coverage_model/persistence_helpers.py
@author Christopher Mueller
@brief Helper functions and classes for the PersistenceLayer
"""
from pyon.core.interceptor.encode import encode_ion, decode_ion
from ooi.logging import log
from coverage_model.basic_types import Dictable
from coverage_model import utils
from coverage_model.metadata import MetadataManager
from coverage_model.data_span import SpanStats, SpanStatsCollection
from coverage_model.address import BrickAddress
import os
import h5py
from coverage_model.hdf_utils import HDFLockingFile
import msgpack
import numpy as np
def pack(payload):
    """msgpack-serialize *payload* and escape \\x01/\\x00 bytes.

    The escaping (\\x01 -> \\x01\\x02, then \\x00 -> \\x01\\x01) guarantees
    the result contains no NUL bytes; unpack() reverses it.
    """
    packed = msgpack.packb(payload, default=encode_ion)
    packed = packed.replace('\x01', '\x01\x02')
    return packed.replace('\x00', '\x01\x01')
def unpack(msg):
    """Reverse pack(): unescape the byte stream, then msgpack-deserialize it."""
    unescaped = msg.replace('\x01\x01', '\x00').replace('\x01\x02', '\x01')
    return msgpack.unpackb(unescaped, object_hook=decode_ion)
def get_coverage_type(path):
    """Read the coverage type stored in the HDF5 file at *path*.

    Returns 'simplex' when the file does not exist or carries no
    'coverage_type' attribute.
    """
    ctype = 'simplex'
    if os.path.exists(path):
        with HDFLockingFile(path) as f:
            if 'coverage_type' in f.attrs:
                # Attributes are stored as 1-element arrays of packed values.
                ctype = unpack(f.attrs['coverage_type'][0])

    return ctype
class RTreeItem(object):
    """Lightweight stand-in for an rtree result item: an id plus its payload."""

    def __init__(self, item_id, obj):
        self.id, self.object = item_id, obj
# Proxy the properties.dimension property, pia...
class HoldDim(object):
    """Minimal stand-in for rtree's ``properties`` object; exposes only ``dimension``."""
    def __init__(self, dim=2):
        self.dimension = dim


class RTreeProxy(object):
    """Pure-python substitute for an rtree index over 1-d spans.

    Implements only the pieces of the rtree API this package uses
    (insert / intersection / bounds / properties.dimension), plus string
    (de)serialization in the form
    ``RTreeProxy::span::_<lb>_<ub>_<dim>_<value>...``.
    """
    def __init__(self):
        self._spans = []
        self.properties = HoldDim()

    def insert(self, count, extents, obj=None):
        """Add a span for extents [xmin, ymin, xmax, ymax]; only the x axis is used."""
        # The extents from the old rtree impl are [xmin,ymin,xmax,ymax]
        minval = extents[0]
        maxval = extents[2]
        from coverage_model.basic_types import Span
        span = Span(minval, maxval, value=obj)
        if span not in self._spans:
            self._spans.append(span)

    def intersection(self, coords, objects=True):
        """Return RTreeItems for spans overlapping coords[0]..coords[2]."""
        minval = coords[0]
        maxval = coords[2]
        si = 0
        ei = len(self._spans)
        for i, s in enumerate(self._spans):
            if minval in s:
                si = i
                break
        for i, s in enumerate(self._spans):
            if maxval in s:
                ei = i + 1
                break

        ret = []
        for i, s in enumerate(self._spans[si:ei]):
            ret.append(RTreeItem(si + i, s.value))

        return ret

    @property
    def bounds(self):
        """Overall [xmin, 0, xmax, 0] bounds; zeros when the index is empty.

        NOTE(review): both bounds come from the *first* span; if spans are not
        guaranteed to be single/ordered this may understate xmax — confirm.
        """
        lb = float(self._spans[0].lower_bound) if len(self._spans) > 0 else 0.0
        ub = float(self._spans[0].upper_bound) if len(self._spans) > 0 else 0.0
        return [lb, 0.0, ub, 0.0]

    def serialize(self):
        """Serialize to a string; '' for an empty index."""
        out = ''
        if len(self._spans) > 0:
            out = self.__class__.__name__
            for span in self._spans:
                tup_str = ''
                for i in span.tuplize(with_value=True):
                    tup_str = '%(orig)s_%(val)s' % {'orig': tup_str, 'val': i}
                out = '%(orig)s%(sep)s%(new)s' % {'orig': out, 'sep': '::span::', 'new': tup_str}
        return out

    @classmethod
    def deserialize(cls, src_str):
        """Rebuild an RTreeProxy from serialize() output; raise TypeError on bad input."""
        if isinstance(src_str, basestring) and src_str.startswith(cls.__name__):
            # BUGFIX: use prefix slicing instead of str.strip(cls.__name__).
            # strip() removes *any* characters from the given set at both ends,
            # so it could also eat trailing characters of the last span's value
            # (e.g. values ending in 'e', 'r', 'x', ...), corrupting the data.
            tmp = src_str[len(cls.__name__):]
            rtp = RTreeProxy()
            for span_tpl in tmp.split('::span::'):
                if span_tpl == '':
                    continue
                a, b, c, d, e = span_tpl.split('_')
                span_tpl = (int(b), int(c), int(d), e)
                from coverage_model.basic_types import Span
                span = Span.from_iterable(span_tpl)
                rtp._spans.append(span)
            return rtp

        raise TypeError('Improper formatting for RTreeProxy deserialization: %s' % src_str)

    def __eq__(self, other):
        # Equality is defined by identical serialized content.
        return self.serialize() == other.serialize()

    def __ne__(self, other):
        return not self.__eq__(other)
class BaseManager(MetadataManager):
    """Base metadata manager that persists attributes to an HDF5 file.

    Attribute assignments are tracked in a dirty-set (via __setattr__) and
    written to the backing file by flush(); values are serialized with
    pack(), and Dictable objects are tagged with a 'DICTABLE|module:class|'
    prefix so _base_load() can rebuild them.
    """

    @staticmethod
    def dirExists(directory):
        """Return True if *directory* exists.

        BUGFIX: the original computed os.path.exists() but never returned it,
        so this method always returned None (falsy even for existing dirs).
        """
        return os.path.exists(directory)

    @staticmethod
    def isPersisted(directory, guid):
        """Return True if a persisted entry named *guid* exists under *directory*."""
        if os.path.exists(directory):
            file_path = os.path.join(directory, guid)
            if os.path.exists(file_path):
                return True
        return False

    def storage_type(self):
        """Identify the backing storage technology."""
        return 'hdf'

    @staticmethod
    def getCoverageType(directory, guid):
        """Read the coverage type out of the master HDF5 file for *guid*."""
        return get_coverage_type(os.path.join(directory, guid, '{0}_master.hdf5'.format(guid)))

    def __init__(self, root_dir, file_name, **kwargs):
        MetadataManager.__init__(self, **kwargs)
        self.root_dir = root_dir
        self.file_path = os.path.join(root_dir, file_name)

        if not os.path.exists(self.root_dir):
            os.makedirs(self.root_dir)

        if os.path.exists(self.file_path):
            self._load()

        for k, v in kwargs.iteritems():
            # Don't overwrite with None
            if hasattr(self, k) and v is None:
                continue
            setattr(self, k, v)

    def flush(self):
        """Write all dirty attributes to the HDF5 file as packed attrs."""
        if self.is_dirty(True):
            try:
                with HDFLockingFile(self.file_path, 'a') as f:
                    for k in list(self._dirty):
                        v = getattr(self, k)
                        # log.debug('FLUSH: key=%s v=%s', k, v)
                        if isinstance(v, Dictable):
                            prefix = 'DICTABLE|{0}:{1}|'.format(v.__module__, v.__class__.__name__)
                            value = prefix + pack(v.dump())
                        else:
                            value = pack(v)
                        f.attrs[k] = np.array([value])
                        # Update the hash_value in _hmap
                        self._hmap[k] = utils.hash_any(v)
                        # Remove the key from the _dirty set
                        self._dirty.remove(k)
            # 'except IOError, ex' modernized to the 'as' form (valid py2.6+/py3).
            except IOError as ex:
                if "unable to create file (File accessability: Unable to open file)" in ex.message:
                    log.info('Issue writing to hdf file during master_manager.flush - this is not likely a huge problem: %s', ex.message)
                else:
                    raise

            super(BaseManager, self).__setattr__('_is_dirty', False)

    def _load(self):
        """Load persisted attributes; must be implemented by subclasses."""
        raise NotImplementedError('Not implemented by base class')

    def _base_load(self, f):
        """Populate attributes from the attrs of the open HDF5 file *f*."""
        for key, val in f.attrs.iteritems():
            val = val[0]
            if isinstance(val, basestring) and val.startswith('DICTABLE'):
                # Rebuild a Dictable: 'DICTABLE|module:class|<packed dump>'
                i = val.index('|', 9)
                smod, sclass = val[9:i].split(':')
                value = unpack(val[i + 1:])
                module = __import__(smod, fromlist=[sclass])
                classobj = getattr(module, sclass)
                value = classobj._fromdict(value)
            elif key in ('root_dir', 'file_path'):
                # No op - set in constructor
                continue
            else:
                value = unpack(val)
                if isinstance(value, tuple):
                    value = list(value)
            setattr(self, key, value)

    def is_dirty(self, force_deep=False):
        """
        Tells if the object has attributes that have changed since the last flush

        @return: True if the BaseManager object is dirty and should be flushed
        """
        if not force_deep and self._is_dirty:  # Something new was set, easy-peasy
            return True
        else:  # Nothing new has been set, need to check hashes
            self._dirty.difference_update(self._ignore)  # Ensure any ignored attrs are gone...
            for k, v in [(k, v) for k, v in self.__dict__.iteritems() if not k in self._ignore and not k.startswith('_')]:
                chv = utils.hash_any(v)
                # log.trace('key=%s: cached hash value=%s current hash value=%s', k, self._hmap[k], chv)
                if self._hmap[k] != chv:
                    self._dirty.add(k)

            return len(self._dirty) != 0

    def __setattr__(self, key, value):
        super(BaseManager, self).__setattr__(key, value)
        # Track (non-ignored, public) assignments so flush() knows what changed.
        if not key in self._ignore and not key.startswith('_'):
            self._hmap[key] = utils.hash_any(value)
            self._dirty.add(key)
            super(BaseManager, self).__setattr__('_is_dirty', True)
class MasterManager(BaseManager):
    """Manager for a coverage's '<guid>_master.hdf5' file.

    Persists top-level coverage attributes, tracks parameter group names,
    and maintains the brick R-tree index (stored in the 'rtree' dataset).
    """

    def __init__(self, root_dir, guid, **kwargs):
        BaseManager.__init__(self, root_dir=os.path.join(root_dir, guid), file_name='{0}_master.hdf5'.format(guid), **kwargs)
        self.guid = guid

        if hasattr(self, 'parameter_bounds') and self.parameter_bounds is None:
            self.parameter_bounds = {}

        # Add attributes that should NEVER be flushed
        self._ignore.update(['param_groups', 'guid', 'file_path', 'root_dir', 'brick_tree'])

        if not hasattr(self, 'param_groups'):
            self.param_groups = set()

    def update_rtree(self, count, extents, obj):
        """Append entry *count* to the persisted 'rtree' dataset and the in-memory index."""
        log.debug('MM count: {0}'.format(count))
        if not hasattr(self, 'brick_tree'):
            raise AttributeError('Cannot update rtree; object does not have a \'brick_tree\' attribute!!')

        log.debug('self.file_path: {0}'.format(self.file_path))
        with HDFLockingFile(self.file_path, 'a') as f:
            # Grow the variable-length-string dataset by one row and store the
            # packed (extents, obj) pair at index `count`.
            rtree_ds = f.require_dataset('rtree', shape=(count,), dtype=h5py.special_dtype(vlen=str), maxshape=(None,))
            rtree_ds.resize((count + 1,))
            rtree_ds[count] = pack((extents, obj))

            self.brick_tree.insert(count, extents, obj=obj)

    def _init_rtree(self, bD):
        """Create a fresh in-memory R-tree proxy (*bD* is unused here)."""
        self.brick_tree = RTreeProxy()

    def _load(self):
        """Load attributes, parameter group names and the R-tree from the master file."""
        with HDFLockingFile(self.file_path, 'r') as f:
            self._base_load(f)

            self.param_groups = set()
            f.visit(self.param_groups.add)
            # TODO: Use valid parameter list to compare against inspected param_groups and discard all that are invalid
            self.param_groups.discard('rtree')

            # Don't forget brick_tree!
            if 'rtree' in f.keys():
                # Populate brick tree from the 'rtree' dataset
                ds = f['/rtree']

                def tree_loader(darr):
                    # Yield (index, extents, object) triples from the packed rows.
                    for i, x in enumerate(darr):
                        ext, obj = unpack(x)
                        yield (i, ext, obj)

                rtp = RTreeProxy()
                for x in tree_loader(ds[:]):
                    rtp.insert(*x)

                setattr(self, 'brick_tree', rtp)
            else:
                setattr(self, 'brick_tree', RTreeProxy())

    def add_external_link(self, link_path, rel_ext_path, link_name):
        """Create an HDF5 external link at *link_path* pointing into another file."""
        with HDFLockingFile(self.file_path, 'r+') as f:
            f[link_path] = h5py.ExternalLink(rel_ext_path, link_name)

    def create_group(self, group_path):
        """Create an HDF5 group at *group_path* in the master file."""
        with HDFLockingFile(self.file_path, 'r+') as f:
            f.create_group(group_path)
class ParameterManager(BaseManager):
    """Manager for a single parameter's '<parameter_name>.hdf5' metadata file.

    Read-only by default: flush() is a no-op unless constructed with
    read_only=False.
    """

    def __init__(self, root_dir, parameter_name, read_only=True, **kwargs):
        BaseManager.__init__(self, root_dir=root_dir, file_name='{0}.hdf5'.format(parameter_name), **kwargs)
        self.parameter_name = parameter_name
        self.read_only = read_only

        # Add attributes that should NEVER be flushed
        self._ignore.update(['brick_tree', 'file_path', 'root_dir', 'read_only'])

    def thin_origins(self, origins):
        """Does nothing (placeholder hook)."""
        pass

    def flush(self):
        # Writes are suppressed for read-only managers.
        if not self.read_only:
            super(ParameterManager, self).flush()

    def _load(self):
        """Load persisted attributes from the parameter file."""
        with HDFLockingFile(self.file_path, 'r') as f:
            self._base_load(f)
| ooici/coverage-model | coverage_model/persistence_helpers.py | Python | bsd-2-clause | 11,604 | [
"VisIt"
] | c3f993cb402434f85b811b32a2eeefdcd383b564b847e4d21b142bf8358a7535 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, \
HasAggregationDepth, HasWeightCol, HasTol, HasProbabilityCol, HasDistanceMeasure, \
HasCheckpointInterval, Param, Params, TypeConverters
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, GeneralJavaMLWritable, \
HasTrainingSummary, SparkContext
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel', 'KMeansSummary',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
    """
    Clustering results for a given model.

    Thin wrapper whose properties all delegate to the underlying JVM
    summary object via ``_call_java``.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.1.0")
    def predictionCol(self):
        """
        Name for column of predicted clusters in `predictions`.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.1.0")
    def predictions(self):
        """
        DataFrame produced by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.1.0")
    def featuresCol(self):
        """
        Name for column of features in `predictions`.
        """
        return self._call_java("featuresCol")

    @property
    @since("2.1.0")
    def k(self):
        """
        The number of clusters the model was trained with.
        """
        return self._call_java("k")

    @property
    @since("2.1.0")
    def cluster(self):
        """
        DataFrame of predicted cluster centers for each training data point.
        """
        return self._call_java("cluster")

    @property
    @since("2.1.0")
    def clusterSizes(self):
        """
        Size of (number of data points in) each cluster.
        """
        return self._call_java("clusterSizes")

    @property
    @since("2.4.0")
    def numIter(self):
        """
        Number of iterations.
        """
        return self._call_java("numIter")
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
                             HasProbabilityCol, HasTol, HasAggregationDepth, HasWeightCol):
    """
    Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.

    .. versionadded:: 3.0.0
    """

    k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
              "Must be > 1.", typeConverter=TypeConverters.toInt)

    def __init__(self, *args):
        super(_GaussianMixtureParams, self).__init__(*args)
        # Defaults mirror the JVM-side GaussianMixture defaults.
        self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k`
        """
        return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
                           HasTrainingSummary):
    """
    Model fitted by GaussianMixture.

    .. versionadded:: 2.0.0
    """

    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("3.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)

    @property
    @since("2.0.0")
    def weights(self):
        """
        Weight for each Gaussian distribution in the mixture.
        This is a multinomial probability distribution over the k Gaussians,
        where weights[i] is the weight for Gaussian i, and weights sum to 1.
        """
        return self._call_java("weights")

    @property
    @since("3.0.0")
    def gaussians(self):
        """
        Array of :py:class:`MultivariateGaussian` where gaussians[i] represents
        the Multivariate Gaussian (Normal) Distribution for Gaussian i
        """
        sc = SparkContext._active_spark_context
        # Convert each JVM-side gaussian's mean/cov into Python objects.
        jgaussians = self._java_obj.gaussians()
        return [
            MultivariateGaussian(_java2py(sc, jgaussian.mean()), _java2py(sc, jgaussian.cov()))
            for jgaussian in jgaussians]

    @property
    @since("2.0.0")
    def gaussiansDF(self):
        """
        Retrieve Gaussian distributions as a DataFrame.
        Each row represents a Gaussian Distribution.
        The DataFrame has two columns: mean (Vector) and cov (Matrix).
        """
        return self._call_java("gaussiansDF")

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        return self._call_java("predict", value)

    @since("3.0.0")
    def predictProbability(self, value):
        """
        Predict probability for the given features.
        """
        return self._call_java("predictProbability", value)
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
    """
    GaussianMixture clustering.
    This class performs expectation maximization for multivariate Gaussian
    Mixture Models (GMMs). A GMM represents a composite distribution of
    independent Gaussian distributions with associated "mixing" weights
    specifying each's contribution to the composite.

    Given a set of sample points, this class will maximize the log-likelihood
    for a mixture of k Gaussians, iterating until the log-likelihood changes by
    less than convergenceTol, or until it has reached the max number of iterations.
    While this process is generally guaranteed to converge, it is not guaranteed
    to find a global optimum.

    .. versionadded:: 2.0.0

    Notes
    -----
    For high-dimensional data (with many features), this algorithm may perform poorly.
    This is due to high-dimensional data (a) making it difficult to cluster at all
    (based on statistical/theoretical arguments) and (b) numerical issues with
    Gaussian distributions.

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors

    >>> data = [(Vectors.dense([-0.1, -0.05 ]),),
    ...         (Vectors.dense([-0.01, -0.1]),),
    ...         (Vectors.dense([0.9, 0.8]),),
    ...         (Vectors.dense([0.75, 0.935]),),
    ...         (Vectors.dense([-0.83, -0.68]),),
    ...         (Vectors.dense([-0.91, -0.76]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
    >>> gm.getMaxIter()
    100
    >>> gm.setMaxIter(30)
    GaussianMixture...
    >>> gm.getMaxIter()
    30
    >>> model = gm.fit(df)
    >>> model.getAggregationDepth()
    2
    >>> model.getFeaturesCol()
    'features'
    >>> model.setPredictionCol("newPrediction")
    GaussianMixtureModel...
    >>> model.predict(df.head().features)
    2
    >>> model.predictProbability(df.head().features)
    DenseVector([0.0, 0.0, 1.0])
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    3
    >>> summary.clusterSizes
    [2, 2, 2]
    >>> weights = model.weights
    >>> len(weights)
    3
    >>> gaussians = model.gaussians
    >>> len(gaussians)
    3
    >>> gaussians[0].mean
    DenseVector([0.825, 0.8675])
    >>> gaussians[0].cov
    DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], 0)
    >>> gaussians[1].mean
    DenseVector([-0.87, -0.72])
    >>> gaussians[1].cov
    DenseMatrix(2, 2, [0.0016, 0.0016, 0.0016, 0.0016], 0)
    >>> gaussians[2].mean
    DenseVector([-0.055, -0.075])
    >>> gaussians[2].cov
    DenseMatrix(2, 2, [0.002, -0.0011, -0.0011, 0.0006], 0)
    >>> model.gaussiansDF.select("mean").head()
    Row(mean=DenseVector([0.825, 0.8675]))
    >>> model.gaussiansDF.select("cov").head()
    Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
    >>> transformed = model.transform(df).select("features", "newPrediction")
    >>> rows = transformed.collect()
    >>> rows[4].newPrediction == rows[5].newPrediction
    True
    >>> rows[2].newPrediction == rows[3].newPrediction
    True
    >>> gmm_path = temp_path + "/gmm"
    >>> gm.save(gmm_path)
    >>> gm2 = GaussianMixture.load(gmm_path)
    >>> gm2.getK()
    3
    >>> model_path = temp_path + "/gmm_model"
    >>> model.save(model_path)
    >>> model2 = GaussianMixtureModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model2.weights == model.weights
    True
    >>> model2.gaussians[0].mean == model.gaussians[0].mean
    True
    >>> model2.gaussians[0].cov == model.gaussians[0].cov
    True
    >>> model2.gaussians[1].mean == model.gaussians[1].mean
    True
    >>> model2.gaussians[1].cov == model.gaussians[1].cov
    True
    >>> model2.gaussians[2].mean == model.gaussians[2].mean
    True
    >>> model2.gaussians[2].cov == model.gaussians[2].cov
    True
    >>> model2.gaussiansDF.select("mean").head()
    Row(mean=DenseVector([0.825, 0.8675]))
    >>> model2.gaussiansDF.select("cov").head()
    Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True
    >>> gm2.setWeightCol("weight")
    GaussianMixture...
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
                 aggregationDepth=2, weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
                 aggregationDepth=2, weightCol=None)
        """
        super(GaussianMixture, self).__init__()
        # Instantiate the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
                                            self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in the Python model class.
        return GaussianMixtureModel(java_model)

    @keyword_only
    @since("2.0.0")
    def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
                  aggregationDepth=2, weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
                  aggregationDepth=2, weightCol=None)

        Sets params for GaussianMixture.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("2.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("2.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("2.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("2.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("3.0.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
    """
    Gaussian mixture clustering results for a given model.

    Extends :py:class:`ClusteringSummary` with the probability column and
    the total log-likelihood.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.1.0")
    def probabilityCol(self):
        """
        Name for column of predicted probability of each cluster in `predictions`.
        """
        return self._call_java("probabilityCol")

    @property
    @since("2.1.0")
    def probability(self):
        """
        DataFrame of probabilities of each cluster for each training data point.
        """
        return self._call_java("probability")

    @property
    @since("2.2.0")
    def logLikelihood(self):
        """
        Total log-likelihood for this model on the given data.
        """
        return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
    """
    Summary of KMeans.

    Extends :py:class:`ClusteringSummary` with the training cost.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.4.0")
    def trainingCost(self):
        """
        K-means cost (sum of squared distances to the nearest centroid for all points in the
        training dataset). This is equivalent to sklearn's inertia.
        """
        return self._call_java("trainingCost")
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
                    HasDistanceMeasure, HasWeightCol):
    """
    Params for :py:class:`KMeans` and :py:class:`KMeansModel`.

    .. versionadded:: 3.0.0
    """

    k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either \"random\" to " +
                     "choose random points as initial cluster centers, or \"k-means||\" " +
                     "to use a parallel variant of k-means++",
                     typeConverter=TypeConverters.toString)
    initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
                      "initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)

    def __init__(self, *args):
        super(_KMeansParams, self).__init__(*args)
        # Defaults mirror the JVM-side KMeans defaults.
        self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
                         distanceMeasure="euclidean")

    @since("1.5.0")
    def getK(self):
        """
        Gets the value of `k`
        """
        return self.getOrDefault(self.k)

    @since("1.5.0")
    def getInitMode(self):
        """
        Gets the value of `initMode`
        """
        return self.getOrDefault(self.initMode)

    @since("1.5.0")
    def getInitSteps(self):
        """
        Gets the value of `initSteps`
        """
        return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
                  HasTrainingSummary):
    """
    Model fitted by KMeans.

    .. versionadded:: 1.5.0
    """

    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("1.5.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            return KMeansSummary(super(KMeansModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        return self._call_java("predict", value)
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
    """
    K-means clustering with a k-means++ like initialization mode
    (the k-means|| algorithm by Bahmani et al).
    .. versionadded:: 1.5.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
    ...         (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
    >>> df = spark.createDataFrame(data, ["features", "weighCol"])
    >>> kmeans = KMeans(k=2)
    >>> kmeans.setSeed(1)
    KMeans...
    >>> kmeans.setWeightCol("weighCol")
    KMeans...
    >>> kmeans.setMaxIter(10)
    KMeans...
    >>> kmeans.getMaxIter()
    10
    >>> kmeans.clear(kmeans.maxIter)
    >>> model = kmeans.fit(df)
    >>> model.getDistanceMeasure()
    'euclidean'
    >>> model.setPredictionCol("newPrediction")
    KMeansModel...
    >>> model.predict(df.head().features)
    0
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> transformed = model.transform(df).select("features", "newPrediction")
    >>> rows = transformed.collect()
    >>> rows[0].newPrediction == rows[1].newPrediction
    True
    >>> rows[2].newPrediction == rows[3].newPrediction
    True
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> summary.trainingCost
    4.0
    >>> kmeans_path = temp_path + "/kmeans"
    >>> kmeans.save(kmeans_path)
    >>> kmeans2 = KMeans.load(kmeans_path)
    >>> kmeans2.getK()
    2
    >>> model_path = temp_path + "/kmeans_model"
    >>> model.save(model_path)
    >>> model2 = KMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True
    """
    @keyword_only
    def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                 distanceMeasure="euclidean", weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                 distanceMeasure="euclidean", weightCol=None)
        """
        super(KMeans, self).__init__()
        # Create the backing JVM estimator; all fitting is delegated to it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return KMeansModel(java_model)
    @keyword_only
    @since("1.5.0")
    def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                  distanceMeasure="euclidean", weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                  distanceMeasure="euclidean", weightCol=None)
        Sets params for KMeans.
        """
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.5.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)
    @since("1.5.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)
    @since("1.5.0")
    def setInitSteps(self, value):
        """
        Sets the value of :py:attr:`initSteps`.
        """
        return self._set(initSteps=value)
    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)
    @since("1.5.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("1.5.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("1.5.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("1.5.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("1.5.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
                             HasDistanceMeasure, HasWeightCol):
    """
    Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.
    .. versionadded:: 3.0.0
    """
    # Class-level Param declarations shared by the estimator and its model;
    # Params._dummy() is the placeholder parent used for class attributes.
    k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
                                    "The minimum number of points (if >= 1.0) or the minimum " +
                                    "proportion of points (if < 1.0) of a divisible cluster.",
                                    typeConverter=TypeConverters.toFloat)
    def __init__(self, *args):
        super(_BisectingKMeansParams, self).__init__(*args)
        # Register default values for this algorithm's params.
        self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)
    @since("2.0.0")
    def getMinDivisibleClusterSize(self):
        """
        Gets the value of `minDivisibleClusterSize` or its default value.
        """
        return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
                           HasTrainingSummary):
    """
    Model fitted by BisectingKMeans.
    .. versionadded:: 2.0.0
    """
    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("2.0.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        # Convert each JVM-side vector to a NumPy array, one at a time.
        centers = []
        for jvm_center in self._call_java("clusterCenters"):
            centers.append(jvm_center.toArray())
        return centers
    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Computes the sum of squared distances between the input points
        and their corresponding cluster centers.
        .. deprecated:: 3.0.0
        It will be removed in future versions. Use :py:class:`ClusteringEvaluator` instead.
        You can also get the cost on the training dataset in the summary.
        """
        warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
                      "ClusteringEvaluator instead. You can also get the cost on the training "
                      "dataset in the summary.", FutureWarning)
        return self._call_java("computeCost", dataset)
    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        # Guard clause: models loaded from disk carry no training summary.
        if not self.hasSummary:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
        return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        prediction = self._call_java("predict", value)
        return prediction
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
    """
    A bisecting k-means algorithm based on the paper "A comparison of document clustering
    techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and bisects each of them using
    k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result more than `k` leaf
    clusters, larger clusters get higher priority.
    .. versionadded:: 2.0.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
    ...         (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
    >>> df = spark.createDataFrame(data, ["features", "weighCol"])
    >>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
    >>> bkm.setMaxIter(10)
    BisectingKMeans...
    >>> bkm.getMaxIter()
    10
    >>> bkm.clear(bkm.maxIter)
    >>> bkm.setSeed(1)
    BisectingKMeans...
    >>> bkm.setWeightCol("weighCol")
    BisectingKMeans...
    >>> bkm.getSeed()
    1
    >>> bkm.clear(bkm.seed)
    >>> model = bkm.fit(df)
    >>> model.getMaxIter()
    20
    >>> model.setPredictionCol("newPrediction")
    BisectingKMeansModel...
    >>> model.predict(df.head().features)
    0
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> model.computeCost(df)
    2.0
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> summary.trainingCost
    4.000...
    >>> transformed = model.transform(df).select("features", "newPrediction")
    >>> rows = transformed.collect()
    >>> rows[0].newPrediction == rows[1].newPrediction
    True
    >>> rows[2].newPrediction == rows[3].newPrediction
    True
    >>> bkm_path = temp_path + "/bkm"
    >>> bkm.save(bkm_path)
    >>> bkm2 = BisectingKMeans.load(bkm_path)
    >>> bkm2.getK()
    2
    >>> bkm2.getDistanceMeasure()
    'euclidean'
    >>> model_path = temp_path + "/bkm_model"
    >>> model.save(model_path)
    >>> model2 = BisectingKMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True
    """
    @keyword_only
    def __init__(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
                 seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
                 weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
                 seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
                 weightCol=None)
        """
        super(BisectingKMeans, self).__init__()
        # Create the backing JVM estimator; all fitting is delegated to it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
                                            self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.0.0")
    def setParams(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
                  seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
                  weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
                  seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
                  weightCol=None)
        Sets params for BisectingKMeans.
        """
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)
    @since("2.0.0")
    def setMinDivisibleClusterSize(self, value):
        """
        Sets the value of :py:attr:`minDivisibleClusterSize`.
        """
        return self._set(minDivisibleClusterSize=value)
    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)
    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("2.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("2.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("2.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
    """
    Bisecting KMeans clustering results for a given model.
    .. versionadded:: 2.1.0
    """
    @property
    @since("3.0.0")
    def trainingCost(self):
        """
        Sum of squared distances to the nearest centroid for all points in the training dataset.
        This is equivalent to sklearn's inertia.
        """
        # Fetch the cost from the JVM-side summary object and hand it back.
        cost = self._call_java("trainingCost")
        return cost
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
    """
    Params for :py:class:`LDA` and :py:class:`LDAModel`.
    .. versionadded:: 3.0.0
    """
    # Class-level Param declarations shared by the estimator and its model;
    # Params._dummy() is the placeholder parent used for class attributes.
    k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    optimizer = Param(Params._dummy(), "optimizer",
                      "Optimizer or inference algorithm used to estimate the LDA model. "
                      "Supported: online, em", typeConverter=TypeConverters.toString)
    learningOffset = Param(Params._dummy(), "learningOffset",
                           "A (positive) learning parameter that downweights early iterations."
                           " Larger values make early iterations count less",
                           typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
                          "exponential decay rate. This should be between (0.5, 1.0] to "
                          "guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
    subsamplingRate = Param(Params._dummy(), "subsamplingRate",
                            "Fraction of the corpus to be sampled and used in each iteration "
                            "of mini-batch gradient descent, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)
    optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
                                     "Indicates whether the docConcentration (Dirichlet parameter "
                                     "for document-topic distribution) will be optimized during "
                                     "training.", typeConverter=TypeConverters.toBoolean)
    docConcentration = Param(Params._dummy(), "docConcentration",
                             "Concentration parameter (commonly named \"alpha\") for the "
                             "prior placed on documents' distributions over topics (\"theta\").",
                             typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topic' distributions over terms.",
                               typeConverter=TypeConverters.toFloat)
    topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
                                 "Output column with estimates of the topic mixture distribution "
                                 "for each document (often called \"theta\" in the literature). "
                                 "Returns a vector of zeros for an empty document.",
                                 typeConverter=TypeConverters.toString)
    # Pass the converter via the typeConverter keyword, consistent with every
    # other Param declaration in this module (it was previously positional,
    # which bound to the same parameter but broke the local convention).
    keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
                               "(For EM optimizer) If using checkpointing, this indicates whether"
                               " to keep the last checkpoint. If false, then the checkpoint will be"
                               " deleted. Deleting the checkpoint can cause failures if a data"
                               " partition is lost, so set this bit with care.",
                               typeConverter=TypeConverters.toBoolean)
    def __init__(self, *args):
        super(_LDAParams, self).__init__(*args)
        # Register default values for this algorithm's params.
        self._setDefault(maxIter=20, checkpointInterval=10,
                         k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                         subsamplingRate=0.05, optimizeDocConcentration=True,
                         topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
    @since("2.0.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)
    @since("2.0.0")
    def getOptimizer(self):
        """
        Gets the value of :py:attr:`optimizer` or its default value.
        """
        return self.getOrDefault(self.optimizer)
    @since("2.0.0")
    def getLearningOffset(self):
        """
        Gets the value of :py:attr:`learningOffset` or its default value.
        """
        return self.getOrDefault(self.learningOffset)
    @since("2.0.0")
    def getLearningDecay(self):
        """
        Gets the value of :py:attr:`learningDecay` or its default value.
        """
        return self.getOrDefault(self.learningDecay)
    @since("2.0.0")
    def getSubsamplingRate(self):
        """
        Gets the value of :py:attr:`subsamplingRate` or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)
    @since("2.0.0")
    def getOptimizeDocConcentration(self):
        """
        Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
        """
        return self.getOrDefault(self.optimizeDocConcentration)
    @since("2.0.0")
    def getDocConcentration(self):
        """
        Gets the value of :py:attr:`docConcentration` or its default value.
        """
        return self.getOrDefault(self.docConcentration)
    @since("2.0.0")
    def getTopicConcentration(self):
        """
        Gets the value of :py:attr:`topicConcentration` or its default value.
        """
        return self.getOrDefault(self.topicConcentration)
    @since("2.0.0")
    def getTopicDistributionCol(self):
        """
        Gets the value of :py:attr:`topicDistributionCol` or its default value.
        """
        return self.getOrDefault(self.topicDistributionCol)
    @since("2.0.0")
    def getKeepLastCheckpoint(self):
        """
        Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
        """
        return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
    """
    Latent Dirichlet Allocation (LDA) model.
    This abstraction permits for different underlying representations,
    including local and distributed data structures.
    .. versionadded:: 2.0.0
    """
    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setTopicDistributionCol(self, value):
        """
        Sets the value of :py:attr:`topicDistributionCol`.
        """
        return self._set(topicDistributionCol=value)
    @since("2.0.0")
    def isDistributed(self):
        """
        Indicates whether this instance is of type DistributedLDAModel
        """
        return self._call_java("isDistributed")
    @since("2.0.0")
    def vocabSize(self):
        """Vocabulary size (number of terms or words in the vocabulary)"""
        return self._call_java("vocabSize")
    @since("2.0.0")
    def topicsMatrix(self):
        """
        Inferred topics, where each topic is represented by a distribution over terms.
        This is a matrix of size vocabSize x k, where each column is a topic.
        No guarantees are given about the ordering of the topics.
        .. warning:: If this model is actually a :py:class:`DistributedLDAModel`
            instance produced by the Expectation-Maximization ("em") `optimizer`,
            then this method could involve collecting a large amount of data
            to the driver (on the order of vocabSize x k).
        """
        return self._call_java("topicsMatrix")
    @since("2.0.0")
    def logLikelihood(self, dataset):
        """
        Calculates a lower bound on the log likelihood of the entire corpus.
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
        .. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
            :py:attr:`optimizer` is set to "em"), this involves collecting a large
            :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logLikelihood", dataset)
    @since("2.0.0")
    def logPerplexity(self, dataset):
        """
        Calculate an upper bound on perplexity. (Lower is better.)
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
        .. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
            :py:attr:`optimizer` is set to "em"), this involves collecting a large
            :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logPerplexity", dataset)
    @since("2.0.0")
    def describeTopics(self, maxTermsPerTopic=10):
        """
        Return the topics described by their top-weighted terms.
        """
        return self._call_java("describeTopics", maxTermsPerTopic)
    @since("2.0.0")
    def estimatedDocConcentration(self):
        """
        Value for :py:attr:`LDA.docConcentration` estimated from data.
        If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
        then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
        """
        return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Distributed model fitted by :py:class:`LDA`.
    This type of model is currently only produced by Expectation-Maximization (EM).
    This model stores the inferred topics, the full training dataset, and the topic distribution
    for each training document.
    .. versionadded:: 2.0.0
    """
    @since("2.0.0")
    def toLocal(self):
        """
        Convert this distributed model to a local representation. This discards info about the
        training dataset.
        .. warning:: This involves collecting a large :py:func:`topicsMatrix` to the driver.
        """
        model = LocalLDAModel(self._call_java("toLocal"))
        # SPARK-10931: Temporary fix to be removed once LDAModel defines Params
        model._create_params_from_java()
        model._transfer_params_from_java()
        return model
    @since("2.0.0")
    def trainingLogLikelihood(self):
        """
        Log likelihood of the observed tokens in the training set,
        given the current parameter estimates:
        log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
        Notes
        -----
        - This excludes the prior; for that, use :py:func:`logPrior`.
        - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
          the hyperparameters.
        - This is computed from the topic distributions computed during training. If you call
          :py:func:`logLikelihood` on the same training dataset, the topic distributions
          will be computed again, possibly giving different results.
        """
        return self._call_java("trainingLogLikelihood")
    @since("2.0.0")
    def logPrior(self):
        """
        Log probability of the current parameter estimate:
        log P(topics, topic distributions for docs | alpha, eta)
        """
        return self._call_java("logPrior")
    def getCheckpointFiles(self):
        """
        If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
        be saved checkpoint files. This method is provided so that users can manage those files.
        .. versionadded:: 2.0.0
        Returns
        -------
        list
            List of checkpoint files from training
        Notes
        -----
        Removing the checkpoints can cause failures if a partition is lost and is needed
        by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
        the checkpoints when this model and derivative data go out of scope.
        """
        return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Local (non-distributed) model fitted by :py:class:`LDA`.
    This model stores the inferred topics only; it does not store info about the training dataset.
    .. versionadded:: 2.0.0
    """
    # No extra members: everything is inherited from LDAModel and the Java
    # read/write mixins. The docstring alone serves as the class body.
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
    """
    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
    Terminology:
    - "term" = "word": an element of the vocabulary
    - "token": instance of a term appearing in a document
    - "topic": multinomial distribution over terms representing some concept
    - "document": one piece of text, corresponding to one row in the input data
    Original LDA paper (journal version):
    Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
    Input data (featuresCol):
    LDA is given a collection of documents as input data, via the featuresCol parameter.
    Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
    count for the corresponding term (word) in the document. Feature transformers such as
    :py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
    can be useful for converting text to word count vectors.
    .. versionadded:: 2.0.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors, SparseVector
    >>> from pyspark.ml.clustering import LDA
    >>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
    ...      [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
    >>> lda = LDA(k=2, seed=1, optimizer="em")
    >>> lda.setMaxIter(10)
    LDA...
    >>> lda.getMaxIter()
    10
    >>> lda.clear(lda.maxIter)
    >>> model = lda.fit(df)
    >>> model.setSeed(1)
    DistributedLDAModel...
    >>> model.getTopicDistributionCol()
    'topicDistribution'
    >>> model.isDistributed()
    True
    >>> localModel = model.toLocal()
    >>> localModel.isDistributed()
    False
    >>> model.vocabSize()
    2
    >>> model.describeTopics().show()
    +-----+-----------+--------------------+
    |topic|termIndices|         termWeights|
    +-----+-----------+--------------------+
    |    0|     [1, 0]|[0.50401530077160...|
    |    1|     [0, 1]|[0.50401530077160...|
    +-----+-----------+--------------------+
    ...
    >>> model.topicsMatrix()
    DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
    >>> lda_path = temp_path + "/lda"
    >>> lda.save(lda_path)
    >>> sameLDA = LDA.load(lda_path)
    >>> distributed_model_path = temp_path + "/lda_distributed_model"
    >>> model.save(distributed_model_path)
    >>> sameModel = DistributedLDAModel.load(distributed_model_path)
    >>> local_model_path = temp_path + "/lda_local_model"
    >>> localModel.save(local_model_path)
    >>> sameLocalModel = LocalLDAModel.load(local_model_path)
    >>> model.transform(df).take(1) == sameLocalModel.transform(df).take(1)
    True
    """
    @keyword_only
    def __init__(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                 k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                 subsamplingRate=0.05, optimizeDocConcentration=True,
                 docConcentration=None, topicConcentration=None,
                 topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        __init__(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                 k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                 subsamplingRate=0.05, optimizeDocConcentration=True,\
                 docConcentration=None, topicConcentration=None,\
                 topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        """
        super(LDA, self).__init__()
        # Create the backing JVM estimator; all fitting is delegated to it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    def _create_model(self, java_model):
        # The "em" optimizer yields a distributed model; any other ("online")
        # yields a local model.
        if self.getOptimizer() == "em":
            return DistributedLDAModel(java_model)
        else:
            return LocalLDAModel(java_model)
    @keyword_only
    @since("2.0.0")
    def setParams(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                  subsamplingRate=0.05, optimizeDocConcentration=True,
                  docConcentration=None, topicConcentration=None,
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        setParams(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                  subsamplingRate=0.05, optimizeDocConcentration=True,\
                  docConcentration=None, topicConcentration=None,\
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        Sets params for LDA.
        """
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.0.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)
    @since("2.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        >>> algo = LDA().setK(10)
        >>> algo.getK()
        10
        """
        return self._set(k=value)
    @since("2.0.0")
    def setOptimizer(self, value):
        """
        Sets the value of :py:attr:`optimizer`.
        Currently only support 'em' and 'online'.
        Examples
        --------
        >>> algo = LDA().setOptimizer("em")
        >>> algo.getOptimizer()
        'em'
        """
        return self._set(optimizer=value)
    @since("2.0.0")
    def setLearningOffset(self, value):
        """
        Sets the value of :py:attr:`learningOffset`.
        Examples
        --------
        >>> algo = LDA().setLearningOffset(100)
        >>> algo.getLearningOffset()
        100.0
        """
        return self._set(learningOffset=value)
    @since("2.0.0")
    def setLearningDecay(self, value):
        """
        Sets the value of :py:attr:`learningDecay`.
        Examples
        --------
        >>> algo = LDA().setLearningDecay(0.1)
        >>> algo.getLearningDecay()
        0.1...
        """
        return self._set(learningDecay=value)
    @since("2.0.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        Examples
        --------
        >>> algo = LDA().setSubsamplingRate(0.1)
        >>> algo.getSubsamplingRate()
        0.1...
        """
        return self._set(subsamplingRate=value)
    @since("2.0.0")
    def setOptimizeDocConcentration(self, value):
        """
        Sets the value of :py:attr:`optimizeDocConcentration`.
        Examples
        --------
        >>> algo = LDA().setOptimizeDocConcentration(True)
        >>> algo.getOptimizeDocConcentration()
        True
        """
        return self._set(optimizeDocConcentration=value)
    @since("2.0.0")
    def setDocConcentration(self, value):
        """
        Sets the value of :py:attr:`docConcentration`.
        Examples
        --------
        >>> algo = LDA().setDocConcentration([0.1, 0.2])
        >>> algo.getDocConcentration()
        [0.1..., 0.2...]
        """
        return self._set(docConcentration=value)
    @since("2.0.0")
    def setTopicConcentration(self, value):
        """
        Sets the value of :py:attr:`topicConcentration`.
        Examples
        --------
        >>> algo = LDA().setTopicConcentration(0.5)
        >>> algo.getTopicConcentration()
        0.5...
        """
        return self._set(topicConcentration=value)
    @since("2.0.0")
    def setTopicDistributionCol(self, value):
        """
        Sets the value of :py:attr:`topicDistributionCol`.
        Examples
        --------
        >>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
        >>> algo.getTopicDistributionCol()
        'topicDistributionCol'
        """
        return self._set(topicDistributionCol=value)
    @since("2.0.0")
    def setKeepLastCheckpoint(self, value):
        """
        Sets the value of :py:attr:`keepLastCheckpoint`.
        Examples
        --------
        >>> algo = LDA().setKeepLastCheckpoint(False)
        >>> algo.getKeepLastCheckpoint()
        False
        """
        return self._set(keepLastCheckpoint=value)
    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("2.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
    """
    Params for :py:class:`PowerIterationClustering`.
    .. versionadded:: 3.0.0
    """
    # Class-level Param declarations; Params._dummy() is the placeholder
    # parent used for class attributes.
    k = Param(Params._dummy(), "k",
              "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either " +
                     "'random' to use a random vector as vertex properties, or 'degree' to use " +
                     "a normalized sum of similarities with other vertices. Supported options: " +
                     "'random' and 'degree'.",
                     typeConverter=TypeConverters.toString)
    srcCol = Param(Params._dummy(), "srcCol",
                   "Name of the input column for source vertex IDs.",
                   typeConverter=TypeConverters.toString)
    dstCol = Param(Params._dummy(), "dstCol",
                   "Name of the input column for destination vertex IDs.",
                   typeConverter=TypeConverters.toString)
    def __init__(self, *args):
        super(_PowerIterationClusteringParams, self).__init__(*args)
        # Register default values for this algorithm's params.
        self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
    @since("2.4.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)
    @since("2.4.0")
    def getInitMode(self):
        """
        Gets the value of :py:attr:`initMode` or its default value.
        """
        return self.getOrDefault(self.initMode)
    @since("2.4.0")
    def getSrcCol(self):
        """
        Gets the value of :py:attr:`srcCol` or its default value.
        """
        return self.getOrDefault(self.srcCol)
    @since("2.4.0")
    def getDstCol(self):
        """
        Gets the value of :py:attr:`dstCol` or its default value.
        """
        return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
                               JavaMLWritable):
    """
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
    `Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
    abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
    iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
    to run the PowerIterationClustering algorithm.

    .. versionadded:: 2.4.0

    Notes
    -----
    See `Wikipedia on Spectral clustering <http://en.wikipedia.org/wiki/Spectral_clustering>`_

    Examples
    --------
    >>> data = [(1, 0, 0.5),
    ...         (2, 0, 0.5), (2, 1, 0.7),
    ...         (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
    ...         (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
    ...         (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
    >>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
    >>> pic = PowerIterationClustering(k=2, weightCol="weight")
    >>> pic.setMaxIter(40)
    PowerIterationClustering...
    >>> assignments = pic.assignClusters(df)
    >>> assignments.sort(assignments.id).show(truncate=False)
    +---+-------+
    |id |cluster|
    +---+-------+
    |0  |0      |
    |1  |0      |
    |2  |0      |
    |3  |0      |
    |4  |0      |
    |5  |1      |
    +---+-------+
    ...
    >>> pic_path = temp_path + "/pic"
    >>> pic.save(pic_path)
    >>> pic2 = PowerIterationClustering.load(pic_path)
    >>> pic2.getK()
    2
    >>> pic2.getMaxIter()
    40
    >>> pic2.assignClusters(df).take(6) == assignments.take(6)
    True
    """

    @keyword_only
    def __init__(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                 weightCol=None):
        """
        __init__(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                 weightCol=None)
        """
        super(PowerIterationClustering, self).__init__()
        # Wrap the JVM-side implementation; all params are forwarded to it.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
        # @keyword_only stores the caller's kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.4.0")
    def setParams(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                  weightCol=None):
        """
        setParams(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                  weightCol=None)
        Sets params for PowerIterationClustering.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.4.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.4.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("2.4.0")
    def setSrcCol(self, value):
        """
        Sets the value of :py:attr:`srcCol`.
        """
        return self._set(srcCol=value)

    @since("2.4.0")
    def setDstCol(self, value):
        """
        Sets the value of :py:attr:`dstCol`.
        """
        return self._set(dstCol=value)

    @since("2.4.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("2.4.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.4.0")
    def assignClusters(self, dataset):
        """
        Run the PIC algorithm and returns a cluster assignment for each input vertex.

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            A dataset with columns src, dst, weight representing the affinity matrix,
            which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, the weight column value is similarity s,,ij,,
            which must be nonnegative. This is a symmetric matrix and hence
            s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be
            either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are
            ignored, because we assume s,,ij,, = 0.0.

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            A dataset that contains columns of vertex id and the corresponding cluster for
            the id. The schema of it will be:
            - id: Long
            - cluster: Int
        """
        # Push the current Python-side param values to the JVM object before
        # invoking the algorithm there.
        self._transfer_params_to_java()
        jdf = self._java_obj.assignClusters(dataset._jdf)
        return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
# Numpy 1.14+ changed it's string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| maropu/spark | python/pyspark/ml/clustering.py | Python | apache-2.0 | 62,447 | [
"Gaussian"
] | 59ac943638e9741ed9c12ff73ad5fc0377e5382779617af627ff4a870caf9d0e |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import pytestrunner
if __name__ == '__main__':
    # Delegate to scikit-bio's pytest-based runner when executed directly.
    pytestrunner()
| gregcaporaso/scikit-bio | skbio/test.py | Python | bsd-3-clause | 437 | [
"scikit-bio"
] | b4fe5423379981e33bafecf97e8f4fd3176852850a368de5a67e8dee24f59af2 |
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import rmock
from rmock.tools import find_random_port
import rmock.tools.net
from rmock.tools.net import find_port
from rmock.tools.net import RANDOM_PORT
from rmock.errors import RmockError
import mock
from nose.tools import assert_raises
from nose.tools import assert_equals
def test_find_random_port_ok():
    """A port reported free by find_random_port must be bindable by rmock."""
    free_port = find_random_port()
    with rmock.run(port=free_port):
        pass
def test_find_random_port_fail():
    """If the (patched) RNG only ever proposes a busy port, the search fails."""
    busy_port = find_random_port()
    with mock.patch("random.randint") as randint:
        # Force every candidate to be the port we are about to occupy.
        randint.return_value = busy_port
        with rmock.run(port=busy_port):
            assert_raises(RmockError, find_random_port)
def test_find_port():
    """Concrete ports pass through; any spelling of RANDOM yields a free port."""
    assert_equals(find_port(10), 10)

    expected = find_random_port()
    with mock.patch("random.randint") as randint:
        # Pin the RNG so the "random" port is deterministic for the assertions.
        randint.return_value = expected
        assert_equals(find_port(RANDOM_PORT), expected)
        assert_equals(find_port('random'), expected)
        assert_equals(find_port('RANDOM'), expected)
| tikan/rmock | tests/unit_tests/test_random_port.py | Python | lgpl-3.0 | 1,677 | [
"VisIt"
] | 8d302cc3bf1ce52e3d051b87ce273cdc5e248bf8267c4da77e04501950508535 |
## \file
## \ingroup tutorial_roofit
## \notebook
## Addition and convolution: decay function pdfs with optional B physics effects (mixing
## and CP violation) that can be analytically convolved with e.g. Gaussian resolution functions
##
## ```
## pdf1 = decay(t,tau) (x) delta(t)
## pdf2 = decay(t,tau) (x) gauss(t,m,s)
## pdf3 = decay(t,tau) (x) (f*gauss1(t,m1,s1) + (1-f)*gauss2(t,m1,s1))
## ```
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT

# B-physics pdf with truth resolution
# ---------------------------------------------------------------------
# Variables of decay pdf
# dt spans both signs, so the decay is modelled double-sided below.
dt = ROOT.RooRealVar("dt", "dt", -10, 10)
tau = ROOT.RooRealVar("tau", "tau", 1.548)

# Build a truth resolution model (delta function)
tm = ROOT.RooTruthModel("tm", "truth model", dt)

# Construct decay(t) (x) delta(t)
decay_tm = ROOT.RooDecay("decay_tm", "decay", dt,
                         tau, tm, ROOT.RooDecay.DoubleSided)

# Plot pdf (dashed)
frame = dt.frame(ROOT.RooFit.Title("Bdecay (x) resolution"))
decay_tm.plotOn(frame, ROOT.RooFit.LineStyle(ROOT.kDashed))

# B-physics pdf with Gaussian resolution
# ----------------------------------------------------------------------------
# Build a gaussian resolution model
bias1 = ROOT.RooRealVar("bias1", "bias1", 0)
sigma1 = ROOT.RooRealVar("sigma1", "sigma1", 1)
gm1 = ROOT.RooGaussModel("gm1", "gauss model 1", dt, bias1, sigma1)

# Construct decay(t) (x) gauss1(t)
decay_gm1 = ROOT.RooDecay("decay_gm1", "decay",
                          dt, tau, gm1, ROOT.RooDecay.DoubleSided)

# Plot pdf
decay_gm1.plotOn(frame)

# B-physics pdf with double Gaussian resolution
# ------------------------------------------------------------------------------------------
# Build another gaussian resolution model (wider: sigma2 = 5 vs sigma1 = 1)
bias2 = ROOT.RooRealVar("bias2", "bias2", 0)
sigma2 = ROOT.RooRealVar("sigma2", "sigma2", 5)
gm2 = ROOT.RooGaussModel("gm2", "gauss model 2", dt, bias2, sigma2)

# Build a composite resolution model f*gm1+(1-f)*gm2
gm1frac = ROOT.RooRealVar("gm1frac", "fraction of gm1", 0.5)
gmsum = ROOT.RooAddModel(
    "gmsum",
    "sum of gm1 and gm2",
    ROOT.RooArgList(
        gm1,
        gm2),
    ROOT.RooArgList(gm1frac))

# Construct decay(t) (x) (f*gm1 + (1-f)*gm2)
decay_gmsum = ROOT.RooDecay(
    "decay_gmsum", "decay", dt, tau, gmsum, ROOT.RooDecay.DoubleSided)

# Plot pdf (red)
decay_gmsum.plotOn(frame, ROOT.RooFit.LineColor(ROOT.kRed))

# Draw all frames on canvas
c = ROOT.TCanvas("rf209_anaconv", "rf209_anaconv", 600, 600)
ROOT.gPad.SetLeftMargin(0.15)
frame.GetYaxis().SetTitleOffset(1.6)
frame.Draw()

c.SaveAs("rf209_anaconv.png")
| root-mirror/root | tutorials/roofit/rf209_anaconv.py | Python | lgpl-2.1 | 2,658 | [
"Gaussian"
] | 736eb5bc72f7ae56bce9990bae77f0609a616113cb77e756e6dd37a52f3e956d |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
# Short aliases re-exporting implementations from array_ops / linalg_ops /
# math_ops under their conventional linear-algebra names.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
# slogdet comes straight from the generated op wrapper, so it is exported to
# the public `tf.linalg` namespace (with dispatch support) explicitly here.
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(dispatch.add_dispatch_support(slogdet))
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
# Same explicit export treatment for logm as for slogdet above.
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(dispatch.add_dispatch_support(logm))
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
  A = ...  # shape 10 x 10
  det = tf.exp(tf.linalg.logdet(A))  # scalar
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op`. Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  with ops.name_scope(name, 'logdet', [matrix]):
    # For A = C C^H (Cholesky factorization), the identity
    # log det(A) = 2 * sum_i log(real(C_ii)) avoids over/underflow.
    chol_factor = gen_linalg_ops.cholesky(matrix)
    diag_entries = math_ops.real(array_ops.matrix_diag_part(chol_factor))
    return 2.0 * math_ops.reduce_sum(math_ops.log(diag_entries), axis=[-1])
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
  """Transposes the last two dimensions of and conjugates tensor `matrix`.

  For example:

  ```python
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],
                        #  [2 - 2j, 5 - 5j],
                        #  [3 - 3j, 6 - 6j]]
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
    matrix.
  """
  with ops.name_scope(name, 'adjoint', [matrix]):
    matrix_t = ops.convert_to_tensor(matrix, name='matrix')
    # Conjugate-transpose in a single fused op.
    return array_ops.matrix_transpose(matrix_t, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
def _matrix_exp_pade3(matrix):
  """3rd-order Pade approximant for matrix exponential."""
  # Pade coefficients b0, b1, b2 of the degree-3 approximant.
  b0, b1, b2 = (constant_op.constant(c, matrix.dtype)
                for c in (120.0, 60.0, 12.0))
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  # U collects the odd-power terms, V the even-power terms.
  matrix_u = math_ops.matmul(matrix, m2 + b1 * ident)
  matrix_v = b2 * m2 + b0 * ident
  return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
  """5th-order Pade approximant for matrix exponential."""
  # Pade coefficients b0..b4 of the degree-5 approximant.
  b0, b1, b2, b3, b4 = (constant_op.constant(c, matrix.dtype)
                        for c in (30240.0, 15120.0, 3360.0, 420.0, 30.0))
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  # U collects the odd-power terms, V the even-power terms.
  matrix_u = math_ops.matmul(matrix, m4 + b3 * m2 + b1 * ident)
  matrix_v = b4 * m4 + b2 * m2 + b0 * ident
  return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
  """7th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) such that exp(matrix) ~= solve(V - U, U + V),
  which is how the caller (`matrix_exponential`) combines them.
  """
  # Pade coefficients of the degree-7 approximant.
  b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]
  b = [constant_op.constant(x, matrix.dtype) for x in b]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  # Even powers of the input, shared between U and V.
  matrix_2 = math_ops.matmul(matrix, matrix)
  matrix_4 = math_ops.matmul(matrix_2, matrix_2)
  matrix_6 = math_ops.matmul(matrix_4, matrix_2)
  tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident
  matrix_u = math_ops.matmul(matrix, tmp)  # odd-power terms
  matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
  return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
  """9th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) such that exp(matrix) ~= solve(V - U, U + V),
  which is how the caller (`matrix_exponential`) combines them.
  """
  # Pade coefficients of the degree-9 approximant.
  b = [
      17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,
      2162160.0, 110880.0, 3960.0, 90.0
  ]
  b = [constant_op.constant(x, matrix.dtype) for x in b]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  # Even powers of the input, shared between U and V.
  matrix_2 = math_ops.matmul(matrix, matrix)
  matrix_4 = math_ops.matmul(matrix_2, matrix_2)
  matrix_6 = math_ops.matmul(matrix_4, matrix_2)
  matrix_8 = math_ops.matmul(matrix_6, matrix_2)
  tmp = (
      matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +
      b[1] * ident)
  matrix_u = math_ops.matmul(matrix, tmp)  # odd-power terms
  matrix_v = (
      b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +
      b[0] * ident)
  return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
  """13th-order Pade approximant for matrix exponential.

  Returns the pair (U, V) such that exp(matrix) ~= solve(V - U, U + V),
  which is how the caller (`matrix_exponential`) combines them.
  """
  # Pade coefficients of the degree-13 approximant.
  b = [
      64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
      1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,
      33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0
  ]
  b = [constant_op.constant(x, matrix.dtype) for x in b]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  # Even powers of the input; powers above 6 are folded in via an extra
  # multiplication by matrix_6 rather than materialized directly.
  matrix_2 = math_ops.matmul(matrix, matrix)
  matrix_4 = math_ops.matmul(matrix_2, matrix_2)
  matrix_6 = math_ops.matmul(matrix_4, matrix_2)
  tmp_u = (
      math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +
      b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)
  matrix_u = math_ops.matmul(matrix, tmp_u)  # odd-power terms
  tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2
  matrix_v = (
      math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +
      b[2] * matrix_2 + b[0] * ident)
  return matrix_u, matrix_v
@tf_export('linalg.expm')
@dispatch.add_dispatch_support
def matrix_exponential(input, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the matrix exponential of one or more square matrices.

  $$exp(A) = \sum_{n=0}^\infty A^n/n!$$

  The exponential is computed using a combination of the scaling and squaring
  method and the Pade approximation. Details can be found in:
  Nicholas J. Higham, "The scaling and squaring method for the matrix
  exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the exponential for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
      `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    the matrix exponential of the input.

  Raises:
    ValueError: An unsupported type is provided as input.

  @compatibility(scipy)
  Equivalent to scipy.linalg.expm
  @end_compatibility
  """
  with ops.name_scope(name, 'matrix_exponential', [input]):
    matrix = ops.convert_to_tensor(input, name='input')
    # Empty matrices: exp of a 0x0 matrix is itself.
    if matrix.shape[-2:] == [0, 0]:
      return matrix
    batch_shape = matrix.shape[:-2]
    if not batch_shape.is_fully_defined():
      batch_shape = array_ops.shape(matrix)[:-2]

    # reshaping the batch makes the where statements work better
    matrix = array_ops.reshape(
        matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
    # Per-matrix L1 norm (max absolute column sum), kept with two trailing
    # singleton dims so it broadcasts against the [batch, M, M] matrices.
    l1_norm = math_ops.reduce_max(
        math_ops.reduce_sum(
            math_ops.abs(matrix),
            axis=array_ops.size(array_ops.shape(matrix)) - 2),
        axis=-1)[..., array_ops.newaxis, array_ops.newaxis]

    const = lambda x: constant_op.constant(x, l1_norm.dtype)

    def _nest_where(vals, cases):
      # Per-matrix selection of the Pade order: picks cases[i] for the first
      # threshold vals[i] exceeding the matrix's l1_norm, else the last case.
      assert len(vals) == len(cases) - 1
      if len(vals) == 1:
        return array_ops.where_v2(
            math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
      else:
        return array_ops.where_v2(
            math_ops.less(l1_norm, const(vals[0])), cases[0],
            _nest_where(vals[1:], cases[1:]))

    # Thresholds and max Pade order depend on precision; the scaling step
    # divides by 2**squarings so the highest-order approximant's argument has
    # norm below maxnorm.
    if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
      maxnorm = const(3.925724783138660)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(
          matrix /
          math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
      conds = (4.258730016922831e-001, 1.880152677804762e+000)
      u = _nest_where(conds, (u3, u5, u7))
      v = _nest_where(conds, (v3, v5, v7))
    elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
      maxnorm = const(5.371920351148152)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(matrix)
      u9, v9 = _matrix_exp_pade9(matrix)
      u13, v13 = _matrix_exp_pade13(
          matrix /
          math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
      conds = (1.495585217958292e-002, 2.539398330063230e-001,
               9.504178996162932e-001, 2.097847961257068e+000)
      u = _nest_where(conds, (u3, u5, u7, u9, u13))
      v = _nest_where(conds, (v3, v5, v7, v9, v13))
    else:
      raise ValueError('tf.linalg.expm does not support matrices of type %s' %
                       matrix.dtype)

    # Pade combination: exp(A) ~= (V - U)^{-1} (U + V). Non-finite norms
    # (inf/nan inputs) yield an all-NaN result instead of a solver error.
    is_finite = math_ops.is_finite(math_ops.reduce_max(l1_norm))
    nan = constant_op.constant(np.nan, matrix.dtype)
    result = control_flow_ops.cond(
        is_finite, lambda: linalg_ops.matrix_solve(-u + v, u + v),
        lambda: array_ops.fill(array_ops.shape(matrix), nan))
    max_squarings = math_ops.reduce_max(squarings)
    i = const(0.0)

    def c(i, _):
      # Loop while any matrix still needs squaring; skip entirely for
      # non-finite input.
      return control_flow_ops.cond(is_finite,
                                   lambda: math_ops.less(i, max_squarings),
                                   lambda: constant_op.constant(False))

    def b(i, r):
      # Undo the scaling: square only those matrices whose per-matrix
      # `squarings` count has not been reached yet.
      return i + 1, array_ops.where_v2(
          math_ops.less(i, squarings), math_ops.matmul(r, r), r)

    _, result = control_flow_ops.while_loop(c, b, [i, result])
    # Restore the original batch shape flattened at the top.
    if not matrix.shape.is_fully_defined():
      return array_ops.reshape(
          result,
          array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
    return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
@tf_export('linalg.banded_triangular_solve', v1=[])
def banded_triangular_solve(
    bands,
    rhs,
    lower=True,
    adjoint=False,  # pylint: disable=redefined-outer-name
    name=None):
  r"""Solve triangular systems of equations with a banded solver.

  `bands` is a tensor of shape `[..., K, M]`, where `K` represents the number
  of bands stored. This corresponds to a batch of `M` by `M` matrices, whose
  `K` subdiagonals (when `lower` is `True`) are stored.

  This operator broadcasts the batch dimensions of `bands` and the batch
  dimensions of `rhs`.

  Examples:

  Storing 2 bands of a 3x3 matrix.
  Note that first element in the second row is ignored due to
  the 'LEFT_RIGHT' padding.

  >>> x = [[2., 3., 4.], [1., 2., 3.]]
  >>> x2 = [[2., 3., 4.], [10000., 2., 3.]]
  >>> y = tf.zeros([3, 3])
  >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0))
  >>> z
  <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
  array([[2., 0., 0.],
         [2., 3., 0.],
         [0., 3., 4.]], dtype=float32)>
  >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1]))
  >>> soln
  <tf.Tensor: shape=(3, 1), dtype=float32, numpy=
  array([[0.5 ],
         [0.  ],
         [0.25]], dtype=float32)>
  >>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1]))
  >>> tf.reduce_all(are_equal).numpy()
  True
  >>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1]))
  >>> tf.reduce_all(are_equal).numpy()
  True

  Storing 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding
  the last element of the first row is ignored.

  >>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]]
  >>> y = tf.zeros([4, 4])
  >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1))
  >>> z
  <tf.Tensor: shape=(4, 4), dtype=float32, numpy=
  array([[-1.,  2.,  0.,  0.],
         [ 0., -2.,  3.,  0.],
         [ 0.,  0., -3.,  4.],
         [ 0.,  0., -0., -4.]], dtype=float32)>
  >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False)
  >>> soln
  <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  array([[-4.       ],
         [-1.5      ],
         [-0.6666667],
         [-0.25     ]], dtype=float32)>
  >>> are_equal = (soln == tf.linalg.triangular_solve(
  ...   z, tf.ones([4, 1]), lower=False))
  >>> tf.reduce_all(are_equal).numpy()
  True

  Args:
    bands: A `Tensor` describing the bands of the left hand side, with shape
      `[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th
      diagonal (the diagonal is the top row) when `lower` is `True` and
      otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is
      the bottom row) when `lower` is `False`. The bands are stored with
      'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right
      and subdiagonals are padded on the left. This is the alignment cuSPARSE
      uses. See `tf.linalg.set_diag` for more details.
    rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as
      `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
      statically, `rhs` will be treated as a matrix rather than a vector.
    lower: An optional `bool`. Defaults to `True`. Boolean indicating whether
      `bands` represents a lower or upper triangular matrix.
    adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether
      to solve with the matrix's block-wise adjoint.
    name: A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M] or [..., M, N] containing the solutions.
  """
  with ops.name_scope(name, 'banded_triangular_solve', [bands, rhs]):
    # Thin wrapper: all the work happens in the generated kernel op.
    return gen_linalg_ops.banded_triangular_solve(
        bands, rhs, lower=lower, adjoint=adjoint)
@tf_export('linalg.tridiagonal_solve')
@dispatch.add_dispatch_support
def tridiagonal_solve(diagonals,
                      rhs,
                      diagonals_format='compact',
                      transpose_rhs=False,
                      conjugate_rhs=False,
                      name=None,
                      partial_pivoting=True,
                      perturb_singular=False):
  r"""Solves tridiagonal systems of equations.

  The input can be supplied in various formats: `matrix`, `sequence` and
  `compact`, specified by the `diagonals_format` arg.

  In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
  two inner-most dimensions representing the square tridiagonal matrices.
  Elements outside of the three diagonals will be ignored.

  In `sequence` format, `diagonals` are supplied as a tuple or list of three
  tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
  superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
  `M-1` or `M`; in the latter case, the last element of superdiagonal and the
  first element of subdiagonal will be ignored.

  In `compact` format the three diagonals are brought together into one tensor
  of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
  diagonals, and subdiagonals, in order. Similarly to `sequence` format,
  elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `compact` format is recommended as the one with best performance. In case
  you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
  An example for a tensor of shape [m, m]:

  ```python
  rhs = tf.constant([...])
  matrix = tf.constant([[...]])
  m = matrix.shape[0]
  dummy_idx = [0, 0]  # An arbitrary element to use as a dummy
  indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx],  # Superdiagonal
             [[i, i] for i in range(m)],                        # Diagonal
             [dummy_idx] + [[i + 1, i] for i in range(m - 1)]]  # Subdiagonal
  diagonals=tf.gather_nd(matrix, indices)
  x = tf.linalg.tridiagonal_solve(diagonals, rhs)
  ```

  Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
  `[..., M, K]`. The latter allows to simultaneously solve K systems with the
  same left-hand sides and K different right-hand sides. If `transpose_rhs`
  is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.

  The batch dimensions, denoted as `...`, must be the same in `diagonals` and
  `rhs`.

  The output is a tensor of the same shape as `rhs`: either `[..., M]` or
  `[..., M, K]`.

  The op isn't guaranteed to raise an error if the input matrix is not
  invertible. `tf.debugging.check_numerics` can be applied to the output to
  detect invertibility problems.

  **Note**: with large batch sizes, the computation on the GPU may be slow, if
  either `partial_pivoting=True` or there are multiple right-hand sides
  (`K > 1`). If this issue arises, consider if it's possible to disable pivoting
  and have `K = 1`, or, alternatively, consider using CPU.

  On CPU, solution is computed via Gaussian elimination with or without partial
  pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
  library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends of `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
      `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
      statically, `rhs` will be treated as a matrix rather than a vector.
    diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
      `compact`.
    transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
      if the shape of rhs is [..., M]).
    conjugate_rhs: If `True`, `rhs` is conjugated before solving.
    name: A name to give this `Op` (optional).
    partial_pivoting: whether to perform partial pivoting. `True` by default.
      Partial pivoting makes the procedure more stable, but slower. Partial
      pivoting is unnecessary in some cases, including diagonally dominant and
      symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
    perturb_singular: whether to perturb singular matrices to return a finite
      result. `False` by default. If true, solutions to systems involving
      a singular matrix will be computed by perturbing near-zero pivots in
      the partially pivoted LU decomposition. Specifically, tiny pivots are
      perturbed by an amount of order `eps * max_{ij} |U(i,j)|` to avoid
      overflow. Here `U` is the upper triangular part of the LU decomposition,
      and `eps` is the machine precision. This is useful for solving
      numerically singular systems when computing eigenvectors by inverse
      iteration.
      If `partial_pivoting` is `False`, `perturb_singular` must be `False` as
      well.

  Returns:
    A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
    If the input matrix is singular, the result is undefined.

  Raises:
    ValueError: Is raised if any of the following conditions hold:
      1. An unsupported type is provided as input,
      2. the input tensors have incorrect shapes,
      3. `perturb_singular` is `True` but `partial_pivoting` is not.
    UnimplementedError: Whenever `partial_pivoting` is true and the backend is
      XLA, or whenever `perturb_singular` is true and the backend is
      XLA or GPU.

  [1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
    Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.
  """
  if perturb_singular and not partial_pivoting:
    raise ValueError('partial_pivoting must be True if perturb_singular is.')

  # All three input formats are normalized to the `compact` [..., 3, M] layout
  # and handed to the shared helper.
  if diagonals_format == 'compact':
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  if diagonals_format == 'sequence':
    if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
      raise ValueError('Expected diagonals to be a sequence of length 3.')
    superdiag, maindiag, subdiag = diagonals
    if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
        not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
      raise ValueError(
          'Tensors representing the three diagonals must have the same shape,'
          'except for the last dimension, got {}, {}, {}'.format(
              subdiag.shape, maindiag.shape, superdiag.shape))

    m = tensor_shape.dimension_value(maindiag.shape[-1])

    def pad_if_necessary(t, name, last_dim_padding):
      # Length-(m-1) off-diagonals get one ignored dummy element appended /
      # prepended so all three rows of the compact layout have length m.
      n = tensor_shape.dimension_value(t.shape[-1])
      if not n or n == m:
        return t
      if n == m - 1:
        paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
                    [last_dim_padding])
        return array_ops.pad(t, paddings)
      raise ValueError('Expected {} to be have length {} or {}, got {}.'.format(
          name, m, m - 1, n))

    subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
    superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])

    diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  if diagonals_format == 'matrix':
    m1 = tensor_shape.dimension_value(diagonals.shape[-1])
    m2 = tensor_shape.dimension_value(diagonals.shape[-2])
    if m1 and m2 and m1 != m2:
      raise ValueError(
          'Expected last two dimensions of diagonals to be same, got {} and {}'
          .format(m1, m2))
    m = m1 or m2
    # Extract the three diagonals into compact form; 'LEFT_RIGHT' alignment
    # matches the padding convention the compact-format solver expects.
    diagonals = array_ops.matrix_diag_part(
        diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                      conjugate_rhs, partial_pivoting,
                                      perturb_singular, name):
  """Helper function used after the input has been cast to compact form.

  Validates the statically-known shapes of `diagonals` ([..., 3, M]) and
  `rhs`, then dispatches to `linalg_ops.tridiagonal_solve`. A vector-shaped
  `rhs` (rank == rank(diagonals) - 1) is temporarily expanded to a single
  column and squeezed back afterwards; in that case `transpose_rhs` is
  ignored.

  Args:
    diagonals: `Tensor` of shape [..., 3, M] holding superdiagonal, main
      diagonal and subdiagonal, in that order.
    rhs: `Tensor` of shape [..., M] or [..., M, K] with the right-hand sides.
    transpose_rhs: Python bool; transpose `rhs` before solving (ignored when
      `rhs` is a vector).
    conjugate_rhs: Python bool; conjugate `rhs` before solving.
    partial_pivoting: Python bool, forwarded to the kernel.
    perturb_singular: Python bool, forwarded to the kernel.
    name: Optional op name.

  Returns:
    A `Tensor` with the solutions, shaped like `rhs`.

  Raises:
    ValueError: If the statically-known ranks or dimensions of `diagonals`
      and `rhs` are inconsistent.
  """
  diags_rank, rhs_rank = diagonals.shape.rank, rhs.shape.rank
  # If we know the rank of the diagonal tensor, do some static checking.
  # NOTE: `if diags_rank:` skips the checks both for an unknown rank (None)
  # and for a (degenerate) rank of 0; the latter fails later at op level.
  if diags_rank:
    if diags_rank < 2:
      raise ValueError(
          'Expected diagonals to have rank at least 2, got {}'.format(
              diags_rank))
    if rhs_rank and rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
      raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
          diags_rank - 1, diags_rank, rhs_rank))
    # Batch shapes of the diagonals and of rhs must agree statically.
    if (rhs_rank and not diagonals.shape[:-2].is_compatible_with(
        rhs.shape[:diags_rank - 2])):
      raise ValueError('Batch shapes {} and {} are incompatible'.format(
          diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))

  if diagonals.shape[-2] and diagonals.shape[-2] != 3:
    raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))

  def check_num_lhs_matches_num_rhs():
    # Static check that the system size M matches the number of rhs rows.
    if (diagonals.shape[-1] and rhs.shape[-2] and
        diagonals.shape[-1] != rhs.shape[-2]):
      raise ValueError('Expected number of left-hand sided and right-hand '
                       'sides to be equal, got {} and {}'.format(
                           diagonals.shape[-1], rhs.shape[-2]))

  if rhs_rank and diags_rank and rhs_rank == diags_rank - 1:
    # Rhs provided as a vector, ignoring transpose_rhs
    if conjugate_rhs:
      rhs = math_ops.conj(rhs)
    # Solve as a single-column matrix, then squeeze the column back out.
    rhs = array_ops.expand_dims(rhs, -1)
    check_num_lhs_matches_num_rhs()
    return array_ops.squeeze(
        linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
                                     perturb_singular, name), -1)

  if transpose_rhs:
    rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
  elif conjugate_rhs:
    rhs = math_ops.conj(rhs)

  check_num_lhs_matches_num_rhs()
  return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
                                      perturb_singular, name)
@tf_export('linalg.tridiagonal_matmul')
@dispatch.add_dispatch_support
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
  r"""Multiplies tridiagonal matrix by matrix.

  `diagonals` is representation of 3-diagonal NxN matrix, which depends on
  `diagonals_format`.

  In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
  two inner-most dimensions representing the square tridiagonal matrices.
  Elements outside of the three diagonals will be ignored.

  In `sequence` format, `diagonals` is list or tuple of three tensors:
  `[superdiag, maindiag, subdiag]`, each having shape [..., M]. Last element
  of `superdiag` and first element of `subdiag` are ignored.

  In `compact` format the three diagonals are brought together into one tensor
  of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
  diagonals, and subdiagonals, in order. Similarly to `sequence` format,
  elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `sequence` format is recommended as the one with the best performance.

  `rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`.

  Example:

  ```python
  superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
  maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
  subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
  diagonals = [superdiag, maindiag, subdiag]
  rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
  x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
  ```

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends of `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.
    diagonals_format: one of `sequence`, `compact`, or `matrix`. Default is
      `compact`.
    name: A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M, N] containing the result of multiplication.

  Raises:
    ValueError: An unsupported type is provided as input, or when the input
      tensors have incorrect shapes.
  """
  if diagonals_format == 'compact':
    superdiag = diagonals[..., 0, :]
    maindiag = diagonals[..., 1, :]
    subdiag = diagonals[..., 2, :]
  elif diagonals_format == 'sequence':
    superdiag, maindiag, subdiag = diagonals
  elif diagonals_format == 'matrix':
    m1 = tensor_shape.dimension_value(diagonals.shape[-1])
    m2 = tensor_shape.dimension_value(diagonals.shape[-2])
    if m1 and m2 and m1 != m2:
      raise ValueError(
          'Expected last two dimensions of diagonals to be same, got {} and {}'
          .format(m1, m2))
    # Extract the three diagonals; elements outside them are dropped here.
    diags = array_ops.matrix_diag_part(
        diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
    superdiag = diags[..., 0, :]
    maindiag = diags[..., 1, :]
    subdiag = diags[..., 2, :]
  else:
    raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)

  # C++ backend requires matrices.
  # Converting 1-dimensional vectors to matrices with 1 row.
  superdiag = array_ops.expand_dims(superdiag, -2)
  maindiag = array_ops.expand_dims(maindiag, -2)
  subdiag = array_ops.expand_dims(subdiag, -2)

  return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
def _maybe_validate_matrix(a, validate_args):
  """Statically checks that `a` is a float matrix; returns runtime assertions.

  Raises immediately when the dtype or a statically-known rank is invalid.
  When the rank is not known statically and `validate_args` is set, a
  graph-time rank assertion is returned instead.
  """
  if not a.dtype.is_floating:
    raise TypeError('Input `a` must have `float`-like `dtype` '
                    '(saw {}).'.format(a.dtype.name))
  static_rank = None if a.shape is None else a.shape.rank
  if static_rank is not None:
    if static_rank < 2:
      raise ValueError('Input `a` must have at least 2 dimensions '
                       '(saw: {}).'.format(static_rank))
    return []
  if validate_args:
    return [
        check_ops.assert_rank_at_least(
            a, rank=2, message='Input `a` must have at least 2 dimensions.')
    ]
  return []
@tf_export('linalg.matrix_rank')
@dispatch.add_dispatch_support
def matrix_rank(a, tol=None, validate_args=False, name=None):
  """Compute the matrix rank of one or more matrices.

  The rank is the number of singular values exceeding the cutoff `tol`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) whose rank is to be
      computed.
    tol: Threshold below which the singular value is counted as 'zero'.
      Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'matrix_rank'.

  Returns:
    matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
      singular values.
  """
  with ops.name_scope(name or 'matrix_rank'):
    a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        a = array_ops.identity(a)
    singular_values = svd(a, compute_uv=False)
    if tol is None:
      # Default cutoff: eps * max(rows, cols) * largest singular value.
      # Use the static dimensions when available, a dynamic max otherwise.
      last_two_dims = a.shape[-2:]
      if last_two_dims.is_fully_defined():
        largest_dim = np.max(last_two_dims.as_list())
      else:
        largest_dim = math_ops.reduce_max(array_ops.shape(a)[-2:])
      eps = np.finfo(a.dtype.as_numpy_dtype).eps
      tol = (
          eps * math_ops.cast(largest_dim, a.dtype) *
          math_ops.reduce_max(singular_values, axis=-1, keepdims=True))
    nonzero = math_ops.cast(singular_values > tol, dtypes.int32)
    return math_ops.reduce_sum(nonzero, axis=-1)
@tf_export('linalg.pinv')
@dispatch.add_dispatch_support
def pinv(a, rcond=None, validate_args=False, name=None):
  """Compute the Moore-Penrose pseudo-inverse of one or more matrices.

  Calculate the [generalized inverse of a matrix](
  https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
  singular-value decomposition (SVD) and including all large singular values.

  The pseudo-inverse of a matrix `A`, is defined as: 'the matrix that 'solves'
  [the least-squares problem] `A @ x = b`,' i.e., if `x_hat` is a solution, then
  `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
  `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
  `A_pinv = V @ inv(Sigma) @ U^T`. [(Strang, 1980)][1]

  This function is analogous to [`numpy.linalg.pinv`](
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
  It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
  default `rcond` is `1e-15`. Here the default is
  `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    rcond: `Tensor` of small singular value cutoffs. Singular values smaller
      (in modulus) than `rcond` * largest_singular_value (again, in modulus) are
      set to zero. Must broadcast against `tf.shape(a)[:-2]`.
      Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'pinv'.

  Returns:
    a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except
      rightmost two dimensions are transposed.

  Raises:
    TypeError: if input `a` does not have `float`-like `dtype`.
    ValueError: if input `a` has fewer than 2 dimensions.

  #### Examples

  ```python
  import tensorflow as tf
  import tensorflow_probability as tfp

  a = tf.constant([[1.,  0.4,  0.5],
                   [0.4, 0.2,  0.25],
                   [0.5, 0.25, 0.35]])
  tf.matmul(tf.linalg.pinv(a), a)
  # ==> array([[1., 0., 0.],
               [0., 1., 0.],
               [0., 0., 1.]], dtype=float32)

  a = tf.constant([[1.,  0.4,  0.5,  1.],
                   [0.4, 0.2,  0.25, 2.],
                   [0.5, 0.25, 0.35, 3.]])
  tf.matmul(tf.linalg.pinv(a), a)
  # ==> array([[ 0.76,  0.37,  0.21, -0.02],
               [ 0.37,  0.43, -0.33,  0.02],
               [ 0.21, -0.33,  0.81,  0.01],
               [-0.02,  0.02,  0.01,  1.  ]], dtype=float32)
  ```

  #### References

  [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,
       Inc., 1980, pp. 139-142.
  """
  with ops.name_scope(name or 'pinv'):
    a = ops.convert_to_tensor(a, name='a')

    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        a = array_ops.identity(a)

    dtype = a.dtype.as_numpy_dtype

    if rcond is None:

      def get_dim_size(dim):
        # Prefer the statically-known dimension; fall back to a dynamic one.
        dim_val = tensor_shape.dimension_value(a.shape[dim])
        if dim_val is not None:
          return dim_val
        return array_ops.shape(a)[dim]

      num_rows = get_dim_size(-2)
      num_cols = get_dim_size(-1)
      if isinstance(num_rows, int) and isinstance(num_cols, int):
        # Both dimensions are static: rcond is a Python float.
        max_rows_cols = float(max(num_rows, num_cols))
      else:
        max_rows_cols = math_ops.cast(
            math_ops.maximum(num_rows, num_cols), dtype)
      rcond = 10. * max_rows_cols * np.finfo(dtype).eps

    rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')

    # Calculate pseudo inverse via SVD.
    # Note: if a is Hermitian then u == v. (We might observe additional
    # performance by explicitly setting `v = u` in such cases.)
    [
        singular_values,  # Sigma
        left_singular_vectors,  # U
        right_singular_vectors,  # V
    ] = svd(
        a, full_matrices=False, compute_uv=True)

    # Saturate small singular values to inf. This has the effect of making
    # `1. / s = 0.` while not resulting in `NaN` gradients.
    cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)
    singular_values = array_ops.where_v2(
        singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values,
        np.array(np.inf, dtype))

    # By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse
    # is defined as `pinv(a) == v @ inv(s) @ u^H`. Dividing V by the singular
    # values applies inv(s) without materializing the diagonal matrix.
    a_pinv = math_ops.matmul(
        right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2),
        left_singular_vectors,
        adjoint_b=True)

    if a.shape is not None and a.shape.rank is not None:
      # Statically known output shape: batch dims + swapped trailing dims.
      a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))

    return a_pinv
@tf_export('linalg.lu_solve')
@dispatch.add_dispatch_support
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
  """Solves systems of linear eqns `A X = RHS`, given LU factorizations.

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
      `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,
      tf.newaxis])[..., 0]`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_solve').

  Returns:
    x: The `X` in `A @ X = RHS`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[1., 2],
        [3, 4]],
       [[7, 8],
        [3, 4]]]
  inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```

  """
  with ops.name_scope(name or 'lu_solve'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
    rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')

    assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)
        rhs = array_ops.identity(rhs)

    if (rhs.shape.rank == 2 and perm.shape.rank == 1):
      # Both rhs and perm have scalar batch_shape.
      # A single gather applies the row permutation to rhs.
      permuted_rhs = array_ops.gather(rhs, perm, axis=-2)
    else:
      # Either rhs or perm have non-scalar batch_shape or we can't determine
      # this information statically.
      rhs_shape = array_ops.shape(rhs)
      broadcast_batch_shape = array_ops.broadcast_dynamic_shape(
          rhs_shape[:-2],
          array_ops.shape(perm)[:-1])
      d, m = rhs_shape[-2], rhs_shape[-1]
      rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],
                                             axis=0)

      # Tile out rhs.
      broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)
      broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])

      # Tile out perm and add batch indices.
      # Pairing each permutation entry with its flattened batch index lets a
      # single gather_nd permute rows independently per batch member.
      broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])
      broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])
      broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)
      broadcast_batch_indices = array_ops.broadcast_to(
          math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],
          [broadcast_batch_size, d])
      broadcast_perm = array_ops.stack(
          [broadcast_batch_indices, broadcast_perm], axis=-1)

      permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)
      permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)

    # `lower_upper` stores L + U - eye, so L's unit diagonal must be restored
    # before the forward substitution.
    lower = set_diag(
        band_part(lower_upper, num_lower=-1, num_upper=0),
        array_ops.ones(
            array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))
    return triangular_solve(
        lower_upper,  # Only upper is accessed.
        triangular_solve(lower, permuted_rhs),
        lower=False)
@tf_export('linalg.lu_matrix_inverse')
@dispatch.add_dispatch_support
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
  """Computes the inverse given the LU decomposition(s) of one or more matrices.

  This op is conceptually identical to,

  ```python
  inv_X = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(X))
  tf.assert_near(tf.matrix_inverse(X), inv_X)
  # ==> True
  ```

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_matrix_inverse').

  Returns:
    inv_x: The matrix_inv, i.e.,
      `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```

  """
  with ops.name_scope(name or 'lu_matrix_inverse'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')

    checks = lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if checks:
      with ops.control_dependencies(checks):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)

    # Inverting amounts to solving against a (batched) identity matrix.
    lu_shape = array_ops.shape(lower_upper)
    identity_rhs = eye(
        lu_shape[-1], batch_shape=lu_shape[:-2], dtype=lower_upper.dtype)
    return lu_solve(lower_upper, perm, rhs=identity_rhs, validate_args=False)
@tf_export('linalg.lu_reconstruct')
@dispatch.add_dispatch_support
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):
  """Reconstructs one or more matrices from their LU decomposition(s).

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_reconstruct').

  Returns:
    x: The original input to `tf.linalg.lu`, i.e., `x` as in,
      `lu_reconstruct(*tf.linalg.lu(x))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))
  tf.assert_near(x, x_reconstructed)
  # ==> True
  ```

  """
  with ops.name_scope(name or 'lu_reconstruct'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')

    assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)

    shape = array_ops.shape(lower_upper)

    # Split lower_upper (= L + U - eye) back into L (unit diagonal) and U,
    # then multiply to recover P^-1 @ X.
    lower = set_diag(
        band_part(lower_upper, num_lower=-1, num_upper=0),
        array_ops.ones(shape[:-1], dtype=lower_upper.dtype))
    upper = band_part(lower_upper, num_lower=0, num_upper=-1)
    x = math_ops.matmul(lower, upper)

    if (lower_upper.shape is None or lower_upper.shape.rank is None or
        lower_upper.shape.rank != 2):
      # We either don't know the batch rank or there are >0 batch dims.
      # Flatten the batch so each matrix can be row-permuted via gather_nd.
      batch_size = math_ops.reduce_prod(shape[:-2])
      d = shape[-1]
      x = array_ops.reshape(x, [batch_size, d, d])
      perm = array_ops.reshape(perm, [batch_size, d])
      # Undo the pivoting: rows must be gathered by the inverse permutation.
      perm = map_fn.map_fn(array_ops.invert_permutation, perm)
      batch_indices = array_ops.broadcast_to(
          math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])
      x = array_ops.gather_nd(x, array_ops.stack([batch_indices, perm],
                                                 axis=-1))
      x = array_ops.reshape(x, shape)
    else:
      x = array_ops.gather(x, array_ops.invert_permutation(perm))

    x.set_shape(lower_upper.shape)
    return x
def lu_reconstruct_assertions(lower_upper, perm, validate_args):
  """Returns list of assertions related to `lu_reconstruct` assumptions.

  Checks that `lower_upper` is (at least) a square matrix and that
  `rank(lower_upper) == rank(perm) + 1`. Violations detectable statically
  raise immediately; otherwise, when `validate_args` is set, graph-time
  assertion ops are returned.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    validate_args: Python `bool`; whether to emit runtime assertions for
      properties that cannot be verified statically.

  Returns:
    A (possibly empty) list of assertion ops, to be consumed via
    `tf.control_dependencies`.

  Raises:
    ValueError: If a violated assumption is detectable statically.
  """
  assertions = []

  message = 'Input `lower_upper` must have at least 2 dimensions.'
  if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:
    raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))

  message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
  if lower_upper.shape.rank is not None and perm.shape.rank is not None:
    if lower_upper.shape.rank != perm.shape.rank + 1:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank(
            lower_upper, rank=array_ops.rank(perm) + 1, message=message))

  message = '`lower_upper` must be square.'
  # The static squareness check needs the *last two* dims to be known.
  # (Previously this tested `shape[:-2]` -- the batch dims -- which made the
  # `shape[-2] != shape[-1]` comparison raise spuriously whenever the batch
  # was known but a trailing dimension was not, since `None != n` is True.)
  if lower_upper.shape[-2:].is_fully_defined():
    if lower_upper.shape[-2] != lower_upper.shape[-1]:
      raise ValueError(message)
  elif validate_args:
    m, n = array_ops.split(
        array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)
    assertions.append(check_ops.assert_equal(m, n, message=message))

  return assertions
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
  """Returns list of assertions related to `lu_solve` assumptions.

  In addition to the `lu_reconstruct` assumptions, `rhs` must be (at least) a
  matrix whose number of rows matches the system size `lower_upper.shape[-1]`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    rhs: Matrix-shaped `Tensor` of right-hand sides.
    validate_args: Python `bool`; whether to emit runtime assertions for
      properties that cannot be verified statically.

  Returns:
    A (possibly empty) list of assertion ops.

  Raises:
    ValueError: If a violated assumption is detectable statically.
  """
  assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)

  message = 'Input `rhs` must have at least 2 dimensions.'
  if rhs.shape.ndims is not None:
    if rhs.shape.ndims < 2:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank_at_least(rhs, rank=2, message=message))

  # Fix: the dimension being compared is rhs.shape[-2] (the number of rows of
  # the right-hand side), so the error message must name that dimension; it
  # previously (incorrectly) referred to `rhs.shape[-1]`.
  message = '`lower_upper.shape[-1]` must equal `rhs.shape[-2]`.'
  if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):
    if lower_upper.shape[-1] != rhs.shape[-2]:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_equal(
            array_ops.shape(lower_upper)[-1],
            array_ops.shape(rhs)[-2],
            message=message))

  return assertions
@tf_export('linalg.eigh_tridiagonal')
@dispatch.add_dispatch_support
def eigh_tridiagonal(alpha,
beta,
eigvals_only=True,
select='a',
select_range=None,
tol=None,
name=None):
"""Computes the eigenvalues of a Hermitian tridiagonal matrix.
Args:
alpha: A real or complex tensor of shape (n), the diagonal elements of the
matrix. NOTE: If alpha is complex, the imaginary part is ignored (assumed
zero) to satisfy the requirement that the matrix be Hermitian.
beta: A real or complex tensor of shape (n-1), containing the elements of
the first super-diagonal of the matrix. If beta is complex, the first
sub-diagonal of the matrix is assumed to be the conjugate of beta to
satisfy the requirement that the matrix be Hermitian
eigvals_only: If False, both eigenvalues and corresponding eigenvectors are
computed. If True, only eigenvalues are computed. Default is True.
select: Optional string with values in {‘a’, ‘v’, ‘i’} (default is 'a') that
determines which eigenvalues to calculate:
'a': all eigenvalues.
‘v’: eigenvalues in the interval (min, max] given by `select_range`.
'i’: eigenvalues with indices min <= i <= max.
select_range: Size 2 tuple or list or tensor specifying the range of
eigenvalues to compute together with select. If select is 'a',
select_range is ignored.
tol: Optional scalar. The absolute tolerance to which each eigenvalue is
required. An eigenvalue (or cluster) is considered to have converged if it
lies in an interval of this width. If tol is None (default), the value
eps*|T|_2 is used where eps is the machine precision, and |T|_2 is the
2-norm of the matrix T.
name: Optional name of the op.
Returns:
eig_vals: The eigenvalues of the matrix in non-decreasing order.
eig_vectors: If `eigvals_only` is False the eigenvectors are returned in
the second output argument.
Raises:
ValueError: If input values are invalid.
NotImplemented: Computing eigenvectors for `eigvals_only` = False is
not implemented yet.
This op implements a subset of the functionality of
scipy.linalg.eigh_tridiagonal.
Note: The result is undefined if the input contains +/-inf or NaN, or if
any value in beta has a magnitude greater than
`numpy.sqrt(numpy.finfo(beta.dtype.as_numpy_dtype).max)`.
TODO(b/187527398):
Add support for outer batch dimensions.
#### Examples
```python
import numpy
eigvals = tf.linalg.eigh_tridiagonal([0.0, 0.0, 0.0], [1.0, 1.0])
eigvals_expected = [-numpy.sqrt(2.0), 0.0, numpy.sqrt(2.0)]
tf.assert_near(eigvals_expected, eigvals)
# ==> True
```
"""
with ops.name_scope(name or 'eigh_tridiagonal'):
def _compute_eigenvalues(alpha, beta):
"""Computes all eigenvalues of a Hermitian tridiagonal matrix."""
def _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, x):
"""Implements the Sturm sequence recurrence."""
with ops.name_scope('sturm'):
n = alpha.shape[0]
zeros = array_ops.zeros(array_ops.shape(x), dtype=dtypes.int32)
ones = array_ops.ones(array_ops.shape(x), dtype=dtypes.int32)
# The first step in the Sturm sequence recurrence
# requires special care if x is equal to alpha[0].
def sturm_step0():
q = alpha[0] - x
count = array_ops.where(q < 0, ones, zeros)
q = array_ops.where(
math_ops.equal(alpha[0], x), alpha0_perturbation, q)
return q, count
# Subsequent steps all take this form:
def sturm_step(i, q, count):
q = alpha[i] - beta_sq[i - 1] / q - x
count = array_ops.where(q <= pivmin, count + 1, count)
q = array_ops.where(q <= pivmin, math_ops.minimum(q, -pivmin), q)
return q, count
# The first step initializes q and count.
q, count = sturm_step0()
# Peel off ((n-1) % blocksize) steps from the main loop, so we can run
# the bulk of the iterations unrolled by a factor of blocksize.
blocksize = 16
i = 1
peel = (n - 1) % blocksize
unroll_cnt = peel
def unrolled_steps(start, q, count):
for j in range(unroll_cnt):
q, count = sturm_step(start + j, q, count)
return start + unroll_cnt, q, count
i, q, count = unrolled_steps(i, q, count)
# Run the remaining steps of the Sturm sequence using a partially
# unrolled while loop.
unroll_cnt = blocksize
cond = lambda i, q, count: math_ops.less(i, n)
_, _, count = control_flow_ops.while_loop(
cond, unrolled_steps, [i, q, count], back_prop=False)
return count
with ops.name_scope('compute_eigenvalues'):
if alpha.dtype.is_complex:
alpha = math_ops.real(alpha)
beta_sq = math_ops.real(math_ops.conj(beta) * beta)
beta_abs = math_ops.sqrt(beta_sq)
else:
beta_sq = math_ops.square(beta)
beta_abs = math_ops.abs(beta)
# Estimate the largest and smallest eigenvalues of T using the
# Gershgorin circle theorem.
finfo = np.finfo(alpha.dtype.as_numpy_dtype)
off_diag_abs_row_sum = array_ops.concat(
[beta_abs[:1], beta_abs[:-1] + beta_abs[1:], beta_abs[-1:]], axis=0)
lambda_est_max = math_ops.minimum(
finfo.max, math_ops.reduce_max(alpha + off_diag_abs_row_sum))
lambda_est_min = math_ops.maximum(
finfo.min, math_ops.reduce_min(alpha - off_diag_abs_row_sum))
# Upper bound on 2-norm of T.
t_norm = math_ops.maximum(
math_ops.abs(lambda_est_min), math_ops.abs(lambda_est_max))
# Compute the smallest allowed pivot in the Sturm sequence to avoid
# overflow.
one = np.ones([], dtype=alpha.dtype.as_numpy_dtype)
safemin = np.maximum(one / finfo.max, (one + finfo.eps) * finfo.tiny)
pivmin = safemin * math_ops.maximum(one, math_ops.reduce_max(beta_sq))
alpha0_perturbation = math_ops.square(finfo.eps * beta_abs[0])
abs_tol = finfo.eps * t_norm
if tol:
abs_tol = math_ops.maximum(tol, abs_tol)
# In the worst case, when the absolute tolerance is eps*lambda_est_max
# and lambda_est_max = -lambda_est_min, we have to take as many
# bisection steps as there are bits in the mantissa plus 1.
max_it = finfo.nmant + 1
# Determine the indices of the desired eigenvalues, based on select
# and select_range.
asserts = None
if select == 'a':
target_counts = math_ops.range(n)
elif select == 'i':
asserts = check_ops.assert_less_equal(
select_range[0],
select_range[1],
message='Got empty index range in select_range.')
target_counts = math_ops.range(select_range[0], select_range[1] + 1)
elif select == 'v':
asserts = check_ops.assert_less(
select_range[0],
select_range[1],
message='Got empty interval in select_range.')
else:
raise ValueError("'select must have a value in {'a', 'i', 'v'}.")
if asserts:
with ops.control_dependencies([asserts]):
alpha = array_ops.identity(alpha)
# Run binary search for all desired eigenvalues in parallel, starting
# from an interval slightly wider than the estimated
# [lambda_est_min, lambda_est_max].
fudge = 2.1 # We widen starting interval the Gershgorin interval a bit.
norm_slack = math_ops.cast(n, alpha.dtype) * fudge * finfo.eps * t_norm
if select in {'a', 'i'}:
lower = lambda_est_min - norm_slack - 2 * fudge * pivmin
upper = lambda_est_max + norm_slack + fudge * pivmin
else:
# Count the number of eigenvalues in the given range.
lower = select_range[0] - norm_slack - 2 * fudge * pivmin
upper = select_range[1] + norm_slack + fudge * pivmin
first = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, lower)
last = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, upper)
target_counts = math_ops.range(first, last)
# Pre-broadcast the scalars used in the Sturm sequence for improved
# performance.
upper = math_ops.minimum(upper, finfo.max)
lower = math_ops.maximum(lower, finfo.min)
target_shape = array_ops.shape(target_counts)
lower = array_ops.broadcast_to(lower, shape=target_shape)
upper = array_ops.broadcast_to(upper, shape=target_shape)
pivmin = array_ops.broadcast_to(pivmin, target_shape)
alpha0_perturbation = array_ops.broadcast_to(alpha0_perturbation,
target_shape)
# We compute the midpoint as 0.5*lower + 0.5*upper to avoid overflow in
# (lower + upper) or (upper - lower) when the matrix has eigenvalues
# with magnitude greater than finfo.max / 2.
def midpoint(lower, upper):
return (0.5 * lower) + (0.5 * upper)
def continue_binary_search(i, lower, upper):
return math_ops.logical_and(
math_ops.less(i, max_it),
math_ops.less(abs_tol, math_ops.reduce_max(upper - lower)))
def binary_search_step(i, lower, upper):
mid = midpoint(lower, upper)
counts = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, mid)
lower = array_ops.where(counts <= target_counts, mid, lower)
upper = array_ops.where(counts > target_counts, mid, upper)
return i + 1, lower, upper
# Start parallel binary searches.
_, lower, upper = control_flow_ops.while_loop(continue_binary_search,
binary_search_step,
[0, lower, upper])
return midpoint(lower, upper)
def _compute_eigenvectors(alpha, beta, eigvals):
"""Implements inverse iteration to compute eigenvectors."""
with ops.name_scope('compute_eigenvectors'):
k = array_ops.size(eigvals)
n = array_ops.size(alpha)
alpha = math_ops.cast(alpha, dtype=beta.dtype)
# Eigenvectors corresponding to cluster of close eigenvalues are
# not unique and need to be explicitly orthogonalized. Here we
# identify such clusters. Note: This function assumes that
# eigenvalues are sorted in non-decreasing order.
gap = eigvals[1:] - eigvals[:-1]
eps = np.finfo(eigvals.dtype.as_numpy_dtype).eps
t_norm = math_ops.maximum(
math_ops.abs(eigvals[0]), math_ops.abs(eigvals[-1]))
gaptol = np.sqrt(eps) * t_norm
# Find the beginning and end of runs of eigenvectors corresponding
# to eigenvalues closer than "gaptol", which will need to be
# orthogonalized against each other.
close = math_ops.less(gap, gaptol)
left_neighbor_close = array_ops.concat([[False], close], axis=0)
right_neighbor_close = array_ops.concat([close, [False]], axis=0)
ortho_interval_start = math_ops.logical_and(
math_ops.logical_not(left_neighbor_close), right_neighbor_close)
ortho_interval_start = array_ops.squeeze(
array_ops.where_v2(ortho_interval_start), axis=-1)
ortho_interval_end = math_ops.logical_and(
left_neighbor_close, math_ops.logical_not(right_neighbor_close))
ortho_interval_end = array_ops.squeeze(
array_ops.where_v2(ortho_interval_end), axis=-1) + 1
num_clusters = array_ops.size(ortho_interval_end)
# We perform inverse iteration for all eigenvectors in parallel,
# starting from a random set of vectors, until all have converged.
v0 = math_ops.cast(
stateless_random_ops.stateless_random_normal(
shape=(k, n), seed=[7, 42]),
dtype=beta.dtype)
nrm_v = norm(v0, axis=1)
v0 = v0 / nrm_v[:, array_ops.newaxis]
zero_nrm = constant_op.constant(0, shape=nrm_v.shape, dtype=nrm_v.dtype)
# Replicate alpha-eigvals(ik) and beta across the k eigenvectors so we
# can solve the k systems
# [T - eigvals(i)*eye(n)] x_i = r_i
# simultaneously using the batching mechanism.
eigvals_cast = math_ops.cast(eigvals, dtype=beta.dtype)
alpha_shifted = (
alpha[array_ops.newaxis, :] - eigvals_cast[:, array_ops.newaxis])
beta = array_ops.tile(beta[array_ops.newaxis, :], [k, 1])
diags = [beta, alpha_shifted, math_ops.conj(beta)]
def orthogonalize_close_eigenvectors(eigenvectors):
# Eigenvectors corresponding to a cluster of close eigenvalues are not
# uniquely defined, but the subspace they span is. To avoid numerical
# instability, we explicitly mutually orthogonalize such eigenvectors
# after each step of inverse iteration. It is customary to use
# modified Gram-Schmidt for this, but this is not very efficient
# on some platforms, so here we defer to the QR decomposition in
# TensorFlow.
def orthogonalize_cluster(cluster_idx, eigenvectors):
start = ortho_interval_start[cluster_idx]
end = ortho_interval_end[cluster_idx]
update_indices = array_ops.expand_dims(
math_ops.range(start, end), -1)
vectors_in_cluster = eigenvectors[start:end, :]
# We use the builtin QR factorization to orthonormalize the
# vectors in the cluster.
q, _ = qr(transpose(vectors_in_cluster))
vectors_to_update = transpose(q)
eigenvectors = array_ops.tensor_scatter_nd_update(
eigenvectors, update_indices, vectors_to_update)
return cluster_idx + 1, eigenvectors
_, eigenvectors = control_flow_ops.while_loop(
lambda i, ev: math_ops.less(i, num_clusters),
orthogonalize_cluster, [0, eigenvectors])
return eigenvectors
def continue_iteration(i, _, nrm_v, nrm_v_old):
max_it = 5 # Taken from LAPACK xSTEIN.
min_norm_growth = 0.1
norm_growth_factor = constant_op.constant(
1 + min_norm_growth, dtype=nrm_v.dtype)
# We stop the inverse iteration when we reach the maximum number of
# iterations or the norm growths is less than 10%.
return math_ops.logical_and(
math_ops.less(i, max_it),
math_ops.reduce_any(
math_ops.greater_equal(
math_ops.real(nrm_v),
math_ops.real(norm_growth_factor * nrm_v_old))))
def inverse_iteration_step(i, v, nrm_v, nrm_v_old):
v = tridiagonal_solve(
diags,
v,
diagonals_format='sequence',
partial_pivoting=True,
perturb_singular=True)
nrm_v_old = nrm_v
nrm_v = norm(v, axis=1)
v = v / nrm_v[:, array_ops.newaxis]
v = orthogonalize_close_eigenvectors(v)
return i + 1, v, nrm_v, nrm_v_old
_, v, nrm_v, _ = control_flow_ops.while_loop(continue_iteration,
inverse_iteration_step,
[0, v0, nrm_v, zero_nrm])
return transpose(v)
alpha = ops.convert_to_tensor(alpha, name='alpha')
n = alpha.shape[0]
if n <= 1:
return math_ops.real(alpha)
beta = ops.convert_to_tensor(beta, name='beta')
if alpha.dtype != beta.dtype:
raise ValueError("'alpha' and 'beta' must have the same type.")
eigvals = _compute_eigenvalues(alpha, beta)
if eigvals_only:
return eigvals
eigvectors = _compute_eigenvectors(alpha, beta, eigvals)
return eigvals, eigvectors
| tensorflow/tensorflow | tensorflow/python/ops/linalg/linalg_impl.py | Python | apache-2.0 | 65,567 | [
"Gaussian"
] | 54cdf7370d29a6f69d549e88ab9768773a89f49eb72dc27432ff3197fae81798 |
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2006,2009,2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: bcotton@nrao.edu.
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Python utility package for convolving images
from __future__ import absolute_import
from __future__ import print_function
import Obit, Image, FArray, FArrayUtil, OErr
def PConv(inImage, convFn, doDivide, rescale, outImage, err):
    """
    (de)Convolve an Image with an FArray and write outImage

    Convolves (doDivide=False) or linearly deconvolves (doDivide=True) all
    selected planes in inImage with convFn; all operations use FFTs.

    * inImage  = Obit Image to be convolved
    * convFn   = Obit/FArray Convolving Function
    * doDivide = If true divide FT of convFn into FT of inImage, else multiply.
    * rescale  = Multiplication factor to scale output to correct units
    * outImage = Output ObitImage, must be a clone of inImage.
                 Actual convolution size must be set externally.
    * err      = Python Obit Error/message stack
    """
    ################################################################
    # Validate argument types before handing off to the C wrapper.
    type_checks = ((inImage, Image.PIsA, "inImage MUST be a Python Obit Image"),
                   (convFn, FArray.PIsA, "convFn MUST be a Python Obit FArray"),
                   (outImage, Image.PIsA, "outImage MUST be a Python Obit Image"))
    for value, is_ok, message in type_checks:
        if not is_ok(value):
            print("Actually ", value.__class__)
            raise TypeError(message)
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    Obit.ConvUtilConv(inImage.me, convFn.me, doDivide, rescale, outImage.me, err.me)
    # end PConv
def PConvGauss(inImage, maj, min, pa, rescale, outImage, err):
    """
    Convolve an Image with a Gaussian and write outImage

    Convolves all selected planes in inImage with an elliptical Gaussian;
    operations are performed using FFTs.

    * inImage  = Obit Image to be convolved
    * maj      = Major axis of Gaussian in image plane (arcsec)
    * min      = Minor axis of Gaussian in image plane (arcsec)
                 (parameter name shadows the builtin; kept for API compatibility)
    * pa       = Position angle of Gaussian in image plane, from N thru E, (deg)
    * rescale  = Multiplication factor to scale output to correct units
    * outImage = Output ObitImage, must be a clone of inImage.
                 Actual convolution size must be set externally.
    * err      = Python Obit Error/message stack
    """
    ################################################################
    # Both image arguments get the same type check, input first.
    for value, label in ((inImage, "inImage"), (outImage, "outImage")):
        if not Image.PIsA(value):
            print("Actually ", value.__class__)
            raise TypeError(label + " MUST be a Python Obit Image")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    Obit.ConvUtilConvGauss(inImage.me, maj, min, pa, rescale, outImage.me, err.me)
    # end PConvGauss
def PGaus(inImage, Beam):
    """
    Create an ObitFArray containing a unit area Gaussian in the center

    Returns a Python FArray object with the normalized Gaussian.

    * inImage = Obit Image with geometry
    * Beam    = [maj, min, PA] defining elliptical Gaussian,
                size in image cell units or pixels if none given
    """
    ################################################################
    # Argument sanity checks.
    if not Image.PIsA(inImage):
        print("Actually ", inImage.__class__)
        raise TypeError("inImage MUST be a Python Obit Image")
    if len(Beam) < 3:
        raise TypeError("Beam MUST have 3 elements")
    #
    # Wrap the C-level FArray in a fresh Python shell object.
    gaussian = FArray.FArray("None")
    gaussian.me = Obit.ConvUtilGaus(inImage.me, Beam[0], Beam[1], Beam[2])
    return gaussian
    # end PGaus
def Deconv(fBeam, cBeam):
    """
    Deconvolves a Gaussian "beam" from a Gaussian component.

    Returns list of deconvolved [Maj, Min, PA]; Maj,Min=0 -> unable to fit.
    Can also be used to determine the Gaussian parameters which, when
    convolved with (cMaj, cMin, cPA), give (fMaj, fMin, fPA).

    * fBeam = Convolved [major axis, minor axis, position angle of major axis]
    * cBeam = Beam [major axis, minor axis, position angle of major axis]
    """
    ################################################################
    fMaj, fMin, fPA = fBeam[0], fBeam[1], fBeam[2]
    cMaj, cMin, cPA = cBeam[0], cBeam[1], cBeam[2]
    return Obit.ConvUtilDeconv(fMaj, fMin, fPA, cMaj, cMin, cPA)
    # end Deconv
| kernsuite-debian/obit | python/ConvUtil.py | Python | gpl-2.0 | 5,627 | [
"Gaussian"
] | 6cd659f51091c922cfa464642f4dcdd85df50dc868e55848ad29cf1b8d694a13 |
"""
Migration script to add the ctx_rev column to the tool_shed_repository table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import datetime
now = datetime.datetime.utcnow
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def upgrade(migrate_engine):
    # Bind the module-level MetaData to the live engine and reflect the
    # current schema so tables can be autoloaded by name.
    metadata.bind = migrate_engine
    print __doc__
    metadata.reflect()
    ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
    # ctx_rev presumably holds a mercurial changeset revision (short string)
    # -- TODO confirm against the tool shed model.
    col = Column( "ctx_rev", TrimmedString( 10 ) )
    try:
        col.create( ToolShedRepository_table )
        assert col is ToolShedRepository_table.c.ctx_rev
    except Exception, e:
        # Failures are logged and swallowed so the migration chain continues.
        print "Adding ctx_rev column to the tool_shed_repository table failed: %s" % str( e )
def downgrade(migrate_engine):
    # Reverse of upgrade(): drop the ctx_rev column again.
    metadata.bind = migrate_engine
    metadata.reflect()
    ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
    try:
        ToolShedRepository_table.c.ctx_rev.drop()
    except Exception, e:
        # Failures are logged, not raised.
        print "Dropping column ctx_rev from the tool_shed_repository table failed: %s" % str( e )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0097_add_ctx_rev_column.py | Python | gpl-3.0 | 1,498 | [
"Galaxy"
] | f0d23cdd478b5b0421696a32051785d4dafe4571f7fee4e2daf3b642acfba10a |
"""
Voxelize Points
~~~~~~~~~~~~~~~
This example will demonstrate how to connect a set of points defined on a
regular grid to create a `vtkUnstructuredGrid` which can be used to perform
volumetric operations.
This example demos :class:`PVGeo.filters.VoxelizePoints`
"""
# sphinx_gallery_thumbnail_number = 2
import pyvista
from pyvista import examples
import numpy as np
import pandas as pd
import PVGeo
from PVGeo.filters import VoxelizePoints
###############################################################################
# Download sample data files and keep track of names:
url = 'https://github.com/OpenGeoVis/PVGeo/raw/master/tests/data/fault_points.csv'
fault_file, _ = examples.downloads._retrieve_file(url, 'fault_points.csv')
###############################################################################
# Let's go ahead and load a simple file that has XYZ coordinates and a boolean
# array for fault presence. This point cloud makes some sort of regular grid,
# but we have forgotten the deatials of the cell spacings and local coordinate
# rotations.
#
# We will read in this data with ``pandas`` and send it to the
# :func:`PVGeo.points_to_poly_data` helper to create a :class:`pyvista.PolyData`
# object (essentially a point cloud).
points = pd.read_csv(fault_file)
print(points[0:2])
###############################################################################
vtkpoints = PVGeo.points_to_poly_data(points)
print(vtkpoints)
###############################################################################
# Note that we have a :class:`pyvista.PolyData` object now which allows us to do
# all types of immediate plotting of our data. First, lets threshold our points
# as the point cloud has a bunch of zeros and ones throughout the dataspace to
# describe the presence of a fault.
#
# To threshold the points, we call the threshold filter directly on our data
# object and pass the thresholding value. We can then plot the result by
# calling the plot function. (Note: change the notebook parameter to
# ``False`` for an interactive window)
vtkpoints.plot(clim=[0, 1], point_size=1)
###############################################################################
# Points to Voxelized Volume
# ++++++++++++++++++++++++++
#
# The above figure is pretty cool! But its a point cloud which means out
# filtering options are pretty limited. Fortunately, we know that the point
# cloud represents some sort of regularized gridded volume of data and PVGeo
# has a filter to recover that volume. This will allow further volumetric
# operations can be performed with other PVGeo or VTK filters.
#
# Remember that these points are rotated and we do not know the cell sizes...
# this is okay! The VoxelizePoints filter from PVGeo will handle the recovery of
# the coordinate rotation and grid our data without running an interpolation
# scheme. The VoxelizePoints filter assumes that the points are structured on some
# rotated XY-plane with regular cell spacings and does the rest on its own!
# Check out VoxelizePoints code docs for more details.
# The full pipeline method
print('Voxelizing... ', end='')
voxelizer = PVGeo.filters.VoxelizePoints()
grid = voxelizer.apply(vtkpoints)
print('done.')
# Output the results
print('Recovered Angle (deg.): %.3f' % voxelizer.get_angle())
print('Recovered Cell Sizes: (%.2f, %.2f, %.2f)' % voxelizer.get_spacing())
print(grid)
###############################################################################
# And now we can plot the voxelized volume
grid.plot()
###############################################################################
# Filter Volumetric Data
# ++++++++++++++++++++++
#
# Now lets use one of `PyVista`'s filters to create slices of the thresholded
# dataset. Specifically, we are using the ``slice_orthogonal`` filter that will
# create 3 orthogonal slices through a data volume.
slices = grid.slice_orthogonal()
print(slices)
###############################################################################
# And let's use a ``clip`` filter:
clip = grid.clip(normal='x').clip(normal='-y').threshold(0.5)
###############################################################################
# Now display the slices and clipped volume
p = pyvista.Plotter()
p.add_mesh(slices)
p.add_mesh(clip)
p.show_grid()
p.show()
| banesullivan/ParaViewGeophysics | examples/filters-general/voxelize-points.py | Python | bsd-3-clause | 4,302 | [
"VTK"
] | 41adc54f92daaabeb7704e19a5c19cef14159c38ba0778e0b10ef4aa63841195 |
#!/usr/bin/env python
'''
Looks at a hydrogen metallic lattice and looks at using the level shift in
k-point ccsd. While for most systems the level shift will not affect results,
this is one instance where the system will converge on a different ccsd solution
depending on the initial guess and whether one is using a level shift.
'''
import numpy as np
from pyscf.lib import finger
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
import pyscf.cc
import pyscf.pbc.cc as pbcc
from pyscf.pbc.lib import kpts_helper
import pyscf.pbc.cc.kccsd_t_rhf as kccsd_t_rhf
# Four-hydrogen cell in a tetragonal lattice (Bohr units); atom positions
# below are fractional and converted to Cartesian further down.
cell = pbcgto.Cell()
cell.atom = [['H', (0.000000000, 0.000000000, 0.000000000)],
             ['H', (0.000000000, 0.500000000, 0.250000000)],
             ['H', (0.500000000, 0.500000000, 0.500000000)],
             ['H', (0.500000000, 0.000000000, 0.750000000)]]
cell.unit = 'Bohr'
cell.a = [[1.,0.,0.],[0.,1.,0],[0,0,2.2]]
cell.verbose = 3
cell.spin = 0
cell.charge = 0
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
# Convert fractional coordinates to Cartesian (rows of cell.a are the
# lattice vectors).
for i in range(len(cell.atom)):
    cell.atom[i][1] = tuple(np.dot(np.array(cell.atom[i][1]),np.array(cell.a)))
cell.build()
# 2x1x1 Monkhorst-Pack k-point mesh centered at Gamma.
nmp = [2, 1, 1]
kmf = pbcscf.KRHF(cell)
kmf.kpts = cell.make_kpts(nmp, scaled_center=[0.0,0.0,0.0])
e = kmf.kernel() # 2.30510338236481
mycc = pbcc.KCCSD(kmf)
eris = mycc.ao2mo(kmf.mo_coeff)
# Seed the orbital energies from the diagonal of the Fock matrix (no shift).
eris.mo_energy = [eris.fock[k].diagonal() for k in range(mycc.nkpts)]
print('\nCCSD energy w/o level shift and MP2 initial guess:') # 0.02417522810234485
ekccsd, t1, t2 = mycc.kernel(eris=eris)
# Use a level shift equal to the Madelung constant for this system. Using
# the previous t1/t2 as an initial guess, we see that these amplitudes
# still solve the CCSD amplitude equations.
def _adjust_occ(mo_energy, nocc, shift):
'''Modify occupied orbital energy'''
mo_energy = mo_energy.copy()
mo_energy[:nocc] += shift
return mo_energy
# Madelung constant for this cell/k-mesh; used as the occupied-orbital
# level shift below.
madelung = 1.36540204381
eris.mo_energy = [_adjust_occ(mo_e, mycc.nocc, madelung) for mo_e in eris.mo_energy]
print('\nCCSD energy w/o level shift and previous t1/t2 as initial guess:') # 0.02417522810234485
ekccsd, _, _ = mycc.kernel(t1=t1, t2=t2, eris=eris)
# Use level shift with an MP2 guess. Here the results will differ from
# those before.
print('\nCCSD energy w/ level shift and MP2 initial guess:') # -0.11122802032348603
ekccsd, t1, t2 = mycc.kernel(eris=eris)
# Check to see it satisfies the CCSD amplitude equations.
print('\nCCSD energy w/ level shift and previous t1/t2 as initial guess:') # -0.11122802032348603
ekccsd, _, _ = mycc.kernel(t1=t1, t2=t2, eris=eris)
| gkc1000/pyscf | examples/pbc/36-ccsd_level_shift.py | Python | apache-2.0 | 2,593 | [
"PySCF"
] | eea00ae44ba820f0fb80b02e64f4b9ffbb4e8e55a8fd88ac9347f4de4765617a |
import argparse
import datetime
import time
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.datasets import fashion_mnist
# Command-line configuration for the split-learning privacy experiment.
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=200)
parser.add_argument('--gpu_option', action='store_true')
parser.add_argument('--gpu_id', type=int, default=3)
parser.add_argument("--num_epochs", type=int, default=100)
parser.add_argument("--reconstruction_loss_weight", type=float, default=1.0)
parser.add_argument("--distance_correlation_weight", type=float, default=0.0)
parser.add_argument("--log_distance_correlation", action='store_true')
parser.add_argument("--reconstruction_stablizer_noise_weight", type=float,
                    default=0.0)
# Gradient scaling factor applied in gradient_reversal_layer's backward pass.
parser.add_argument("--grl", type=float, default=0.0)
parser.add_argument("--no_reshuffle", action='store_true')
parser.add_argument("--dis_kernel_regularizer", type=float, default=0.001)
parser.add_argument("--rec_kernel_regularizer", type=float, default=0.001)
# using sigmoid at the reconstructor
parser.add_argument("--sigmoid", action='store_true')
parser.add_argument("--rec_lr", type=float, default=1e-3)
parser.add_argument("--rec_beta1", type=float, default=0.9)
parser.add_argument("--dis_lr", type=float, default=1e-3)
parser.add_argument("--dis_beta1", type=float, default=0.9)
# only for the gradient reversal layer
parser.add_argument("--clip_norm", type=float, default=0.0)
# clip_*_adam variants apply to the optimizer instead of the GRL.
parser.add_argument("--clip_norm_adam", type=float, default=0.0)
# for all network parameters
parser.add_argument("--clip_global_norm_adam", type=float, default=0.0)
parser.add_argument("--clip_global_value_adam", type=float, default=0.0)
parser.add_argument("--clip_value", type=float, default=0.0)
parser.add_argument("--clip_value_adam", type=float, default=0.0)
# standard deviation of Gaussian noise added to the split layer
parser.add_argument("--noise_stddev", type=float, default=0.0)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
# Pin TensorFlow to a single GPU (with growable memory) when requested;
# otherwise run on CPU and work around the duplicate-OpenMP-runtime issue.
if args.gpu_option:
    gpus = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpus))
    if gpus:
        # Restrict TensorFlow to only use the first GPU
        try:
            tf.config.experimental.set_visible_devices(gpus[args.gpu_id], 'GPU')
            tf.config.experimental.set_memory_growth(gpus[args.gpu_id], True)
            print("Using GPU: {}".format(args.gpu_id))
        except RuntimeError as e:
            # Visible devices must be set at program startup
            print(e)
else:
    import os
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def make_discriminator_model_passive():
    """Passive party's model: a single 784->784 linear layer, no bias."""
    regularizer = tf.keras.regularizers.l2(args.dis_kernel_regularizer)
    dense = layers.Dense(784, use_bias=False, input_shape=(784,),
                         kernel_regularizer=regularizer)
    return tf.keras.Sequential([dense])
def generate_gaussian_noise(input_emb, stddev=1):
    """Return zero-mean Gaussian noise with the same shape as ``input_emb``."""
    return tf.random.normal(shape=tf.shape(input_emb), mean=0,
                            stddev=stddev, dtype=tf.float32)
def make_reconstruction_model():
    """Reconstructor/attacker model: one 784->784 linear layer, no bias.

    When ``--sigmoid`` is given, the output is squashed through a sigmoid;
    otherwise the layer is purely linear.
    """
    input_size, output_size = 784, 784
    # The original if/else duplicated the whole layer definition and differed
    # only in the activation; Dense(activation=None) is exactly a Dense with
    # no activation argument, so a single construction suffices.
    activation = tf.nn.sigmoid if args.sigmoid else None
    model = tf.keras.Sequential()
    model.add(layers.Dense(output_size, use_bias=False,
                           input_shape=(input_size,),
                           kernel_regularizer=tf.keras.regularizers.l2(
                               args.rec_kernel_regularizer),
                           activation=activation))
    return model
def make_discriminator_model_active():
    """Active party's classifier: 784 -> 256 (LeakyReLU + dropout) -> 10 logits."""
    l2 = tf.keras.regularizers.l2
    return tf.keras.Sequential([
        layers.Dense(256, use_bias=True, input_shape=(784,),
                     kernel_regularizer=l2(args.dis_kernel_regularizer)),
        layers.LeakyReLU(),
        layers.Dropout(0.2),
        layers.Dense(10, kernel_regularizer=l2(args.dis_kernel_regularizer)),
    ])
# cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(
# from_logits = True)
def cross_entropy_loss(prediction_logits, labels):
    """Summed sparse softmax cross entropy over the batch (logit inputs)."""
    logits = tf.cast(tf.reshape(prediction_logits, shape=[-1, 10]),
                     dtype=tf.float32)
    targets = tf.cast(tf.reshape(labels, shape=[-1, 1]), dtype=tf.float32)
    per_example = tf.keras.losses.sparse_categorical_crossentropy(
        targets, logits, from_logits=True)
    return tf.reduce_sum(per_example)
def discriminator_loss(prediction_logits, labels, reconstructed_emb,
                       reconstructed_emb_for_noise,
                       reconstructed_emb_independent_attacker,
                       protected_emb, raw_emb,
                       reconstruction_loss_weight,
                       distance_correlation_weight,
                       reconstruction_stablizer_noise_weight=0.0,
                       log_distance_correlation=False):
    """Compose the joint training objective and return its components.

    Returns a 6-tuple: total loss, classification cross-entropy,
    reconstruction loss, (distance covariance, distance correlation)
    between raw_emb and protected_emb, noise-stabilizer loss, and the
    independent attacker's reconstruction loss.

    NOTE(review): ``reconstruction_stablizer_noise_weight`` is accepted but
    never used in this function -- ``rec_noise_loss`` is returned unweighted
    and train_step applies the weight itself. Confirm before changing.
    """
    dist_cov, dist_cor = tf_distance_cov_cor(raw_emb, protected_emb,
                                             debug=False)
    dis_loss = cross_entropy_loss(prediction_logits, labels)
    rec_loss = reconstruction_loss(reconstructed_emb, raw_emb)
    rec_noise_loss = reconstruction_stablizer_noise(reconstructed_emb_for_noise)
    total_loss = dis_loss + rec_loss * reconstruction_loss_weight
    # The independent attacker's input went through a stop-gradient layer in
    # train_step, so this term only trains the attacker's own weights.
    independent_attacker_rec_loss = reconstruction_loss(
        reconstructed_emb_independent_attacker,
        raw_emb)
    total_loss += independent_attacker_rec_loss
    # Penalize dependence between the raw input and the protected embedding,
    # either on a log scale or linearly.
    if log_distance_correlation:
        total_loss += distance_correlation_weight * tf.math.log(dist_cor)
    else:
        total_loss += distance_correlation_weight * dist_cor
    return total_loss, dis_loss, rec_loss, (dist_cov, dist_cor), \
        rec_noise_loss, independent_attacker_rec_loss
def reconstruction_stablizer_quad(reconstructed_emb, protected_emb):
    """Sum over the batch of squared per-example squared-L2 errors."""
    diff = tf.reshape(protected_emb,
                      shape=tf.shape(reconstructed_emb)) - reconstructed_emb
    per_example = tf.reduce_sum(tf.math.square(diff), axis=-1)
    return tf.reduce_sum(tf.math.square(per_example))
def reconstruction_stablizer_noise(reconstructed_emb,
                                   noise_type="uniform",
                                   normal_mu=1.0,
                                   normal_std=1.0,
                                   uniform_min=0.0,
                                   uniform_max=1.0):
    """Reconstruction loss against a freshly drawn noise tensor.

    Pushes the reconstruction toward random noise (used as a stabilizer on
    the un-reversed path). ``noise_type`` selects the noise distribution:
    "uniform" in [uniform_min, uniform_max) or "normal" with mean
    ``normal_mu`` and stddev ``normal_std``.

    Raises:
        ValueError: for an unsupported ``noise_type`` (the original code
        produced an UnboundLocalError here).
    """
    if noise_type == "uniform":
        noise = tf.random.uniform(
            shape=tf.shape(reconstructed_emb),
            minval=uniform_min,
            maxval=uniform_max,
            dtype=tf.float32)
    elif noise_type == "normal":
        noise = tf.random.normal(
            shape=tf.shape(reconstructed_emb),
            mean=normal_mu,
            stddev=normal_std,
            dtype=tf.float32)
    else:
        # Fail fast with a clear message instead of referencing an unbound
        # 'noise' variable.
        raise ValueError(
            "noise_type must be 'uniform' or 'normal', got %r" % (noise_type,))
    return reconstruction_loss(reconstructed_emb, noise)
def reconstruction_loss(reconstructed_emb, protected_emb):
    """Summed squared-L2 distance between target and reconstruction."""
    target = tf.reshape(protected_emb, shape=tf.shape(reconstructed_emb))
    squared_err = tf.math.square(target - reconstructed_emb)
    per_example = tf.reshape(tf.reduce_sum(squared_err, axis=-1),
                             shape=[-1, 1])
    return tf.reduce_sum(per_example)
@tf.custom_gradient
def stop_gradient_layer(x):
    """Identity in the forward pass; zero gradient in the backward pass."""
    def _zero_grad(upstream):
        # Multiplying by 0.0 (rather than building a zeros tensor) keeps the
        # upstream dtype/shape untouched.
        return upstream * 0.0
    return x, _zero_grad
@tf.custom_gradient
def gradient_reversal_layer(x):
    """Identity forward; backward scales the (optionally clipped) gradient
    by ``args.grl`` -- a negative value makes this a gradient reversal layer.

    Clipping order: value clip (``args.clip_value``) first, then norm clip
    (``args.clip_norm``); each is skipped when its threshold is <= 0.
    """
    def grad_fn(g):
        clipped = g
        if args.clip_value > 0.0:
            clipped = tf.clip_by_value(clipped,
                                       clip_value_min=-1.0 * args.clip_value,
                                       clip_value_max=args.clip_value)
        if args.clip_norm > 0.0:
            clipped = tf.clip_by_norm(clipped, args.clip_norm)
        return args.grl * clipped
    return x, grad_fn
def pairwise_dist(A, B):
    """
    Computes pairwise Euclidean distances between each element of A and each
    element of B.
    Args:
      A,    [m,d] matrix
      B,    [n,d] matrix
    Returns:
      D,    [m,n] matrix of pairwise distances, D[i,j] = ||A[i] - B[j]||
    """
    # Uses the identity ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2.
    # squared norms of each row in A and B
    na = tf.reduce_sum(tf.square(A), 1)
    nb = tf.reduce_sum(tf.square(B), 1)
    # na as a column and nb as a row vector (the original comment had them
    # swapped), so broadcasting produces the full m x n grid
    na = tf.reshape(na, [-1, 1])
    nb = tf.reshape(nb, [1, -1])
    # Clamp at 0 before the sqrt to absorb tiny negative values from floating
    # point cancellation; the 1e-20 offset presumably keeps the sqrt gradient
    # finite near zero distances -- TODO confirm.
    D = tf.sqrt(tf.maximum(na - 2 * tf.matmul(A, B, False, True) + nb + 1e-20,
                           0.0))
    return D
def tf_distance_cov_cor(input1, input2, debug=False):
    """Biased (V-statistic) sample distance covariance and correlation.

    Rows of input1/input2 are paired samples. Builds each sample's pairwise
    Euclidean distance matrix, double-centers it, and combines them into
    distance covariance / correlation. Returns (dCovXY, dCorXY).
    """
    start = time.time()
    # Fail loudly on non-finite inputs rather than propagating NaNs.
    input1 = tf.debugging.check_numerics(input1, "input1 contains nan/inf")
    input2 = tf.debugging.check_numerics(input2, "input2 contains nan/inf")
    n = tf.cast(tf.shape(input1)[0], tf.float32)
    a = pairwise_dist(input1, input1)
    b = pairwise_dist(input2, input2)
    # Double-center each (symmetric) distance matrix: subtract row and column
    # means, add back the grand mean.
    A = a - tf.reduce_mean(a, axis=1) - tf.expand_dims(
        tf.reduce_mean(a, axis=0), axis=1) + tf.reduce_mean(a)
    B = b - tf.reduce_mean(b, axis=1) - tf.expand_dims(
        tf.reduce_mean(b, axis=0), axis=1) + tf.reduce_mean(b)
    dCovXY = tf.sqrt(tf.reduce_sum(A * B) / (n ** 2))
    dVarXX = tf.sqrt(tf.reduce_sum(A * A) / (n ** 2))
    dVarYY = tf.sqrt(tf.reduce_sum(B * B) / (n ** 2))
    dCorXY = dCovXY / tf.sqrt(dVarXX * dVarYY)
    end = time.time()
    if debug:
        print(("tf distance cov: {} and cor: {}, dVarXX: {}, "
               "dVarYY:{} uses: {}").format(
            dCovXY, dCorXY,
            dVarXX, dVarYY,
            end - start))
    return dCovXY, dCorXY
# @tf.function
def train_step(X, Y, discriminator_passive, reconstruction,
               discriminator_active, independent_attacker,
               reconstruction_optimizer, discriminator_optimizer,
               step,
               idx=0,
               pre_train_discriminator_early_stopping=False,
               pre_train_reconstruction_early_stopping=False,
               update_discriminator=False,
               update_reconstruction=True):
    """One optimization step on a single batch.

    Forward pass: passive model -> optional Gaussian noise -> active
    classifier. The adversarial reconstructor sees the embedding through the
    gradient-reversal layer; the independent attacker through a stop-gradient
    layer. All trainable variables are updated via ``discriminator_optimizer``.

    NOTE(review): ``reconstruction_optimizer``, both ``pre_train_*`` flags
    and both ``update_*`` flags are accepted but never used in this body --
    confirm before relying on them. ``step``/``idx`` only feed the summary
    written when global-norm clipping is enabled.
    """
    # Two tapes: disc_tape covers the joint loss; reco_tape is only used to
    # push the (weighted) noise-stabilizer loss into the passive model.
    with tf.GradientTape() as reco_tape, tf.GradientTape() as disc_tape:
        dis_cov_cors = []
        X = tf.reshape(X, shape=(-1, 784))
        emb = discriminator_passive(X, training=True)
        # Optional defense: additive Gaussian noise at the split layer.
        if args.noise_stddev > 1e-8:
            emb += generate_gaussian_noise(emb, stddev=args.noise_stddev)
        protected_emb = emb
        logits = discriminator_active(protected_emb, training=True)
        protected_emb_GRL = gradient_reversal_layer(protected_emb)
        protected_emb_stop_g = stop_gradient_layer(protected_emb)
        reconstructed_emb = reconstruction(protected_emb_GRL, training=True)
        reconstructed_emb_for_noise = reconstruction(protected_emb,
                                                     training=True)
        reconstructed_emb_independent_attacker = independent_attacker(
            protected_emb_stop_g, training=True)
        disc_loss, cross_entropy_loss_train, reco_loss, \
            (dist_cov_X_Protected_emb, dist_cor_X_Protected_emb), \
            rec_noise_loss, independent_attacker_rec_loss = \
            discriminator_loss(logits, Y,
                               reconstructed_emb,
                               reconstructed_emb_for_noise,
                               reconstructed_emb_independent_attacker,
                               protected_emb, X,
                               reconstruction_loss_weight=
                               args.reconstruction_loss_weight,
                               distance_correlation_weight=
                               args.distance_correlation_weight,
                               log_distance_correlation=
                               args.log_distance_correlation,
                               reconstruction_stablizer_noise_weight=
                               args.reconstruction_stablizer_noise_weight)
        # The noise-stabilizer weight is applied here (not inside
        # discriminator_loss).
        rec_noise_loss_w = args.reconstruction_stablizer_noise_weight \
            * rec_noise_loss
        dis_cov_cors.append((dist_cov_X_Protected_emb,
                             dist_cor_X_Protected_emb))
        dist_cov_X_Rec_emb, dist_cor_X_Rec_emb = tf_distance_cov_cor(
            X, reconstructed_emb)
        dis_cov_cors.append((dist_cov_X_Rec_emb, dist_cor_X_Rec_emb))
    gradients_of_discriminator_passive_rec_noise = reco_tape.gradient(
        rec_noise_loss_w, discriminator_passive.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(
        disc_loss,
        reconstruction.trainable_variables +
        discriminator_active.trainable_variables +
        discriminator_passive.trainable_variables +
        independent_attacker.trainable_variables)
    # Optional global clipping of the joint gradient list: by norm first,
    # then element-wise by value.
    if args.clip_global_norm_adam > 0.0:
        gradients_of_discriminator, global_norm = tf.clip_by_global_norm(
            gradients_of_discriminator,
            clip_norm=args.clip_global_norm_adam)
        with writer.as_default():
            tf.summary.scalar('global_norm', global_norm,
                              step=step * args.batch_size + idx)
    if args.clip_global_value_adam > 0.0:
        gradients_of_discriminator = \
            [None if gradient is None else tf.clip_by_value(
                gradient,
                clip_value_min=-1.0 * args.clip_global_value_adam,
                clip_value_max=args.clip_global_value_adam)
             for gradient in gradients_of_discriminator]
    discriminator_optimizer.apply_gradients(
        zip(
            gradients_of_discriminator,
            reconstruction.trainable_variables +
            discriminator_active.trainable_variables +
            discriminator_passive.trainable_variables +
            independent_attacker.trainable_variables))
    # Apply the noise-stabilizer gradients to the passive model only when the
    # weight is active.
    if args.reconstruction_stablizer_noise_weight > 0.0:
        discriminator_optimizer.apply_gradients(zip(
            gradients_of_discriminator_passive_rec_noise,
            discriminator_passive.trainable_variables))
    return logits, reco_loss, disc_loss, cross_entropy_loss_train, \
        dis_cov_cors, rec_noise_loss, independent_attacker_rec_loss
def train(train_dataset, val_dataset, test_dataset, epochs):
    """Run the joint training loop for ``epochs`` epochs.

    Builds the four sub-models and two Adam optimizers (the reconstruction
    optimizer is constructed but not used by train_step), then per epoch:
    trains over train_dataset, logs epoch means to the global TensorBoard
    ``writer``, evaluates via test(), and logs kernel norms. ``writer`` and
    ``stamp`` are module-level globals defined elsewhere in the file.

    NOTE(review): the early-stopping scaffolding below (max_stagnation,
    best_val_*, pre_train_*, alt_update_*) is initialized but never updated
    in this body.
    """
    reconstruction_optimizer = tf.keras.optimizers.Adam(args.rec_lr,
                                                        beta_1=args.rec_beta1)
    # Optimizer-level clipping: all four combinations of clipnorm/clipvalue.
    if args.clip_norm_adam > 0.0:
        if args.clip_value_adam > 0.0:
            discriminator_optimizer = tf.keras.optimizers.Adam(
                args.dis_lr,
                clipnorm=args.clip_norm_adam, clipvalue=args.clip_value_adam)
        else:
            discriminator_optimizer = tf.keras.optimizers.Adam(
                args.dis_lr, clipnorm=args.clip_norm_adam)
    else:
        if args.clip_value_adam > 0.0:
            discriminator_optimizer = tf.keras.optimizers.Adam(
                args.dis_lr, clipvalue=args.clip_value_adam)
        else:
            discriminator_optimizer = tf.keras.optimizers.Adam(args.dis_lr)
    reconstruction = make_reconstruction_model()
    independent_attacker = make_reconstruction_model()
    discriminator_passive = make_discriminator_model_passive()
    discriminator_active = make_discriminator_model_active()
    train_acc = tf.keras.metrics.Accuracy()
    max_stagnation = 5  # number of epochs without improvement to tolerate
    best_val_ent_loss, best_val_epoch = None, None
    pre_train_dis_early_stopping = False
    best_val_rec_loss, best_val_rec_epoch = None, None
    pre_train_rec_early_stopping = False
    alt_update_dis, alt_update_rec = True, True
    # global _global_step
    for epoch in range(epochs):
        start = time.time()
        # Per-epoch accumulators: *_sum totals divided by n (examples) or
        # num_batchs (batch-level statistics) when logged.
        train_reco_noise_loss_sum, train_reco_loss_sum, train_dis_loss_sum, \
            train_ent_loss_sum, independent_attacker_rec_loss_sum, n = \
            0.0, 0.0, 0.0, 0.0, 0.0, 0
        train_reco_loss_last_batch_sum, train_dis_loss_last_batch_sum, \
            train_ent_loss_last_batch_sum, last_n = 0.0, 0.0, 0.0, 0
        dist_cov_X_Protected_emb_sum, dist_cor_X_Protected_emb_sum = 0.0, 0.0
        dist_cov_X_Rec_emb_sum, dist_cor_X_Rec_emb_sum = 0.0, 0.0
        num_batchs = 0
        train_acc.reset_states()
        for (idx, (X, Y)) in enumerate(train_dataset):
            # _global_step = epoch * args.batch_size + idx
            num_batchs += 1
            logits, reco_loss, dis_loss, cross_entropy_loss_train, \
                dis_cov_cors, \
                rec_noise_loss, independent_attacker_rec_loss = \
                train_step(X, Y,
                           discriminator_passive,
                           reconstruction,
                           discriminator_active,
                           independent_attacker,
                           reconstruction_optimizer,
                           discriminator_optimizer,
                           step=epoch,
                           idx=idx,
                           pre_train_discriminator_early_stopping=
                           pre_train_dis_early_stopping,
                           pre_train_reconstruction_early_stopping=
                           pre_train_rec_early_stopping,
                           update_discriminator=alt_update_dis,
                           update_reconstruction=alt_update_rec)
            train_acc.update_state(tf.reshape(Y, [-1, 1]), tf.argmax(
                tf.reshape(logits, [-1, 10]), axis=-1))
            train_dis_loss_sum += dis_loss
            train_reco_loss_sum += reco_loss
            train_ent_loss_sum += cross_entropy_loss_train
            train_reco_noise_loss_sum += rec_noise_loss
            independent_attacker_rec_loss_sum += independent_attacker_rec_loss
            # dis_cov_cors[0] = (cov, cor) of X vs protected embedding;
            # dis_cov_cors[1] = (cov, cor) of X vs reconstruction.
            dist_cov_X_Protected_emb_sum += dis_cov_cors[0][0]
            dist_cor_X_Protected_emb_sum += dis_cov_cors[0][1]
            dist_cov_X_Rec_emb_sum += dis_cov_cors[1][0]
            dist_cor_X_Rec_emb_sum += dis_cov_cors[1][1]
            n += Y.shape[0]
        with writer.as_default():
            tf.summary.scalar('train/acc', train_acc.result(), step=epoch)
            tf.summary.scalar('train/reconstruction_loss_mean',
                              train_reco_loss_sum / n, step=epoch)
            tf.summary.scalar('train/discriminator_loss_mean',
                              train_dis_loss_sum / n, step=epoch)
            tf.summary.scalar('train/cross_entropy_loss_mean',
                              train_ent_loss_sum / n, step=epoch)
            tf.summary.scalar('train/reconstruction_noise_loss_mean',
                              train_reco_noise_loss_sum / n, step=epoch)
            tf.summary.scalar('train/independent_attacker_rec_loss_mean',
                              independent_attacker_rec_loss_sum / n, step=epoch)
            # tf.summary.scalar('lr/decayed_dis_lr',
            #                   discriminator_optimizer._decayed_lr(tf.float32),
            #                   step=epoch)
            tf.summary.scalar("train//dcor/X_and_Protected_emb_cov",
                              dist_cov_X_Protected_emb_sum / num_batchs,
                              step=epoch)
            tf.summary.scalar("train//dcor/X_and_Protected_emb_cor",
                              dist_cor_X_Protected_emb_sum / num_batchs,
                              step=epoch)
            tf.summary.scalar("train//dcor/X_and_Rec_emb_cov",
                              dist_cov_X_Rec_emb_sum / num_batchs,
                              step=epoch)
            tf.summary.scalar("train//dcor/X_and_Rec_emb_cor",
                              dist_cor_X_Rec_emb_sum / num_batchs,
                              step=epoch)
        print(("epoch: {}, train_acc: {}, train_reconstruction_loss_mean: {},"
               " train_indepedent_attack_rec_loss_mean: {},"
               "train_discriminator_loss_mean: {}, "
               "train_cross_entropy_loss_mean: {}, "
               "reconstruction_noise_loss_mean: {}, "
               "X_and_Protected_emb_cov: {}, "
               "X_and_Protected_emb_cor: {}, "
               "X_and_Rec_emb_cov: {}, X_and_Rec_emb_cor: {}").format(
            epoch, train_acc.result(),
            train_reco_loss_sum / n,
            independent_attacker_rec_loss_sum / n,
            train_dis_loss_sum / n,
            train_ent_loss_sum / n,
            train_reco_noise_loss_sum / n,
            dist_cov_X_Protected_emb_sum / num_batchs,
            dist_cor_X_Protected_emb_sum / num_batchs,
            dist_cov_X_Rec_emb_sum / num_batchs,
            dist_cor_X_Rec_emb_sum / num_batchs))
        # NOTE(review): test_dataset is evaluated with is_validation=True and
        # val_dataset with is_validation=False -- the datasets look swapped
        # relative to the flags; confirm intended.
        val_rec_loss_mean, val_ent_loss_mean, _ = test(
            test_dataset,
            discriminator_passive,
            reconstruction, independent_attacker,
            discriminator_active,
            is_validation=True, epoch=epoch)
        test(val_dataset, discriminator_passive, reconstruction,
             independent_attacker, discriminator_active,
             is_validation=False, epoch=epoch)
        # L2 norms of all kernels, per sub-model and combined, for monitoring
        # the effect of the weight regularizers.
        rec_norm = tf.sqrt(tf.reduce_sum(
            [tf.norm(w)**2 for w in reconstruction.trainable_variables]))
        dis_passive_norm = tf.sqrt(tf.reduce_sum(
            [tf.norm(w)**2 for w in discriminator_passive.trainable_variables]))
        dis_active_norm = tf.sqrt(tf.reduce_sum(
            [tf.norm(w)**2 for w in discriminator_active.trainable_variables]))
        dis_norm = tf.sqrt(dis_active_norm ** 2 + dis_passive_norm ** 2)
        dis_rec_norm = tf.sqrt(rec_norm ** 2 + dis_norm ** 2)
        with writer.as_default():
            tf.summary.scalar('norm_kernel/rec_norm', rec_norm, step=epoch)
            tf.summary.scalar('norm_kernel/dis_passive_norm', dis_passive_norm,
                              step=epoch)
            tf.summary.scalar('norm_kernel/dis_active_norm', dis_active_norm,
                              step=epoch)
            tf.summary.scalar('norm_kernel/dis_norm', dis_norm, step=epoch)
            tf.summary.scalar('norm_kernel/dis_rec_norm', dis_rec_norm,
                              step=epoch)
        print('Time for epoch {} is {} sec, program config: {}'.format(
            epoch + 1, time.time() - start, stamp))
# @tf.function
def test(dataset, discriminator_passive, reconstruction,
         independent_attacker,
         discriminator_active,
         is_validation=True,
         epoch=1):
    """Evaluate the embedding-protection models on one pass over `dataset`.

    Each batch is pushed through the passive discriminator (embedding), the
    active discriminator (classifier), both attacker reconstruction networks
    and `discriminator_loss`; per-example loss sums and per-batch distance
    covariance/correlation statistics are accumulated, written to the global
    TensorBoard `writer` under a "val/" or "test/" prefix, and printed.

    Args:
        dataset: iterable of (X, Y) batches; X is flattened to (-1, 784).
        discriminator_passive: model mapping raw X to an embedding.
        reconstruction: attacker model reconstructing X's embedding.
        independent_attacker: independently-trained attacker model.
        discriminator_active: classifier over the protected embedding.
        is_validation: True -> summaries/prints use the "val" prefix,
            otherwise "test".
        epoch: step value used for all TensorBoard summaries.

    Returns:
        Tuple of per-example means: (reconstruction loss, cross-entropy
        loss, discriminator loss).
    """
    # Running loss sums; `n` counts examples, `num_batchs` counts batches
    # (the dCov/dCor statistics are averaged per batch, not per example).
    test_reco_noise_loss_sum, test_reco_loss_sum, test_dis_loss_sum, \
        test_ent_loss_sum, independent_attacker_rec_loss_sum, n = \
        0.0, 0.0, 0.0, 0.0, 0.0, 0
    test_acc = tf.keras.metrics.Accuracy()
    test_acc.reset_states()
    dist_cov_X_Protected_emb_sum, dist_cor_X_Protected_emb_sum = 0.0, 0.0
    dist_cov_X_Rec_emb_sum, dist_cor_X_Rec_emb_sum = 0.0, 0.0
    num_batchs = 0
    for (X, Y) in dataset:
        X = tf.reshape(X, shape=(-1, 784))
        emb = discriminator_passive(X, training=False)
        # No protection noise is applied at evaluation time, so the
        # "protected" embedding is the raw embedding.
        protected_emb = emb
        logits = discriminator_active(protected_emb, training=False)
        reconstructed_emb = reconstruction(protected_emb, training=False)
        reconstructed_emb_independent_attacker = independent_attacker(
            protected_emb, training=False)
        # NOTE(review): `reconstructed_emb` is passed for both the plain and
        # the noise-stabilized reconstruction slots (no noise at eval time)
        # -- confirm this mirrors the training-time call signature.
        disc_loss, cross_entropy_loss_test, reco_loss, \
            (dist_cov_X_Protected_emb, dist_cor_X_Protected_emb), \
            rec_noise_loss, independent_attacker_rec_loss = \
            discriminator_loss(
                logits, Y,
                reconstructed_emb,
                reconstructed_emb,
                reconstructed_emb_independent_attacker,
                protected_emb, X,
                reconstruction_loss_weight=args.reconstruction_loss_weight,
                distance_correlation_weight=args.distance_correlation_weight,
                log_distance_correlation=args.log_distance_correlation,
                reconstruction_stablizer_noise_weight=
                args.reconstruction_stablizer_noise_weight)
        n += Y.shape[0]
        test_reco_loss_sum += reco_loss
        test_dis_loss_sum += disc_loss
        test_ent_loss_sum += cross_entropy_loss_test
        test_reco_noise_loss_sum += rec_noise_loss
        independent_attacker_rec_loss_sum += independent_attacker_rec_loss
        test_acc.update_state(tf.reshape(Y, [-1, 1]),
                              tf.argmax(tf.reshape(logits, [-1, 10]), axis=-1))
        dist_cov_X_Rec_emb, dist_cor_X_Rec_emb = tf_distance_cov_cor(
            X, reconstructed_emb)
        dist_cov_X_Protected_emb_sum += dist_cov_X_Protected_emb
        dist_cor_X_Protected_emb_sum += dist_cor_X_Protected_emb
        dist_cov_X_Rec_emb_sum += dist_cov_X_Rec_emb
        dist_cor_X_Rec_emb_sum += dist_cor_X_Rec_emb
        num_batchs += 1
    with writer.as_default():
        prefix = "val/" if is_validation else "test/"
        tf.summary.scalar(prefix + 'acc', test_acc.result(), step=epoch)
        tf.summary.scalar(prefix + 'reconstruction_loss_mean',
                          test_reco_loss_sum / n, step=epoch)
        # BUG FIX: previously logged independent_attacker_rec_loss_sum under
        # this tag, so the noise-loss curve showed the wrong quantity.
        tf.summary.scalar(prefix + 'reconstruction_noise_loss_mean',
                          test_reco_noise_loss_sum / n, step=epoch)
        tf.summary.scalar(prefix + 'discriminator_loss_mean',
                          test_dis_loss_sum / n, step=epoch)
        tf.summary.scalar(prefix + 'cross_entropy_loss_mean',
                          test_ent_loss_sum / n, step=epoch)
        # BUG FIX: previously logged test_ent_loss_sum under this tag,
        # duplicating the cross-entropy curve.
        tf.summary.scalar(prefix + 'independent_attacker_rec_loss_mean',
                          independent_attacker_rec_loss_sum / n, step=epoch)
        tf.summary.scalar(prefix + '/dcor/dist_cov_X_Protected_emb',
                          dist_cov_X_Protected_emb_sum / num_batchs, step=epoch)
        tf.summary.scalar(prefix + '/dcor/dist_cor_X_Protected_emb',
                          dist_cor_X_Protected_emb_sum / num_batchs, step=epoch)
        tf.summary.scalar(prefix + '/dcor/dist_cov_X_Rec_emb',
                          dist_cov_X_Rec_emb_sum / num_batchs, step=epoch)
        tf.summary.scalar(prefix + '/dcor/dist_cor_X_Rec_emb',
                          dist_cor_X_Rec_emb_sum / num_batchs, step=epoch)
    if is_validation:
        print(("epoch: {}, val_acc: {}, val_reconstruction_loss_mean: {}, "
               "val_reconstruction_noise_loss_mean: {}, "
               "val_discriminator_loss_mean: {}, "
               "val_cross_entropy_loss_mean: {}").format(
            epoch, test_acc.result(), test_reco_loss_sum / n,
            test_reco_noise_loss_sum / n, test_dis_loss_sum / n,
            test_ent_loss_sum / n))
    else:
        print(("epoch: {}, test_acc: {}, test_reconstruction_loss_mean: {},"
               "test_reconstruction_noise_loss_mean: {},"
               "test_discriminator_loss_mean: {}, "
               "test_cross_entropy_loss_mean: {}").format(
            epoch, test_acc.result(), test_reco_loss_sum / n,
            test_reco_noise_loss_sum / n, test_dis_loss_sum / n,
            test_ent_loss_sum / n))
    return test_reco_loss_sum / n, test_ent_loss_sum / n, test_dis_loss_sum / n
# --- Data preparation -------------------------------------------------------
# Load Fashion-MNIST and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = tf.cast(x_train, tf.float32) / 255
x_test = tf.cast(x_test, tf.float32) / 255
# Add an explicit channel dimension: (N, 28, 28, 1).
x_train = tf.reshape(x_train, shape=[-1, 28, 28, 1])
x_test = tf.reshape(x_test, shape=[-1, 28, 28, 1])
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
input_shape = (28, 28, 1)
total_training_instances = len(x_train)
total_val_instances = len(x_val)
total_test_instances = len(x_test)
print(("total_training_instances: {}, total_test_instances: {}, "
       "num_batchs: {}").format(total_training_instances, total_test_instances,
                                total_training_instances // batch_size))
# Build the tf.data pipelines; by default every split is reshuffled each
# epoch, unless --no_reshuffle was given (then order is fixed).
if not args.no_reshuffle:
    train_iter = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(
        total_training_instances + 1, reshuffle_each_iteration=True).batch(
        batch_size)
    val_iter = tf.data.Dataset.from_tensor_slices((x_val, y_val)).shuffle(
        total_val_instances + 1, reshuffle_each_iteration=True).batch(
        batch_size)
    test_iter = tf.data.Dataset.from_tensor_slices((x_test, y_test)).shuffle(
        total_test_instances + 1, reshuffle_each_iteration=True).batch(
        batch_size)
else:
    train_iter = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(
        batch_size)
    val_iter = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(
        batch_size)
    test_iter = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(
        batch_size)
# Set up logging.
# The run "stamp" encodes the hyper-parameter configuration in the directory
# name, so every run gets a unique, self-describing TensorBoard log dir.
if args.debug:
    stamp = "debug_" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
else:
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
stamp += "_bs_" + str(args.batch_size) + "_es_" + str(args.num_epochs) + \
    "_rec_loss_w_" + str(args.reconstruction_loss_weight) + \
    "_dcor_weight_" + str(args.distance_correlation_weight)
stamp += "_rec_noise_loss_w_" + str(args.reconstruction_stablizer_noise_weight)
if args.log_distance_correlation:
    stamp += "_logdcor"
stamp += str("_dis_lr_") + str(args.dis_lr)
stamp += "_grl_" + str(args.grl)
# Optional clipping/noise settings are appended only when enabled (> 0).
if args.clip_norm > 0.0:
    stamp += "_clip_norm_" + str(args.clip_norm)
if args.clip_norm_adam > 0.0:
    stamp += "_clip_norm_adam_" + str(args.clip_norm_adam)
if args.clip_value > 0.0:
    stamp += "_clip_value_" + str(args.clip_value)
if args.clip_value_adam > 0.0:
    stamp += "_clip_value_adam_" + str(args.clip_value_adam)
if args.clip_global_value_adam > 0.0:
    stamp += "_clip_global_value_adam_" + str(args.clip_global_value_adam)
if args.clip_global_norm_adam > 0.0:
    stamp += "_clip_global_norm_adam_" + str(args.clip_global_norm_adam)
if args.noise_stddev >= 1e-8:
    stamp += "_noise_stddev_" + str(args.noise_stddev)
if args.sigmoid:
    stamp += "_sigmoid"
else:
    stamp += "_linear"
logdir = 'logs/%s' % stamp
writer = tf.summary.create_file_writer(logdir)
# Kick off training; train() also runs validation/test evaluation per epoch.
train(train_iter, val_iter, test_iter, epochs=args.num_epochs)
| bytedance/fedlearner | example/privacy/embedding_protection/FMNIST_Embedding_Protection_Framework_Demo.py | Python | apache-2.0 | 31,162 | [
"Gaussian"
] | 2d84187b6bf457a3c784bb57650afd670fecc13ec794ba24a9edee59b09b617b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""setuptools packaging configuration for the screen19 distribution."""
from setuptools import find_packages, setup

# The long description shown on PyPI is the README followed by the changelog.
with open("README.rst") as readme_file:
    readme = readme_file.read()
changelog_header = """
*********
Changelog
*********
"""
with open("CHANGELOG.rst") as changelog_file:
    changelog = changelog_file.read()
setup(
    author="Diamond Light Source",
    author_email="scientificsoftware@diamond.ac.uk",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
    ],
    description="Screening program for small-molecule single-crystal X-ray diffraction "
    "data",
    # Both the legacy "i19.*" and the newer "screen19*" command names are
    # installed, pointing at the same entry points.
    entry_points={
        "console_scripts": [
            "i19.screen = screen19.screen:main",
            "screen19 = screen19.screen:main",
            "i19.stability_fft = screen19.stability_fft:main",
            "i19.minimum_exposure = screen19.minimum_exposure:main",
            "screen19.minimum_exposure = screen19.minimum_exposure:main",
        ],
        # Dispatchers/hooks for the cctbx/libtbx build system.
        "libtbx.dispatcher.script": [
            "i19.screen = i19.screen",
            "screen19 = screen19",
            "i19.stability_fft = i19.stability_fft",
            "i19.minimum_exposure = i19.minimum_exposure",
            "screen19.minimum_exposure = screen19.minimum_exposure",
        ],
        "libtbx.precommit": ["screen19 = screen19"],
    },
    # "typing" backport is only needed on Python < 3.5.
    install_requires=['typing;python_version<"3.5"', "procrunner"],
    license="BSD license",
    long_description="\n\n".join([readme, changelog_header, changelog]),
    include_package_data=True,
    name="screen19",
    packages=find_packages(),
    test_suite="tests",
    tests_require=["mock>=2.0", "pytest>=4.5"],
    url="https://github.com/xia2/screen19",
    version="0.212",
    zip_safe=False,
)
| xia2/i19 | setup.py | Python | bsd-3-clause | 1,986 | [
"CRYSTAL"
] | 8df18dc80f9654c428ee99b6ea795a27d4dc874a017d70d69243a493c85f21be |
# -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python -m scripts.create_fakes --user fred@cos.io
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
"""
from __future__ import print_function
import sys
import argparse
import logging
from modularodm.query.querydialect import DefaultQueryDialect as Q
from faker import Factory
from framework.auth import Auth
from website.app import init_app
from website import models, security
from framework.auth import utils
from tests.factories import UserFactory, ProjectFactory, NodeFactory
from faker.providers import BaseProvider
class Sciencer(BaseProvider):
    """Faker provider generating pseudo-scientific lorem-ipsum text.

    Registered with ``fake.add_provider(Sciencer)`` below; Faker instantiates
    the provider, so the ``cls`` parameter on these methods is actually the
    provider *instance* (it would conventionally be named ``self``).
    """
    # Science term Faker Provider created by @csheldonhess
    # https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
    # Vocabulary that all generated words/sentences/paragraphs draw from.
    word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
                 'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
                 'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
                 'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
                 'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
                 'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
                 'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
                 'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
                 'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
                 'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
                 'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
                 'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
                 'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
                 'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
                 'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
                 'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
                 'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
                 'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
                 'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
                 'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
                 'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
                 'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
                 'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
                 'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
                 'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
                 'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
                 'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
                 'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
                 'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
                 'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
                 'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
                 'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
                 'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
                 'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
                 'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
                 'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
                 'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
                 'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
                 'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
                 'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
                 'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
                 'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
                 'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
                 'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
                 'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
                 'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
                 'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
                 'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
                 'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
                 'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
                 'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
                 'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
                 'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
                 'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
                 'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
                 'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
                 'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
                 'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
                 'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
                 'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
                 'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
                 'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
                 'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
                 'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
                 'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
                 'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
                 'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
                 'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
                 'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
                 'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
                 'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
                 'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
                 'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
                 'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
                 'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
                 'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
                 'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
                 'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
                 'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
                 'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
                 'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
                 'Melopsittacus undulatus', )

    def science_word(cls):
        """
        Return one random word from word_list.
        :example 'Lorem'
        """
        return cls.random_element(cls.word_list)

    def science_words(cls, nb=3):
        """
        Generate an array of random words
        :example array('Lorem', 'ipsum', 'dolor')
        :param nb how many words to return
        """
        return [cls.science_word() for _ in range(0, nb)]

    def science_sentence(cls, nb_words=6, variable_nb_words=True):
        """
        Generate a random sentence
        :example 'Lorem ipsum dolor sit amet.'
        :param nb_words around how many words the sentence should contain
        :param variable_nb_words set to false if you want exactly $nbWords returned,
            otherwise $nbWords may vary by +/-40% with a minimum of 1
        """
        if nb_words <= 0:
            return ''
        if variable_nb_words:
            # Jitter the word count (Faker's BaseProvider helper).
            nb_words = cls.randomize_nb_elements(nb_words)
        words = cls.science_words(nb_words)
        # Capitalize the first word and terminate with a period.
        words[0] = words[0].title()
        return " ".join(words) + '.'

    def science_sentences(cls, nb=3):
        """
        Generate an array of sentences
        :example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
        :param nb how many sentences to return
        :return list
        """
        return [cls.science_sentence() for _ in range(0, nb)]

    def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
        """
        Generate a single paragraph
        :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
        :param nb_sentences around how many sentences the paragraph should contain
        :param variable_nb_sentences set to false if you want exactly $nbSentences returned,
            otherwise $nbSentences may vary by +/-40% with a minimum of 1
        :return string
        """
        if nb_sentences <= 0:
            return ''
        if variable_nb_sentences:
            nb_sentences = cls.randomize_nb_elements(nb_sentences)
        return " ".join(cls.science_sentences(nb_sentences))

    def science_paragraphs(cls, nb=3):
        """
        Generate an array of paragraphs
        :example array($paragraph1, $paragraph2, $paragraph3)
        :param nb how many paragraphs to return
        :return array
        """
        return [cls.science_paragraph() for _ in range(0, nb)]

    def science_text(cls, max_nb_chars=200):
        """
        Generate a text string.
        Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.
        :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
        :param max_nb_chars Maximum number of characters the text should contain (minimum 5)
        :return string
        """
        # Units are chosen by the requested size: < 25 chars -> words,
        # < 100 -> sentences, otherwise paragraphs. In every branch the unit
        # that crossed max_nb_chars is popped so the result stays under it.
        text = []
        if max_nb_chars < 5:
            raise ValueError('text() can only generate text of at least 5 characters')
        if max_nb_chars < 25:
            # join words
            while not text:
                size = 0
                # determine how many words are needed to reach the $max_nb_chars once;
                while size < max_nb_chars:
                    word = (' ' if size else '') + cls.science_word()
                    text.append(word)
                    size += len(word)
                text.pop()
            text[0] = text[0][0].upper() + text[0][1:]
            last_index = len(text) - 1
            text[last_index] += '.'
        elif max_nb_chars < 100:
            # join sentences
            while not text:
                size = 0
                # determine how many sentences are needed to reach the $max_nb_chars once
                while size < max_nb_chars:
                    sentence = (' ' if size else '') + cls.science_sentence()
                    text.append(sentence)
                    size += len(sentence)
                text.pop()
        else:
            # join paragraphs
            while not text:
                size = 0
                # determine how many paragraphs are needed to reach the $max_nb_chars once
                while size < max_nb_chars:
                    paragraph = ('\n' if size else '') + cls.science_paragraph()
                    text.append(paragraph)
                    size += len(paragraph)
                text.pop()
        return "".join(text)
# Module-level logger; only errors are shown by default.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)
# Shared Faker instance, extended with the Sciencer provider defined above.
fake = Factory.create()
fake.add_provider(Sciencer)
def create_fake_user():
    """Build, persist and return one fake OSF user (password 'faker123')."""
    fake_email = fake.email()
    fake_name = fake.name()
    # Split the full name into given/middle/family parts expected by the model.
    name_parts = utils.impute_names(fake_name)
    attributes = dict(
        username=fake_email,
        fullname=fake_name,
        is_registered=True,
        is_claimed=True,
        verification_key=security.random_string(15),
        date_registered=fake.date_time(),
        emails=[fake_email],
    )
    attributes.update(name_parts)
    user = UserFactory.build(**attributes)
    user.set_password('faker123')
    user.save()
    logger.info('Created user: {0} <{1}>'.format(user.fullname, user.username))
    return user
def parse_args():
    """Parse command-line options for the fake-data generator."""
    arg_parser = argparse.ArgumentParser(description='Create fake data.')
    # Email (username) of the existing OSF account that owns the projects.
    arg_parser.add_argument('-u', '--user', dest='user', required=True)
    # How many fake contributors per project.
    arg_parser.add_argument('--nusers', dest='n_users', type=int, default=3)
    # How many projects to create.
    arg_parser.add_argument('--nprojects', dest='n_projects', type=int, default=3)
    # How many child components per project.
    arg_parser.add_argument('--ncomponents', dest='n_components', type=int, default=0)
    arg_parser.add_argument('-p', '--privacy', dest="privacy", type=str, default='private', choices=['public', 'private'])
    # Optional base name for the generated projects.
    arg_parser.add_argument('-n', '--name', dest='name', type=str, default=None)
    # How many random science-word tags per project.
    arg_parser.add_argument('-t', '--tags', dest='n_tags', type=int, default=5)
    return arg_parser.parse_args()
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags):
    """Create and return one fake project with contributors, components, tags.

    `creator` always owns the project; `name` may be empty/None, in which
    case a random science sentence is used as the title.
    """
    auth = Auth(user=creator)
    title = name or fake.science_sentence()
    project = ProjectFactory.build(title=title, description=fake.science_paragraph(), creator=creator)
    project.set_privacy(privacy)
    # Attach freshly generated fake users as contributors.
    for _ in range(n_users):
        project.add_contributor(create_fake_user(), auth=auth)
    # Child components share the creator but get their own random text.
    for _ in range(n_components):
        NodeFactory(project=project, title=fake.science_sentence(), description=fake.science_paragraph(), creator=creator)
    # Random science-word tags.
    for _ in range(n_tags):
        project.add_tag(fake.science_word(), auth=auth)
    project.save()
    logger.info('Created project: {0}'.format(project.title))
    return project
def main():
    """Entry point: create the requested number of fake projects and exit."""
    options = parse_args()
    # The projects are created on behalf of an existing OSF account.
    creator = models.User.find(Q('username', 'eq', options.user))[0]
    for index in range(options.n_projects):
        # Suffix the base name with the index; empty title -> random title.
        project_name = options.name + str(index) if options.name else ''
        create_fake_project(creator, options.n_users, options.privacy,
                            options.n_components, project_name, options.n_tags)
    print('Created {n} fake projects.'.format(n=options.n_projects))
    sys.exit(0)
if __name__ == '__main__':
    # Initialize the OSF app (DB backends + routes) before generating data.
    app = init_app('website.settings', set_backends=True, routes=True)
    main()
| AndrewSallans/osf.io | scripts/create_fakes.py | Python | apache-2.0 | 16,186 | [
"Galaxy"
] | 5b67b40144c9c6354f41d41df951c7306392832e9b1e8da9bcda34435a79f9da |
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def cv_carsGLM():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
# choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
# 2:poisson
problem = random.sample(range(3),1)[0]
# pick the predictors and response column, along with the correct family
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
family = "binomial"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
family = "poisson"
response_col = "cylinders"
else :
family = "gaussian"
response_col = "economy"
print "Distribution: {0}".format(family)
print "Response column: {0}".format(response_col)
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
tests.check_models(glm1, glm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
try:
tests.check_models(glm1, glm2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
fold_assignments.set_names(["fold_assignments"])
cars = cars.cbind(fold_assignments)
glm = h2o.glm(y=cars[response_col], x=cars[predictors], training_frame=cars, family=family,
fold_column="fold_assignments", keep_cross_validation_predictions=True)
num_cv_models = len(glm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(glm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(glm._model_json['output']['cross_validation_models'][1]['name'])
assert isinstance(cv_model1, type(glm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model1),type(glm))
assert isinstance(cv_model2, type(glm)), "Expected cross-validation model to be the same model type as the " \
"constructed model, but got {0} and {1}".format(type(cv_model2),type(glm))
# 4. keep_cross_validation_predictions
cv_predictions = glm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = glm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
# # 5. manually construct models
# fold1 = cars[cars["fold_assignments"]==0]
# fold2 = cars[cars["fold_assignments"]==1]
# manual_model1 = h2o.glm(y=fold2[response_col],
# x=fold2[predictors],
# validation_y=fold1[response_col],
# validation_x=fold1[predictors],
# family=family)
# manual_model2 = h2o.glm(y=fold1[response_col],
# x=fold1[predictors],
# validation_y=fold2[response_col],
# validation_x=fold2[predictors],
# family=family)
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
# TODO: PUBDEV-1776
#glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, family=family,
# fold_assignment="Modulo")
# 2. nfolds = 0
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=0, family=family)
# check that this is equivalent to no nfolds
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], family=family)
tests.check_models(glm1, glm2)
# 3. cross-validation and regular validation attempted
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col],
validation_x=cars[predictors], family=family)
## error cases
# 1. nfolds == 1 or < 0
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0],
family=family)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, family=family,
fold_assignment="Modulo")
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments",
family=family, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# # 4. fold_column and fold_assignment both specified
# try:
# glm = h2o.glm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments",
# family=family, training_frame=cars)
# assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
# except EnvironmentError:
# assert True
if __name__ == "__main__":
    # Run through the shared H2O pyunit harness.
    tests.run_test(sys.argv, cv_carsGLM)
| printedheart/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_cv_carsGLM.py | Python | apache-2.0 | 6,744 | [
"Gaussian"
] | 1ff1964669ff9f74d0d1e1d7992ef6ab8d0eb736b351abc1c5d99e1567147065 |
#encoding: utf-8
import unittest
from ase import Atoms
from ase.io import read
from ase.calculators.neighborlist import NeighborList
from numpy import dot
from math import atan2,pi
class testAse(unittest.TestCase):
    """Smoke tests reading a LAMMPS 'range' file with ASE.

    Python 2 code (print statements, xrange); expects a file named 'range'
    in the working directory.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testRead(self):
        # Read the structure and make it periodic in all three directions.
        atoms=read('range',format='lammps')
        atoms. set_pbc((1, 1, 1))
        sbs=atoms.get_chemical_symbols()
        # Relabel species: 'H' placeholders become carbon, everything else
        # nitrogen (LAMMPS dumps carry numeric types, not real elements).
        c=[]
        for sb in sbs:
            if sb=='H':
                c.append('C')
            else:
                c.append('N')
        atoms.set_chemical_symbols(c)
        # print atoms.get_chemical_symbols()
        print atoms;
    def testNeighborlist(self):
        atoms=read('range',format='lammps')
        atoms. set_pbc((1, 1, 1))
        # 0.8 A cutoff radius per atom; include both i->j and j->i pairs.
        nl = NeighborList([0.8 for atom in atoms],self_interaction=False,bothways=True)
        nl.update(atoms)
        ang=[]  # NOTE(review): unused accumulator.
        # For the first three atoms, list neighbors sorted by the in-plane
        # angle of the minimum-image displacement vector (shifted to [0, 2pi)).
        for i in xrange(3):
            indices, offsets = nl. get_neighbors(i)
            angs=[]
            for j, offset in zip(indices, offsets):
                # Minimum-image displacement: neighbor position plus the
                # periodic offset expressed in cell coordinates.
                pos= atoms. positions[j] + dot(offset, atoms. get_cell())-atoms.positions[i]
                ang1=atan2(pos[1],pos[0])+pi
                angs.append((j,ang1))
            newangs=sorted(angs,key=lambda d:d[1])
            print newangs
if __name__ =='__main__':
    # Allow running this module directly as a unittest script.
    unittest.main()
| vanceeasleaf/aces | aces/unittest/testAse.py | Python | gpl-2.0 | 1,134 | [
"ASE",
"LAMMPS"
] | dab4c1e1085fe371d57505f3778d3f3d9456906f9bd5b213bcaf7db520bf1c18 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework.func_graph import FuncGraph
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000  # 1e8 elements triggers the warning.
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
if not context.executing_eagerly():
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d "
"elements. This may consume a large amount of memory." %
num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
# Register the conversion so that APIs expecting a Tensor transparently
# densify IndexedSlices values (e.g. when mixed gradients are aggregated).
ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops, func_graphs):
  """Mark all ops reached from "from_ops".

  Performs a breadth-first traversal over backpropagatable tensor edges,
  adding every visited op to `reached_ops` (mutated in place).

  Args:
    from_ops: list of Operations.
    reached_ops: set of Operations.
    func_graphs: list of FuncGraphs. This method will traverse through
      these functions if they capture from_ops or any reachable ops.
  """
  pending = collections.deque(from_ops)
  while pending:
    op = pending.popleft()
    if op in reached_ops:
      continue
    reached_ops.add(op)
    # Follow only gradient-carrying outputs, crossing into captured functions.
    pending.extend(
        consumer
        for output in op.outputs if _IsBackpropagatable(output)
        for consumer in _Consumers(output, func_graphs))
def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
                  xs):
  """Initialize the pending count for ops between two lists of Operations.
  'pending_count[op]' indicates the number of backprop inputs
  to this operation.
  Args:
    to_ops: list of Operations.
    from_ops: list of Operations.
    colocate_gradients_with_ops: Python bool. See docstring of gradients().
    func_graphs: list of FuncGraphs. This method will traverse through
      these functions if they capture from_ops or any reachable ops. This is
      useful if to_ops occur in a function and from_ops are in an outer function
      or graph.
    xs: list of Tensors.
  Returns:
    A tuple containing: (1) the subset of to_ops reachable from from_ops by a
    path of zero or more backpropagatable tensors, (2) a mapping from operation
    to the number of backprop inputs to that op, and (3) a ControlFlowState
    object which is not None if the ops between from_ops and to_ops contain
    control flow loops.
  """
  # Mark reachable ops from from_ops.
  reached_ops = set()
  _MarkReachedOps(from_ops, reached_ops, func_graphs)
  # X in reached_ops iff X is reachable from from_ops by a path of zero or more
  # backpropagatable tensors.
  reachable_to_ops = set(op for op in to_ops if op in reached_ops)
  # Mark between ops.
  between_ops = set()
  between_op_list = []
  queue = collections.deque()
  queue.extend(to_ops)
  # Backward BFS from to_ops, restricted to ops already known to be reachable
  # from from_ops; the intersection is the subgraph that backprop must visit.
  while queue:
    op = queue.popleft()
    # We are interested in this op.
    if op in reached_ops:
      between_ops.add(op)
      between_op_list.append(op)
      # Clear the boolean so we won't add the inputs again.
      reached_ops.remove(op)
      for inp in _NonEagerInputs(op, xs):
        queue.append(inp.op)
  # X in between_ops iff X is on a path of zero or more backpropagatable tensors
  # between from_ops and to_ops
  # 'loop_state' is None if there are no while loops.
  loop_state = control_flow_ops.MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)
  # Initialize pending count for between ops.
  # pending_count[op] counts how many of op's consumers (within the between
  # subgraph) must deliver a gradient before op itself can be processed.
  pending_count = collections.defaultdict(int)
  for op in between_op_list:
    for x in _NonEagerInputs(op, xs):
      if x.op in between_ops:
        pending_count[x.op] += 1
  return reachable_to_ops, pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys,
                   ys,
                   colocate_gradients_with_ops,
                   gradient_uid="__unsupported__"):
  """Fill in default values for grad_ys.
  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gradient_uid: A unique identifier within the graph indicating
      which invocation of gradients is being executed. Used to cluster
      ops for compilation.
  Returns:
    A list of gradients to use, without None.
  Raises:
    ValueError: If sizes of gradients and inputs don't match
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  new_grad_ys = []
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
      if grad_y is None:
        # Default initial gradient: a ones tensor matching y's shape/dtype.
        # Complex ys have no unambiguous default, so they must be explicit.
        if y.dtype.is_complex:
          raise TypeError(
              "Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
              y.dtype)
        new_grad_ys.append(
            array_ops.fill(
                array_ops.shape(y),
                constant_op.constant(1, dtype=y.dtype, name="grad_ys_%d" % i)))
        continue
      # Validate that the provided grad_y dtype is compatible with y's dtype.
      if y.dtype.is_floating or y.dtype.is_integer:
        if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
          raise TypeError(
              "Gradient type %s generated for real or "
              "integer-valued tensor %s with type %s must be "
              "real or integer" % (dtypes.as_dtype(grad_y.dtype).name, y,
                                   dtypes.as_dtype(y.dtype).name))
      elif y.dtype.is_complex:
        # NOTE(review): the message below says "must be real" but this branch
        # actually requires a *complex* grad_y — message looks wrong upstream.
        if not grad_y.dtype.is_complex:
          raise TypeError(
              "Gradient type %s generated for complex-valued "
              "tensor %s with type %s must be real" % (dtypes.as_dtype(
                  grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
      elif y.dtype == dtypes.variant:
        if grad_y.dtype != dtypes.variant:
          raise TypeError(
              "Gradient type %s generated for variant "
              "tensor %s with type %s must be variant" % (dtypes.as_dtype(
                  grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
      elif y.dtype == dtypes.resource:
        # We assume y is the handle of a ResourceVariable. The gradient of a
        # ResourceVariable should be a numeric value, not another resource.
        if grad_y.dtype == dtypes.resource:
          raise TypeError("Input gradient %s for resource tensor %s should not "
                          "be a resource" % (grad_y, y))
      else:
        raise TypeError(
            "Tensor %s with type %s must be numeric "
            "to obtain a default gradient" % (y, dtypes.as_dtype(y.dtype).name))
      # Create a grad_y tensor in the name scope of the gradient.
      # Required for TensorArrays to identify which gradient call a
      # grad_y value is coming from.
      if isinstance(grad_y, ops.IndexedSlices):
        # Wrap each component in an identity (when it is a Tensor) so the
        # IndexedSlices carries names from this gradient invocation.
        new_grad_ys.append(
            ops.IndexedSlices(
                indices=(array_ops.identity(
                    grad_y.indices, name="grad_ys_%d_indices" % i)
                         if isinstance(grad_y.indices, ops.Tensor) else
                         grad_y.indices),
                values=(array_ops.identity(
                    grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
                        grad_y.values, ops.Tensor) else grad_y.values),
                dense_shape=(array_ops.identity(
                    grad_y.dense_shape, name="grad_ys_%d_shape" % i)
                             if isinstance(grad_y.dense_shape, ops.Tensor) else
                             grad_y.dense_shape)))
      else:
        new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
  return new_grad_ys
def IsTrainable(tensor_or_dtype):
  """Returns True if gradients can flow through the given tensor or dtype."""
  if isinstance(tensor_or_dtype, ops.Tensor):
    tensor_or_dtype = tensor_or_dtype.dtype
  base_dtype = dtypes.as_dtype(tensor_or_dtype).base_dtype
  # Floating, complex, resource (variable handles) and variant dtypes all
  # participate in backprop.
  trainable_dtypes = (dtypes.float16, dtypes.float32, dtypes.float64,
                      dtypes.complex64, dtypes.complex128,
                      dtypes.resource, dtypes.variant)
  return base_dtype in trainable_dtypes
def _IsBackpropagatable(tensor):
  """Returns True if gradients may propagate through `tensor`."""
  # bfloat16 is backpropagatable even though it is not in the trainable set.
  if IsTrainable(tensor):
    return True
  return dtypes.as_dtype(tensor.dtype).base_dtype == dtypes.bfloat16
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
# While ops have inputs added to them during the gradient computation, so we
# skip the below check. See while_v2 for details.
if op.type == "While": return
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count, xs):
  """The set of ops that terminate the gradient computation.
  This computes the frontier of the forward graph *before* which backprop
  should stop. Operations in the returned set will not be differentiated.
  This set is defined as the subset of `from_ops` containing ops that have
  no predecessor in `from_ops`. `pending_count` is the result of
  `_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`
  iff pending_count[op] > 0.
  In addition, none of `stop_gradient_ops` will be differentiated.
  Args:
    from_ops: list of Operations.
    stop_gradient_ops: list of Operations never to backprop through.
    pending_count: mapping from operation to number of backprop inputs.
    xs: list of Tensors.
  Returns:
    The set of operations.
  """
  stop_ops = set()
  for op in from_ops:
    # An op is a stop op iff none of its inputs have pending backprop counts.
    has_pending_predecessor = any(
        pending_count[inp.op] > 0 for inp in _NonEagerInputs(op, xs))
    if not has_pending_predecessor:
      stop_ops.add(op)
  stop_ops.update(stop_gradient_ops)
  return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access
yield
else:
yield
def _IsPartitionedCall(op):
return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall"
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  # SymbolicGradient takes the original op inputs followed by the gradients
  # flowing into each of the op's outputs.
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  # For (Stateful)PartitionedCall the called function's name is stored in the
  # "f" attr; for legacy function-call ops the op type is the function name.
  if _IsPartitionedCall(op):
    f.name = op.get_attr("f").name
  else:
    f.name = op.type
  # Forward all node attrs so the gradient function is instantiated with the
  # same attribute values as the forward call.
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # TODO(apassos) use a better dtype here
  # Resource inputs get float32 gradients (a resource has no gradient dtype).
  in_grads = functional_ops.symbolic_gradient(
      input=f_in,
      Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types],
      f=f)
  return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
  """Compile the calculation in grad_fn if op was marked as compiled."""
  # Normalize the name scope into a legal attr-value fragment.
  scope = scope.rstrip("/").replace("/", "_")
  if func is not None:
    # Defun case: XLA attrs live on the function definition.
    xla_compile = func.definition.attr["_XlaCompile"].b
    xla_separate_compiled_gradients = func.definition.attr[
        "_XlaSeparateCompiledGradients"].b
    xla_scope = func.definition.attr["_XlaScope"].s.decode()
  else:
    # Plain op case: XLA attrs live on the op; absence means "not compiled".
    try:
      xla_compile = op.get_attr("_XlaCompile")
      xla_separate_compiled_gradients = op.get_attr(
          "_XlaSeparateCompiledGradients")
      xla_scope = op.get_attr("_XlaScope").decode()
    except ValueError:
      return grad_fn()  # Exit early
  if not xla_compile:
    return grad_fn()  # Exit early
  # If the gradients are supposed to be compiled separately, we give them a
  # _XlaScope name that is based on the name_scope of the gradients. Otherwise
  # they just inherit the existing _XlaScope name, which lets them be merged
  # together with the non-gradient computation.
  if xla_separate_compiled_gradients:
    xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
  else:
    xla_grad_scope = xla_scope
  attrs = {
      "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
      "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
  }
  # Stamp the XLA attrs onto every op created by grad_fn.
  with ops.get_default_graph()._attr_scope(attrs):  # pylint: disable=protected-access
    return grad_fn()
def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs):
  """Raises an error if we backprop through a loop var."""
  # Breadth-first search backwards from `op` to the nearest reachable member
  # of `from_ops`, so the error can name a concrete offending op.
  target_op = None
  visited = set()
  pending = collections.deque([op])
  while pending:
    curr_op = pending.popleft()
    if curr_op in visited:
      continue
    visited.add(curr_op)
    if curr_op in from_ops:
      target_op = curr_op
      break
    pending.extend(t.op for t in _NonEagerInputs(curr_op, xs))
  assert target_op
  raise ValueError(
      "Cannot compute gradient inside while loop with respect to op '%s'. "
      "We do not support taking the gradient wrt or through the initial value "
      "of a loop variable. Gradients can be computed through loop invariants "
      "or wrt the input parameters to the loop body."
      % target_op.name)
def _IsFunction(graph):
  """Returns True if `graph` is a function body (new or legacy FuncGraph)."""
  return isinstance(
      graph, (FuncGraph, framework_function._FuncGraph))  # pylint: disable=protected-access
def _Captures(func_graph):
  """Returns the captured-value mapping of a (new or legacy) FuncGraph."""
  if isinstance(func_graph, FuncGraph):
    return func_graph.captures
  # Legacy function graphs keep captures in a private attribute.
  assert isinstance(func_graph, framework_function._FuncGraph)  # pylint: disable=protected-access
  return func_graph._captured  # pylint: disable=protected-access
def _MaybeCaptured(t):
  """If t is a captured value placeholder, returns the original captured value.
  Args:
    t: Tensor
  Returns:
    A tensor, potentially from a different Graph/FuncGraph.
  """
  # pylint: disable=protected-access
  is_capture_placeholder = (not isinstance(t, ops.EagerTensor) and
                            _IsFunction(t.op.graph) and
                            t.op.type == "Placeholder")
  if is_capture_placeholder:
    for input_t, placeholder_t in _Captures(t.op.graph).items():
      if t == placeholder_t:
        # Recurse: the captured value may itself be a capture placeholder of
        # an outer function.
        return _MaybeCaptured(input_t)
  # pylint: enable=protected-access
  return t
def _NonEagerInputs(op, xs):
  """Returns the inputs of op, crossing closure boundaries where necessary.
  Does not return any captured EagerTensors, i.e., the number of tensors
  returned may be less than than the actual number of inputs.
  Args:
    op: Operation
    xs: list of Tensors we are differentiating w.r.t.
  Returns:
    A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
    is in a FuncGraph and has captured inputs.
  """
  all_inputs = _Inputs(op, xs)
  return [t for t in all_inputs if not isinstance(t, ops.EagerTensor)]
# TODO(skyewm): plumbing xs through everywhere is ugly, consider making
# _GradientsHelper a class with xs as a member variable.
def _Inputs(op, xs):
  """Returns the inputs of op, crossing closure boundaries where necessary.
  Args:
    op: Operation
    xs: list of Tensors we are differentiating w.r.t.
  Returns:
    A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
    is in a FuncGraph and has captured inputs.
  """
  if not _IsFunction(op.graph):  # pylint: disable=protected-access
    return op.inputs
  # Inside a function body: traverse through capture placeholders to the
  # original captured values, EXCEPT when the tensor is one of the xs we are
  # differentiating w.r.t. — the algorithm must "see" `t` itself in that case,
  # even if it is a function input for a captured value.
  return [t if t in xs else _MaybeCaptured(t) for t in op.inputs]
def _Consumers(t, func_graphs):
  """Returns the consumers of t, crossing closure boundaries where necessary.
  Args:
    t: Tensor
    func_graphs: a list of FuncGraphs that may have captured t.
  Returns:
    A list of tensors. The tensors will be from the current graph and/or
    func_graphs.
  """
  consumers = t.consumers()
  for func_graph in func_graphs:
    for captured_input, placeholder in _Captures(func_graph).items():
      if captured_input == t:
        # t is captured by this function: its placeholder's consumers (and
        # theirs, recursively) also consume t.
        consumers.extend(_Consumers(placeholder, func_graphs))
  return consumers
def _GradientsHelper(ys,
                     xs,
                     grad_ys=None,
                     name="gradients",
                     colocate_gradients_with_ops=False,
                     gate_gradients=False,
                     aggregation_method=None,
                     stop_gradients=None,
                     unconnected_gradients=UnconnectedGradients.NONE,
                     src_graph=None):
  """Implementation of gradients().

  Walks the graph from `ys` toward `xs` in reverse topological order, calling
  each op's registered gradient function (or SymbolicGradient for function
  calls) and aggregating the per-output gradients.  See the public
  `tf.gradients` documentation for parameter semantics.
  """
  if context.executing_eagerly():
    raise RuntimeError("tf.gradients is not supported when eager execution "
                       "is enabled. Use tf.GradientTape instead.")
  if src_graph is None:
    src_graph = ops.get_default_graph()
  try:
    unconnected_gradients = UnconnectedGradients(unconnected_gradients)
  except ValueError:
    raise ValueError(
        "Unknown value for unconnected_gradients: %r" % unconnected_gradients)
  # If src_graph is a _FuncGraph (i.e. a function body), gather it and all
  # ancestor graphs. This is necessary for correctly handling captured values.
  func_graphs = []
  curr_graph = src_graph
  while _IsFunction(curr_graph):
    func_graphs.append(curr_graph)
    if isinstance(curr_graph, FuncGraph):
      curr_graph = curr_graph.outer_graph
    else:
      assert isinstance(curr_graph, framework_function._FuncGraph)  # pylint: disable=protected-access
      curr_graph = curr_graph._outer_graph  # pylint: disable=protected-access
  ys = _AsList(ys)
  xs = _AsList(xs)
  stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
  if grad_ys is None:
    grad_ys = [None] * len(ys)
  else:
    grad_ys = _AsList(grad_ys)
  with ops.name_scope(
      name, "gradients",
      list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
    # Get a uid for this call to gradients that can be used to help
    # cluster ops for compilation.
    gradient_uid = ops.get_default_graph().unique_name("uid")
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    # Resource variables are differentiated through their handles.
    xs = [
        x.handle if resource_variable_ops.is_resource_variable(x) else x
        for x in xs
    ]
    xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
        xs, name="x", as_ref=True)
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,
                             gradient_uid)
    # The approach we take here is as follows: Create a list of all ops in the
    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
    # to ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its input.
    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    stop_gradient_ops = [t.op for t in stop_gradients]
    reachable_to_ops, pending_count, loop_state = _PendingCount(
        to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs)
    # Iterate over the collected ops.
    #
    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into a Add() Operation if there
    # is more than one.
    grads = {}
    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)
    # Initialize queue with to_ops.
    queue = collections.deque()
    # Add the ops in 'to_ops' into the queue.
    to_ops_set = set()
    for op in to_ops:
      # 'ready' handles the case where one output gradient relies on
      # another output's gradient.
      ready = (pending_count[op] == 0)
      if ready and op not in to_ops_set and op in reachable_to_ops:
        to_ops_set.add(op)
        queue.append(op)
    if loop_state:
      # While-loop exits that no op depends on still need zero gradients so
      # the loop's gradient computation terminates correctly.
      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
      for y in loop_exits:
        if IsTrainable(y):
          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
          queue.append(y.op)
    stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
                                     aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)
        grad_fn = None
        func_call = None
        is_partitioned_call = _IsPartitionedCall(op)
        # pylint: disable=protected-access
        is_func_call = (
            src_graph._is_function(op.type) or is_partitioned_call)
        # pylint: enable=protected-access
        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
        if has_out_grads and (op not in stop_ops):
          try:
            # Prefer a Python-registered gradient function when one exists.
            grad_fn = ops.get_gradient_function(op)
          except LookupError:
            if is_func_call:
              if is_partitioned_call:
                func_call = src_graph._get_function(  # pylint: disable=protected-access
                    compat.as_bytes(op.get_attr("f").name))
              else:
                func_call = src_graph._get_function(op.type)  # pylint: disable=protected-access
              # Note that __defun is not set if the graph is
              # imported. If it's set, we prefer to access the original
              # defun.
              func_call = getattr(op, "__defun", func_call)
              grad_fn = func_call.python_grad_func
            else:
              raise LookupError(
                  "No gradient defined for operation '%s' (op type: %s)" %
                  (op.name, op.type))
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)
        # NOTE(skyewm): We don't support computing gradients wrt a loop variable
        # unless it's within the context of a single iteration (i.e. the
        # gradient is wrt to the loop parameter in the body function, not wrt or
        # through the initial value). This means if we're in a while loop
        # context, we should never see a switch node from this context.
        # pylint: disable=protected-access
        if (control_flow_util.IsSwitch(op) and
            op._control_flow_context is not None and
            op._control_flow_context.IsWhileContext() and
            op._control_flow_context ==
            ops.get_default_graph()._get_control_flow_context()):
          _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs)
        # pylint: enable=protected-access
        if (grad_fn or is_func_call) and has_out_grads:
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (
                (not grad_fn and is_func_call) or IsTrainable(op.outputs[i])):
              # Only trainable outputs or outputs for a function call that
              # will use SymbolicGradient get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              # TODO(apassos) gradients of resource handles might be an
              # issue here because of zeros.
              if loop_state:
                out_grads[i] = loop_state.ZerosLike(op, i)
              else:
                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with src_graph._original_op(op):
              # pylint: enable=protected-access
              if grad_fn:
                # If grad_fn was found, do not use SymbolicGradient even for
                # functions.
                in_grads = _MaybeCompile(grad_scope, op, func_call,
                                         lambda: grad_fn(op, *out_grads))
              else:
                # For function call ops, we add a 'SymbolicGradient'
                # node to the graph to compute gradients.
                in_grads = _MaybeCompile(grad_scope, op, func_call,
                                         lambda: _SymGrad(op, out_grads))
              in_grads = _AsList(in_grads)
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len([x for x in in_grads
                                         if x is not None]) > 1:
                # Gate the gradients behind a tuple() to reduce the chance of
                # race conditions between concurrently-computed gradients.
                with ops.device(None):
                  with ops._colocate_with_for_gradient(  # pylint: disable=protected-access
                      None,
                      gradient_uid,
                      ignore_existing=True):
                    in_grads = control_flow_ops.tuple(in_grads)
          _LogOpGradients(op, out_grads, in_grads)
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagate a list of None backwards.
          in_grads = [None] * len(_Inputs(op, xs))
        # Note: we don't filter out eager inputs here because the inputs need to
        # line up with in_grads.
        for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs), in_grads)):
          if in_grad is not None:
            if (isinstance(in_grad, ops.Tensor) and
                t_in.dtype != dtypes.resource):
              try:
                in_grad.set_shape(t_in.get_shape())
              except ValueError:
                raise ValueError(
                    "Incompatible shapes between op input and calculated "
                    "input gradient. Forward operation: %s. Input index: %d. "
                    "Original input shape: %s. "
                    "Calculated input gradient shape: %s" %
                    (op.name, i, t_in.shape, in_grad.shape))
            if not isinstance(t_in, ops.EagerTensor):
              _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)
      # Update pending count for the inputs of op and enqueue ready ops.
      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
                                    xs)
  if loop_state:
    loop_state.PostProcessing()
  return [_GetGrad(grads, x, unconnected_gradients) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
  """Return true iff op has real gradient.

  A "real" gradient is a Tensor/IndexedSlices, or a non-empty sequence
  containing at least one non-None entry.
  """
  out_grads = _GetGrads(grads, op)
  for out_grad in out_grads:
    if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
      return True
    # BUGFIX: `collections.Sequence` was a deprecated alias removed in
    # Python 3.10; the canonical location is `collections.abc.Sequence`.
    if out_grad and isinstance(out_grad, collections.abc.Sequence):
      if any(g is not None for g in out_grad):
        return True
  return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
                                  xs):
  """Update pending count for the inputs of op and enqueue ready ops."""
  for x in _NonEagerInputs(op, xs):
    pending_count[x.op] -= 1
    # An op is ready once all of its consumers have delivered their gradients.
    ready = (pending_count[x.op] == 0)
    if loop_state and not ready:
      # Loop switch ops may be processed before all consumers are done.
      ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)
    if ready:
      if control_flow_util.IsLoopExit(x.op):
        # if x is an exit without real gradient, defer processing them.
        grad_state = loop_state.GetGradState(x.op, before=False)
        grad_state.deferred_exits.append(x)
        grad_state.pending_exits_count -= 1
        if grad_state.pending_exits_count == 0:
          # We now have all the exits so process them.
          has_not_none_grad = False
          for y in grad_state.deferred_exits:
            if _HasAnyNotNoneGrads(grads, y.op):
              has_not_none_grad = True
              queue.append(y.op)
            else:
              grad_state.unused_exits.append(y)
          if has_not_none_grad:
            # For an unused exit, if it has trainable outputs, backprop
            # a zero gradient. Otherwise, just ignore it.
            for y in grad_state.unused_exits:
              if IsTrainable(y):
                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
              queue.append(y.op)
          else:
            # All exits are "unused" so use None as gradient.
            for y in grad_state.unused_exits:
              queue.append(y.op)
      else:
        queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_util.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t, unconnected_gradients):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
if unconnected_gradients == UnconnectedGradients.ZERO:
t_dtype = t.dtype if t.dtype != dtypes.resource else dtypes.float32
return array_ops.zeros_like(t, dtype=t_dtype)
elif unconnected_gradients == UnconnectedGradients.NONE:
return None
else:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
t_grad = op_grads[t.value_index]
assert not isinstance(
t_grad, list), ("gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
  """Flattens `grad`, an IndexedSlices whose values may be IndexedSlices.

  Returns an equivalent IndexedSlices whose `values` is a plain Tensor, by
  recursively composing the index maps (gathering outer indices by the inner
  ones).
  """
  assert isinstance(grad, ops.IndexedSlices)
  if isinstance(grad.values, ops.Tensor):
    return grad
  else:
    assert isinstance(grad.values, ops.IndexedSlices)
    # Flatten the inner IndexedSlices first, then compose the index maps.
    g = _HandleNestedIndexedSlices(grad.values)
    return ops.IndexedSlices(g.values, array_ops.gather(
        grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
  """Returns the static shape obtained by merging all Tensors in `inputs`."""
  merged_shape = tensor_shape.unknown_shape()
  for inp in inputs:
    if isinstance(inp, ops.Tensor):
      merged_shape = merged_shape.merge_with(inp.get_shape())
  return merged_shape
def _LogOpGradients(op, out_grads, in_grads):
  """Log the in and out grads of an op."""
  logging.vlog(1, "Gradient for '" + op.name + "'")

  def _FilterGrad(x):
    # Loggable gradients: anything that is not None and not an empty list.
    if x is None:
      return False
    if isinstance(x, (list, tuple)):
      return bool(x)
    return True

  logging.vlog(1, " in --> %s",
               ", ".join([x.name for x in out_grads if _FilterGrad(x)]))
  logging.vlog(1, " out --> %s",
               ", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list, gradient_uid):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Group the tensors by the device they live on.
  tensors_on_device = collections.defaultdict(list)
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)
  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []
  for dev in sorted(tensors_on_device, key=lambda d: "" if d is None else d):
    tensors = tensors_on_device[dev]
    with ops._colocate_with_for_gradient(  # pylint: disable=protected-access
        tensors[0].op,
        gradient_uid,
        ignore_existing=True):
      summands.append(math_ops.add_n(tensors))
  return math_ops.add_n(summands)
@tf_export("AggregationMethod")
class AggregationMethod(object):
  """A class listing aggregation methods used to combine gradients.
  Computing partial derivatives can require aggregating gradient
  contributions. This class lists the various methods that can
  be used to combine gradients in the graph:
  * `ADD_N`: All of the gradient terms are summed as part of one
    operation using the "AddN" op. It has the property that all
    gradients must be ready before any aggregation is performed.
  * `DEFAULT`: The system-chosen default aggregation method.
  """
  ADD_N = 0
  DEFAULT = ADD_N
  # The following are experimental and may not be supported in future releases.
  # EXPERIMENTAL_TREE: pairwise sums, which can lower peak memory.
  # EXPERIMENTAL_ACCUMULATE_N: uses AccumulateN when shapes are fully defined.
  EXPERIMENTAL_TREE = 1
  EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads,
                     op,
                     gradient_uid,
                     loop_state,
                     aggregation_method=None):
  """Get the aggregated gradients for op.
  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    gradient_uid: A unique identifier within the graph indicating
      which invocation of gradients is being executed. Used to cluster
      ops for compilation.
    loop_state: An object for maintaining the state of the while loops in the
      graph. It is of type ControlFlowState. None if the graph
      contains no while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.
  Returns:
    A list of gradients, one per each output of `op`. If the gradients
    for a particular output is a list, this function aggregates it
    before returning.
  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]:
    raise ValueError(
        "Invalid aggregation_method specified %s." % aggregation_method)
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      # A pre-aggregated (non-list) gradient is only valid for loop switches.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert control_flow_util.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices.
    # BUGFIX: `collections.Sequence` was a deprecated alias removed in
    # Python 3.10; use the canonical `collections.abc.Sequence`.
    if (isinstance(out_grad, collections.abc.Sequence) and not all(
        isinstance(g, (ops.Tensor, ops.IndexedSlices))
        for g in out_grad
        if g is not None
    )):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None):
        # Renamed from `tensor_shape` to avoid shadowing the imported
        # `tensor_shape` module used by _AccumulatorShape.
        acc_shape = _AccumulatorShape(out_grad)
        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
            and len(out_grad) > 2 and acc_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint. When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable. If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)
        logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
                     acc_shape, used)
      else:
        out_grads[i] = _AggregateIndexedSlicesGradients(out_grad)
    else: # not out_grad
      # out_grads[i] is [], thus its aggregation is simply None.
      out_grads[i] = None
  return out_grads
def _AggregateIndexedSlicesGradients(grads):
  """Combine a list of `IndexedSlices` gradients into one `IndexedSlices`.

  Non-None entries are coerced to `IndexedSlices` and merged by
  concatenating their values and indices along axis 0; the dense shape
  of the first entry is reused for the result.
  """
  if not grads:
    return None
  if len(grads) == 1:
    return grads[0]
  non_none = [g for g in grads if g is not None]
  slices = math_ops._as_indexed_slices_list(non_none)  # pylint: disable=protected-access
  slices = [_HandleNestedIndexedSlices(s) for s in slices]
  # Build the merged IndexedSlices from the concatenated values/indices.
  merged_values = array_ops.concat([s.values for s in slices], axis=0)
  merged_indices = array_ops.concat([s.indices for s in slices], axis=0)
  return ops.IndexedSlices(merged_values, merged_indices, slices[0].dense_shape)
| jbedorf/tensorflow | tensorflow/python/ops/gradients_util.py | Python | apache-2.0 | 41,830 | [
"VisIt"
] | 4a2d3e715ce958df88a8ccc46dab93f254dfe82b4d304fca6be1c2c3fdf5a385 |
# -*- coding: utf-8 -*-
"""This script generates Slicer Interfaces based on the CLI modules XML. CLI
modules are selected from the hardcoded list below and generated code is placed
in the cli_modules.py file (and imported in __init__.py). For this to work
correctly you must have your CLI executables in $PATH"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import str, bytes, open
import xml.dom.minidom
import subprocess
import os
from shutil import rmtree
import keyword
python_keywords = keyword.kwlist # If c++ SEM module uses one of these key words as a command line parameter, we need to modify variable
def force_to_valid_python_variable_name(old_name):
    """Return *old_name* adjusted so it is a usable Python identifier.

    Valid c++ names are not always valid in python, so provide alternate
    naming: surrounding whitespace is stripped and reserved words are
    prefixed with ``opt_``.

    >>> force_to_valid_python_variable_name('lambda') # doctest: +IGNORE_UNICODE
    'opt_lambda'
    >>> force_to_valid_python_variable_name('inputVolume') # doctest: +IGNORE_UNICODE
    'inputVolume'
    """
    new_name = old_name.strip()
    # BUGFIX: compare the *stripped* name against the keyword list -- the
    # original compared the raw input, so e.g. ' lambda ' escaped detection
    # and, when detected, the 'opt_' prefix was applied to the unstripped
    # string.  keyword.iskeyword() is equivalent to membership in kwlist.
    if keyword.iskeyword(new_name):
        new_name = 'opt_' + new_name
    return new_name
def add_class_to_package(class_codes, class_names, module_name, package_dir):
    """Write generated interface classes into ``<package_dir>/<module_name>.py``.

    The module file gets an autogenerated-file banner, the standard nipype
    import block, and the class bodies from *class_codes*; the package's
    ``__init__.py`` is appended with an import of *class_names* from the
    new module.  Files are managed with ``with`` so they are closed even
    if a write fails (the original left them open on error).
    """
    module_python_filename = os.path.join(package_dir, "%s.py" % module_name)
    with open(module_python_filename, 'w') as f_m:
        f_m.write("""# -*- coding: utf-8 -*-
\"\"\"Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n""")
        imports = """from __future__ import print_function, division, unicode_literals, absolute_import
from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec,
                    File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath)
import os\n\n\n"""
        f_m.write(imports)
        f_m.write("\n\n".join(class_codes))
    # NOTE(review): the generated "from %s import ..." line relies on
    # implicit relative imports (Python 2 semantics) -- kept as-is.
    with open(os.path.join(package_dir, "__init__.py"), 'a+') as f_i:
        f_i.write("from %s import %s\n" % (module_name, ", ".join(class_names)))
def crawl_code_struct(code_struct, package_dir):
    """Recursively materialize a nested {name: code-or-dict} tree as packages.

    String values are single classes written as their own module; dict
    values become sub-packages (recursed into).  A numpy.distutils
    ``setup.py`` is emitted whenever sub-packages were created.
    """
    subpackages = []
    for k, v in code_struct.items():
        if isinstance(v, str) or isinstance(v, (str, bytes)):
            # Leaf: a single class -> its own module named after the class.
            module_name = k.lower()
            class_name = k
            class_code = v
            add_class_to_package(
                [class_code], [class_name], module_name, package_dir)
        else:
            # Split children into plain classes (l1) and sub-trees (l2).
            l1 = {}
            l2 = {}
            for key in list(v.keys()):
                if (isinstance(v[key], str) or isinstance(v[key], (str, bytes))):
                    l1[key] = v[key]
                else:
                    l2[key] = v[key]
            if l2:
                # Sub-trees present: create a sub-package and recurse.
                v = l2
                subpackages.append(k.lower())
                f_i = open(os.path.join(package_dir, "__init__.py"), 'a+')
                f_i.write("from %s import *\n" % k.lower())
                f_i.close()
                new_pkg_dir = os.path.join(package_dir, k.lower())
                # Rebuild the sub-package from scratch each run.
                if os.path.exists(new_pkg_dir):
                    rmtree(new_pkg_dir)
                os.mkdir(new_pkg_dir)
                crawl_code_struct(v, new_pkg_dir)
                if l1:
                    # Plain classes alongside sub-trees go into the new dir too.
                    for ik, iv in l1.items():
                        crawl_code_struct({ik: {ik: iv}}, new_pkg_dir)
            elif l1:
                # Only plain classes: collect them into one module.
                v = l1
                module_name = k.lower()
                add_class_to_package(
                    list(v.values()), list(v.keys()), module_name, package_dir)
    if subpackages:
        # Emit a numpy.distutils setup.py registering the sub-packages.
        f = open(os.path.join(package_dir, "setup.py"), 'w')
        f.write("""# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('{pkg_name}', parent_package, top_path)
    {sub_pks}
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
""".format(pkg_name=package_dir.split("/")[-1], sub_pks="\n    ".join(["config.add_data_dir('%s')" % sub_pkg for sub_pkg in subpackages])))
        f.close()
def generate_all_classes(modules_list=None, launcher=None, redirect_x=False, mipav_hacks=False):
    """Generate wrapper classes for every SEM compliant tool listed.

    modules_list contains all the SEM compliant tools that should have
    wrappers created for them.  launcher contains the command line prefix
    wrapper arguments needed to prepare a proper environment for each of
    the modules.
    """
    # BUGFIX: the original used mutable default arguments ([]), which are
    # shared across calls; None sentinels preserve the same behavior safely.
    if modules_list is None:
        modules_list = []
    if launcher is None:
        launcher = []
    all_code = {}
    for module in modules_list:
        print("=" * 80)
        print("Generating Definition for module {0}".format(module))
        print("^" * 80)
        package, code, module = generate_class(module, launcher, redirect_x=redirect_x, mipav_hacks=mipav_hacks)
        cur_package = all_code
        # Walk the dotted category path, creating nested dicts as needed.
        module_name = package.strip().split(" ")[0].split(".")[-1]
        for package in package.strip().split(" ")[0].split(".")[:-1]:
            if package not in cur_package:
                cur_package[package] = {}
            cur_package = cur_package[package]
        if module_name not in cur_package:
            cur_package[module_name] = {}
        cur_package[module_name][module] = code
    # Start with a fresh top-level __init__.py before writing packages.
    if os.path.exists("__init__.py"):
        os.unlink("__init__.py")
    crawl_code_struct(all_code, os.getcwd())
def generate_class(module, launcher, strip_module_name_prefix=True, redirect_x=False, mipav_hacks=False):
    """Render python source for one SEM tool's wrapper classes.

    Runs ``<module> --xml`` (via :func:`grab_xml`), walks the returned SEM
    XML description and builds the InputSpec, OutputSpec and
    SEMLikeCommandLine class bodies as strings.  Returns a tuple
    ``(category, code, module_name)`` where *category* is the dotted
    category path from the XML (used for package placement).
    """
    dom = grab_xml(module, launcher, mipav_hacks=mipav_hacks)
    if strip_module_name_prefix:
        module_name = module.split(".")[-1]
    else:
        module_name = module
    inputTraits = []
    outputTraits = []
    outputs_filenames = {}
    # self._outputs_nodes = []
    # Build the class docstring from the XML metadata elements.
    class_string = "\"\"\""
    for desc_str in ['title', 'category', 'description', 'version',
                     'documentation-url', 'license', 'contributor',
                     'acknowledgements']:
        el = dom.getElementsByTagName(desc_str)
        if el and el[0].firstChild and el[0].firstChild.nodeValue.strip():
            class_string += desc_str + ": " + el[0].firstChild.nodeValue.strip() + "\n\n"
        if desc_str == 'category':
            category = el[0].firstChild.nodeValue.strip()
    class_string += "\"\"\""
    for paramGroup in dom.getElementsByTagName("parameters"):
        # Positional arguments are numbered relative to the highest index,
        # so they can be given as negative positions (counted from the end).
        indices = paramGroup.getElementsByTagName('index')
        max_index = 0
        for index in indices:
            if int(index.firstChild.nodeValue) > max_index:
                max_index = int(index.firstChild.nodeValue)
        for param in paramGroup.childNodes:
            if param.nodeName in ['label', 'description', '#text', '#comment']:
                continue
            traitsParams = {}
            longFlagNode = param.getElementsByTagName('longflag')
            if longFlagNode:
                # Prefer to use longFlag as name if it is given, rather than the parameter name
                longFlagName = longFlagNode[0].firstChild.nodeValue
                # SEM automatically strips prefixed "--" or "-" from from xml before processing
                # we need to replicate that behavior here The following
                # two nodes in xml have the same behavior in the program
                # <longflag>--test</longflag>
                # <longflag>test</longflag>
                longFlagName = longFlagName.lstrip(" -").rstrip(" ")
                name = longFlagName
                name = force_to_valid_python_variable_name(name)
                traitsParams["argstr"] = "--" + longFlagName + " "
            else:
                name = param.getElementsByTagName(
                    'name')[0].firstChild.nodeValue
                name = force_to_valid_python_variable_name(name)
                if param.getElementsByTagName('index'):
                    # Positional parameter: no flag on the command line.
                    traitsParams["argstr"] = ""
                else:
                    traitsParams["argstr"] = "--" + name + " "
            if param.getElementsByTagName('description') and param.getElementsByTagName('description')[0].firstChild:
                traitsParams["desc"] = param.getElementsByTagName('description')[0].firstChild.nodeValue.replace('"', "\\\"").replace("\n", ", ")
            # printf-style conversion for each SEM parameter type.
            argsDict = {'directory': '%s', 'file': '%s', 'integer': "%d",
                        'double': "%f", 'float': "%f", 'image': "%s",
                        'transform': "%s", 'boolean': '',
                        'string-enumeration': '%s', 'string': "%s",
                        'integer-enumeration': '%s',
                        'table': '%s', 'point': '%s', 'region': '%s', 'geometry': '%s'}
            if param.nodeName.endswith('-vector'):
                traitsParams["argstr"] += "%s"
            else:
                traitsParams["argstr"] += argsDict[param.nodeName]
            index = param.getElementsByTagName('index')
            if index:
                traitsParams["position"] = int(
                    index[0].firstChild.nodeValue) - (max_index + 1)
            desc = param.getElementsByTagName('description')
            if index:
                traitsParams["desc"] = desc[0].firstChild.nodeValue
            # SEM type -> traits type used in the generated spec.
            typesDict = {'integer': "traits.Int", 'double': "traits.Float",
                         'float': "traits.Float", 'image': "File",
                         'transform': "File", 'boolean': "traits.Bool",
                         'string': "traits.Str", 'file': "File", 'geometry': "File",
                         'directory': "Directory", 'table': "File",
                         'point': "traits.List", 'region': "traits.List"}
            if param.nodeName.endswith('-enumeration'):
                type = "traits.Enum"
                values = ['"%s"' % str(el.firstChild.nodeValue).replace('"', '') for el in param.getElementsByTagName('element')]
            elif param.nodeName.endswith('-vector'):
                type = "InputMultiPath"
                if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table']:
                    values = ["%s(exists=True)" % typesDict[
                        param.nodeName.replace('-vector', '')]]
                else:
                    values = [typesDict[param.nodeName.replace('-vector', '')]]
                # MIPAV's JIST modules use ';' as the list separator.
                if mipav_hacks is True:
                    traitsParams["sep"] = ";"
                else:
                    traitsParams["sep"] = ','
            elif param.getAttribute('multiple') == "true":
                type = "InputMultiPath"
                if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table']:
                    values = ["%s(exists=True)" % typesDict[param.nodeName]]
                elif param.nodeName in ['point', 'region']:
                    values = ["%s(traits.Float(), minlen=3, maxlen=3)" %
                              typesDict[param.nodeName]]
                else:
                    values = [typesDict[param.nodeName]]
                traitsParams["argstr"] += "..."
            else:
                values = []
                type = typesDict[param.nodeName]
            if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table']:
                if not param.getElementsByTagName('channel'):
                    raise RuntimeError("Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}".format(traitsParams))
                elif param.getElementsByTagName('channel')[0].firstChild.nodeValue == 'output':
                    # Output path: accept either a Bool (auto-generate the
                    # filename) or an explicit path on the input side, and
                    # mirror it as an (existing) file on the output spec.
                    traitsParams["hash_files"] = False
                    inputTraits.append(
                        "%s = traits.Either(traits.Bool, %s(%s), %s)" % (name,
                                                                         type,
                                                                         parse_values(
                                                                             values).replace("exists=True", ""),
                                                                         parse_params(traitsParams)))
                    traitsParams["exists"] = True
                    traitsParams.pop("argstr")
                    traitsParams.pop("hash_files")
                    outputTraits.append("%s = %s(%s%s)" % (name, type.replace("Input", "Output"), parse_values(values), parse_params(traitsParams)))
                    outputs_filenames[
                        name] = gen_filename_from_param(param, name)
                elif param.getElementsByTagName('channel')[0].firstChild.nodeValue == 'input':
                    if param.nodeName in ['file', 'directory', 'image', 'geometry', 'transform', 'table'] and type not in ["InputMultiPath", "traits.List"]:
                        traitsParams["exists"] = True
                    inputTraits.append("%s = %s(%s%s)" % (name, type, parse_values(values), parse_params(traitsParams)))
                else:
                    raise RuntimeError("Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}".format(traitsParams))
            else:  # For all other parameter types, they are implicitly only input types
                inputTraits.append("%s = %s(%s%s)" % (name, type, parse_values(
                    values), parse_params(traitsParams)))
    if mipav_hacks:
        # JIST-specific fixes: drop maxMemoryUsage and force the JVM options.
        blacklisted_inputs = ["maxMemoryUsage"]
        inputTraits = [trait for trait in inputTraits if trait.split()[0] not in blacklisted_inputs]
        compulsory_inputs = ['xDefaultMem = traits.Int(desc="Set default maximum heap size", argstr="-xDefaultMem %d")',
                             'xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True)']
        inputTraits += compulsory_inputs
    # Assemble the final source text for the three generated classes.
    input_spec_code = "class " + module_name + "InputSpec(CommandLineInputSpec):\n"
    for trait in inputTraits:
        input_spec_code += "    " + trait + "\n"
    output_spec_code = "class " + module_name + "OutputSpec(TraitedSpec):\n"
    if not outputTraits:
        output_spec_code += "    pass\n"
    else:
        for trait in outputTraits:
            output_spec_code += "    " + trait + "\n"
    output_filenames_code = "_outputs_filenames = {"
    output_filenames_code += ",".join(["'%s':'%s'" % (
        key, value) for key, value in outputs_filenames.items()])
    output_filenames_code += "}"
    input_spec_code += "\n\n"
    output_spec_code += "\n\n"
    template = """class %module_name%(SEMLikeCommandLine):
    %class_str%
    input_spec = %module_name%InputSpec
    output_spec = %module_name%OutputSpec
    _cmd = "%launcher% %name% "
    %output_filenames_code%\n"""
    template += "    _redirect_x = {0}\n".format(str(redirect_x))
    main_class = template.replace('%class_str%', class_string).replace("%module_name%", module_name).replace("%name%", module).replace("%output_filenames_code%", output_filenames_code).replace("%launcher%", " ".join(launcher))
    return category, input_spec_code + output_spec_code + main_class, module_name
def grab_xml(module, launcher, mipav_hacks=False):
    """Run ``<launcher> <module> --xml`` and return the parsed DOM.

    Applies several workarounds for malformed XML produced by JIST/MIPAV
    tools before parsing.  NOTE(review): the subprocess output is treated
    as text; on Python 3 ``communicate()`` returns bytes -- presumably this
    was written for Python 2 (verify before porting).
    """
    # cmd = CommandLine(command = "Slicer3", args="--launch %s --xml"%module)
    # ret = cmd.run()
    command_list = launcher[:]  # force copy to preserve original
    command_list.extend([module, "--xml"])
    final_command = " ".join(command_list)
    xmlReturnValue = subprocess.Popen(
        final_command, stdout=subprocess.PIPE, shell=True).communicate()[0]
    if mipav_hacks:
        # workaround for a jist bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7234&group_id=228&atid=942
        # Rewrites the bogus "<file collection...>" element pair into a
        # well-formed <file-vector> element.
        new_xml = ""
        replace_closing_tag = False
        for line in xmlReturnValue.splitlines():
            if line.strip() == "<file collection: semi-colon delimited list>":
                new_xml += "<file-vector>\n"
                replace_closing_tag = True
            elif replace_closing_tag and line.strip() == "</file>":
                new_xml += "</file-vector>\n"
                replace_closing_tag = False
            else:
                new_xml += line + "\n"
        xmlReturnValue = new_xml
    # workaround for a JIST bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7233&group_id=228&atid=942
    if xmlReturnValue.strip().endswith("XML"):
        xmlReturnValue = xmlReturnValue.strip()[:-3]
    # Some tools print a spurious error banner before the XML; strip it.
    if xmlReturnValue.strip().startswith("Error: Unable to set default atlas"):
        xmlReturnValue = xmlReturnValue.strip()[len("Error: Unable to set default atlas"):]
    try:
        dom = xml.dom.minidom.parseString(xmlReturnValue.strip())
    except Exception as e:
        # Dump the raw output to help diagnose malformed XML.
        print(xmlReturnValue.strip())
        raise e
    return dom
def parse_params(params):
    """Render a traits-parameter dict as a ``key=value, ...`` string.

    String values are double-quoted (inner double quotes become single
    quotes so the generated source stays valid); other values are
    rendered with ``%s``.
    """
    # Renamed the accumulator: the original shadowed the builtin ``list``.
    parts = []
    for key, value in params.items():
        if isinstance(value, (str, bytes)):
            parts.append('%s="%s"' % (key, value.replace('"', "'")))
        else:
            parts.append('%s=%s' % (key, value))
    return ", ".join(parts)
def parse_values(values):
    """Render *values* as a comma-joined string with a trailing ', '.

    Returns the empty string when *values* is empty.
    """
    rendered = ['%s' % v for v in values]
    if not rendered:
        return ""
    return ", ".join(rendered) + ", "
def gen_filename_from_param(param, base):
    """Build a default output filename for an XML parameter node.

    The extension is taken from the node's ``fileExtensions`` attribute
    when present (only the first entry of a comma separated list is
    used), otherwise from a per-node-type default table.
    """
    declared = param.getAttribute("fileExtensions")
    if declared:
        # It is possible that multiple file extensions can be specified in a
        # comma separated list; only the first one is used.
        ext = declared.split(',')[0]
    else:
        defaults = {'image': '.nii', 'transform': '.mat', 'file': '',
                    'directory': '', 'geometry': '.vtk'}
        ext = defaults[param.nodeName]
    return base + ext
if __name__ == "__main__":
    # NOTE: For now either the launcher needs to be found on the default path, or
    # every tool in the modules list must be found on the default path
    # AND calling the module with --xml must be supported and compliant.
    # Hardcoded list of Slicer CLI modules to wrap (entries commented out
    # are known to produce non-compliant XML).
    modules_list = ['MedianImageFilter',
                    'CheckerBoardFilter',
                    'EMSegmentCommandLine',
                    'GrayscaleFillHoleImageFilter',
                    # 'CreateDICOMSeries', #missing channel
                    'TractographyLabelMapSeeding',
                    'IntensityDifferenceMetric',
                    'DWIToDTIEstimation',
                    'MaskScalarVolume',
                    'ImageLabelCombine',
                    'DTIimport',
                    'OtsuThresholdImageFilter',
                    'ExpertAutomatedRegistration',
                    'ThresholdScalarVolume',
                    'DWIUnbiasedNonLocalMeansFilter',
                    'BRAINSFit',
                    'MergeModels',
                    'ResampleDTIVolume',
                    'MultiplyScalarVolumes',
                    'LabelMapSmoothing',
                    'RigidRegistration',
                    'VotingBinaryHoleFillingImageFilter',
                    'BRAINSROIAuto',
                    'RobustStatisticsSegmenter',
                    'GradientAnisotropicDiffusion',
                    'ProbeVolumeWithModel',
                    'ModelMaker',
                    'ExtractSkeleton',
                    'GrayscaleGrindPeakImageFilter',
                    'N4ITKBiasFieldCorrection',
                    'BRAINSResample',
                    'DTIexport',
                    'VBRAINSDemonWarp',
                    'ResampleScalarVectorDWIVolume',
                    'ResampleScalarVolume',
                    'OtsuThresholdSegmentation',
                    # 'ExecutionModelTour',
                    'HistogramMatching',
                    'BRAINSDemonWarp',
                    'ModelToLabelMap',
                    'GaussianBlurImageFilter',
                    'DiffusionWeightedVolumeMasking',
                    'GrayscaleModelMaker',
                    'CastScalarVolume',
                    'DicomToNrrdConverter',
                    'AffineRegistration',
                    'AddScalarVolumes',
                    'LinearRegistration',
                    'SimpleRegionGrowingSegmentation',
                    'DWIJointRicianLMMSEFilter',
                    'MultiResolutionAffineRegistration',
                    'SubtractScalarVolumes',
                    'DWIRicianLMMSEFilter',
                    'OrientScalarVolume',
                    'FiducialRegistration',
                    'BSplineDeformableRegistration',
                    'CurvatureAnisotropicDiffusion',
                    'PETStandardUptakeValueComputation',
                    'DiffusionTensorScalarMeasurements',
                    'ACPCTransform',
                    'EMSegmentTransformToNewFormat',
                    'BSplineToDeformationField']
    # SlicerExecutionModel compliant tools that are usually statically built, and don't need the Slicer3 --launcher
    generate_all_classes(modules_list=modules_list, launcher=[])
    # Tools compliant with SlicerExecutionModel called from the Slicer environment (for shared lib compatibility)
    # launcher = ['/home/raid3/gorgolewski/software/slicer/Slicer', '--launch']
    # generate_all_classes(modules_list=modules_list, launcher=launcher)
    # generate_all_classes(modules_list=['BRAINSABC'], launcher=[] )
| carolFrohlich/nipype | nipype/interfaces/slicer/generate_classes.py | Python | bsd-3-clause | 21,792 | [
"VTK"
] | 1eba58ded56959a0e26430f6788962de5b27c290d9308a27d1b64fe47b6af13d |
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import os.path
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.Pharm2D import Generate, SigFactory, Matcher, Gobbi_Pharm2D
from rdkit.TestRunner import redirect_stdout
from rdkit.six import StringIO
class TestCase(unittest.TestCase):
  """Tests for 2D pharmacophore bit matching (rdkit.Chem.Pharm2D.Matcher)."""

  def setUp(self):
    # Shared signature factory: 2-3 point pharmacophores over the base
    # feature definitions, with three distance bins.
    fdefFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D', 'test_data', 'BaseFeatures.fdef')
    featFactory = ChemicalFeatures.BuildFeatureFactory(fdefFile)
    self.factory = SigFactory.SigFactory(featFactory, minPointCount=2, maxPointCount=3)
    self.factory.SetBins([(0, 2), (2, 5), (5, 8)])
    self.factory.Init()

  def test1_simple(self):
    # Fingerprint of a small molecule with Donor features skipped should
    # produce exactly the expected on-bits.
    mol = Chem.MolFromSmiles('OCC(=O)CCCN')
    self.factory.skipFeats = ['Donor']
    self.factory.Init()
    self.assertEqual(self.factory.GetSigSize(), 510)
    Generate._verbose = False
    sig = Generate.Gen2DFingerprint(mol, self.factory)
    Generate._verbose = False
    tgt = (1, 2, 11, 52, 117)
    onBits = sig.GetOnBits()
    self.assertEqual(tuple(onBits), tgt)
    self.assertEqual(len(onBits), len(tgt))
    # Expected atom matches per bit; the per-bit assertions are disabled.
    bitMatches = ([((0, ), (3, ))],
                  [((0, ), (7, )), ((3, ), (7, ))],
                  [((0, ), (3, ), (7, ))], )
    for i, bit in enumerate(onBits):
      matches = Matcher.GetAtomsMatchingBit(self.factory, bit, mol)
      # print bit,matches
      # tgt = bitMatches[i]
      # self.assertEqual(matches,tgt)

  def test2Bug28(self):
    # Regression: every on-bit of the Gobbi fingerprint must yield at
    # least one matching atom set.
    smi = 'Cc([s]1)nnc1SCC(\CS2)=C(/C([O-])=O)N3C(=O)[C@H]([C@@H]23)NC(=O)C[n]4cnnn4'
    mol = Chem.MolFromSmiles(smi)
    factory = Gobbi_Pharm2D.factory
    factory.SetBins([(2, 3), (3, 4), (4, 5), (5, 8), (8, 100)])
    sig = Generate.Gen2DFingerprint(mol, factory)
    onBits = sig.GetOnBits()
    for bit in onBits:
      atoms = Matcher.GetAtomsMatchingBit(factory, bit, mol, justOne=1)
      self.assertTrue(len(atoms))

  def test3Roundtrip(self):
    # longer-running Bug 28 test
    nToDo = 20
    with open(os.path.join(RDConfig.RDDataDir, 'NCI', 'first_5K.smi'), 'r') as inF:
      inD = inF.readlines()[:nToDo]
    factory = Gobbi_Pharm2D.factory
    factory.SetBins([(2, 3), (3, 4), (4, 5), (5, 8), (8, 100)])
    for line in inD:
      smi = line.split('\t')[0]
      mol = Chem.MolFromSmiles(smi)
      sig = Generate.Gen2DFingerprint(mol, factory)
      onBits = sig.GetOnBits()
      for bit in onBits:
        atoms = Matcher.GetAtomsMatchingBit(factory, bit, mol, justOne=1)
        assert len(atoms), 'bit %d failed to match for smi %s' % (bit, smi)

  def test_exampleCode(self):
    # We make sure that the example code runs
    f = StringIO()
    with redirect_stdout(f):
      Matcher._exampleCode()
    self.assertIn('finished', f.getvalue())
if __name__ == '__main__': # pragma: nocover
unittest.main()
| rvianello/rdkit | rdkit/Chem/Pharm2D/UnitTestMatcher.py | Python | bsd-3-clause | 3,090 | [
"RDKit"
] | 9441f380978a12d2a1fc9fa369f219150a690ad5b4c708c40288470dc8cc15e8 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import MDAnalysis as mda
from MDAnalysis.exceptions import DuplicateWarning, NoDataError
from MDAnalysisTests.datafiles import PSF, DCD, GRO
from MDAnalysisTests.core.util import UnWrapUniverse
import pytest
levels = ('atoms', 'residues', 'segments')
class TestAccumulate(object):
    """Tests the functionality of *Group.accumulate()."""

    @pytest.fixture(params=levels)
    def group(self, request):
        # Parametrized over atoms/residues/segments of the same universe.
        u = mda.Universe(PSF, DCD)
        return getattr(u, request.param)

    def test_accumulate_str_attribute(self, group):
        # Accumulating a named attribute defaults to summation.
        assert_almost_equal(group.accumulate("masses"), np.sum(group.atoms.masses))

    def test_accumulate_different_func(self, group):
        # A custom reduction function can replace the default sum.
        assert_almost_equal(
            group.accumulate("masses", function=np.prod),
            np.prod(group.atoms.masses))

    @pytest.mark.parametrize('name, compound', (('resindices', 'residues'),
                                                ('segindices', 'segments'),
                                                ('molnums', 'molecules'),
                                                ('fragindices', 'fragments')))
    @pytest.mark.parametrize('level', levels)
    def test_accumulate_str_attribute_compounds(self, name, compound, level):
        # Per-compound accumulation must match a manual groupby-and-sum.
        u = UnWrapUniverse()
        group = getattr(u, level)
        ref = [sum(a.masses) for a in group.atoms.groupby(name).values()]
        vals = group.accumulate("masses", compound=compound)
        assert_almost_equal(vals, ref, decimal=5)

    def test_accumulate_wrongname(self, group):
        with pytest.raises(AttributeError):
            group.accumulate("foo")

    def test_accumulate_wrongcomponent(self, group):
        with pytest.raises(ValueError):
            group.accumulate("masses", compound="foo")

    @pytest.mark.parametrize('level', levels)
    def test_accumulate_nobonds(self, level):
        # GRO topology has no bonds, so fragments are unavailable.
        group = getattr(mda.Universe(GRO), level)
        with pytest.raises(NoDataError):
            group.accumulate("masses", compound="fragments")

    @pytest.mark.parametrize('level', levels)
    def test_accumulate_nomolnums(self, level):
        # GRO topology has no molecule information either.
        group = getattr(mda.Universe(GRO), level)
        with pytest.raises(NoDataError):
            group.accumulate("masses", compound="molecules")

    def test_accumulate_array_attribute(self, group):
        # Array input is reduced along the first (atom) axis.
        a = np.ones((len(group.atoms), 2, 5))
        assert_equal(group.accumulate(a), np.sum(a, axis=0))

    def test_accumulate_array_attribute_wrongshape(self, group):
        with pytest.raises(ValueError):
            group.accumulate(np.ones(len(group.atoms) - 1))

    @pytest.mark.parametrize('name, compound', (('resindices', 'residues'),
                                                ('segindices', 'segments'),
                                                ('molnums', 'molecules'),
                                                ('fragindices', 'fragments')))
    @pytest.mark.parametrize('level', levels)
    def test_accumulate_array_attribute_compounds(self, name, compound, level):
        u = UnWrapUniverse()
        group = getattr(u, level)
        ref = [np.ones((len(a), 2, 5)).sum(axis=0) for a in group.atoms.groupby(name).values()]
        assert_equal(group.accumulate(np.ones((len(group.atoms), 2, 5)), compound=compound), ref)
class TestTotals(object):
    """Tests the functionality of *Group.total*() like total_mass
    and total_charge.
    """

    @pytest.fixture(params=levels)
    def group(self, request):
        # Parametrized over atoms/residues/segments of the same universe.
        u = mda.Universe(PSF, DCD)
        return getattr(u, request.param)

    def test_total_charge(self, group):
        assert_almost_equal(group.total_charge(), -4.0, decimal=4)

    @pytest.mark.parametrize('name, compound',
                             (('resids', 'residues'), ('segids', 'segments'),
                              ('fragindices', 'fragments')))
    def test_total_charge_compounds(self, group, name, compound):
        # Per-compound totals must match a manual groupby-and-sum.
        ref = [sum(a.charges) for a in group.atoms.groupby(name).values()]
        assert_almost_equal(group.total_charge(compound=compound), ref)

    @pytest.mark.filterwarnings(  # Prevents regression of issue #2990
        "error:"
        "Using a non-tuple sequence for multidimensional indexing is deprecated:"
        "FutureWarning"
    )
    def test_total_charge_duplicates(self, group):
        # Duplicated atoms are counted twice and must raise DuplicateWarning.
        group2 = group + group[0]
        ref = group.total_charge() + group[0].charge
        with pytest.warns(DuplicateWarning) as w:
            assert_almost_equal(group2.total_charge(), ref)
            assert len(w) == 1

    def test_total_mass(self, group):
        assert_almost_equal(group.total_mass(), 23582.043)

    @pytest.mark.parametrize('name, compound',
                             (('resids', 'residues'), ('segids', 'segments'),
                              ('fragindices', 'fragments')))
    def test_total_mass_compounds(self, group, name, compound):
        ref = [sum(a.masses) for a in group.atoms.groupby(name).values()]
        assert_almost_equal(group.total_mass(compound=compound), ref)

    @pytest.mark.filterwarnings(  # Prevents regression of issue #2990
        "error:"
        "Using a non-tuple sequence for multidimensional indexing is deprecated:"
        "FutureWarning"
    )
    def test_total_mass_duplicates(self, group):
        group2 = group + group[0]
        ref = group.total_mass() + group2[0].mass
        with pytest.warns(DuplicateWarning) as w:
            assert_almost_equal(group2.total_mass(), ref)
            assert len(w) == 1
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/core/test_accumulate.py | Python | gpl-2.0 | 6,638 | [
"MDAnalysis"
] | b66f112ef77e9f929d2633df09f49abaff28da9f18a876dd475ee1dc322e064f |
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
# Benchmark/convergence script for a plain Langevin integrator on the
# switched accurate water test system.
precision = "mixed"
sysname = "switchedaccuratewater"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem = lb_loader.load(sysname)
# BUGFIX: the original referenced ``equil_steps`` without defining it
# (NameError).  Define it explicitly; 10000 steps is a placeholder --
# TODO confirm the intended equilibration length.
equil_steps = 10000
positions, boxes = lb_loader.equilibrate(system, temperature, timestep, positions, equil_steps, minimize=True)
collision_rate = 1.0 / u.picoseconds
n_steps = 25
Neff_cutoff = 1E5
itype = "LangevinIntegrator"
langevin_timestep = 0.4 * u.femtoseconds
integrator = mm.LangevinIntegrator(temperature, collision_rate, langevin_timestep)
context = lb_loader.build(system, integrator, positions, temperature, precision=precision)
# Output file encodes precision, system, integrator, timestep (fs) and
# collision rate (1/ps).
filename = "./data/%s_%s_%s_%.3f_%d.csv" % (precision, sysname, itype, langevin_timestep / u.femtoseconds, collision_rate * u.picoseconds)
print(filename)
# Burn-in before collecting convergence statistics.
integrator.step(450000)
data, start, g, Neff, mu, sigma, stderr = lb_loader.converge(context, n_steps=n_steps, Neff_cutoff=Neff_cutoff, filename=filename)
| kyleabeauchamp/HMCNotes | code/correctness/old/test_langevin.py | Python | gpl-2.0 | 1,095 | [
"OpenMM"
] | 840a2ec3a1809d011f0784c8488ddffddea8db512ad697f49cc760793fc9d7d9 |
#!/usr/bin/env python2
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# tweak PYTHONPATH
import sys, string, re
import os
from optparse import OptionParser
baseDir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(baseDir, '..', 'server', "lib"))
from igeclient.IClient import IClient
import pprint, traceback
from getpass import getpass
from code import InteractiveConsole
from ige.ospace import Const
import time
#not race specific:
# Technology ids granted at each research level (shared by all races).
# Level 99 holds special techs handed out regardless of level progression.
levelTechs = {1: [
    1000, 1100, 1101, 1102, 1104, 1106, 1107, 1110, 1112,
    1400, 1401, 1402, 1403, 1404, 1405,
    1500, 1510, 1511,
    1800, 1801, 1802, 1803,
    ],
    2: [
    1105, 1111,
    2001, 2006,
    2400, 2401, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410,
    2801, 2802, 2803,
    ],
    3: [
    3000, 3010, 3013,
    3401, 3402, 3403, 3404, 3405, 3406, 3407, 3409, 3410,
    3450, 3451,
    3800, 3802,
    ],
    4: [
    4000, 4004, 4005,
    ],
    5: [
    5000, 5001, 5002,
    5800, 5801, 5802, 5803,
    ],
    6: [
    6000, 6001, 6005, 6025,
    ],
    99: [
    99001, 99002, 99003, 99004,
    ]}
#race specific:
# Per-race technology ids at each level, keyed by race letter
# ('B', 'H', 'C').
levelTechsRaces = {
    1: {'B': [], 'H': [], 'C': []},
    2: {
        'B': [2003, 2005, 2007, 2804, 2805],
        'H': [2000, 2004],
        'C': [2002, 2004]},
    3: {
        'B': [3001, 3003, 3007, 3008, 3412, 3420, 3421, 3452, 3454, 3803, ],
        'H': [3002, 3004, 3006, 3009, 3408, 3411, 3453, 3455, 3803, ],
        'C': [3001, 3005, 3006, 3411, 3453, 3456, ]},
    4: {
        'B': [4003, 4006, 4008, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4458, 4460, 4476, 4502, 4504, ],
        'H': [4002, 4007, 4008, 4009, 4010, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4459, 4461, 4477, 4479, 4480, 4500, 4503, ],
        'C': [4001, 4006, 4007, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4459, 4462, 4477, 4479, 4501, 4503, ]},
    5: {
        'B': [5400, 5401, 5402, 5403, 5404, 5405, 5406, 5007, 5431, 5433, 5465, 5467, 5470, 5475, 5503, 5504, 5508, 5805, 5808],
        'H': [5003, 5004, 5005, 5008, 5408, 5409, 5410, 5411, 5412, 5413, 5414, 5430, 5434, 5466, 5468, 5471, 5474, 5501, 5502, 5507, 5804, 5807],
        'C': [5006, 5416, 5417, 5418, 5419, 5420, 5421, 5432, 5435, 5466, 5469, 5472, 5473, 5476, 5505, 5506, 5806]},
    6: {
        'B': [6200, 6201, 6202, 6203, 6204, 6205, 6220, 6221, 6222, 6241, ],
        'H': [6100, 6101, 6102, 6103, 6104, 6105, 6120, 6121, 6140, 6141, 6160, ],
        'C': [6301, 6302, 6303, 6304, 6305, 6306, 6320, 6321, 6322, 6323, 6340, 6360, ]
    },
    99: {'B': [], 'H': [], 'C': []},
}
# Per-level, per-race "advancement" tech id used when raising a player's level.
advTechLevel = {
    1: {},
    2: {"B" : 1990, "H" : 1991, "C" : 1992},
    3: {"B" : 2990, "H" : 2991, "C" : 2992},
    4: {"B" : 3990, "H" : 3991, "C" : 3992},
    5: {"B" : 4990, "H" : 4991, "C" : 4992},
    6: {"B" : 5990, "H" : 5991, "C" : 5992},
    99: {}
}
def cleanupBadFleets():
un = s.getInfo(1)
delete = []
#search by system rather than by player; there is no galaxy list of playerIDs
for galaxyID in un.galaxies:
galaxy = s.getInfo(galaxyID)
for systemID in galaxy.systems:
system = s.getInfo(systemID)
for fleetID in system.fleets:
fleet = s.getInfo(fleetID)
owner = s.getInfo(fleet.owner)
if not owner.galaxies:
delete.append((fleetID,systemID,fleet.owner,0))
if owner.galaxies[0] != galaxyID:
delete.append((fleetID,systemID,fleet.owner,1))
for row in delete:
if row[3]:
print "Deleting",row[0],"- owner not in fleet's galaxy"
else:
print "Deleting",row[0],"- owner not in a galaxy"
s.disbandFleet(row[0])
return
def msgHandler(id, data):
    # Callback for server messages from IClient; negative ids are internal
    # client notifications and are not echoed.
    if id >= 0:
        print 'Message', id, data
def getPlayer(name):
    """Return the player object with the given name, or None if absent.

    Object 1 is the universe root, which holds the list of player ids.
    """
    universe = s.getInfo(1)
    for pid in universe.players:
        candidate = s.getInfo(pid)
        if candidate.name == name:
            return candidate
    return None
def showPlayers():
    # List human and AI players (id: name), then wait for Enter.
    un = s.getInfo(1)
    players = []
    aiPlayers = []
    for playerID in un.players:
        player = s.getInfo(playerID)
        # Partition on the AI player-type constants.
        if not player.type in Const.AI_PLAYER_TYPES:
            players.append((playerID, player.name))
        else:
            aiPlayers.append((playerID, player.name))
    print
    print
    print "List of current players:"
    for pl in players:
        print "%5d: %s" % pl
    print "List of current AI players:"
    for pl in aiPlayers:
        print "%5d: %s" % pl
    print
    print "Press Enter to continue"
    raw_input()
def showGalaxies():
    # List all galaxies in the universe as "id: name".
    un = s.getInfo(1)
    galaxies = []
    for galaxyID in un.galaxies:
        galaxy = s.getInfo(galaxyID)
        galaxies.append((galaxyID, galaxy.name))
    print
    print
    print "List of current galaxies:"
    for gal in galaxies:
        print "%5d: %s" % gal
    print
def setCurrentObject():
objId = raw_input("oid: ")
newObjID = 0
try:
newObjID = int(objId)
except:
print "Invalid object"
return newObjID
def initDevelTesting(objID):
levels = 6
resources = 8
race = string.upper(raw_input("race: "))
if not (race=='b' or race=='B' or race=='c' or race=='C' or race =='h' or race =='H'):
print "Invalid race"
return objID
for level in range(2,levels+1):
advanceLevelRace(objID,level,race)
for level in range(1,levels+1):
giveTechsNum(objID,level)
giveTechsNum(objID,99)
for stratResID in range(1,resources+1):
giveStratResNum(objID,stratResID,50)
return objID
def giveTechs(objID):
lvl = raw_input("level: ")
level = 0
try:
level = int(lvl)
except:
print "Invalid level"
return objId
if level > 6 and not level == 99:
print "Invalid level"
return objId
giveTechsNum(objID,level)
def giveTechsNum(objID,level):
    # Grant every tech listed for 'level' (setting each to 5) to player
    # objID, plus the race-specific techs when the player's race is set.
    player = s.getInfo(objID)
    plTechs = player.techs
    for techId in levelTechs[level]:
        plTechs[techId] = 5
    if len(player.race) > 0:
        print "setting race dependent techs"
        for techId in levelTechsRaces[level][player.race]:
            plTechs[techId] = 5
    s.set(objID, "techs", plTechs)
    print "Techs at level %d added to player %d." % (level, objID)
    return objID
def giveTech(objID):
    # Prompt for a single tech id and set it to 5 on player objID.
    tid = raw_input("techId: ")
    try:
        techId = int(tid)
    except:
        print "Invalid techId"
        return objID
    player = s.getInfo(objID)
    plTechs = player.techs
    try:
        plTechs[techId] = 5
    except:
        print "Invalid techId"
        return objID
    s.set(objID, "techs", plTechs)
    print "Tech %d added to player %d." % (techId, objID)
    return objID
def advanceLevel(objID):
lvl = raw_input("level: ")
try:
level = int(lvl)
except:
print "Invalid level"
return objID
if level > 6 or level < 2:
print "Invalid level"
return objID
race = string.upper(raw_input("race: "))
if not (race=='b' or race=='B' or race=='c' or race=='C' or race =='h' or race =='H'):
print "Invalid race"
return objID
advanceLevelRace(objID,level)
def advanceLevelRace(objID,level,race):
    # Grant the advancement tech for (level, race) and record the new
    # techLevel and race on player objID.
    player = s.getInfo(objID)
    plTechs = player.techs
    plTechs[advTechLevel[level][race]] = 5
    s.set(objID, "techs", plTechs)
    s.set(objID, "techLevel", level)
    s.set(objID, "race", race)
    print "Tech %d added, techLevel advance to %d to player %d." % (advTechLevel[level][race], level, objID)
    return objID
def promoteToImperator(objID):
    # Make player objID the imperator of a chosen galaxy.
    galID = raw_input("galaxy id: ")
    try:
        galaxyID = int(galID)
    except:
        print "Invalid galaxy id"
        return objID
    # NOTE(review): 3 appears to be the imperator status value on the
    # player object -- confirm against the server's constants.
    s.set(objID, "imperator", 3)
    s.set(galaxyID, "imperator", objID)
    print "Galaxy %d has now imperator %d." % (galaxyID, objID)
    return objID
def giveFame(objID):
    # Add a user-specified amount of pirate fame to player objID.
    numFame = raw_input("Amount of Fame: ")
    try:
        numberFame = int(numFame)
    except:
        print "Not a number"
        return objID
    player = s.getInfo(objID)
    # Only pirate players carry the 'pirateFame' attribute.
    if not hasattr(player,'pirateFame'):
        print "Object is not a pirate"
        return objID
    newFame = player.pirateFame + numberFame
    s.set(objID, "pirateFame", newFame)
    print "Player %d now has %d fame" % (objID, newFame)
def giveStratRes(objID):
    # Prompt for a strategic resource id ('a' means all eight) and a
    # quantity, then credit that amount to player objID.
    resID = raw_input("strategy resource ('a' for all resources): ")
    if not (resID == 'a'):
        try:
            stratResID = int(resID)
        except:
            print "Invalid strategy resource"
            return objID
    qty = raw_input("qty: ")
    try:
        quantity = int(qty)
    except:
        print "Invalid quantity"
        return objID
    if (resID == 'a'):
        # Resource ids run 1..8.
        for stratResID in range(1,9):
            giveStratResNum(objID,stratResID,quantity)
    else:
        giveStratResNum(objID,stratResID,quantity)
    return objID
def giveStratResNum(objID,stratResID,quantity):
plQty = 0
player = s.getInfo(objID)
if stratResID in player.stratRes:
plQty = player.stratRes[stratResID]
stratRes = player.stratRes
stratRes[stratResID] = plQty + quantity
s.set(objID, "stratRes", stratRes)
print "Player %d now has %d pieces of %d." % (objID, stratRes[stratResID], stratResID)
def createGalaxy():
    # Interactively create a new subscribed galaxy under the universe (id 1).
    universe = 1
    print "Creating new galaxy...please specify these parameters. Normal galaxy positions are multiples of 100."
    name = raw_input("Galaxy Name: ")
    gal_type = raw_input("Galaxy Type: ")
    s.createNewSubscribedGalaxy(universe, name, gal_type, [])
def startGalaxy():
showGalaxies()
objId = raw_input("oid: ")
newObjID = 0
try:
newObjID = int(objId)
except:
print "Invalid object"
un = s.getInfo(1)
galaxyObj = 0
print newObjID
for galaxyID in un.galaxies:
print galaxyID
if galaxyID==newObjID:
galaxyObj = newObjID
if galaxyObj == 0:
print "Not a galaxy"
else:
s.enableTime(galaxyObj,1)
print "Galaxy will start on next turn process"
def deleteGalaxy():
showGalaxies()
print "Choose a galaxy to delete."
objId = raw_input("oid: ")
newObjID = 0
try:
newObjID = int(objId)
except:
print "Invalid object"
un = s.getInfo(1)
galaxyObj = 0
print newObjID
for galaxyID in un.galaxies:
if galaxyID==newObjID:
galaxyObjID = newObjID
if galaxyObjID == 0:
print "Not a galaxy"
else:
galaxy = s.getInfo(galaxyObjID)
print "Please confirm that you want to delete", galaxy.name
ok = raw_input("Y/N: ");
if string.upper(ok) == "Y":
s.delete(galaxyObjID)
print "Galaxy deleted"
def showObj(objID):
    # Pretty-print an object's repr, splitting on commas while keeping
    # {...} and [...] groups intact so nested data stays on one line.
    try:
        obj = s.getInfo(objID)
        objstr = repr(obj)
        #insanely complex regex to chunk {data} and [data] parts during split by ,'s
        objarr = re.findall("[^,\{\]]*(?:\{[^\}]*[\}\{]),?|[^,\{\]]*(?:\[[^\]]*[\]\[]),?|[^,\}\]]+,?",objstr)
        for line in objarr:
            print line
    except:
        print "Cannot get object",objID
def showMenu(objID):
    # Print the admin-console menu; objID is the currently selected object.
    print
    print "----- OSpace admin console menu -----"
    print "Current object: %s" % objID
    print
    print "1. Set current object 10. Create Galaxy"
    print "2. Show Players 11. Start Galaxy Time (does not need Obj set)"
    print "3. Show Galaxies 12. Delete Galaxy (does not need Obj set)"
    print "4. Advance to level 13. Init Developer race (all techs, 50 each strat res)"
    print "5. Make imperator 14. Give Fame to a Pirate Player"
    print "6. Give particular tech "
    print "7. Give techs "
    print "8. Give Strat Res "
    print "9. Finish prod queue "
    print
    print "T. Process turn R. Process X turns"
    print "C. Interactive console I. Object Info"
    print
    print "Ctrl+Z to End"
    print
def processTurns():
numT = raw_input("Number of turns: ")
try:
num = int(numT)
except:
print "invalid number of turns"
s.processTurns(num)
def finishProdQueue(objId):
    # Force every item in the object's production queue to completion.
    p = s.get(objId)
    for i in p.prodQueue:
        # NOTE(review): 38400 looks like a "large enough" production amount
        # to finish any task next turn -- confirm against task costs.
        i.currProd = 38400
    s.set(p.oid, "prodQueue", p.prodQueue)
def processMenu(inp, objId, s):
    # Dispatch one admin-console menu selection; returns the current object
    # id (changed only by choice "1").
    # Fixed: choices 13, 14 and "I" previously referenced the module-level
    # global 'objID' instead of the 'objId' parameter, so they operated on
    # whatever the global happened to hold.
    if inp == "1":
        return setCurrentObject()
    elif inp == "2":
        showPlayers()
    elif inp == "3":
        showGalaxies()
    elif inp == "4":
        advanceLevel(objId)
    elif inp == "5":
        promoteToImperator(objId)
    elif inp == "6":
        giveTech(objId)
    elif inp == "7":
        giveTechs(objId)
    elif inp == "8":
        giveStratRes(objId)
    elif inp == "9":
        finishProdQueue(objId)
    elif inp == "10":
        createGalaxy()
    elif inp == "11":
        startGalaxy()
    elif inp == "12":
        deleteGalaxy()
    elif inp == "13":
        initDevelTesting(objId)
    elif inp == "14":
        giveFame(objId)
    elif string.upper(inp) == "I":
        showObj(objId)
    elif string.upper(inp) == "R":
        processTurns()
    elif string.upper(inp) == "T":
        s.processTurn()
    elif string.upper(inp) == "C":
        console = InteractiveConsole(locals())
        console.interact()
    elif string.upper(inp) == "CLEANUPFLEETS":
        cleanupBadFleets()
    return objId
# parse command line arguments
parser = OptionParser(usage = "usage: %prog [options]")
parser.add_option("", "--configdir", dest = "configDir",
    metavar = "DIRECTORY", default = os.path.join(os.path.expanduser("~"), ".outerspace"),
    help = "Override default configuration directory",
)
options, args = parser.parse_args()

#s = IClient('ospace.net:9080', None, msgHandler, None, 'IClient/osc')
s = IClient('localhost:9080', None, msgHandler, None, 'IClient/osc')

# get admin login from <configDir>/token
password = open(os.path.join(options.configDir, "token"), "r").read()

# connect and authenticate as the admin player, then enter the menu loop
s.connect()
s.login('Alpha', 'admin', password)
s.selectAdmin()
try:
    objID = 0
    # loop until EOF (Ctrl+Z / Ctrl+D) ends the session
    while True:
        showMenu(objID)
        objID = processMenu(raw_input(), objID, s)
except EOFError:
    pass
s.logout()
| ospaceteam/outerspace | tools/osclient_tui.py | Python | gpl-2.0 | 14,651 | [
"Galaxy"
] | 6ef6c7b37a420d9826b8053f84afe60b488a21237c348666ddb2640e80905915 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import re
import sys
import uuid
import warnings
from collections import Counter
from itertools import product
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Union
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver import qcdb
from . import optproc
from .exceptions import TestComparisonError, ValidationError, UpgradeHelper
## Python basis helps
@staticmethod
def _pybuild_basis(mol,
                   key=None,
                   target=None,
                   fitrole='ORBITAL',
                   other=None,
                   puream=-1,
                   return_atomlist=False,
                   quiet=False):
    """Build a :class:`core.BasisSet` (or per-atom list of them) for *mol*.

    *key* is the option keyword (e.g. 'BASIS'); *target* is a basis-set name
    or a registered basisspec function. When *target* is falsy, the current
    value of the *key* global option is used. *puream* of -1 lets the basis
    data decide spherical vs. cartesian.
    """
    if key == 'ORBITAL':
        key = 'BASIS'

    def _resolve_target(key, target):
        """Figure out exactly what basis set was intended by (key, target)
        """
        horde = qcdb.libmintsbasisset.basishorde
        if not target:
            if not key:
                key = 'BASIS'
            target = core.get_global_option(key)

        if target in horde:
            return horde[target]
        return target

    # Figure out what exactly was meant by 'target'.
    resolved_target = _resolve_target(key, target)

    # resolved_target needs to be either a string or function for pyconstuct.
    # if a string, they search for a gbs file with that name.
    # if a function, it needs to apply a basis to each atom.
    bs, basisdict = qcdb.BasisSet.pyconstruct(mol.to_dict(),
                                              key,
                                              resolved_target,
                                              fitrole,
                                              other,
                                              return_dict=True,
                                              return_atomlist=return_atomlist)

    if return_atomlist:
        # One BasisSet per atom, each built from its own one-atom molecule.
        atom_basis_list = []
        for atbs in basisdict:
            atommol = core.Molecule.from_dict(atbs['molecule'])
            lmbs = core.BasisSet.construct_from_pydict(atommol, atbs, puream)
            atom_basis_list.append(lmbs)
        return atom_basis_list
    if isinstance(resolved_target, str):
        # Strip any directory path and the .gbs extension from the name.
        basisdict['name'] = basisdict['name'].split('/')[-1].replace('.gbs', '')
    if callable(resolved_target):
        basisdict['name'] = resolved_target.__name__.replace('basisspec_psi4_yo__', '').upper()

    if not quiet:
        core.print_out(basisdict['message'])
        if 'ECP' in basisdict['message']:
            core.print_out(' !!! WARNING: ECP capability is in beta. Please check occupations closely. !!!\n\n')

    if basisdict['key'] is None:
        basisdict['key'] = 'BASIS'
    psibasis = core.BasisSet.construct_from_pydict(mol, basisdict, puream)
    return psibasis


core.BasisSet.build = _pybuild_basis
## Python wavefunction helps
@staticmethod
def _core_wavefunction_build(mol, basis=None):
    """Construct a :class:`core.Wavefunction` for *mol*.

    *basis* may be ``None`` (build from the current global BASIS option), a
    basis-set name, or an already-built :class:`core.BasisSet`.
    """
    if isinstance(basis, str):
        basis = core.BasisSet.build(mol, "ORBITAL", basis)
    elif basis is None:
        basis = core.BasisSet.build(mol)
    wfn = core.Wavefunction(mol, basis)
    # Density-fitted calculations start from the zero basis until the user
    # explicitly provides a DF basis.
    wfn.set_basisset("DF_BASIS_SCF", core.BasisSet.zero_ao_basis_set())
    return wfn


core.Wavefunction.build = _core_wavefunction_build
def _core_wavefunction_get_scratch_filename(self, filenumber):
    """Canonical scratch-file path for this wavefunction and *filenumber*,
    so files can be consistently written and read."""
    prefix = core.get_writer_file_prefix(self.molecule().name())
    stem = os.path.split(os.path.abspath(prefix))[1]
    scratch_dir = core.IOManager.shared_object().get_default_path()
    return os.path.join(scratch_dir, stem + '.' + str(filenumber))


core.Wavefunction.get_scratch_filename = _core_wavefunction_get_scratch_filename
@staticmethod
def _core_wavefunction_from_file(wfn_data: Union[str, Dict, Path]) -> core.Wavefunction:
    r"""Build Wavefunction from data.

    Parameters
    ----------
    wfn_data
        If a dict, use data directly. Otherwise, path-like passed to :py:func:`numpy.load`
        to read from disk.

    Returns
    -------
    Wavefunction
        A deserialized Wavefunction object
    """
    # load the wavefunction from file
    if isinstance(wfn_data, dict):
        pass
    elif isinstance(wfn_data, str):
        # Normalize a bare path string to the .npy suffix np.save used.
        if not wfn_data.endswith(".npy"):
            wfn_data = wfn_data + ".npy"
        wfn_data = np.load(wfn_data, allow_pickle=True).item()
    else:
        # Could be path-like or file-like, let `np.load` handle it
        wfn_data = np.load(wfn_data, allow_pickle=True).item()

    # variable type specific dictionaries to be passed into C++ constructor
    wfn_matrix = wfn_data['matrix']
    wfn_vector = wfn_data['vector']
    wfn_dimension = wfn_data['dimension']
    wfn_int = wfn_data['int']
    wfn_string = wfn_data['string']
    wfn_boolean = wfn_data['boolean']
    wfn_float = wfn_data['float']
    wfn_floatvar = wfn_data['floatvar']
    wfn_matrixarr = wfn_data['matrixarr']

    # reconstruct molecule from dictionary representation
    wfn_molecule = wfn_data['molecule']
    molecule = core.Molecule.from_dict(wfn_molecule)

    # get basis set name and spherical harmonics boolean
    basis_name = wfn_string['basisname']
    if ".gbs" in basis_name:
        basis_name = basis_name.split('/')[-1].replace('.gbs', '')

    basis_puream = wfn_boolean['basispuream']
    basisset = core.BasisSet.build(molecule, 'ORBITAL', basis_name, puream=basis_puream)

    # change some variables to psi4 specific data types (Matrix, Vector, Dimension)
    for label in wfn_matrix:
        array = wfn_matrix[label]
        wfn_matrix[label] = core.Matrix.from_array(array, name=label) if array is not None else None

    for label in wfn_vector:
        array = wfn_vector[label]
        wfn_vector[label] = core.Vector.from_array(array, name=label) if array is not None else None

    for label in wfn_dimension:
        tup = wfn_dimension[label]
        wfn_dimension[label] = core.Dimension.from_list(tup, name=label) if tup is not None else None

    for label in wfn_matrixarr:
        array = wfn_matrixarr[label]
        wfn_matrixarr[label] = core.Matrix.from_array(array, name=label) if array is not None else None

    # make the wavefunction
    wfn = core.Wavefunction(molecule, basisset, wfn_matrix, wfn_vector, wfn_dimension, wfn_int, wfn_string,
                            wfn_boolean, wfn_float)

    # some of the wavefunction's variables can be changed directly
    for k, v in wfn_floatvar.items():
        wfn.set_variable(k, v)
    for k, v in wfn_matrixarr.items():
        wfn.set_variable(k, v)

    return wfn


core.Wavefunction.from_file = _core_wavefunction_from_file
def _core_wavefunction_to_file(wfn: core.Wavefunction, filename: str = None) -> Dict:
    """Serialize a Wavefunction into a NumPy-friendly dictionary, optionally
    saving it to disk.

    Parameters
    ----------
    wfn
        A Wavefunction or inherited class
    filename
        An optional filename to write the data to

    Returns
    -------
    dict
        A dictionary and NumPy representation of the Wavefunction.

    Raises
    ------
    ValidationError
        When the wavefunction uses an anonymous (inline custom) basis set,
        which cannot be rebuilt by name on load.
    """
    # collect the wavefunction's variables in a dictionary indexed by variable type
    # some of the data types have to be made numpy-friendly first

    if wfn.basisset().name().startswith("anonymous"):
        raise ValidationError("Cannot serialize wavefunction with custom basissets.")

    wfn_data = {
        'molecule': wfn.molecule().to_dict(),
        'matrix': {
            'Ca': wfn.Ca().to_array() if wfn.Ca() else None,
            'Cb': wfn.Cb().to_array() if wfn.Cb() else None,
            'Da': wfn.Da().to_array() if wfn.Da() else None,
            'Db': wfn.Db().to_array() if wfn.Db() else None,
            'Fa': wfn.Fa().to_array() if wfn.Fa() else None,
            'Fb': wfn.Fb().to_array() if wfn.Fb() else None,
            'H': wfn.H().to_array() if wfn.H() else None,
            'S': wfn.S().to_array() if wfn.S() else None,
            'X': wfn.lagrangian().to_array() if wfn.lagrangian() else None,
            'aotoso': wfn.aotoso().to_array() if wfn.aotoso() else None,
            'gradient': wfn.gradient().to_array() if wfn.gradient() else None,
            'hessian': wfn.hessian().to_array() if wfn.hessian() else None
        },
        'vector': {
            'epsilon_a': wfn.epsilon_a().to_array() if wfn.epsilon_a() else None,
            'epsilon_b': wfn.epsilon_b().to_array() if wfn.epsilon_b() else None,
            'frequencies': wfn.frequencies().to_array() if wfn.frequencies() else None
        },
        'dimension': {
            'doccpi': wfn.doccpi().to_tuple(),
            'frzcpi': wfn.frzcpi().to_tuple(),
            'frzvpi': wfn.frzvpi().to_tuple(),
            'nalphapi': wfn.nalphapi().to_tuple(),
            'nbetapi': wfn.nbetapi().to_tuple(),
            'nmopi': wfn.nmopi().to_tuple(),
            'nsopi': wfn.nsopi().to_tuple(),
            'soccpi': wfn.soccpi().to_tuple()
        },
        'int': {
            'nalpha': wfn.nalpha(),
            'nbeta': wfn.nbeta(),
            'nfrzc': wfn.nfrzc(),
            'nirrep': wfn.nirrep(),
            'nmo': wfn.nmo(),
            'nso': wfn.nso(),
            'print': wfn.get_print(),
        },
        'string': {
            'name': wfn.name(),
            'module': wfn.module(),
            'basisname': wfn.basisset().name()
        },
        'boolean': {
            'PCM_enabled': wfn.PCM_enabled(),
            'same_a_b_dens': wfn.same_a_b_dens(),
            'same_a_b_orbs': wfn.same_a_b_orbs(),
            'density_fitted': wfn.density_fitted(),
            'basispuream': wfn.basisset().has_puream()
        },
        'float': {
            'energy': wfn.energy(),
            'efzc': wfn.efzc(),
            'dipole_field_x': wfn.get_dipole_field_strength()[0],
            'dipole_field_y': wfn.get_dipole_field_strength()[1],
            'dipole_field_z': wfn.get_dipole_field_strength()[2]
        },
        'floatvar': wfn.scalar_variables(),
        'matrixarr': {k: v.to_array() for k, v in wfn.array_variables().items()}
    } # yapf: disable

    if filename is not None:
        # np.save appends .npy itself only sometimes; normalize explicitly.
        if not filename.endswith('.npy'): filename += '.npy'
        np.save(filename, wfn_data, allow_pickle=True)

    return wfn_data


core.Wavefunction.to_file = _core_wavefunction_to_file
## Python JK helps
@staticmethod
def _core_jk_build(orbital_basis: core.BasisSet, aux: core.BasisSet = None, jk_type: str = None, do_wK: bool = None, memory: int = None) -> core.JK:
    """
    Constructs a Psi4 JK object from an input basis.

    Parameters
    ----------
    orbital_basis
        Orbital basis to use in the JK object.
    aux
        Optional auxiliary basis set for density-fitted tensors. Defaults
        to the DF_BASIS_SCF if set, otherwise the corresponding JKFIT basis
        to the passed in `orbital_basis`.
    jk_type
        Type of JK object to build (DF, Direct, PK, etc). Defaults to the
        current global SCF_TYPE option.
    do_wK
        Forwarded to the JK constructor only when `memory` is also given.
    memory
        Forwarded to the JK constructor only when `do_wK` is also given.

    Returns
    -------
    JK
        Uninitialized JK object.

    Example
    -------
    jk = psi4.core.JK.build(bas)
    jk.set_memory(int(5e8))  # 4GB of memory
    jk.initialize()
    ...
    jk.C_left_add(matrix)
    jk.compute()
    jk.C_clear()
    ...
    """

    # Temporarily override SCF_TYPE when an explicit jk_type is requested.
    optstash = optproc.OptionsState(["SCF_TYPE"])

    if jk_type is not None:
        core.set_global_option("SCF_TYPE", jk_type)

    if aux is None:
        if core.get_global_option("SCF_TYPE") == "DF":
            aux = core.BasisSet.build(orbital_basis.molecule(), "DF_BASIS_SCF", core.get_option("SCF", "DF_BASIS_SCF"),
                                      "JKFIT", orbital_basis.name(), orbital_basis.has_puream())
        else:
            # Non-DF algorithms take a placeholder zero basis.
            aux = core.BasisSet.zero_ao_basis_set()

    if (do_wK is None) or (memory is None):
        jk = core.JK.build_JK(orbital_basis, aux)
    else:
        jk = core.JK.build_JK(orbital_basis, aux, bool(do_wK), int(memory))

    optstash.restore()
    return jk


core.JK.build = _core_jk_build
## Grid Helpers
def _core_vbase_get_np_xyzw(Vpot):
"""
Returns the x, y, z, and weights of a grid as a tuple of NumPy array objects.
"""
x_list = []
y_list = []
z_list = []
w_list = []
# Loop over every block in the potenital
for b in range(Vpot.nblocks()):
# Obtain the block
block = Vpot.get_block(b)
# Obtain the x, y, and z coordinates along with the weight
x_list.append(block.x())
y_list.append(block.y())
z_list.append(block.z())
w_list.append(block.w())
x = np.hstack(x_list)
y = np.hstack(y_list)
z = np.hstack(z_list)
w = np.hstack(w_list)
return (x, y, z, w)
# Expose the grid helper as a method on core.VBase.
core.VBase.get_np_xyzw = _core_vbase_get_np_xyzw
## Python other helps
def set_options(options_dict: Dict[str, Any], verbose: int = 1) -> None:
    """Sets Psi4 options from an input dictionary.

    Parameters
    ----------
    options_dict
        Dictionary where keys are "option_name" for global options or
        "module_name__option_name" (double underscore separation) for
        option local to "module_name". Values are the option value. All
        are case insensitive.
    verbose
        Control print volume.

    Raises
    ------
    ValidationError
        When any option fails to set; failures are collected and reported
        together at the end.
    """
    # "MODULE__OPTION" -> local option; bare "OPTION" -> global option.
    optionre = re.compile(r'\A(?P<module>\w+__)?(?P<option>\w+)\Z', re.IGNORECASE)

    rejected = {}
    for k, v, in options_dict.items():
        mobj = optionre.match(k.strip())
        module = mobj.group('module').upper()[:-2] if mobj.group('module') else None
        option = mobj.group('option').upper()

        if module:
            if ((module, option, v) not in [('SCF', 'GUESS', 'READ')]) and ((module, option) not in [('PCM', 'INPUT')]):
                # TODO guess/read exception is for distributed driver. should be handled differently.
                try:
                    core.set_local_option(module, option, v)
                except RuntimeError as err:
                    rejected[k] = (v, err)
                if verbose > 1:
                    print('Setting: core.set_local_option', module, option, v)

            # PCM input blocks are routed through the PCMSolver parser.
            if (module, option) == ("PCM", "INPUT"):
                pcm_helper(v)

        else:
            try:
                core.set_global_option(option, v)
            except RuntimeError as err:
                rejected[k] = (v, err)
            if verbose > 1:
                print('Setting: core.set_global_option', option, v)

    if rejected:
        raise ValidationError(f'Error setting options: {rejected}')
    # TODO could subclass ValidationError and append rejected so that run_json could handle remanants.
def set_module_options(module: str, options_dict: Dict[str, Any]) -> None:
    """
    Sets Psi4 module options from a module specification and input dictionary.
    """
    # Deprecated entry point -- kept working but steering callers to set_options.
    warnings.warn(
        "Using `psi4.set_module_options(<module>, {<key>: <val>})` instead of `psi4.set_options({<module>__<key>: <val>})` is deprecated, and in 1.5 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)

    module_name = module.upper()
    for option_key, option_val in options_dict.items():
        core.set_local_option(module_name, option_key.upper(), option_val)
## OEProp helpers
def pcm_helper(block: str):
    """
    Passes multiline string *block* to PCMSolver parser.

    Parameters
    ----------
    block
        multiline string with PCM input in PCMSolver syntax.
    """
    import pcmsolver

    # The parser works on files, so write the block to a throwaway temp file.
    with NamedTemporaryFile(mode="w+t", delete=True) as fl:
        fl.write(block)
        fl.flush()
        parsed_pcm = pcmsolver.parse_pcm_input(fl.name)

    # delete=False: the parsed file must outlive this function so the PCM
    # module can read it via the stored filename option.
    with NamedTemporaryFile(mode="w+t", delete=False) as fl:
        fl.write(parsed_pcm)
        core.set_local_option("PCM", "PCMSOLVER_PARSED_FNAME", fl.name)
def basname(name):
    """Imitates BasisSet.make_filename() without the gbs extension"""
    # Single-character substitutions done in one pass via str.translate.
    filesystem_safe = str.maketrans({'+': 'p', '*': 's', '(': '_', ')': '_', ',': '_'})
    return name.lower().translate(filesystem_safe)
def temp_circular_import_blocker():
    # Intentional no-op placeholder (returns None, like the original pass body).
    return None
def basis_helper(block, name='', key='BASIS', set_option=True):
    """For PsiAPI mode, forms a basis specification function from *block*
    and associates it with keyword *key* under handle *name*. Registers
    the basis spec with Psi4 so that it can be applied again to future
    molecules. For usage, see mints2, mints9, and cc54 test cases. Unless
    *set_option* is False, *name* will be set as current active *key*,
    equivalent to `set key name` or `set_option({key: name})`.
    """
    key = key.upper()
    # Anonymous blocks get a unique generated handle.
    name = ('anonymous' + str(uuid.uuid4())[:8]) if name == '' else name
    cleanbas = basname(name).replace('-', '')  # further remove hyphens so can be function name
    block = qcel.util.filter_comments(block)
    command_lines = re.split('\n', block)

    # "assign <symbol> <basis>", "assign <label> <basis>", "assign <basis>"
    # lines, and "[basname]" section markers, respectively.
    symbol_re = re.compile(r'^\s*assign\s+(?P<symbol>[A-Z]{1,3})\s+(?P<basis>[-+*\(\)\w]+)\s*$', re.IGNORECASE)
    label_re = re.compile(
        r'^\s*assign\s+(?P<label>(?P<symbol>[A-Z]{1,3})(?:(_\w+)|(\d+))?)\s+(?P<basis>[-+*\(\)\w]+)\s*$',
        re.IGNORECASE)
    all_re = re.compile(r'^\s*assign\s+(?P<basis>[-+*\(\)\w]+)\s*$', re.IGNORECASE)
    basislabel = re.compile(r'\s*\[\s*([-*\(\)\w]+)\s*\]\s*')

    def anon(mol, role):
        # Closure applied per-molecule: honors assign lines, then parses
        # the remaining shell data keyed by [basname] markers.
        basstrings = {}

        # Start by looking for assign lines, and remove them
        leftover_lines = []
        assignments = False
        for line in command_lines:
            if symbol_re.match(line):
                m = symbol_re.match(line)
                mol.set_basis_by_symbol(m.group('symbol'), m.group('basis'), role=role)
                assignments = True

            elif label_re.match(line):
                m = label_re.match(line)
                mol.set_basis_by_label(m.group('label'), m.group('basis'), role=role)
                assignments = True

            elif all_re.match(line):
                m = all_re.match(line)
                mol.set_basis_all_atoms(m.group('basis'), role=role)
                assignments = True

            else:
                # Ignore blank lines and accumulate remainder
                if line and not line.isspace():
                    leftover_lines.append(line.strip())

        # Now look for regular basis set definitions
        basblock = list(filter(None, basislabel.split('\n'.join(leftover_lines))))
        if len(basblock) == 1:
            if not assignments:
                # case with no [basname] markers where whole block is contents of gbs file
                mol.set_basis_all_atoms(name, role=role)
                basstrings[basname(name)] = basblock[0]
            else:
                message = (
                    "Conflicting basis set specification: assign lines present but shells have no [basname] label."
                    "")
                raise TestComparisonError(message)
        else:
            # case with specs separated by [basname] markers
            for idx in range(0, len(basblock), 2):
                basstrings[basname(basblock[idx])] = basblock[idx + 1]

        return basstrings

    anon.__name__ = 'basisspec_psi4_yo__' + cleanbas
    qcdb.libmintsbasisset.basishorde[name.upper()] = anon
    if set_option:
        core.set_global_option(key, name)
# Property names accepted by core.OEProp, attached as a class attribute.
core.OEProp.valid_methods = [
    'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'LOWDIN_CHARGES', 'WIBERG_LOWDIN_INDICES', 'MAYER_INDICES',
    'MBIS_CHARGES','MBIS_VOLUME_RATIOS', 'MO_EXTENTS', 'GRID_FIELD', 'GRID_ESP', 'ESP_AT_NUCLEI', 'NO_OCCUPATIONS'
]
## Option helpers
def _core_set_global_option_python(key, EXTERN):
    """
    This is a fairly hacky way to get around EXTERN issues. Effectively we are routing this option Python side through attributes until the general Options overhaul.
    """
    # Only the EXTERN keyword is handled by this path.
    if (key != "EXTERN"):
        raise ValidationError("Options: set_global_option_python does not recognize keyword %s" % key)

    if EXTERN is None:
        core.EXTERN = None
        core.set_global_option("EXTERN", False)
    elif isinstance(EXTERN, core.ExternalPotential):
        # Well this is probably the worst hack I have done, thats saying something
        # The potential object is stashed on the core module; the boolean
        # option only records that one is present.
        core.EXTERN = EXTERN
        core.set_global_option("EXTERN", True)
    else:
        raise ValidationError("Options: set_global_option_python can either be a NULL or External Potential object")


core.set_global_option_python = _core_set_global_option_python
## QCvar helps
# Deprecated QCVariable names mapped to their modern spellings; reads of the
# old name warn and are redirected (see _qcvar_warnings below).
_qcvar_transitions = {
    "SCSN-MP2 CORRELATION ENERGY": "SCS(N)-MP2 CORRELATION ENERGY",
    "SCSN-MP2 TOTAL ENERGY": "SCS(N)-MP2 TOTAL ENERGY",
    "MAYER_INDICES": "MAYER INDICES",
    "WIBERG_LOWDIN_INDICES": "WIBERG LOWDIN INDICES",
    "LOWDIN_CHARGES": "LOWDIN CHARGES",
    "MULLIKEN_CHARGES": "MULLIKEN CHARGES",
    "(AT) CORRECTION ENERGY": "A-(T) CORRECTION ENERGY",
    "CCSD(AT) TOTAL ENERGY": "A-CCSD(T) TOTAL ENERGY",
    "CCSD(AT) CORRELATION ENERGY": "A-CCSD(T) CORRELATION ENERGY",
}

# Deprecated QCVariable names removed without a one-to-one replacement; the
# listed surviving variables let the old quantity be recomposed. Reads raise
# UpgradeHelper (see _qcvar_warnings below).
_qcvar_cancellations = {
    "SCSN-MP2 SAME-SPIN CORRELATION ENERGY": ["MP2 SAME-SPIN CORRELATION ENERGY"],
    "SCSN-MP2 OPPOSITE-SPIN CORRELATION ENERGY": ["MP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS-CCSD SAME-SPIN CORRELATION ENERGY": ["CCSD SAME-SPIN CORRELATION ENERGY"],
    "SCS-CCSD OPPOSITE-SPIN CORRELATION ENERGY": ["CCSD OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS-MP2 SAME-SPIN CORRELATION ENERGY": ["MP2 SAME-SPIN CORRELATION ENERGY"],
    "SCS-MP2 OPPOSITE-SPIN CORRELATION ENERGY": ["MP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS(N)-OMP2 CORRELATION ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS(N)-OMP2 TOTAL ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCSN-OMP2 CORRELATION ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCSN-OMP2 TOTAL ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
}
def _qcvar_warnings(key: str) -> str:
    """Emit deprecation warnings for QCVariable *key* and return the name to
    actually consult (redirected for transitioned names).

    Raises
    ------
    UpgradeHelper
        When *key* is a cancelled variable with no direct replacement.
    """
    # Scalar dipole/quadrupole components are deprecated in favor of arrays.
    if any([key.upper().endswith(" DIPOLE " + cart) for cart in ["X", "Y", "Z"]]):
        warnings.warn(
            f"Using scalar QCVariable `{key.upper()}` [D] instead of array `{key.upper()[:-2]}` [e a0] is deprecated, and in 1.5 it will stop working\n",
            category=FutureWarning,
            stacklevel=3)
    if any([key.upper().endswith(" QUADRUPOLE " + cart) for cart in ["XX", "YY", "ZZ", "XY", "XZ", "YZ"]]):
        warnings.warn(
            f"Using scalar QCVariable `{key.upper()}` [D A] instead of array `{key.upper()[:-3]}` [e a0^2] is deprecated, and in 1.5 it will stop working\n",
            category=FutureWarning,
            stacklevel=3)
    # Renamed variables: warn and redirect to the modern spelling.
    if key.upper() in _qcvar_transitions:
        warnings.warn(
            f"Using QCVariable `{key.upper()}` instead of `{_qcvar_transitions[key.upper()]}` is deprecated, and in 1.5 it will stop working\n",
            category=FutureWarning,
            stacklevel=3)
        return _qcvar_transitions[key.upper()]
    # Removed variables: hard error pointing at the surviving components.
    if key.upper() in _qcvar_cancellations:
        raise UpgradeHelper(key.upper(), "no direct replacement", 1.4, " Consult QCVariables " + ", ".join(_qcvar_cancellations[key.upper()]) + " to recompose the quantity.")
    return key
_multipole_order = ["dummy", "dummy", "QUADRUPOLE", "OCTUPOLE", "HEXADECAPOLE"]
for order in range(5, 10):
_multipole_order.append(f"{int(2**order)}-POLE")
def _qcvar_reshape_set(key, val):
    """Reverse `_qcvar_reshape_get` for internal psi4.core.Matrix storage."""
    reshaper = None
    if key.upper().startswith("MBIS"):
        # Per-atom MBIS arrays: charges stay flat; higher moments are
        # compressed to unique components per atom.
        if key.upper().endswith("CHARGES"):
            return val
        elif key.upper().endswith("DIPOLES"):
            reshaper = (-1, 3)
            return val.reshape(reshaper)
        elif key.upper().endswith("QUADRUPOLES"):
            val = val.reshape(-1, 3, 3)
            val = np.array([_multipole_compressor(val[iat], 2) for iat in range(len(val))])
            return val
        elif key.upper().endswith("OCTUPOLES"):
            val = val.reshape(-1, 3, 3, 3)
            val = np.array([_multipole_compressor(val[iat], 3) for iat in range(len(val))])
            return val
    elif key.upper().endswith("DIPOLE"):
        reshaper = (1, 3)
    elif any(key.upper().endswith(p) for p in _multipole_order):
        # Single multipole: compress to unique components, stored as a row.
        val = _multipole_compressor(val, _multipole_order.index(key.upper().split()[-1]))
        reshaper = (1, -1)
    elif key.upper() in ["MULLIKEN_CHARGES", "LOWDIN_CHARGES", "MULLIKEN CHARGES", "LOWDIN CHARGES"]:
        reshaper = (1, -1)

    if reshaper:
        return val.reshape(reshaper)
    else:
        return val
def _qcvar_reshape_get(key, val):
    """For QCVariables where the 2D psi4.core.Matrix shape is unnatural, convert to natural shape in ndarray."""
    reshaper = None
    if key.upper().startswith("MBIS"):
        # Per-atom MBIS arrays: expand stored unique components back to
        # full Cartesian arrays per atom.
        if key.upper().endswith("CHARGES"):
            return val.np
        elif key.upper().endswith("DIPOLES"):
            reshaper = (-1, 3)
            return val.np.reshape(reshaper)
        elif key.upper().endswith("QUADRUPOLES"):
            val = val.np.reshape(-1, 6)
            val = np.array([_multipole_plumper(val[iat], 2) for iat in range(len(val))])
            return val
        elif key.upper().endswith("OCTUPOLES"):
            val = val.np.reshape(-1, 10)
            val = np.array([_multipole_plumper(val[iat], 3) for iat in range(len(val))])
            return val
    elif key.upper().endswith("DIPOLE"):
        reshaper = (3, )
    elif any(key.upper().endswith(p) for p in _multipole_order):
        return _multipole_plumper(val.np.reshape((-1, )), _multipole_order.index(key.upper().split()[-1]))
    elif key.upper() in ["MULLIKEN_CHARGES", "LOWDIN_CHARGES", "MULLIKEN CHARGES", "LOWDIN CHARGES"]:
        reshaper = (-1, )

    if reshaper:
        return val.np.reshape(reshaper)
    else:
        return val
def _multipole_compressor(complete, order):
"""Form flat unique components multipole array from complete Cartesian array.
Parameters
----------
order : int
Multipole order. e.g., 1 for dipole, 4 for hexadecapole.
complete : ndarray
Multipole array, order-dimensional Cartesian array expanded to complete components.
Returns
-------
compressed : ndarray
Multipole array, length (order + 1) * (order + 2) / 2 compressed to unique components.
"""
compressed = []
for ii in range(order + 1):
lx = order - ii
for lz in range(ii + 1):
ly = ii - lz
np_index = []
for xval in range(lx):
np_index.append(0)
for yval in range(ly):
np_index.append(1)
for zval in range(lz):
np_index.append(2)
compressed.append(complete[tuple(np_index)])
assert len(compressed) == ((order + 1) * (order + 2) / 2)
return np.array(compressed)
def _multipole_plumper(compressed: np.ndarray, order: int) -> np.ndarray:
"""Form multidimensional multipole array from unique components array.
Parameters
----------
order
Multipole order. e.g., 1 for dipole, 4 for hexadecapole.
compressed
Multipole array, length (order + 1) * (order + 2) / 2 compressed to unique components.
Returns
-------
complete : numpy.ndarray
Multipole array, order-dimensional Cartesian array expanded to complete components.
"""
shape = tuple([3] * order)
complete = np.zeros(shape)
def compound_index(counter):
# thanks, https://www.pamoc.it/tpc_cart_mom.html Eqn 2.2!
# jn = nz + (ny + nz)(ny + nz + 1) / 2
return int(
counter.get("2", 0) + (counter.get("1", 0) + counter.get("2", 0)) *
(counter.get("1", 0) + counter.get("2", 0) + 1) / 2)
for idx in product("012", repeat=order):
xyz_counts = Counter(idx) # "010" --> {"0": 2, "1": 1}
np_index = tuple(int(x) for x in idx) # ('0', '1') --> (0, 1)
complete[np_index] = compressed[compound_index(xyz_counts)]
return complete
def _core_has_variable(key: str) -> bool:
    """Whether scalar or array QCVariable *key* has been set in global memory."""
    if core.has_scalar_variable(key):
        return True
    return core.has_array_variable(key)
def _core_wavefunction_has_variable(cls: core.Wavefunction, key: str) -> bool:
    """Whether scalar or array QCVariable *key* has been set on *self* :class:`psi4.core.Wavefunction`."""
    if cls.has_scalar_variable(key):
        return True
    return cls.has_array_variable(key)
def _core_variable(key: str) -> Union[float, core.Matrix, np.ndarray]:
    """Return copy of scalar or array QCVariable *key* from global memory.

    Returns
    -------
    float or numpy.ndarray or Matrix
        Scalar variables come back as floats. Array variables whose natural
        shape is not 2D (multipoles, for instance) come back as
        :class:`numpy.ndarray` of natural dimensionality; all other array
        variables come back as :py:class:`~psi4.core.Matrix` and may carry an
        extra symmetry dimension.

    Example
    -------
    >>> psi4.gradient("hf/cc-pvdz")
    >>> psi4.variable("CURRENT ENERGY")
    -100.00985995185668
    >>> psi4.variable("CURRENT DIPOLE")
    array([ 0.        ,  0.        , -0.83217802])
    >>> psi4.variable("CURRENT GRADIENT")
    <psi4.core.Matrix object at 0x12d884fc0>
    """
    # Resolve deprecated key spellings (emits FutureWarning as needed).
    key = _qcvar_warnings(key)
    if core.has_scalar_variable(key):
        return core.scalar_variable(key)
    if core.has_array_variable(key):
        return _qcvar_reshape_get(key, core.array_variable(key))
    raise KeyError("psi4.core.variable: Requested variable " + key + " was not set!\n")
def _core_wavefunction_variable(cls: core.Wavefunction, key: str) -> Union[float, core.Matrix, np.ndarray]:
    """Return copy of scalar or array QCVariable *key* from *self* :class:`psi4.core.Wavefunction`.

    Returns
    -------
    float or numpy.ndarray or Matrix
        Scalar variables come back as floats. Array variables whose natural
        shape is not 2D (multipoles, for instance) come back as
        :class:`numpy.ndarray` of natural dimensionality; all other array
        variables come back as :py:class:`~psi4.core.Matrix` and may carry an
        extra symmetry dimension.

    Example
    -------
    >>> g, wfn = psi4.gradient("hf/cc-pvdz", return_wfn=True)
    >>> wfn.variable("CURRENT ENERGY")
    -100.00985995185668
    >>> wfn.variable("CURRENT DIPOLE")
    array([ 0.        ,  0.        , -0.83217802])
    >>> wfn.variable("CURRENT GRADIENT")
    <psi4.core.Matrix object at 0x12d884fc0>
    """
    # Resolve deprecated key spellings (emits FutureWarning as needed).
    key = _qcvar_warnings(key)
    if cls.has_scalar_variable(key):
        return cls.scalar_variable(key)
    if cls.has_array_variable(key):
        return _qcvar_reshape_get(key, cls.array_variable(key))
    raise KeyError("psi4.core.Wavefunction.variable: Requested variable " + key + " was not set!\n")
def _core_set_variable(key: str, val: Union[core.Matrix, np.ndarray, float]) -> None:
    """Sets scalar or array QCVariable *key* to *val* in global memory.

    Raises ValidationError if *key* is already set with the opposite
    (scalar vs. array) character.
    """
    if isinstance(val, core.Matrix):
        if core.has_scalar_variable(key):
            raise ValidationError("psi4.core.set_variable: Target variable " + key + " already a scalar variable!")
        core.set_array_variable(key, val)
    elif isinstance(val, np.ndarray):
        if core.has_scalar_variable(key):
            raise ValidationError("psi4.core.set_variable: Target variable " + key + " already a scalar variable!")
        # ndarrays are packed into a 2D Matrix using the natural->2D reshape.
        core.set_array_variable(key, core.Matrix.from_array(_qcvar_reshape_set(key, val)))
    else:
        if core.has_array_variable(key):
            raise ValidationError("psi4.core.set_variable: Target variable " + key + " already an array variable!")
        core.set_scalar_variable(key, val)

    # TODO _qcvar_warnings(key)
def _core_wavefunction_set_variable(cls: core.Wavefunction, key: str, val: Union[core.Matrix, np.ndarray, float]) -> None:
    """Sets scalar or array QCVariable *key* to *val* on *cls*.

    Matrix and ndarray values are stored as array variables (ndarrays are
    first packed into a psi4.core.Matrix via the natural->2D reshape);
    any other value is stored as a scalar. Raises ValidationError when *key*
    is already set with the opposite (scalar vs. array) character.
    """
    if isinstance(val, core.Matrix):
        if cls.has_scalar_variable(key):
            raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
                                  " already a scalar variable!")
        else:
            cls.set_array_variable(key, val)
    elif isinstance(val, np.ndarray):
        if cls.has_scalar_variable(key):
            raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
                                  " already a scalar variable!")
        else:
            # Pack the ndarray into the 2D Matrix layout before storing.
            cls.set_array_variable(key, core.Matrix.from_array(_qcvar_reshape_set(key, val)))
    else:
        if cls.has_array_variable(key):
            raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
                                  " already an array variable!")
        else:
            cls.set_scalar_variable(key, val)

    # TODO _qcvar_warnings(key)
def _core_del_variable(key: str) -> None:
    """Removes scalar or array QCVariable *key* from global memory if present."""
    if core.has_scalar_variable(key):
        core.del_scalar_variable(key)
        return
    if core.has_array_variable(key):
        core.del_array_variable(key)
def _core_wavefunction_del_variable(cls: core.Wavefunction, key: str) -> None:
    """Removes scalar or array QCVariable *key* from *cls* if present."""
    if cls.has_scalar_variable(key):
        cls.del_scalar_variable(key)
        return
    if cls.has_array_variable(key):
        cls.del_array_variable(key)
def _core_variables(include_deprecated_keys: bool = False) -> Dict[str, Union[float, core.Matrix, np.ndarray]]:
    """Return all scalar or array QCVariables from global memory."""
    scalars = core.scalar_variables()
    arrays = {k: _qcvar_reshape_get(k, v) for k, v in core.array_variables().items()}
    dicary = {**scalars, **arrays}
    if include_deprecated_keys:
        # Mirror current values under their deprecated key spellings.
        for old_key, current_key in _qcvar_transitions.items():
            if current_key in dicary:
                dicary[old_key] = dicary[current_key]
    return dicary
def _core_wavefunction_variables(cls, include_deprecated_keys: bool = False) -> Dict[str, Union[float, core.Matrix, np.ndarray]]:
    """Return all scalar or array QCVariables from *cls*."""
    scalars = cls.scalar_variables()
    arrays = {k: _qcvar_reshape_get(k, v) for k, v in cls.array_variables().items()}
    dicary = {**scalars, **arrays}
    if include_deprecated_keys:
        # Mirror current values under their deprecated key spellings.
        for old_key, current_key in _qcvar_transitions.items():
            if current_key in dicary:
                dicary[old_key] = dicary[current_key]
    return dicary
# Attach the Python-side QCVariable helpers defined above so that psi4.core
# and psi4.core.Wavefunction expose a uniform scalar/array variable API.
core.has_variable = _core_has_variable
core.variable = _core_variable
core.set_variable = _core_set_variable
core.del_variable = _core_del_variable
core.variables = _core_variables
core.Wavefunction.has_variable = _core_wavefunction_has_variable
core.Wavefunction.variable = _core_wavefunction_variable
core.Wavefunction.set_variable = _core_wavefunction_set_variable
core.Wavefunction.del_variable = _core_wavefunction_del_variable
core.Wavefunction.variables = _core_wavefunction_variables
## Psi4 v1.4 Export Deprecations
# Backward-compatibility shims: each warns with FutureWarning, then forwards
# to the scalar-only or array-only accessor it historically wrapped.
def _core_get_variable(key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_variable` instead of `psi4.core.variable` (or `psi4.core.scalar_variable` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.scalar_variable(key)


def _core_get_variables():
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.variables` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_variables` instead of `psi4.core.variables` (or `psi4.core.scalar_variables` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.scalar_variables()


def _core_get_array_variable(key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_array_variable` instead of `psi4.core.variable` (or `psi4.core.array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.array_variable(key)


def _core_get_array_variables():
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.variables` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_array_variables` instead of `psi4.core.variables` (or `psi4.core.array_variables` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.array_variables()


# Bind the deprecated names on psi4.core.
core.get_variable = _core_get_variable
core.get_variables = _core_get_variables
core.get_array_variable = _core_get_array_variable
core.get_array_variables = _core_get_array_variables
# Deprecated Wavefunction accessors: warn, then forward to the modern
# scalar_variable/array_variable family.
def _core_wavefunction_get_variable(cls, key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.get_variable` instead of `psi4.core.Wavefunction.variable` (or `psi4.core.Wavefunction.scalar_variable` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.scalar_variable(key)


def _core_wavefunction_get_array(cls, key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.get_array` instead of `psi4.core.Wavefunction.variable` (or `psi4.core.Wavefunction.array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.array_variable(key)


def _core_wavefunction_set_array(cls, key, val):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.set_variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.set_array` instead of `psi4.core.Wavefunction.set_variable` (or `psi4.core.Wavefunction.set_array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.set_array_variable(key, val)


def _core_wavefunction_arrays(cls):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.variables` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.arrays` instead of `psi4.core.Wavefunction.variables` (or `psi4.core.Wavefunction.array_variables` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.array_variables()


# Bind the deprecated names on psi4.core.Wavefunction.
core.Wavefunction.get_variable = _core_wavefunction_get_variable
core.Wavefunction.get_array = _core_wavefunction_get_array
core.Wavefunction.set_array = _core_wavefunction_set_array
core.Wavefunction.arrays = _core_wavefunction_arrays
def _core_wavefunction_frequencies(cls):
    """Return vibrational frequencies from the py-side `frequency_analysis`
    data, filtered to vibrations only and with complex parts dropped, as a
    psi4.core.Vector; None when no frequency analysis has been stored."""
    if not hasattr(cls, 'frequency_analysis'):
        return None
    vibinfo = cls.frequency_analysis
    # Drop translation/rotation modes, then keep only real frequencies.
    vibonly = qcdb.vib.filter_nonvib(vibinfo)
    return core.Vector.from_array(qcdb.vib.filter_omega_to_real(vibonly['omega'].data))


def _core_wavefunction_legacy_frequencies(cls):
    """
    .. deprecated:: 1.4
    """
    # NOTE(review): this wrapper is bound over the very name it calls
    # (`core.Wavefunction.legacy_frequencies` below), which looks like
    # unbounded recursion unless the C-side slot survives the rebinding —
    # confirm against the psi4 bindings.
    warnings.warn(
        "Using `psi4.core.Wavefunction.legacy_frequencies` (accessing c-side member data) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.legacy_frequencies()


def _core_wavefunction_set_frequencies(cls, val):
    """
    .. deprecated:: 1.4
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.set_frequencies` (accessing c-side member data) instead of `psi4.core.Wavefunction.frequency_analysis` (py-side member data) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.set_legacy_frequencies(val)


core.Wavefunction.frequencies = _core_wavefunction_frequencies
core.Wavefunction.legacy_frequencies = _core_wavefunction_legacy_frequencies
core.Wavefunction.set_frequencies = _core_wavefunction_set_frequencies


def _core_wavefunction_X(cls):
    """Deprecated alias for :py:func:`psi4.core.Wavefunction.lagrangian`."""
    warnings.warn(
        "Using `psi4.core.Wavefunction.X` instead of `psi4.core.Wavefunction.lagrangian` is deprecated, and in 1.5 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.lagrangian()


core.Wavefunction.X = _core_wavefunction_X
## Psi4 v1.3 Export Deprecations
def _core_get_gradient():
    """
    .. deprecated:: 1.2
    """
    warnings.warn(
        "Using `psi4.core.get_gradient` (only used internally for C++ optking; deprecated silently in 1.2) is deprecated, and in 1.5 (or whenever Py optking is adopted) it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.get_legacy_gradient()


def _core_set_gradient(val):
    """
    .. deprecated:: 1.2
    """
    warnings.warn(
        "Using `psi4.core.set_gradient` (only used internally for C++ optking; deprecated silently in 1.2) is deprecated, and in 1.5 (or whenever Py optking is adopted) it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.set_legacy_gradient(val)


core.get_gradient = _core_get_gradient
core.set_gradient = _core_set_gradient


def _core_doublet(A, B, transA, transB):
    """Multiply two matrices together.

    .. deprecated:: 1.4
       Use :py:func:`psi4.core.doublet` instead.
    """
    warnings.warn(
        "Using `psi4.core.Matrix.doublet` instead of `psi4.core.doublet` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.doublet(A, B, transA, transB)


def _core_triplet(A, B, C, transA, transB, transC):
    """Multiply three matrices together.

    .. deprecated:: 1.4
       Use :py:func:`psi4.core.triplet` instead.
    """
    warnings.warn(
        "Using `psi4.core.Matrix.triplet` instead of `psi4.core.triplet` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.triplet(A, B, C, transA, transB, transC)


# Keep the old static entry points on Matrix pointing at the free functions.
core.Matrix.doublet = staticmethod(_core_doublet)
core.Matrix.triplet = staticmethod(_core_triplet)
| psi4/psi4 | psi4/driver/p4util/python_helpers.py | Python | lgpl-3.0 | 44,094 | [
"Psi4"
] | 35558e371dd30329c11ec535fec108e0324db73f93dfe2efc62c82ff4abc3a88 |
from numpy import exp, array, random, dot
class NeuronLayer():
    """One fully-connected layer of neurons.

    Weights are drawn uniformly from [-1, 1) into a matrix of shape
    (number_of_inputs_per_neuron, number_of_neurons).
    """

    def __init__(self, number_of_neurons, number_of_inputs_per_neuron):
        shape = (number_of_inputs_per_neuron, number_of_neurons)
        # Map random.random's [0, 1) samples onto [-1, 1).
        self.synaptic_weights = 2 * random.random(shape) - 1
class NeuralNetwork():
    """A simple feed-forward network trained by full-batch backpropagation.

    ``neural_layers`` is an ordered list of layer objects, each exposing a
    ``synaptic_weights`` matrix of shape (inputs, neurons).
    """

    def __init__(self, neural_layers):
        self.neural_layers = neural_layers
        # Per-layer activations cached by think(..., training=True).
        self.neural_layers_output = []

    # The Sigmoid function, which describes an S shaped curve.
    # We pass the weighted sum of the inputs through this function to
    # normalise them between 0 and 1.
    def __sigmoid(self, x):
        return 1 / (1 + exp(-x))

    # The derivative of the Sigmoid function, expressed in terms of the
    # sigmoid's output: s'(z) = s(z) * (1 - s(z)).
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        """Adjust synaptic weights by gradient descent for the given
        number of iterations.

        Bug fix: the previous version compared every layer except layer 0
        directly against the training targets, which is only correct for the
        final layer.  Hidden-layer errors are now backpropagated from the
        following layer's delta, so networks with more than two layers train
        correctly as well (two-layer behaviour is unchanged).
        """
        last = len(self.neural_layers) - 1
        for iteration in range(number_of_training_iterations):
            # Forward pass, caching every layer's activation.
            self.think(training_set_inputs, True)
            layer_delta = None
            for index in range(last, -1, -1):
                if index == last:
                    # Output layer: error against the training targets.
                    layer_error = training_set_outputs - self.neural_layers_output[index]
                else:
                    # Hidden layer: propagate the following layer's delta
                    # back through that layer's weights.
                    layer_error = layer_delta.dot(self.neural_layers[index + 1].synaptic_weights.T)
                layer_delta = layer_error * self.__sigmoid_derivative(self.neural_layers_output[index])
                if index == 0:
                    layer_adjustment = training_set_inputs.T.dot(layer_delta)
                else:
                    layer_adjustment = self.neural_layers_output[index - 1].T.dot(layer_delta)
                self.neural_layers[index].synaptic_weights += layer_adjustment

    def think(self, training_set_inputs, training=False):
        """Propagate inputs through every layer.

        Returns the list of per-layer activations, or (when ``training`` is
        True) caches them on the instance and returns None.
        """
        outputs = []
        activation = training_set_inputs
        for layer in self.neural_layers:
            activation = self.__sigmoid(dot(activation, layer.synaptic_weights))
            outputs.append(activation)
        if training:
            self.neural_layers_output = outputs
        else:
            return outputs

    def print_weights(self):
        """Print each layer's synaptic weight matrix."""
        for i, layer in enumerate(self.neural_layers):
            print(" Layer %s: " % (i))
            print(layer.synaptic_weights)
            print("=====================================")
if __name__ == "__main__":
#Seed the random number generator
random.seed(1)
neural_layers = [
NeuronLayer(4, 3), # Create layer Layer=1 (4 neurons, each with 3 inputs)
#NeuronLayer(9, 4), # Create layer Output-4 (9 neurons, each with 3 inputs)
#NeuronLayer(7, 9), # Create layer Output-3 (7 neurons, each with 9 inputs)
#NeuronLayer(4, 7), # Create layer Output-2 (4 neurons, each with 7 inputs)
NeuronLayer(3, 4), # Create layer Output-1 (3 neurons, each with 4 inputs)
#NeuronLayer(1, 3) # Create layer Output (a single neuron with 3 inputs)
]
# Combine the layers to create a neural network
neural_network = NeuralNetwork(neural_layers)
print("Stage 1) Random starting synaptic weights: ")
neural_network.print_weights()
# The training set. We have 7 examples, each consisting of 3 input values
# and 1 output value.
training_set_inputs = array([
[0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0], [1, 0, 0],
[1, 1, 1], [0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0],
[1, 1, 1], [0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0],
[1, 0, 0], [1, 1, 1], [0, 0, 0]
])
training_set_outputs = array([
[0, 1, 1, 1, 1,
0, 0, 1, 1, 1,
0, 0, 1, 1, 1,
1, 0, 0]
]).T
# Train the neural network using the training set.
# Do it 60,000 times and make small adjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 60000)
print("Stage 2) New synaptic weights after training: ")
neural_network.print_weights()
# Test the neural network with a new situation.
print("Stage 3) Considering a new situation [1, 1, 0] -> ?: ")
outputs = neural_network.think(array([1, 1, 0]))
print(outputs[-1])
# Test the neural network with a new situation.
print("Stage 4) Considering a new situation [1, 0, 0] -> ?: ")
outputs = neural_network.think(array([1, 0, 0]))
print(outputs[-1])
| abhishekkr/tutorials_as_code | talks-articles/machine-learning/toolbox/numpy/multi-layer-neural-network.py | Python | mit | 4,859 | [
"NEURON"
] | d40f10e5eb3cedaba5a50e00e51122c1360885b21e7600250669b440efa1dee2 |
"""
Copyright (c) 2017 Sam Witte
Created on Jan 19, 2017
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Results from
"""
from __future__ import absolute_import
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
pi = np.pi
name = "DarkSideG2"
modulated = False
energy_resolution_type = "Dirac"
def EnergyResolution(e):
return np.ones_like(e)
FFSD = 'GaussianFFSD'
FFSI = 'HelmFF'
FF = {'SI': FFSI,
'SDPS': FFSD,
'SDAV': FFSD,
}
target_nuclide_AZC_list = \
np.array([[40, 18, 1.]])
target_nuclide_JSpSn_list = \
np.array([[0., 0., 0.]])
target_nuclide_mass_list = np.array([37.42])
num_target_nuclides = target_nuclide_mass_list.size
Ethreshold = 40.
Emaximum = 240.
ERmaximum = 240.
def QuenchingFactor(e):
    """Quenching factor for this detector: unity at every recoil energy."""
    return np.full_like(e, 1)
def Efficiency_ER(er):
    """Recoil-energy-only efficiency: flat unity.

    Accepts a scalar or array-like *er* and always returns an ndarray of
    ones with one entry per recoil energy.
    """
    # np.atleast_1d replaces the previous try/len/except-TypeError dance
    # for promoting a bare scalar to a length-1 sequence.
    return np.ones_like(np.atleast_1d(er))
def Efficiency(e, er):
    """Detection efficiency for measured energy *e* and recoil energy *er*.

    Flat 70% efficiency inside the open recoil-energy window
    (Ethreshold, ERmaximum) and zero outside; *e* is unused here.
    """
    try:
        len(er)
    except TypeError:
        # Promote a bare scalar to a one-element sequence.
        er = [er]
    window = [1.0 if Ethreshold < ee < ERmaximum else 0. for ee in er]
    return 0.7 * np.array(window)
# Exposure: 20 x 1000 kg fiducial mass x 365.24 day/yr x 3 yr — presumably
# kg*days for a 20-tonne, 3-year run; confirm units downstream.
Exposure = 20. * 1000. * 365.24 * 3.
# No observed candidate recoil events.
ERecoilList = np.array([])
| SamWitte/Codds_DarkMatter | src/Data/DarkSideG2.py | Python | gpl-2.0 | 1,829 | [
"DIRAC"
] | 89e4d32e7eaede4b5ea2d90979049ced64fc7b268234079f9aa73118ca6f08b4 |
"""
==================================
Bayesian optimization with `skopt`
==================================
Gilles Louppe, Manoj Kumar July 2016.
Reformatted by Holger Nahrstaedt 2020
.. currentmodule:: skopt
Problem statement
-----------------
We are interested in solving
.. math::
x^* = arg \min_x f(x)
under the constraints that
- :math:`f` is a black box for which no closed form is known
(nor its gradients);
- :math:`f` is expensive to evaluate;
- and evaluations of :math:`y = f(x)` may be noisy.
**Disclaimer.** If you do not have these constraints, then there
is certainly a better optimization algorithm than Bayesian optimization.
This example uses :class:`plots.plot_gaussian_process` which is available
since version 0.8.
Bayesian optimization loop
--------------------------
For :math:`t=1:T`:
1. Given observations :math:`(x_i, y_i=f(x_i))` for :math:`i=1:t`, build a
probabilistic model for the objective :math:`f`. Integrate out all
possible true functions, using Gaussian process regression.
2. Optimize a cheap acquisition/utility function :math:`u` based on the
posterior distribution for sampling the next point.
:math:`x_{t+1} = arg \\min_x u(x)`
Exploit uncertainty to balance exploration against exploitation.
3. Sample the next observation :math:`y_{t+1}` at :math:`x_{t+1}`.
Acquisition functions
---------------------
Acquisition functions :math:`u(x)` specify which sample :math:`x`: should be
tried next:
- Expected improvement (default):
:math:`-EI(x) = -\\mathbb{E} [f(x) - f(x_t^+)]`
- Lower confidence bound: :math:`LCB(x) = \mu_{GP}(x) + \kappa \sigma_{GP}(x)`
- Probability of improvement: :math:`-PI(x) = -P(f(x) \geq f(x_t^+) + \kappa)`
where :math:`x_t^+` is the best point observed so far.
In most cases, acquisition functions provide knobs (e.g., :math:`\kappa`) for
controlling the exploration-exploitation trade-off.
- Search in regions where :math:`\mu_{GP}(x)` is high (exploitation)
- Probe regions where uncertainty :math:`\sigma_{GP}(x)` is high (exploration)
"""
print(__doc__)
import numpy as np
# Seed the global RNG so the tutorial's output is reproducible.
np.random.seed(237)
import matplotlib.pyplot as plt
from skopt.plots import plot_gaussian_process
#############################################################################
# Toy example
# -----------
#
# Let us assume the following noisy function :math:`f`:
noise_level = 0.1
def f(x, noise_level=noise_level):
    """Noisy 1-D objective: sin(5x) * (1 - tanh(x^2)) plus Gaussian noise."""
    clean = np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))
    return clean + noise_level * np.random.randn()
#############################################################################
# **Note.** In `skopt`, functions :math:`f` are assumed to take as input a 1D
# vector :math:`x`: represented as an array-like and to return a scalar
# :math:`f(x)`:.
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
# Shade the +/- 1.96 sigma band of the observation noise around the curve.
plt.fill(np.concatenate([x, x[::-1]]),
         np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
                         [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
         alpha=.2, fc="r", ec="None")
plt.legend()
plt.grid()
plt.show()
#############################################################################
# Bayesian optimization based on gaussian process regression is implemented in
# :class:`gp_minimize` and can be carried out as follows:
from skopt import gp_minimize
res = gp_minimize(f,                  # the function to minimize
                  [(-2.0, 2.0)],      # the bounds on each dimension of x
                  acq_func="EI",      # the acquisition function
                  n_calls=15,         # the number of evaluations of f
                  n_random_starts=5,  # the number of random initialization points
                  noise=0.1**2,       # the noise level (optional)
                  random_state=1234)  # the random seed
#############################################################################
# Accordingly, the approximated minimum is found to be:
"x^*=%.4f, f(x^*)=%.4f" % (res.x[0], res.fun)
#############################################################################
# For further inspection of the results, attributes of the `res` named tuple
# provide the following information:
#
# - `x` [float]: location of the minimum.
# - `fun` [float]: function value at the minimum.
# - `models`: surrogate models used for each iteration.
# - `x_iters` [array]:
#   location of function evaluation for each iteration.
# - `func_vals` [array]: function value for each iteration.
# - `space` [Space]: the optimization space.
# - `specs` [dict]: parameters passed to the function.
print(res)
#############################################################################
# Together these attributes can be used to visually inspect the results of the
# minimization, such as the convergence trace or the acquisition function at
# the last iteration:
from skopt.plots import plot_convergence
plot_convergence(res);
#############################################################################
# Let us now visually examine
#
# 1. The approximation of the fit gp model to the original function.
# 2. The acquisition values that determine the next point to be queried.
plt.rcParams["figure.figsize"] = (8, 14)
def f_wo_noise(x):
    """Evaluate the objective *f* with the noise term switched off."""
    return f(x, noise_level=0)
#############################################################################
# Plot the 5 iterations following the 5 random points.
# Left column: model fit against the true function; right column: EI values.
for n_iter in range(5):
    # Plot true function.
    plt.subplot(5, 2, 2*n_iter+1)
    if n_iter == 0:
        show_legend = True
    else:
        show_legend = False
    ax = plot_gaussian_process(res, n_calls=n_iter,
                               objective=f_wo_noise,
                               noise_level=noise_level,
                               show_legend=show_legend, show_title=False,
                               show_next_point=False, show_acq_func=False)
    ax.set_ylabel("")
    ax.set_xlabel("")
    # Plot EI(x)
    plt.subplot(5, 2, 2*n_iter+2)
    ax = plot_gaussian_process(res, n_calls=n_iter,
                               show_legend=show_legend, show_title=False,
                               show_mu=False, show_acq_func=True,
                               show_observations=False,
                               show_next_point=True)
    ax.set_ylabel("")
    ax.set_xlabel("")
plt.show()
#############################################################################
# The first column shows the following:
#
# 1. The true function.
# 2. The approximation to the original function by the gaussian process model
# 3. How sure the GP is about the function.
#
# The second column shows the acquisition function values after every
# surrogate model is fit. It is possible that we do not choose the global
# minimum but a local minimum depending on the minimizer used to minimize
# the acquisition function.
#
# At the points closer to the points previously evaluated at, the variance
# dips to zero.
#
# Finally, as we increase the number of points, the GP model approaches
# the actual function. The final few points are clustered around the minimum
# because the GP does not gain anything more by further exploration:
plt.rcParams["figure.figsize"] = (6, 4)
# Plot f(x) + contours
_ = plot_gaussian_process(res, objective=f_wo_noise,
                          noise_level=noise_level)
plt.show()
| scikit-optimize/scikit-optimize | examples/bayesian-optimization.py | Python | bsd-3-clause | 7,424 | [
"Gaussian"
] | 5abce7cd8f6ab6816727bb97548196278ebb165f70cec23857ca6bbcf8948e1f |
import copy
import itertools
import inspect
import numpy as np
from scipy.sparse import lil_matrix, csc_matrix
from conny import utility
class Function:
    """Interface for neuron activation functions.

    Subclasses implement ``compute`` (forward pass over all inputs) and
    ``derive`` (partial derivative for one input index).
    """

    def compute(self, inputs):
        """Return the activation produced by *inputs*."""
        # Bug fix: `raise NotImplemented` raises a TypeError because
        # NotImplemented is a constant, not an exception type.
        raise NotImplementedError

    def derive(self, activation, input_):
        """Return the derivative of *activation* w.r.t. *input_*."""
        raise NotImplementedError

    def gradient(self, num_inputs, activation):
        """Return the list of derivatives for input indices 0..num_inputs-1.

        Bug fix: ``derive`` takes (activation, input_), but this method
        previously called ``self.derive(i)`` with a single argument, raising
        TypeError for every conforming subclass.
        """
        return [self.derive(activation, i) for i in range(num_inputs)]
class Node(list):
    """A node in a network topology tree.

    A ``Node`` is a list of child nodes; a node without children is a leaf
    and carries an activation ``function`` plus its ``outgoing`` connection
    targets. ``input``/``output`` flags mark which leaves participate when
    a parent node is connected.
    """

    def __init__(self, *args, **kwargs):
        self._parse_args(args)
        self._parse_kwargs(kwargs)

    def __repr__(self):
        attrs = []
        if len(self):
            attrs.append('children=' + str(len(self)))
        else:
            attrs.append('outgoing=' + str(len(self.outgoing)))
        if self.input:
            attrs.append('input')
        if self.output:
            attrs.append('output')
        return '<Node {}>'.format(' '.join(attrs))

    def __eq__(self, other):
        # Identity comparison: two nodes are equal only if they are the same
        # object (list equality would recursively compare children instead).
        return id(self) == id(other)

    def __mul__(self, repeat):
        # node * 3 yields three independent deep copies of the subtree.
        return list(copy.deepcopy(self) for _ in range(repeat))

    def connect(self, target, strategy='full'):
        """
        Connections exist between leaf nodes only. Calling connect() on a
        parent node creates connections between all leaf nodes to which a
        path of output or input nodes exist respectively. Returns the list
        of (source_leaf, target_leaf) pairs that were connected.
        """
        outputs = self._filter_leaves_or_self('output', True)
        inputs = target._filter_leaves_or_self('input', True)
        function = '_connect_' + strategy
        if not hasattr(self, function):
            # Bug fix: previously `raise NotImplemented`, which raises a
            # TypeError since NotImplemented is not an exception type.
            raise NotImplementedError('Unknown connect strategy: ' + strategy)
        # Bug fix: materialize the pairs before iterating; the strategy
        # methods are generators, so the previous code returned an already
        # exhausted generator to the caller.
        pairs = list(getattr(self, function)(outputs, inputs))
        for left, right in pairs:
            left.outgoing.append(right)
        return pairs

    def get_leaves(self):
        """Yield every leaf node of the subtree rooted at this node."""
        if len(self):
            for child in self:
                yield from child.get_leaves()
        else:
            yield self

    def _parse_args(self, args):
        # Construct a leaf from an activation function.
        if len(args) == 1 and utility.implements(args[0], Function):
            self.function = args[0]
            self.outgoing = []
            return
        # Construct an inner node from one or more (nested) lists of children.
        args = list(utility.flatten(args, lambda x: isinstance(x, Node)))
        if args and all(isinstance(arg, Node) for arg in args):
            self += copy.deepcopy(args)
            return
        raise ValueError('No matching constructor')

    def _parse_kwargs(self, kwargs):
        inout = kwargs.get('inout', False)
        # Whether to consider this node if someone connects to the parent.
        self.input = kwargs.get('input', False) or inout
        # Whether to consider this node if the parent connects to someone.
        self.output = kwargs.get('output', False) or inout

    def _filter_leaves(self, attribute, value):
        # (Typo fix: parameter was previously spelled `attibute`.)
        if len(self):
            for child in self:
                if getattr(child, attribute) == value:
                    yield from child._filter_leaves(attribute, value)
        elif getattr(self, attribute) == value:
            yield self

    def _filter_leaves_or_self(self, attribute, value):
        if len(self):
            return list(self._filter_leaves(attribute, value))
        return [self]

    def _connect_full(self, outputs, inputs):
        # Dense connectivity: every output leaf pairs with every input leaf.
        yield from itertools.product(outputs, inputs)
class Network:
    """Runtime representation of a node graph as sparse weight matrices."""

    def __init__(self, root, **kwargs):
        self.root = root
        # Flat, ordered list of leaf neurons; indices into this list are the
        # row/column indices of the weight and gradient matrices.
        self.nodes = list(self.root.get_leaves())
        self._init_nodes()
        self._init_edges()
        self._init_functions()

    def _init_nodes(self):
        """Allocate the per-neuron state vectors."""
        # TODO: Take per neuron bias into account
        shape = len(self.nodes)
        # Index of activation function inside self.functions
        self.types = np.zeros(shape, dtype=np.int8)
        # Current activation vector of the neurons
        self.current = np.zeros(shape, dtype=np.float32)
        # Previous activation vector of the neurons
        self.previous = np.zeros(shape, dtype=np.float32)

    def _init_edges(self, scale=0.1):
        """Allocate and randomly initialize the sparse connection matrices."""
        shape = (len(self.nodes), len(self.nodes))
        # Sparse matrix of weights between the neurons
        self.weights = lil_matrix(shape, dtype=np.float32)
        # Sparse matrix of derivatives with respect to the weights
        self.gradient = lil_matrix(shape, dtype=np.float32)
        # Initialize used weights. All other weights are zero in sparse matrix
        # representation and thus don't affect products of the activation
        # vector and the weight matrix.
        self.edges = []
        for i, source in enumerate(self.nodes):
            for target in source.outgoing:
                j = self.nodes.index(target)
                self.edges.append((i, j))
                self.weights[i, j] = scale * np.random.normal()
                self.gradient[i, j] = 0
        # Compress matrices into efficient formats
        self.weights = csc_matrix(self.weights)
        self.gradient = csc_matrix(self.gradient)

    def _init_functions(self):
        """Collect the distinct activation functions and index neurons by them."""
        # Ordered list of activation functions used in this network
        self.functions = list(set(node.function for node in self.nodes))
        assert len(self.functions) < 256, 'Too many activation functions'
        for index, node in enumerate(self.nodes):
            self.types[index] = self.functions.index(node.function)
        # Bug fix: removed a stray duplicate `self.gradient = csc_matrix(...)`
        # left over here; the matrices are already compressed in _init_edges.
| danijar/conny | conny/core.py | Python | gpl-3.0 | 5,408 | [
"NEURON"
] | ecb02b029ec163f56481442a9755516d0d89b39a07f6f30196611f16d5cac2d3 |
# -*- coding: utf-8 -*-
"""
Create an initial ATP Profile for an ECs Mesh and write it out as .vtp.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Import path for the GenerateATPMap script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../util'))
if not importPath in sys.path:
sys.path.insert(1, importPath)
del importPath
import GenerateATPMapV2
# This is for the c8064 mesh.
GenerateATPMapV2.centrelineFile = "c8064Centreline.vtk"
GenerateATPMapV2.meshFile = "quadMeshFullSmoothECc8064.vtp"
GenerateATPMapV2.debugAtpFile = "quadMeshFullSmoothATPV2c8064.vtp"
GenerateATPMapV2.atpFile = "quadMeshFullSmoothATPc8064.vtp"
GenerateATPMapV2.numBranches = 3
GenerateATPMapV2.numQuads = 8064
GenerateATPMapV2.numAxialQuads = 64
GenerateATPMapV2.numECsPerCol = 4
GenerateATPMapV2.atpGradient = 3.3
GenerateATPMapV2.atpMin = 0.1
GenerateATPMapV2.atpMax = 1.0
def main():
GenerateATPMapV2.buildATPMesh()
if __name__ == '__main__':
print "Starting", os.path.basename(__file__)
main()
print "Exiting", os.path.basename(__file__)
| BlueFern/DBiharMesher | meshes/c8064Smooth/Generate8064ATPMapV2.py | Python | gpl-2.0 | 1,134 | [
"VTK"
] | 1625633f781078b786be1f5d656b441e46a56427a96fca9c00247ec1038d1dd3 |
# this one was generated with:
# for i in *.py; do n=`echo $i | cut -f 1 -d .`; \
# echo -e "class $n:\n kits = ['vtk_kit']\n cats = ['Filters']\n" \
# >> blaat.txt; done
class appendPolyData:
kits = ['vtk_kit']
cats = ['Filters']
help = """DeVIDE encapsulation of the vtkAppendPolyDataFilter that
enables us to combine multiple PolyData structures into one.
DANGER WILL ROBINSON: contact the author, this module is BROKEN.
"""
class clipPolyData:
kits = ['vtk_kit']
cats = ['Filters']
keywords = ['polydata', 'clip', 'implicit']
help = \
"""Given an input polydata and an implicitFunction, this will clip
the polydata.
All points that are inside the implicit function are kept, everything
else is discarded. 'Inside' is defined as all points in the polydata
where the implicit function value is greater than 0.
"""
class closing:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs a greyscale morphological closing on the input image.
Dilation is followed by erosion. The structuring element is ellipsoidal
with user specified sizes in 3 dimensions. Specifying a size of 1 in any
dimension will disable processing in that dimension.
"""
class contour:
kits = ['vtk_kit']
cats = ['Filters']
help = """Extract isosurface from volume data.
"""
class decimate:
kits = ['vtk_kit']
cats = ['Filters']
help = """Reduce number of triangles in surface mesh by merging triangles
in areas of low detail.
"""
class DICOMAligner:
kits = ['vtk_kit', 'wx_kit']
cats = ['DICOM','Filters']
keywords = ['align','reslice','rotate','orientation','dicom']
help = """Aligns a vtkImageData volume (as read from DICOM) to the
standard DICOM LPH (Left-Posterior-Head) coordinate system.
If alignment is not performed the image's "world" (patient LPH)
coordinates may be computed incorrectly (e.g. in Slice3DViewer).
The transformation reslices the original volume, then moves
the image origin as required. The new origin has to be at the centre
of the voxel with most negative LPH coordinates.
Example use case: Before aligning multiple MRI sequences
for fusion/averaging
(Module by Francois Malan)"""
class doubleThreshold:
kits = ['vtk_kit']
cats = ['Filters']
help = """Apply a lower and an upper threshold to the input image data.
"""
class EditMedicalMetaData:
kits = ['vtk_kit']
cats = ['Filters', 'Medical', 'DICOM']
help = """Edit Medical Meta Data structure. Use this to edit for
example the medical meta data output of a DICOMReader before
writing DICOM data to disk, or to create new meta data. You don't
have to supply an input.
"""
class extractGrid:
kits = ['vtk_kit']
cats = ['Filters']
help = """Subsamples input dataset.
This module makes use of the ParaView vtkPVExtractVOI class, which can
handle structured points, structured grids and rectilinear grids.
"""
class extractHDomes:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Extracts light structures, also known as h-domes.
The user specifies the parameter 'h' that indicates how much brighter the
light structures are than their surroundings. In short, this algorithm
performs a fast greyscale reconstruction of the input image from a marker
that is the image - h. The result of this reconstruction is subtracted
from the image.
See 'Morphological Grayscale Reconstruction in Image Analysis:
Applications and Efficient Algorithms', Luc Vincent, IEEE Trans. on Image
Processing, 1993.
"""
class extractImageComponents:
kits = ['vtk_kit']
cats = ['Filters']
help = """Extracts one, two or three components from multi-component
image data.
Specify the indices of the components you wish to extract and the number
of components.
"""
class FastSurfaceToDistanceField:
kits = ['vtk_kit','vtktudoss_kit']
cats = ['Filters']
keywords = ['distance','distance field', 'mauch', 'polydata',
'implicit']
help = """Given an input surface (vtkPolyData), create a signed
distance field with the surface at distance 0. Uses Mauch's very
fast CPT / distance field implementation.
"""
class FitEllipsoidToMask:
kits = ['numpy_kit', 'vtk_kit']
cats = ['Filters']
keywords = ['PCA', 'eigen-analysis', 'principal components', 'ellipsoid']
help = """Given an image mask in VTK image data format, perform eigen-
analysis on the world coordinates of 'on' points.
Returns dictionary with eigen values in 'u', eigen vectors in 'v' and
world coordinates centroid of 'on' points.
"""
class glyphs:
kits = ['vtk_kit']
cats = ['Filters']
help = """Visualise vector field with glyphs.
After connecting this module, execute your network once, then you
can select the relevant vector attribute to glyph from the
'Vectors Selection' choice in the interface.
"""
class greyReconstruct:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs grey value reconstruction of mask I from marker J.
Theoretically, marker J is dilated and the infimum with mask I is
determined. This infimum now takes the place of J. This process is
repeated until stability.
This module uses a DeVIDE specific implementation of Luc Vincent's fast
hybrid algorithm for greyscale reconstruction.
"""
class ICPTransform:
kits = ['vtk_kit']
cats = ['Filters']
keywords = ['iterative closest point transform', 'map',
'register', 'registration']
help = """Use iterative closest point transform to map two
surfaces onto each other with an affine transform.
Three different transform modes are available:
<ul>
<li>Rigid: rotation + translation</li>
<li>Similarity: rigid + isotropic scaling</li>
<li>Affine: rigid + scaling + shear</li>
</ul>
The output of this class is a linear transform that can be used as
input to for example a transformPolydata class.
"""
class imageFillHoles:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Filter to fill holes.
In binary images, holes are image regions with 0-value that are completely
surrounded by regions of 1-value. This module can be used to fill these
holes. This filling also works on greyscale images.
In addition, the definition of a hole can be adapted by 'deactivating'
image borders so that 0-value regions that touch these deactivated borders
are still considered to be holes and will be filled.
This module is based on two DeVIDE-specific filters: a fast greyscale
reconstruction filter as per Luc Vincent and a special image border mask
generator filter.
"""
class imageFlip:
kits = ['vtk_kit']
cats = ['Filters']
help = """Flips image (volume) with regards to a single axis.
At the moment, this flips by default about Z. You can change this by
introspecting and calling the SetFilteredAxis() method via the
object inspection.
"""
class imageGaussianSmooth:
kits = ['vtk_kit']
cats = ['Filters']
help = """Performs 3D Gaussian filtering of the input volume.
"""
class imageGradientMagnitude:
kits = ['vtk_kit']
cats = ['Filters']
help = """Calculates the gradient magnitude of the input volume using
central differences.
"""
class imageGreyDilate:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs a greyscale 3D dilation on the input.
"""
class imageGreyErode:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs a greyscale 3D erosion on the input.
"""
class ImageLogic:
kits = ['vtk_kit']
cats = ['Filters', 'Combine']
help = """Performs pointwise boolean logic operations on input images.
WARNING: vtkImageLogic in VTK 5.0 has a bug where it does require two
inputs even if performing a NOT or a NOP. This has been fixed in VTK CVS.
DeVIDE will upgrade to > 5.0 as soon as a new stable VTK is released.
"""
class imageMask:
kits = ['vtk_kit']
cats = ['Filters', 'Combine']
help = """The input data (input 1) is masked with the mask (input 2).
The output image is identical to the input image wherever the mask has
a value. The output image is 0 everywhere else.
"""
class imageMathematics:
kits = ['vtk_kit']
cats = ['Filters', 'Combine']
help = """Performs point-wise mathematical operations on one or two images.
The underlying logic can do far more than the UI shows at this moment.
Please let me know if you require more options.
"""
class imageMedian3D:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs 3D morphological median on input data.
"""
class landmarkTransform:
kits = ['vtk_kit']
cats = ['Filters']
help = """The landmarkTransform will calculate a 4x4 linear transform
that maps from a set of source landmarks to a set of target landmarks.
The mapping is optimised with a least-squares metric.
For convenience, there are two inputs that you can use in any
combination (either or both). All points that you supply (for
example the output of slice3dVWR modules) will be combined
internally into one list and then divided into two groups based on
the point names: the one group with names starting with 'source'
the other with 'target'. Points will then be matched up by name,
so 'source example 1' will be matched with 'target example 1'.
This module will supply a vtkTransform at its output. By
connecting the vtkTransform to a transformPolyData or a
transformVolume module, you'll be able to perform the actual
transformation.
See the "Performing landmark registration on two volumes" example in the
"Useful Patterns" section of the DeVIDE F1 central help.
"""
class marchingCubes:
kits = ['vtk_kit']
cats = ['Filters']
help = """Extract surface from input volume using the Marching Cubes
algorithm.
"""
class morphGradient:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs a greyscale morphological gradient on the input image.
This is done by performing an erosion and a dilation of the input image
and then subtracting the erosion from the dilation. The structuring
element is ellipsoidal with user specified sizes in 3 dimensions.
Specifying a size of 1 in any dimension will disable processing in that
dimension.
This module can also return both half gradients: the inner (image -
erosion) and the outer (dilation - image).
"""
class opening:
kits = ['vtk_kit']
cats = ['Filters', 'Morphology']
keywords = ['morphology']
help = """Performs a greyscale morphological opening on the input image.
Erosion is followed by dilation. The structuring element is ellipsoidal
with user specified sizes in 3 dimensions. Specifying a size of 1 in any
dimension will disable processing in that dimension.
"""
class MIPRender:
kits = ['vtk_kit']
cats = ['Volume Rendering']
help = """Performs Maximum Intensity Projection on the input volume /
image.
"""
class PerturbPolyPoints:
kits = ['vtk_kit']
cats = ['Filters']
keywords = ['polydata','move','perturb','random','noise','shuffle']
help = """Randomly perturbs each polydata vertex in a uniformly random direction"""
class polyDataConnect:
kits = ['vtk_kit']
cats = ['Filters']
help = """Perform connected components analysis on polygonal data.
In the default 'point seeded regions' mode:
Given a number of seed points, extract all polydata that is
directly or indirectly connected to those seed points. You could see
this as a polydata-based region growing.
"""
class polyDataNormals:
kits = ['vtk_kit']
cats = ['Filters']
help = """Calculate surface normals for input data mesh.
"""
class probeFilter:
kits = ['vtk_kit']
cats = ['Filters']
help = """Maps source values onto input dataset.
Input can be e.g. polydata and source a volume, in which case interpolated
values from the volume will be mapped on the vertices of the polydata,
i.e. the interpolated values will be associated as the attributes of the
polydata points.
"""
class RegionGrowing:
kits = ['vtk_kit']
cats = ['Filters']
keywords = ['region growing', 'threshold', 'automatic',
'segmentation']
help = """Perform 3D region growing with automatic thresholding
based on seed positions.
Given any number of seed positions (for example the first output
of a slice3dVWR), first calculate lower and upper thresholds
automatically as follows:
<ol>
<li>calculate mean intensity over all seed positions.</li>
<li>lower threshold = mean - auto_thresh_interval%
* [full input data scalar range].</li>
<li>upper threshold = mean + auto_thresh_interval% * [full input
data scalar range].</li>
</ol>
After the data has been thresholded with the automatic thresholds,
a 3D region growing is started from all seed positions.
"""
class resampleImage:
kits = ['vtk_kit']
cats = ['Filters']
help = """Resample an image using nearest neighbour, linear or cubic
interpolation.
"""
class seedConnect:
kits = ['vtk_kit']
cats = ['Filters']
help = """3D region growing.
Finds all points connected to the seed points that also have values
equal to the 'Input Connected Value'. This module casts all input to
unsigned char. The output is also unsigned char.
"""
class selectConnectedComponents:
kits = ['vtk_kit']
cats = ['Filters']
help = """3D region growing.
Finds all points connected to the seed points that have the same values
as at the seed points. This is primarily useful for selecting connected
components.
"""
class shellSplatSimple:
kits = ['vtk_kit']
cats = ['Volume Rendering']
help = """Simple configuration for ShellSplatting an input volume.
ShellSplatting is a fast direct volume rendering method. See
http://visualisation.tudelft.nl/Projects/ShellSplatting for more
information.
"""
class StreamerVTK:
kits = ['vtk_kit']
cats = ['Streaming']
keywords = ['streaming', 'streamer', 'hybrid']
help = """Use this module to terminate streaming subsets of
networks consisting of VTK modules producing image or poly data.
This module requests input in blocks to build up a complete output
dataset. Together with the hybrid scheduling in DeVIDE, this
can save loads of memory.
"""
class streamTracer:
kits = ['vtk_kit']
cats = ['Filters']
help = """Visualise a vector field with stream lines.
After connecting this module, execute your network once, then you
can select the relevant vector attribute to glyph from the
'Vectors Selection' choice in the interface.
"""
class surfaceToDistanceField:
kits = ['vtk_kit']
cats = ['Filters']
help = """Given an input surface (vtkPolyData), create an unsigned
distance field with the surface at distance 0.
The user must specify the dimensions and bounds of the output volume.
WARNING: this filter is *incredibly* slow, even for small volumes and
extremely simple geometry. Only use this if you know exactly what
you're doing.
"""
class transformImageToTarget:
kits = ['vtk_kit']
cats = ['Filters']
keywords = ['align','reslice','rotate','orientation','transform','resample']
help = """Transforms input volume by the supplied geometrical transform, and maps it onto a new coordinate frame (orientation, extent, spacing) specified by the
the specified target grid. The target is not overwritten, but a new volume is created with the desired orientation, dimensions and spacing.
This volume is then filled by probing the transformed source. Areas for which no values are found are zero-padded.
"""
class transformPolyData:
kits = ['vtk_kit']
cats = ['Filters']
help = """Given a transform, for example the output of the
landMarkTransform, this module will transform its input polydata.
"""
class transformVolumeData:
kits = ['vtk_kit']
cats = ['Filters']
help = """Transform volume according to 4x4 homogeneous transform.
"""
class ExpVolumeRender:
kits = ['vtk_kit']
cats = ['Volume Rendering']
help = """EXPERIMENTAL Volume Render.
This is an experimental volume renderer module used to test out
new ideas. Handle with EXTREME caution, it might open portals to
other dimensions, letting through its evil minions into ours, and
forcing you to take a stand with only a crowbar at your disposal.
If you would rather just volume render some data, please use the
non-experimental VolumeRender module.
"""
class VolumeRender:
kits = ['vtk_kit']
cats = ['Volume Rendering']
help = """Use direct volume rendering to visualise input volume.
You can select between traditional raycasting, 2D texturing and 3D
texturing. The raycaster can only handler unsigned short or unsigned char
data, so you might have to use a vtkShiftScale module to preprocess.
You can supply your own opacity and colour transfer functions at the
second and third inputs. If you don't supply these, the module will
create opacity and/or colour ramps based on the supplied threshold.
"""
class warpPoints:
kits = ['vtk_kit']
cats = ['Filters']
help = """Warp input points according to their associated vectors.
After connecting this module up, you have to execute the network
once, then select the relevant vectors from the 'Vectors
Selection' choice in the interface.
"""
class wsMeshSmooth:
kits = ['vtk_kit']
cats = ['Filters']
help = """Module that runs vtkWindowedSincPolyDataFilter on its input data
for mesh smoothing.
"""
| nagyistoce/devide | modules/filters/module_index.py | Python | bsd-3-clause | 18,517 | [
"Gaussian",
"ParaView",
"VTK"
] | 77bcf9ed39a4f8c595cbcb5c65baf8d0cac1c5fe5aa4209c3cf72b30fac23700 |
# -*- coding: utf-8 -*-
import os, sys
COMMON_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJECT_DIR = os.path.dirname(COMMON_DIR)
ZIP_PACKAGES_DIRS = (os.path.join(PROJECT_DIR, 'zip-packages'),
os.path.join(COMMON_DIR, 'zip-packages'))
# Overrides for os.environ
env_ext = {'DJANGO_SETTINGS_MODULE': 'settings'}
def setup_env(manage_py_env=False):
"""Configures app engine environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError, e:
# Not on the system path. Build a list of alternative paths where it
# may be. First look within the project for a local copy, then look for
# where the Mac OS SDK installs it.
paths = [os.path.join(COMMON_DIR, '.google_appengine'),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']
for path in os.environ.get('PATH', '').replace(';', ':').split(':'):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
prefix = '%(PROGRAMFILES)s' % os.environ
paths.append(prefix + r'\Google\google_appengine')
# Loop through all possible paths and look for the SDK dir.
SDK_PATH = None
for sdk_path in paths:
sdk_path = os.path.realpath(sdk_path)
if os.path.exists(sdk_path):
SDK_PATH = sdk_path
break
if SDK_PATH is None:
# The SDK could not be found in any known location.
sys.stderr.write('The Google App Engine SDK could not be found!\n'
'Visit http://code.google.com/p/app-engine-patch/'
' for installation instructions.\n')
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
EXTRA_PATHS = [SDK_PATH]
lib = os.path.join(SDK_PATH, 'lib')
# Automatically add all packages in the SDK's lib folder:
for dir in os.listdir(lib):
path = os.path.join(lib, dir)
# Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
detect = (os.path.join(path, dir), os.path.join(path, 'lib', dir))
for path in detect:
if os.path.isdir(path):
EXTRA_PATHS.append(os.path.dirname(path))
break
sys.path = EXTRA_PATHS + sys.path
from google.appengine.api import apiproxy_stub_map
# Add this folder to sys.path
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
setup_project()
from appenginepatcher.patch import patch_all
patch_all()
if not manage_py_env:
return
print >> sys.stderr, 'Running on app-engine-patch 1.0.2.1'
def setup_project():
from appenginepatcher import on_production_server
if on_production_server:
# This fixes a pwd import bug for os.path.expanduser()
global env_ext
env_ext['HOME'] = PROJECT_DIR
os.environ.update(env_ext)
# Add the two parent folders and appenginepatcher's lib folder to sys.path.
# The current folder has to be added in main.py or setup_env(). This
# suggests a folder structure where you separate reusable code from project
# code:
# project -> common -> appenginepatch
# You can put a custom Django version into the "common" folder, for example.
EXTRA_PATHS = [
PROJECT_DIR,
COMMON_DIR,
]
this_folder = os.path.abspath(os.path.dirname(__file__))
EXTRA_PATHS.append(os.path.join(this_folder, 'appenginepatcher', 'lib'))
# We support zipped packages in the common and project folders.
# The files must be in the packages folder.
for packages_dir in ZIP_PACKAGES_DIRS:
if os.path.isdir(packages_dir):
for zip_package in os.listdir(packages_dir):
EXTRA_PATHS.append(os.path.join(packages_dir, zip_package))
# App Engine causes main.py to be reloaded if an exception gets raised
# on the first request of a main.py instance, so don't call setup_project()
# multiple times. We ensure this indirectly by checking if we've already
# modified sys.path.
if len(sys.path) < len(EXTRA_PATHS) or \
sys.path[:len(EXTRA_PATHS)] != EXTRA_PATHS:
# Remove the standard version of Django
for k in [k for k in sys.modules if k.startswith('django')]:
del sys.modules[k]
sys.path = EXTRA_PATHS + sys.path
| lstoll/XMPPtweets | src/common/appenginepatch/aecmd.py | Python | mit | 4,777 | [
"VisIt"
] | 24e925ea0afdaff0866b2507306f848ea641e72e9f20f2211f116571e2558ca7 |
from django.contrib.auth.models import User
from lettuce import step, world
from questionnaire.features.pages.questionnaires import QuestionnairePage
from questionnaire.features.pages.users import LoginPage
from questionnaire.models import Questionnaire, Section, SubSection, Question, QuestionGroup, QuestionGroupOrder, Region, Country, UserProfile
@step(u'Given I am a logged-in user with a user Profile')
def given_i_am_a_logged_in_user_with_a_user_profile(step):
world.region = Region.objects.create(name="Afro")
world.country = Country.objects.create(name="Uganda", code="UGA")
world.region.countries.add(world.country)
world.user = User.objects.create(username='user', email='user@mail.com')
world.user.set_password("password")
world.user.save()
UserProfile.objects.create(user=world.user, country=world.country, region=world.region)
world.page = LoginPage(world.browser)
world.page.visit()
data = {'username': world.user.username,
'password': "password"}
world.page.fill_form(data)
world.page.submit()
@step(u'And I have a questionnaire with questions')
def given_i_have_a_questionnaire_with_questions(step):
world.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", description="From dropbox as given by Rouslan")
world.section_1 = Section.objects.create(order=0,
title="WHO/UNICEF Joint Reporting Form on Immunization for the Period January-December, 2013",
description="""If a question is not relevant, enter "NR" (not relevant).<br/>
If no data are available, enter "ND" (no data).<br/>
If the number of cases is zero, enter 0.""",
questionnaire=world.questionnaire, name="Cover page")
world.section_2 = Section.objects.create(title="Reported Cases of Selected Vaccine Preventable Diseases (VPDs)", order=1,
questionnaire=world.questionnaire, name="Reported Cases")
world.sub_section = SubSection.objects.create(order=1, section=world.section_1)
world.question1 = Question.objects.create(text='Name of person in Ministry of Health responsible for completing this form',
UID='C00023', answer_type='Text', instructions="""
List the name of the person responsible for submitting the completed form.
Since multiple departments in the Ministry of Health may have relevant data,
this person should liaise with other departments to ensure that the form
contains the most accurate and complete data possible. For example,
information on Vitamin A may come from the nutrition department.""")
world.question2 = Question.objects.create(text='Position/title', UID='C00024', answer_type='Text',)
world.question3 = Question.objects.create(text='Email address', UID='C00025', answer_type='Text',)
world.question4 = Question.objects.create(text='Name of UNICEF contact', UID='C00026', answer_type='Text',)
world.question5 = Question.objects.create(text='Email address of UNICEF contact', UID='C00027', answer_type='Text',)
world.question6 = Question.objects.create(text='Name of WHO contact', UID='C00028', answer_type='Text',)
world.question7 = Question.objects.create(text='Email address of WHO contact', UID='C00029', answer_type='Text',)
world.question8 = Question.objects.create(text='Total number of districts in the country', UID='C00030', answer_type='Number',
instructions="""
A district is defined as the third administrative level (nation is the first, province is the second).
""")
parent = QuestionGroup.objects.create(subsection=world.sub_section, order=1)
parent.question.add(world.question1, world.question2, world.question3, world.question4, world.question5, world.question6, world.question7, world.question8)
QuestionGroupOrder.objects.create(question=world.question1, question_group=parent, order=1)
QuestionGroupOrder.objects.create(question=world.question2, question_group=parent, order=2)
QuestionGroupOrder.objects.create(question=world.question3, question_group=parent, order=3)
QuestionGroupOrder.objects.create(question=world.question4, question_group=parent, order=4)
QuestionGroupOrder.objects.create(question=world.question5, question_group=parent, order=5)
QuestionGroupOrder.objects.create(question=world.question6, question_group=parent, order=6)
QuestionGroupOrder.objects.create(question=world.question7, question_group=parent, order=7)
QuestionGroupOrder.objects.create(question=world.question8, question_group=parent, order=8)
@step(u'And I navigate to the section of the questionnaire to be filled in')
def and_i_navigate_to_the_section_of_the_questionnaire_to_be_filled_in(step):
world.page = QuestionnairePage(world.browser, world.section_1)
world.page.visit()
@step(u'When I enter valid responses to the questions')
def and_i_enter_valid_responses_to_the_questions(step):
world.responses = {
'Text-0-response': 'James Smith',
'Text-1-response': 'EPI Manager',
'Text-2-response': 'jsmith@moh.gov.ug',
'Text-3-response': 'Angellina Jones',
'Text-4-response': 'ajones@unicef.org',
'Text-5-response': 'Brad Wolfstrom',
'Text-6-response': 'brad.wolfstrom@who.org',
'Number-0-response': '200'}
world.page.fill_form(world.responses)
@step(u'And I click the save button')
def when_i_click_the_save_button(step):
world.page.click_by_id('save_draft_button')
@step(u'Then I should see a message that a draft of my responses has been saved')
def then_i_should_see_a_message_that_a_draft_of_my_responses_has_been_saved(step):
world.page.validate_alert_success()
@step(u'And when I navigate back to this section')
def and_when_i_navigate_back_to_this_section(step):
world.page.visit()
@step(u'I should see my responses filled out')
def i_should_see_my_responses_filled_out(step):
world.page.validate_responses(world.responses)
@step(u'When I enter invalid responses to the questions')
def when_i_enter_invalid_responses_to_the_questions(step):
data = {
'Text-0-response': '',
'Text-1-response': '',
'Text-2-response': '',
'Text-3-response': 'Angellina Jones',
'Text-4-response': 'ajones@unicef.org',
'Text-5-response': 'Brad Wolfstrom',
'Text-6-response': 'brad.wolfstrom@who.org',
'Number-0-response': 'something that is not a number'}
world.page.fill_form(data)
@step(u'Then I should see a save draft error message')
def then_i_should_see_a_save_draft_error_message(step):
world.page.validate_alert_error()
@step(u'And I switch to another section')
def and_i_switch_to_another_section(step):
world.page.click_link_by_partial_href('section/2') | testvidya11/ejrf | questionnaire/features/responses_steps.py | Python | bsd-3-clause | 6,990 | [
"VisIt"
] | a81025ce5027524d2844991e294b5104d582b8940efd28c7209be366b85edcaa |
#!/usr/bin/env python
"""
lib.py
State Estimation and Analysis for PYthon
Library of utilities for general seapy module, imported into the namespace
when importing the seapy module
Written by Brian Powell on 10/18/13
Copyright (c)2017 University of Hawaii under the BSD-License.
"""
import numpy as np
from scipy import ndimage
import os
import re
import datetime
import itertools
secs2day = 1.0 / 86400.0
default_epoch = datetime.datetime(2000, 1, 1)
_default_timeref = "days since " + default_epoch.strftime("%Y-%m-%m %H:%M:%S")
def adddim(fld, size=1):
"""
replicate a field and add a new first dimension with given size
Parameters
----------
fld : array_like
Input field.
size : int, optional
Size of additional first dimension
Returns
-------
fld : array
Examples
--------
>>> a=np.array([4, 5, 6, 7])
>>> a.shape
(4,)
>>> b = seapy.adddim(a, 2)
>>> b.shape
(2, 4)
>>> b
array([[4, 5, 6, 7],
[4, 5, 6, 7]])
"""
fld = np.atleast_1d(fld)
s = np.ones(fld.ndim + 1).astype(int)
s[0] = int(size)
return np.tile(fld, s)
def fill(x, max_gap=None, kind='linear'):
"""
Fill missing data from a 1-D vector. When data are missing from a
vector, this method will interpolate to fill gaps that are less than
the specified max (or ignored).
Parameters
----------
x : array
The array to be filled. It will be cast as a masked array for
invalid values. If already a masked array, then that mask will
persist.
max_gap : int, optional
The maximum number of continuous values to interpolate (e.g.,
if this value is 10 and there are 12 continuous missing values,
they will be left unfilled). Default is to fill everything.
kind : str, optional
The kind of interpolant to use (see scipy.interpolate.interp1d).
Default is 'linear'
Returns
-------
x : array
The filled array
"""
from scipy.interpolate import interp1d
x = np.ma.masked_invalid(np.atleast_1d(x).flatten(), copy=False)
# If no gaps or empty data, do nothing
if not np.any(x.mask) or len(x.compressed()) < 3:
return x
f = interp1d(x.nonzero()[0], x.compressed())
nx = x.copy()
if max_gap is not None:
regions = contiguous(x)
for r in regions:
if ((r.stop - r.start) <= max_gap) and \
(r.stop < f.x.max()) and (r.start > f.x.min()):
nx[r] = f(np.arange(r.start, r.stop))
else:
bad = np.nonzero(x.mask)[0]
bad = np.delete(bad, np.nonzero(
np.logical_or(bad <= f.x.min(), bad >= f.x.max())))
nx[bad] = f(bad)
return nx
def contiguous(x):
"""
Find the indices that provide contiguous regions of a numpy.masked_array.
This will find all regions of valid data. NOTE: this casts as 1-D.
Parameters
----------
x : np.array or np.ma.array
The data to find the contiguous regions
Returns
-------
idx : array of slices
Array of slices for each contiguous region
Examples
--------
>>> a = np.array([4, 3, 2, np.nan, 6, 7, 2])
>>> r = contiguous(a)
[slice(0, 2, None), slice(4, 6, None)]
If no contiguous regions are available, an empty array is returned.
"""
x = np.ma.masked_invalid(np.atleast_1d(x).flatten(), copy=False)
idx = x.nonzero()[0]
try:
d = idx[np.nonzero(np.diff(idx) - 1)[0] + 1]
return np.array([np.s_[r[0]:r[1]] for r in
zip(np.hstack((idx.min(), d)),
np.hstack((d - 1, idx.max() + 1)))])
except:
return []
def chunker(seq, size):
    """
    Iterate over an iterable in 'chunks' of a given size

    Parameters
    ----------
    seq : iterable,
        The sequence to iterate over
    size : int,
        The number of items to be returned in each 'chunk'

    Returns
    -------
    chunk : seq,
        The items of the chunk to be iterated

    Examples
    --------
    >>> x = [0,3,4,7,9,10,12,14]
    >>> for i in chunker(x, 3):
    >>>     print(i)
    [0, 3, 4]
    [7, 9, 10]
    [12, 14]
    """
    # Starting offset of every chunk; evaluated eagerly so a bad `seq`
    # raises immediately, as in the original generator expression
    starts = range(0, len(seq), size)
    return (seq[i:i + size] for i in starts)
def smooth(data, ksize=3, kernel=None, copy=True):
    """
    Smooth the data field using a specified convolution kernel
    or a default averaging kernel.

    Parameters
    ----------
    data : masked array_like
        Input field.
    ksize : int, optional
        Size of square kernel
    kernel : ndarray, optional
        Define a convolution kernel. Default is averaging
    copy : bool, optional
        If true, a copy of input array is made

    Returns
    -------
    fld : masked array

    Raises
    ------
    AttributeError
        If the field is not 2-D or 3-D
    ValueError
        If ksize < 3
    """
    fld = np.ma.array(data, copy=copy)
    # Remember the original mask so it can be re-applied after smoothing
    mask = np.ma.getmaskarray(fld).copy()
    # Make sure ksize is odd so the kernel has a well-defined center
    ksize = int(ksize + 1) if int(ksize) % 2 == 0 else int(ksize)
    if fld.ndim > 3 or fld.ndim < 2:
        raise AttributeError("Can only convolve 2- or 3-D fields")
    if ksize < 3:
        raise ValueError("ksize must be greater than or equal to 3")
    if kernel is None:
        # Default: uniform (box) averaging kernel
        kernel = np.ones((ksize, ksize)) / (ksize * ksize)
    else:
        ksize = kernel.shape[0]
    # First, convolve over any masked values to fill them in
    fld = convolve_mask(fld, ksize=ksize, copy=False)
    # Next, perform the convolution
    if fld.ndim == 2:
        fld = ndimage.convolve(fld.data, kernel,
                               mode="reflect", cval=0.0)
    else:
        # Broadcast the 2-D kernel along the third axis. BUG FIX:
        # np.expand_dims(kernel, axis=3) on a 2-D array raises AxisError on
        # modern numpy; axis=2 yields the intended (ksize, ksize, 1) shape.
        kernel = np.expand_dims(kernel, axis=2)
        fld = np.transpose(ndimage.convolve(
            fld.filled(0).transpose(1, 2, 0), kernel,
            mode="reflect", cval=0.0), (2, 0, 1))
    # Apply the initial mask
    return np.ma.array(fld, mask=mask)
def convolve(data, ksize=3, kernel=None, copy=True, only_mask=False):
    """
    Convolve the kernel across the data to smooth or highlight
    the field across the masked region.

    Parameters
    ----------
    data : masked array_like
        Input field.
    ksize : int, optional
        Size of square kernel
    kernel : ndarray, optional
        Define a convolution kernel. Default is averaging
    copy : bool, optional
        If true, a copy of input array is made
    only_mask : bool, optional
        If true, only consider the smoothing over the masked
        region

    Returns
    -------
    fld : masked array

    Raises
    ------
    AttributeError
        If the field is not 2-D or 3-D
    ValueError
        If ksize < 3
    """
    fld = np.ma.array(data, copy=copy)
    if not copy:
        # Allow in-place element assignment on the caller's array
        fld._sharedmask = False
    # Make sure ksize is odd so the kernel has a well-defined center
    ksize = int(ksize + 1) if int(ksize) % 2 == 0 else int(ksize)
    if fld.ndim > 3 or fld.ndim < 2:
        raise AttributeError("Can only convolve 2- or 3-D fields")
    if ksize < 3:
        raise ValueError("ksize must be greater than or equal to 3")
    if kernel is None:
        # Default kernel averages the neighbors, excluding the point itself.
        # BUG FIX: the center index was np.round(ksize / 2), which rounds
        # half-to-even (1.5 -> 2 for ksize=3) and zeroed a corner instead of
        # the center; integer division locates the true center.
        center = ksize // 2
        kernel = np.ones([ksize, ksize])
        kernel[center, center] = 0.0
    else:
        ksize = kernel.shape[0]
    # Convolve the mask
    msk = np.ma.getmaskarray(fld)
    if fld.ndim == 2:
        # count: number of valid neighbors contributing at each point
        count = ndimage.convolve((~msk).view(np.int8), kernel,
                                 mode="constant", cval=0.0)
        # nfld: sum of the valid neighbor values at each point
        nfld = ndimage.convolve(fld.data * (~msk).view(np.int8), kernel,
                                mode="constant", cval=0.0)
    else:
        # Broadcast the 2-D kernel along the third axis. BUG FIX:
        # np.expand_dims(kernel, axis=3) on a 2-D array raises AxisError on
        # modern numpy; axis=2 yields the intended (ksize, ksize, 1) shape.
        kernel = np.expand_dims(kernel, axis=2)
        count = np.transpose(ndimage.convolve(
            (~msk).view(np.int8).transpose(1, 2, 0), kernel,
            mode="constant", cval=0.0), (2, 0, 1))
        nfld = np.transpose(ndimage.convolve(
            (fld.data * (~msk).view(np.int8)).transpose(1, 2, 0), kernel,
            mode="constant", cval=0.0), (2, 0, 1))
    if only_mask:
        # Fill only previously-masked points that have valid neighbors
        lst = np.nonzero(np.logical_and(msk, count > 0))
        fld[lst] = np.ma.nomask
        fld[lst] = nfld[lst] / count[lst]
    else:
        # Replace every valid point with its neighborhood average
        lst = np.nonzero(~msk)
        fld[lst] = nfld[lst] / count[lst]
    return fld
def convolve_mask(data, ksize=3, kernel=None, copy=True):
    """
    Convolve data over the missing regions of a mask

    Parameters
    ----------
    data : masked array_like
        Input field.
    ksize : int, optional
        Size of square kernel
    kernel : ndarray, optional
        Define a convolution kernel. Default is averaging
    copy : bool, optional
        If true, a copy of input array is made

    Returns
    -------
    fld : masked array
    """
    # Delegate to convolve(), restricted to the masked region only
    return convolve(data, ksize=ksize, kernel=kernel, copy=copy,
                    only_mask=True)
def matlab2date(daynum):
    """
    Given a day number from matlab, convert into a datetime

    Parameters
    ----------
    daynum: float
        Scalar or array of matlab day numbers

    Returns
    -------
    datetime : list
    """
    daynum = np.atleast_1d(daynum)
    # Matlab day numbers count from year 0, python ordinals from year 1;
    # hence the 366-day offset. BUG FIX: np.int was removed in numpy 1.24;
    # use the builtin int (same truncating conversion) instead.
    return [datetime.datetime.fromordinal(int(d)) +
            datetime.timedelta(days=(d % 1 - 366)) for d in daynum]
def date2day(date=default_epoch, epoch=default_epoch):
    """
    Compute the fractional number of days elapsed since the epoch to the date
    given.

    Parameters
    ----------
    date : datetime
        Input date
    epoch : datetime
        Date of epoch

    Returns
    -------
    numdays : list
    """
    dates = np.atleast_1d(date)
    # Elapsed seconds converted to fractional days for each entry
    return [(d - epoch).total_seconds() * secs2day for d in dates]
def day2date(day=0, epoch=default_epoch):
    """
    Return a datetime object from the number of days since the epoch

    Parameters
    ----------
    day : scalar
        Input day number
    epoch : datetime
        Date of epoch

    Returns
    -------
    date : list of datetime(s)
    """
    days = np.atleast_1d(day)
    # Offset the epoch by each (possibly fractional) day count
    return [epoch + datetime.timedelta(days=float(n)) for n in days]
def _distq(lon1, lat1, lon2, lat2):
    """
    Compute the geodesic distance between lat/lon points. This code is
    taken from the dist.f routine and the Matlab version distg.m passed
    around WHOI and APL. This was stripped down to use the WGS84 ellipsoid.

    Parameters
    ----------
    lon1 : array_like or scalar
        Input array of source longitude(s) in degrees
    lat1 : array_like or scalar
        Input array of source latitude(s) in degrees
    lon2 : array_like or scalar
        Input array of destination longitude(s) in degrees
    lat2 : array_like or scalar
        Input array of destination latitude(s) in degrees

    Returns
    -------
    distance : array or scalar of distance in meters
    angle: array or scalar of angle in radians
    """
    # Inputs are degrees; all internal math is in radians
    lon1 = np.asanyarray(np.radians(lon1))
    lat1 = np.asanyarray(np.radians(lat1))
    lon2 = np.asanyarray(np.radians(lon2))
    lat2 = np.asanyarray(np.radians(lat2))
    # Set the WGS84 parameters
    # A: semi-major axis [m]; E: first eccentricity
    A = 6378137.
    E = 0.081819191
    # B (semi-minor axis) is computed but not used in the stripped-down
    # range/bearing formulas below; kept from the original routine
    B = np.sqrt(A * A - (A * E)**2)
    EPS = E * E / (1.0 - E * E)
    # Move any latitudes off of the equator
    # NOTE(review): these boolean-mask assignments mutate the local radian
    # arrays in place and assume indexable (>= 1-D) behavior -- confirm
    # scalar (0-d) inputs are handled as intended.
    lat1[lat1 == 0] = np.finfo(float).eps
    lat2[lat2 == 0] = -np.finfo(float).eps
    # COMPUTE THE RADIUS OF CURVATURE IN THE PRIME VERTICAL FOR EACH POINT
    xnu1 = A / np.sqrt(1.0 - (E * np.sin(lat1))**2)
    xnu2 = A / np.sqrt(1.0 - (E * np.sin(lat2))**2)
    # Geodetic -> parametric latitude correction terms at the destination
    TPSI2 = (1.0 - E * E) * np.tan(lat2) + E * E * xnu1 * np.sin(lat1) / \
        (xnu2 * np.cos(lat2))
    PSI2 = np.arctan(TPSI2)
    DPHI2 = lat2 - PSI2
    # Longitude difference; eps avoids a 0/0 in the azimuth formulas
    DLAM = (lon2 - lon1) + np.finfo(float).eps
    CTA12 = np.sin(DLAM) / (np.cos(lat1) * TPSI2 - np.sin(lat1) * np.cos(DLAM))
    A12 = np.arctan(CTA12)
    CTA21P = np.sin(DLAM) / (np.sin(PSI2) * np.cos(DLAM) -
                             np.cos(PSI2) * np.tan(lat1))
    A21P = np.arctan(CTA21P)
    # C GET THE QUADRANT RIGHT
    # Wrap DLAM into (-pi, pi] and fix the branch of the arctans so the
    # forward/back azimuths land in the correct quadrant
    DLAM2 = (np.abs(DLAM) < np.pi).astype(int) * DLAM + \
        (DLAM >= np.pi).astype(int) * (-2 * np.pi + DLAM) + \
        (DLAM <= -np.pi).astype(int) * (2 * np.pi + DLAM)
    A12 = A12 + (A12 < -np.pi).astype(int) * 2 * np.pi - \
        (A12 >= np.pi).astype(int) * 2 * np.pi
    A12 = A12 + np.pi * np.sign(-A12) * \
        (np.sign(A12).astype(int) != np.sign(DLAM2))
    A21P = A21P + (A21P < -np.pi).astype(int) * 2 * np.pi - \
        (A21P >= np.pi).astype(int) * 2 * np.pi
    A21P = A21P + np.pi * np.sign(-A21P) * \
        (np.sign(A21P).astype(int) != np.sign(-DLAM2))
    # Sine of the angular separation along the geodesic
    SSIG = np.sin(DLAM) * np.cos(PSI2) / np.sin(A12)
    # Chord-length test (unit-sphere Cartesian points) to choose the
    # correct arcsin branch when the separation exceeds a quarter circle
    dd1 = np.array([np.cos(lon1) * np.cos(lat1),
                    np.sin(lon1) * np.cos(lat1), np.sin(lat1)])
    dd2 = np.array([np.cos(lon2) * np.cos(lat2),
                    np.sin(lon2) * np.cos(lat2), np.sin(lat2)])
    dd2 = np.sum((dd2 - dd1)**2, axis=0)
    bigbrnch = (dd2 > 2).astype(int)
    SIG = np.arcsin(SSIG) * (bigbrnch == 0).astype(int) + \
        (np.pi - np.arcsin(SSIG)) * bigbrnch
    # SIGC and A21 (reverse azimuth) are computed but not returned;
    # kept from the original dist.f translation
    SSIGC = -np.sin(DLAM) * np.cos(lat1) / np.sin(A21P)
    SIGC = np.arcsin(SSIGC)
    A21 = A21P - DPHI2 * np.sin(A21P) * np.tan(SIG / 2.0)
    # C COMPUTE RANGE
    # Series expansion of the geodesic arc length on the ellipsoid
    G2 = EPS * (np.sin(lat1))**2
    G = np.sqrt(G2)
    H2 = EPS * (np.cos(lat1) * np.cos(A12))**2
    H = np.sqrt(H2)
    SIG2 = SIG * SIG
    TERM1 = -H2 * (1.0 - H2) / 6.0
    TERM2 = G * H * (1.0 - 2.0 * H2) / 8.0
    TERM3 = (H2 * (4.0 - 7.0 * H2) - 3.0 * G2 * (1.0 - 7.0 * H2)) / 120.0
    TERM4 = -G * H / 48.0
    rng = xnu1 * SIG * (1.0 + SIG2 * (TERM1 + SIG * TERM2 + SIG2 * TERM3 +
                                      SIG2 * SIG * TERM4))
    return rng, A12
def earth_distance(lon1, lat1, lon2, lat2):
    """
    Compute the geodesic distance between lat/lon points.

    Parameters
    ----------
    lon1 : array_like or scalar
        Input array of source longitude(s)
    lat1 : array_like or scalar
        Input array of source latitude(s)
    lon2 : array_like or scalar
        Input array of destination longitude(s)
    lat2 : array_like or scalar
        Input array of destination latitude(s)

    Returns
    -------
    distance : array or scalar of distance in meters
    """
    # _distq returns (range, bearing); only the range is needed here
    distance, _bearing = _distq(lon1, lat1, lon2, lat2)
    return distance
def earth_angle(lon1, lat1, lon2, lat2):
    """
    Compute the angle between lat/lon points. NOTE: The bearing angle
    is computed, but then converted to geometric (counter-clockwise)
    angle to be returned.

    Parameters
    ----------
    lon1 : array_like or scalar
        Input array of source longitude(s)
    lat1 : array_like or scalar
        Input array of source latitude(s)
    lon2 : array_like or scalar
        Input array of destination longitude(s)
    lat2 : array_like or scalar
        Input array of destination latitude(s)

    Returns
    -------
    angle : array or scalar of bearing in radians
    """
    _range, bearing = _distq(lon1, lat1, lon2, lat2)
    # Convert compass bearing (clockwise from north) to geometric angle
    # (counter-clockwise from east)
    return np.pi / 2.0 - bearing
def flatten(l, ltypes=(list, tuple, set)):
    """
    Flatten a list or tuple that contains additional lists or tuples. Like
    the numpy flatten, but for python types.

    Parameters
    ----------
    l: tuple or list,
        The data that is to be flattened
    ltypes: tuple,
        Data types to attempt to flatten

    Returns
    -------
    list

    See Also
    --------
    numpy.flatten()

    Notes
    -----
    This code was taken from:
    <http://rightfootin.blogspot.com.au/2006/09/more-on-python-flatten.html>

    Examples
    --------
    >>> a=[[1,3,4,1], ('test', 'this'), [5,2]]
    >>> flatten(a)
    [1, 3, 4, 1, 'test', 'this', 5, 2]
    """
    def _flat(seq):
        # Recursively expand any nested container of a flattenable type;
        # empty containers simply contribute nothing
        out = []
        for item in seq:
            if isinstance(item, ltypes):
                out.extend(_flat(item))
            else:
                out.append(item)
        return out
    # Preserve the container type of the outermost argument
    return type(l)(_flat(l))
def list_files(path=".", regex=None, full_path=True):
    """
    list all sorted file names in the given path that conform to the regular
    expression pattern. This is not a generator function because it sorts
    the files in alphabetic/numeric order.

    Parameters
    ----------
    path : string
        Search for the given matches
    regex : string, optional
        Input regular expression string to filter filenames
    full_path : bool, optional
        If True, return the full path for each found object. If false,
        return only the filename

    Returns
    -------
    files : array

    Examples
    --------
    >>> files = seapy.list_files('/path/to/dir/test_.*txt')
    >>> print(files)
    ['/path/to/dir/test_001.txt', '/path/to/dir/test_002.txt']

    NOTE: this is equivalent for separating:

    >>> files = seapy.list_files('/path/to/dir', 'test_.*txt')
    """
    # A single argument may combine the directory and the pattern;
    # split it into its components
    if regex is None:
        regex = os.path.basename(path)
        path = os.path.dirname(path)
    # Normalize the directory so it always ends with a separator
    if not path:
        path = './'
    elif not path.endswith('/'):
        path += '/'
    pattern = re.compile(regex)
    matches = [(path + name) if full_path else name
               for name in os.listdir(path)
               if pattern.search(name)]
    return sorted(matches)
def netcdf(file, aggdim=None):
    """
    Wrapper around netCDF4 to open a file as either a Dataset or an
    MFDataset.

    Parameters
    ----------
    file : string or list,
        Filename(s) to open. If the string has wildcards or is a list,
        this attempts to open an MFDataset
    aggdim : string,
        Name of dimension to concatenate along if loading a set of files.
        A value of None (default) uses the unlimited dimension.

    Returns
    -------
    netCDF4 Dataset or MFDataset
    """
    import netCDF4
    # Try a plain Dataset first; wildcards/lists make it fail, in which
    # case fall back to a multi-file dataset
    try:
        return netCDF4.Dataset(file)
    except (OSError, RuntimeError):
        pass
    try:
        return netCDF4.MFDataset(file, aggdim=aggdim)
    except IndexError:
        # MFDataset raises IndexError when the glob matches nothing
        raise FileNotFoundError("{:s} cannot be found.".format(file))
def primes(number):
    """
    Return a list of primes strictly less than a given value.

    Parameters
    ----------
    number : int
        Find prime values up to (but not including) this value

    Returns
    -------
    primes : ndarray

    Notes
    -----
    This code was taken from "Cooking with Python, Part 2" by Martelli, et al.
    <http://archive.oreilly.com/pub/a/python/excerpt/pythonckbk_chap1/index1.html?page=last>
    """
    # DOC FIX: the docstring previously said "less than or equal to", but
    # takewhile(p < number) excludes `number` itself (e.g. primes(7) ends
    # at 5); the documentation now matches the behavior.
    def __erat2():
        # Incremental sieve of Eratosthenes over the odd integers
        D = {}
        yield 2
        for q in itertools.islice(itertools.count(3), 0, None, 2):
            p = D.pop(q, None)
            if p is None:
                # q is prime; its square is the first multiple to mark
                D[q * q] = q
                yield q
            else:
                # q is composite; advance the next odd multiple of p
                x = p + q
                while x in D or not (x & 1):
                    x += p
                D[x] = p
    return np.array(list(itertools.takewhile(lambda p: p < number, __erat2())))
def rotate(u, v, angle):
    """
    Rotate a vector field by the given angle

    Parameters
    ----------
    u : array like
        Input u component
    v : array like
        Input v component
    angle : array like
        Input angle of rotation in radians

    Returns
    -------
    rotated_u, rotated_v : array
    """
    u = np.asanyarray(u)
    v = np.asanyarray(v)
    angle = np.asanyarray(angle)
    # Standard 2-D rotation matrix applied component-wise
    cosa, sina = np.cos(angle), np.sin(angle)
    rotated_u = u * cosa - v * sina
    rotated_v = u * sina + v * cosa
    return rotated_u, rotated_v
def today2day(epoch=default_epoch):
    """
    Return the day number of today (UTC time) since the epoch.

    Parameters
    ----------
    epoch : datetime
        Date of epoch

    Returns
    -------
    numdays : scalar
    """
    # Current UTC time converted to fractional days since the epoch
    utc_now = datetime.datetime.utcnow()
    return date2day(utc_now, epoch)
def unique_rows(x):
    """
    Convert rows into godelnumbers and find the rows that are unique using
    np.unique

    Parameters
    ----------
    x : ndarray or tuple,
        array of elements to find unique value. If columns are greater
        than 1, then the columns are combined into a single Godel number.
        If a tuple of arrays are passed, they are combined.

    Returns
    -------
    idx : ndarray,
        Indices of the unique values

    Examples
    --------
    >>> a = np.array([3, 3, 5, 5, 6])
    >>> b = np.array([2, 3, 3, 3, 3])
    >>> idx = unique_rows((a, b))
    >>> idx
    array([0, 1, 2, 4])
    """
    # A tuple of arrays is stacked column-wise into a single 2-D array
    if isinstance(x, tuple):
        rows = np.vstack(x).T
    else:
        rows = np.atleast_1d(x)
    # Identical rows share a Godel number, so the unique Godel numbers
    # identify the unique rows
    _, idx = np.unique(godelnumber(rows), return_index=True)
    return idx
def vecfind(a, b, tolerance=None):
    """
    Find all occurences of b in a within the given tolerance and return
    the sorted indices of a and b that yield the corresponding values.
    The indices are of equal length, such that a[index_a] matches
    b[index_b] element-by-element.

    Written by Eric Firing, University of Hawaii.

    Parameters
    ----------
    a : array
        Input vector
    b : array
        Input vector
    tolerance : same type as stored values of a and b, optional
        Input tolerance for how close a is to b. If not specified,
        then elements of a and b must be equal.

    Returns
    -------
    index_a, index_b : arrays of indices for each vector where values are equal,
        such that a[index_a] == b[index_b]

    Examples
    --------
    >>> a = np.array([3,4,1,8,9])
    >>> b = np.array([4,7,1])
    >>> ia, ib = vecfind(a, b)

    By definition,

    >>> len(ia) == len(ib)
    True
    >>> a[ia] == b[ib]
    True
    """
    a = np.asanyarray(a).flatten()
    b = np.asanyarray(b).flatten()
    # if no tolerance, compute a zero distance the proper type
    if tolerance is None:
        tolerance = a[0] - a[0]
    # Work only with the unique values of each input; uniq_* map the
    # deduplicated values back to their original positions
    _, uniq_a = np.unique(a, return_index=True)
    _, uniq_b = np.unique(b, return_index=True)
    na = len(uniq_a)
    # Concatenate both sets of values and tag each entry with its origin
    # (1 for a, 0 for b), then sort the combined vector
    t = np.hstack((a[uniq_a], b[uniq_b]))
    is_a = np.zeros(t.shape, dtype=np.int8)
    is_a[:na] = 1
    isorted = np.argsort(t)
    tsorted = t[isorted]
    is_a_sorted = is_a[isorted]
    # Adjacent sorted entries from different arrays within tolerance
    # constitute a match
    dt = np.diff(tsorted)
    mixed = np.abs(np.diff(is_a_sorted)) == 1
    ipair = np.nonzero((np.abs(dt) <= tolerance) & mixed)[0]
    # Now ipair should be the indices of the first elements
    # of consecutive pairs in tsorted for which the two items
    # are from different arrays, and differ by less than tolerance.
    # The problem is that they could be in either order.
    iswap = np.nonzero(is_a_sorted[ipair] == 0)[0]  # b is first, so swap
    temp = isorted[ipair[iswap] + 1]
    isorted[ipair[iswap] + 1] = isorted[ipair[iswap]]
    isorted[ipair[iswap]] = temp
    # After the swap, each pair is ordered (a, b); subtract na to map the
    # b half back into b's index space
    isorted_a = isorted[ipair]
    isorted_b = isorted[ipair + 1] - na
    return uniq_a[isorted_a], uniq_b[isorted_b]
def godelnumber(x):
    """
    Convert the columns of x into godel numbers. If x is MxN, return an Mx1
    vector. The Godel number is prime**x

    Parameters
    ----------
    x : ndarray,
        Values to convert into Godel number(s)

    Returns
    -------
    godel : ndarray
    """
    # BUG FIX: the original used np.atleast_2d, which made ndim > 1 always
    # true and the 1-D (2.0**x) branch unreachable; 1-D inputs collapsed to
    # a single number instead of one Godel number per element.
    x = np.atleast_1d(x.astype(int))
    if x.ndim > 1:
        # One prime per column; each row maps to prod(prime_i ** x_i).
        # primes(10*N) comfortably covers the first N primes.
        primevals = primes(x.shape[1] * 10)[:x.shape[1]].astype(float)
        return np.prod(primevals**x, axis=1)
    else:
        # Single column: Godel number is simply 2**value
        return 2.0**x
pass
| dalepartridge/seapy | lib.py | Python | mit | 22,993 | [
"Brian",
"NetCDF"
] | eb96c6637ba13fe68789273ee34c0d5088ef8578e5264445d50601a87445b256 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
cc_plugin_ncei/ncei_trajectory_profile.py
'''
from compliance_checker.base import BaseCheck
from cc_plugin_ncei.ncei_base import TestCtx, NCEI1_1Check, NCEI2_0Check
from cc_plugin_ncei import util
from isodate import parse_duration
class NCEITrajectoryProfileOrthogonalBase(BaseCheck):
    '''
    Checks shared by all versions of the NCEI trajectoryProfile Orthogonal
    templates; version-specific subclasses add the global-attribute checks.
    '''
    _cc_spec = 'ncei-trajectory-profile-orthogonal'
    # Feature-type names accepted for this template family
    valid_feature_types = [
        'trajectory',
        'trajectory_id'
    ]

    def check_dimensions(self, dataset):
        '''
        Checks that the feature types of this dataset are consistent with a trajectory profile orthogonal dataset

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are trajectory profile orthogonal feature types')
        message = '{} must be a valid trajectory profile orthogonal feature type. It must have dimensions of (trajectory, obs, z).'
        message += ' Also, x, y, and t must have dimensions (trajectory, obs). z must be a coordinate variable with dimensions (z).'
        # Every geophysical variable must satisfy the orthogonal layout
        for variable in util.get_geophysical_variables(dataset):
            is_valid = util.is_trajectory_profile_orthogonal(dataset, variable)
            required_ctx.assert_true(
                is_valid,
                message.format(variable)
            )
        results.append(required_ctx.to_result())
        return results

    def check_trajectory_id(self, dataset):
        '''
        Checks that if a variable exists for the trajectory id it has the appropriate attributes

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
        trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
        # No need to check
        exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
        # NOTE(review): this early exit returns a single Result rather than
        # the list returned on the success path -- confirm callers accept both.
        if not trajectory_ids:
            return exists_ctx.to_result()
        results.append(exists_ctx.to_result())
        test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
        test_ctx.assert_true(
            getattr(trajectory_ids[0], 'long_name', '') != "",
            "long_name attribute should exist and not be empty"
        )
        results.append(test_ctx.to_result())
        return results
class NCEITrajectoryProfileOrthogonal1_1(NCEI1_1Check, NCEITrajectoryProfileOrthogonalBase):
    '''
    Compliance checker for the NCEI netCDF trajectoryProfile Orthogonal
    template version 1.1.
    '''
    register_checker = True
    _cc_spec_version = '1.1'
    _cc_description = (
        'This test checks the selected file against the NCEI netCDF trajectoryProfile Orthogonal '
        'template version 1.1 (found at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/'
        'trajectoryProfileOrtho.cdl). The NCEI version 1.1 templates are based on “feature types”'
        ', as identified by Unidata and CF, and conform to ACDD version 1.0 and CF version 1.6. '
        'You can find more information about the version 1.1 templates at https://www.nodc.noaa.'
        'gov/data/formats/netcdf/v1.1/. This test is specifically for the trajectoryProfile '
        'feature type in an Orthogonal multidimensional array representation, which is typically '
        'used for a series of profile features located at points ordered along a trajectory and '
        'all data points have the exact same depth values.')
    # BUG FIX: _cc_url previously pointed at the Incomplete template with a
    # misspelled path ('.../necdf/v1.1/trajectoryProfileIncomplete.cdl');
    # this checker is for the Orthogonal template cited in the description.
    _cc_url = 'https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/trajectoryProfileOrtho.cdl'
    _cc_authors = 'Luke Campbell, Dan Maher'
    _cc_checker_version = '2.1.0'
    # Accepted values of the nodc_template_version global attribute
    valid_templates = [
        "NODC_NetCDF_TrajectoryProfile_Orthogonal_Template_v1.1"
    ]

    @classmethod
    def beliefs(cls):
        '''
        Not applicable for gliders
        '''
        return {}

    def check_required_attributes(self, dataset):
        '''
        Feature type specific check of global required and highly recommended attributes.

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory Profile orthogonal dataset')
        # Template version is matched case-insensitively
        required_ctx.assert_true(
            getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
            'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
        )
        required_ctx.assert_true(
            getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
            'cdm_data_type attribute must be set to Trajectory'
        )
        required_ctx.assert_true(
            getattr(dataset, 'featureType', '') == 'trajectoryProfile',
            'featureType attribute must be set to trajectoryProfile'
        )
        results.append(required_ctx.to_result())
        return results
class NCEITrajectoryProfileOrthogonal2_0(NCEI2_0Check, NCEITrajectoryProfileOrthogonalBase):
    '''
    Compliance checker for the NCEI netCDF trajectoryProfile Orthogonal
    template version 2.0.
    '''
    register_checker = True
    _cc_spec_version = '2.0'
    _cc_description = (
        'This test checks the selected file against the NCEI netCDF trajectoryProfile Orthogonal '
        'template version 2.0 (found at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/'
        'trajectoryProfileOrtho.cdl). The NCEI version 2.0 templates are based on “feature types”'
        ', as identified by Unidata and CF, and conform to ACDD version 1.3 and CF version 1.6. '
        'You can find more information about the version 2.0 templates at https://www.nodc.noaa.'
        'gov/data/formats/netcdf/v2.0/. This test is specifically for the trajectoryProfile '
        'feature type in an Orthogonal multidimensional array representation, which is typically '
        'used for a series of profile features located at points ordered along a trajectory and '
        'all data points have the exact same depth values.')
    # BUG FIX: _cc_url previously pointed at the Incomplete template with a
    # misspelled path ('.../necdf/v2.0/trajectoryProfileIncomplete.cdl');
    # this checker is for the Orthogonal template cited in the description.
    _cc_url = 'https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryProfileOrtho.cdl'
    _cc_authors = 'Luke Campbell, Dan Maher'
    _cc_checker_version = '2.3.0'
    # Accepted values of the ncei_template_version global attribute
    valid_templates = [
        "NCEI_NetCDF_TrajectoryProfile_Orthogonal_Template_v2.0"
    ]

    def check_required_attributes(self, dataset):
        '''
        Feature type specific check of global required and highly recommended attributes.

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory Profile orthogonal dataset')
        # Template version is matched case-insensitively
        required_ctx.assert_true(
            getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
            'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
        )
        required_ctx.assert_true(
            getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
            'cdm_data_type attribute must be set to Trajectory'
        )
        required_ctx.assert_true(
            getattr(dataset, 'featureType', '') == 'trajectoryProfile',
            'featureType attribute must be set to trajectoryProfile'
        )
        results.append(required_ctx.to_result())
        return results

    def check_recommended_attributes(self, dataset):
        '''
        Feature type specific check of global recommended attributes.

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
        # Check time_coverage_duration and resolution
        for attr in ['time_coverage_duration', 'time_coverage_resolution']:
            attr_value = getattr(dataset, attr, '')
            # ISO-8601 durations are validated by attempting to parse them
            try:
                parse_duration(attr_value)
                recommended_ctx.assert_true(True, '')  # Score it True!
            except Exception:
                recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
        results.append(recommended_ctx.to_result())
        return results
class NCEITrajectoryProfileIncompleteBase(BaseCheck):
    '''
    Checks shared by all versions of the NCEI trajectoryProfile Incomplete
    templates; version-specific subclasses add the global-attribute checks.
    '''
    _cc_spec = 'ncei-trajectory-profile-incomplete'
    # Feature-type names accepted for this template family
    valid_feature_types = [
        'trajectory',
        'trajectory_id'
    ]

    def check_dimensions(self, dataset):
        '''
        Checks that the feature types of this dataset are consistent with a trajectory profile incomplete dataset

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are trajectory profile incomplete feature types')
        message = '{} must be a valid trajectory profile incomplete feature type. It and z must have dimensions of (trajectory, obs, nzMax).'
        message += ' Also, x, y, and t must have dimensions (trajectory, obs).'
        # Every geophysical variable must satisfy the incomplete layout
        for variable in util.get_geophysical_variables(dataset):
            is_valid = util.is_trajectory_profile_incomplete(dataset, variable)
            required_ctx.assert_true(
                is_valid,
                message.format(variable)
            )
        results.append(required_ctx.to_result())
        return results

    def check_trajectory_id(self, dataset):
        '''
        Checks that if a variable exists for the trajectory id it has the appropriate attributes

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
        trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
        # No need to check
        exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
        # NOTE(review): this early exit returns a single Result rather than
        # the list returned on the success path -- confirm callers accept both.
        if not trajectory_ids:
            return exists_ctx.to_result()
        results.append(exists_ctx.to_result())
        test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
        test_ctx.assert_true(
            getattr(trajectory_ids[0], 'long_name', '') != "",
            "long_name attribute should exist and not be empty"
        )
        results.append(test_ctx.to_result())
        return results
class NCEITrajectoryProfileIncomplete1_1(NCEI1_1Check, NCEITrajectoryProfileIncompleteBase):
    '''
    Compliance checker for the NCEI netCDF trajectoryProfile Incomplete
    template version 1.1.
    '''
    register_checker = True
    _cc_spec_version = '1.1'
    _cc_description = (
        'This test checks the selected file against the NCEI netCDF trajectoryProfile '
        'Incomplete template version 1.1 (found at https://www.nodc.noaa.gov/data/formats/'
        'netcdf/v1.1/trajectoryProfileIncom.cdl). The NCEI version 1.1 templates are based '
        'on “feature types”, as identified by Unidata and CF, and conform to ACDD version 1.0'
        ' and CF version 1.6. You can find more information about the version 1.1 templates at '
        'https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/. This test is specifically for the '
        'trajectoryProfile feature type in an Incomplete multidimensional array representation, '
        'which is typically used for a series of profile features located at points ordered along '
        'a trajectory and all data points do not have the exact same number of elements.')
    # NOTE(review): the URL below spells 'necdf' (likely a typo for
    # 'netcdf') -- confirm the canonical link before changing it.
    _cc_url = 'http://www.nodc.noaa.gov/data/formats/necdf/v1.1/trajectoryProfileIncomplete.cdl'
    _cc_authors = 'Luke Campbell, Dan Maher'
    _cc_checker_version = '2.1.0'
    # Accepted values of the nodc_template_version global attribute
    valid_templates = [
        "NODC_NetCDF_TrajectoryProfile_Incomplete_Template_v1.1"
    ]

    def check_required_attributes(self, dataset):
        '''
        Feature type specific check of global required and highly recommended attributes.

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory Profile incomplete dataset')
        # Template version is matched case-insensitively
        required_ctx.assert_true(
            getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
            'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
        )
        required_ctx.assert_true(
            getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
            'cdm_data_type attribute must be set to Trajectory'
        )
        required_ctx.assert_true(
            getattr(dataset, 'featureType', '') == 'trajectoryProfile',
            'featureType attribute must be set to trajectoryProfile'
        )
        results.append(required_ctx.to_result())
        return results
class NCEITrajectoryProfileIncomplete2_0(NCEI2_0Check, NCEITrajectoryProfileIncompleteBase):
    '''
    Compliance checker for the NCEI netCDF trajectoryProfile Incomplete
    template version 2.0.
    '''
    register_checker = True
    _cc_spec_version = '2.0'
    _cc_description = (
        'This test checks the selected file against the NCEI netCDF trajectoryProfile '
        'Incomplete template version 2.0 (found at https://www.nodc.noaa.gov/data/formats/'
        'netcdf/v2.0/trajectoryProfileIncom.cdl). The NCEI version 2.0 templates are based '
        'on “feature types”, as identified by Unidata and CF, and conform to ACDD version 1.3'
        ' and CF version 1.6. You can find more information about the version 2.0 templates at '
        'https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/. This test is specifically for the '
        'trajectoryProfile feature type in an Incomplete multidimensional array representation, '
        'which is typically used for a series of profile features located at points ordered along '
        'a trajectory and all data points do not have the exact same number of elements.')
    # NOTE(review): the URL below spells 'necdf' (likely a typo for
    # 'netcdf') -- confirm the canonical link before changing it.
    _cc_url = 'http://www.nodc.noaa.gov/data/formats/necdf/v2.0/trajectoryProfileIncomplete.cdl'
    _cc_authors = 'Luke Campbell, Dan Maher'
    _cc_checker_version = '2.3.0'
    # Accepted values of the ncei_template_version global attribute
    valid_templates = [
        "NCEI_NetCDF_TrajectoryProfile_Incomplete_Template_v2.0"
    ]

    def check_required_attributes(self, dataset):
        '''
        Feature type specific check of global required and highly recommended attributes.

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory Profile incomplete dataset')
        # Template version is matched case-insensitively
        required_ctx.assert_true(
            getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
            'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
        )
        required_ctx.assert_true(
            getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
            'cdm_data_type attribute must be set to Trajectory'
        )
        required_ctx.assert_true(
            getattr(dataset, 'featureType', '') == 'trajectoryProfile',
            'featureType attribute must be set to trajectoryProfile'
        )
        results.append(required_ctx.to_result())
        return results

    def check_recommended_attributes(self, dataset):
        '''
        Feature type specific check of global recommended attributes.

        :param netCDF4.Dataset dataset: An open netCDF dataset
        '''
        results = []
        recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
        # Check time_coverage_duration and resolution
        for attr in ['time_coverage_duration', 'time_coverage_resolution']:
            attr_value = getattr(dataset, attr, '')
            # ISO-8601 durations are validated by attempting to parse them
            try:
                parse_duration(attr_value)
                recommended_ctx.assert_true(True, '')  # Score it True!
            except Exception:
                recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
        results.append(recommended_ctx.to_result())
        return results
| ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_trajectory_profile.py | Python | apache-2.0 | 15,889 | [
"NetCDF"
] | 250181bc74add83a9cd0fbe6080342ff878db2cd1b88791998f3b67583d5f861 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
    """Seed both the stdlib and numpy random generators.

    x: int seed
    """
    # Keep the two generators in lockstep from the same seed
    for seed_func in (random.seed, np.random.seed):
        seed_func(x)
def Odds(p):
    """Computes odds for a given probability.

    Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.

    Note: when p=1 the ratio divides by zero; Odds(1) is defined here
    to be infinity, which is the natural limiting value.

    p: float 0-1

    Returns: float odds
    """
    return float('inf') if p == 1 else p / (1 - p)
def Probability(o):
    """Computes the probability corresponding to given odds.

    Example: o=2 means 2:1 odds in favor, or 2/3 probability

    o: float odds, strictly positive

    Returns: float probability
    """
    favorable = o
    total = o + 1
    return favorable / total
def Probability2(yes, no):
    """Computes the probability corresponding to given odds.

    Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.

    yes, no: int or float odds in favor
    """
    total = yes + no
    return yes / total
class Interpolator(object):
    """Represents a mapping between sorted sequences; performs linear interp.

    Attributes:
        xs: sorted list
        ys: sorted list
    """

    def __init__(self, xs, ys):
        self.xs = xs
        self.ys = ys

    def Lookup(self, x):
        """Looks up x and returns the corresponding value of y."""
        return self._Bisect(x, self.xs, self.ys)

    def Reverse(self, y):
        """Looks up y and returns the corresponding value of x."""
        return self._Bisect(y, self.ys, self.xs)

    def _Bisect(self, key, src, dst):
        """Linearly interpolates key from src onto dst, clamped at the ends."""
        # Clamp values outside the source range to the endpoint values
        if key <= src[0]:
            return dst[0]
        if key >= src[-1]:
            return dst[-1]
        hi = bisect.bisect(src, key)
        lo = hi - 1
        frac = 1.0 * (key - src[lo]) / (src[hi] - src[lo])
        return dst[lo] + frac * 1.0 * (dst[hi] - dst[lo])
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
    """A histogram: a map from hashable values to integer frequencies."""

    def Freq(self, x):
        """Returns the frequency of x, or 0 if x is not present.

        Args:
            x: number value

        Returns:
            int frequency
        """
        return self.d.get(x, 0)

    def Freqs(self, xs):
        """Returns a list of frequencies, one per element of xs."""
        return list(map(self.Freq, xs))

    def IsSubset(self, other):
        """Checks whether every value here occurs no more often than in
        the given histogram."""
        return all(freq <= other.Freq(val) for val, freq in self.Items())

    def Subtract(self, other):
        """Decrements this histogram by the frequencies in the other one."""
        for val, freq in other.Items():
            self.Incr(val, -freq)
class Pmf(_DictWrapper):
    """Represents a probability mass function.

    Values can be any hashable type; probabilities are floating-point.
    Pmfs are not necessarily normalized.
    """

    def Prob(self, x, default=0):
        """Gets the probability associated with the value x.

        Args:
            x: number value
            default: value to return if the key is not there

        Returns:
            float probability
        """
        return self.d.get(x, default)

    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]

    def Percentile(self, percentage):
        """Computes a percentile of a given Pmf.

        Note: this is not super efficient. If you are planning
        to compute more than a few percentiles, compute the Cdf.

        percentage: float 0-100

        returns: value from the Pmf (None if the Pmf is empty or
            the accumulated probability never reaches percentage/100)
        """
        p = percentage / 100.0
        total = 0
        # accumulate probability over values in ascending order until
        # we reach the requested fraction
        for val, prob in sorted(self.Items()):
            total += prob
            if total >= p:
                return val

    def ProbGreater(self, x):
        """Probability that a sample from this Pmf exceeds x.

        x: number, or another _DictWrapper (compared distribution-wise)

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbGreater(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val > x]
            return sum(t)

    def ProbLess(self, x):
        """Probability that a sample from this Pmf is less than x.

        x: number, or another _DictWrapper (compared distribution-wise)

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbLess(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val < x]
            return sum(t)

    def __lt__(self, obj):
        """Less than.

        obj: number or _DictWrapper

        returns: float probability
        """
        return self.ProbLess(obj)

    def __gt__(self, obj):
        """Greater than.

        obj: number or _DictWrapper

        returns: float probability
        """
        return self.ProbGreater(obj)

    def __ge__(self, obj):
        """Greater than or equal.

        obj: number or _DictWrapper

        returns: float probability
        """
        # complement of P(self < obj)
        return 1 - (self < obj)

    def __le__(self, obj):
        """Less than or equal.

        obj: number or _DictWrapper

        returns: float probability
        """
        # complement of P(self > obj)
        return 1 - (self > obj)

    def Normalize(self, fraction=1.0):
        """Normalizes this PMF so the sum of all probs is fraction.

        Args:
            fraction: what the total should be after normalization

        Returns: the total probability before normalizing

        Raises: ValueError if under a log transform or total is zero
        """
        if self.log:
            raise ValueError("Normalize: Pmf is under a log transform")
        total = self.Total()
        if total == 0.0:
            raise ValueError('Normalize: total probability is zero.')
            #logging.warning('Normalize: total probability is zero.')
            #return total
        factor = fraction / total
        for x in self.d:
            self.d[x] *= factor
        return total

    def Random(self):
        """Chooses a random element from this PMF.

        Note: this is not very efficient. If you plan to call
        this more than a few times, consider converting to a CDF.

        Returns:
            float value from the Pmf
        """
        target = random.random()
        total = 0.0
        for x, p in self.d.items():
            total += p
            if total >= target:
                return x
        # we shouldn't get here
        raise ValueError('Random: Pmf might not be normalized.')

    def Mean(self):
        """Computes the mean of a PMF.

        Returns:
            float mean
        """
        mean = 0.0
        for x, p in self.d.items():
            mean += p * x
        return mean

    def Var(self, mu=None):
        """Computes the variance of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float variance
        """
        if mu is None:
            mu = self.Mean()
        var = 0.0
        for x, p in self.d.items():
            var += p * (x - mu) ** 2
        return var

    def Std(self, mu=None):
        """Computes the standard deviation of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float standard deviation
        """
        var = self.Var(mu)
        return math.sqrt(var)

    def MaximumLikelihood(self):
        """Returns the value with the highest probability.

        Returns: the value (not the probability)
        """
        # max over (prob, value) pairs; ties break on the larger value
        _, val = max((prob, val) for val, prob in self.Items())
        return val

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        cdf = self.MakeCdf()
        return cdf.CredibleInterval(percentage)

    def __add__(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        # EAFP dispatch: if other has no Items(), treat it as a scalar
        try:
            return self.AddPmf(other)
        except AttributeError:
            return self.AddConstant(other)

    def AddPmf(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 + v2, p1 * p2)
        return pmf

    def AddConstant(self, other):
        """Computes the Pmf of the sum a constant and values from self.

        other: a number

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 + other, p1)
        return pmf

    def __sub__(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.SubPmf(other)
        except AttributeError:
            return self.AddConstant(-other)

    def SubPmf(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 - v2, p1 * p2)
        return pmf

    def __mul__(self, other):
        """Computes the Pmf of the product of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.MulPmf(other)
        except AttributeError:
            return self.MulConstant(other)

    def MulPmf(self, other):
        """Computes the Pmf of the product of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 * v2, p1 * p2)
        return pmf

    def MulConstant(self, other):
        """Computes the Pmf of the product of a constant and values from self.

        other: a number

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 * other, p1)
        return pmf

    def __div__(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.

        other: another Pmf or a scalar

        returns: new Pmf
        """
        try:
            return self.DivPmf(other)
        except AttributeError:
            return self.MulConstant(1/other)

    # Python 3 uses __truediv__; __div__ is kept for Python 2 compatibility
    __truediv__ = __div__

    def DivPmf(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.

        other: another Pmf

        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 / v2, p1 * p2)
        return pmf

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.MakeCdf()
        return cdf.Max(k)
class Joint(Pmf):
    """A joint distribution over tuples of values.

    The values are sequences (usually tuples).
    """

    def Marginal(self, i, label=None):
        """Extracts the marginal distribution of variable i.

        i: index of the variable we want

        Returns: Pmf
        """
        pmf = Pmf(label=label)
        for values, prob in self.Items():
            pmf.Incr(values[i], prob)
        return pmf

    def Conditional(self, i, j, val, label=None):
        """Extracts the distribution of variable i given that
        variable j equals val.

        i: index of the variable we want
        j: which variable is conditioned on
        val: the value the jth variable has to have

        Returns: Pmf
        """
        pmf = Pmf(label=label)
        for values, prob in self.Items():
            if values[j] == val:
                pmf.Incr(values[i], prob)
        pmf.Normalize()
        return pmf

    def MaxLikeInterval(self, percentage=90):
        """Returns the maximum-likelihood credible interval.

        If percentage=90, computes a 90% CI containing the values
        with the highest likelihoods.

        percentage: float between 0 and 100

        Returns: list of values from the suite
        """
        threshold = percentage / 100.0
        ranked = sorted(((prob, val) for val, prob in self.Items()),
                        reverse=True)
        interval = []
        total = 0
        for prob, val in ranked:
            interval.append(val)
            total += prob
            if total >= threshold:
                break
        return interval
def MakeJoint(pmf1, pmf2):
    """Builds the joint distribution of two independent Pmfs.

    Assumes that the PMFs represent independent random variables.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        Joint pmf of value pairs
    """
    joint = Joint()
    for x, px in pmf1.Items():
        for y, py in pmf2.Items():
            joint.Set((x, y), px * py)
    return joint
def MakeHistFromList(t, label=None):
    """Builds a histogram from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this histogram

    Returns: Hist object
    """
    hist = Hist(t, label=label)
    return hist
def MakeHistFromDict(d, label=None):
    """Makes a histogram from a map from values to frequencies.

    Args:
        d: dictionary that maps values to frequencies
        label: string label for this histogram

    Returns:
        Hist object
    """
    # pass label by keyword for consistency with the sibling factory
    # functions; the original passed it positionally
    return Hist(d, label=label)
def MakePmfFromList(t, label=None):
    """Builds a PMF from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this PMF

    Returns: Pmf object
    """
    pmf = Pmf(t, label=label)
    return pmf
def MakePmfFromDict(d, label=None):
    """Builds a PMF from a map from values to probabilities.

    d: dictionary that maps values to probabilities
    label: string label for this PMF

    Returns: Pmf object
    """
    pmf = Pmf(d, label=label)
    return pmf
def MakePmfFromItems(t, label=None):
    """Builds a PMF from a sequence of value-probability pairs.

    t: sequence of value-probability pairs
    label: string label for this PMF

    Returns: Pmf object
    """
    pmf = Pmf(dict(t), label=label)
    return pmf
def MakePmfFromHist(hist, label=None):
    """Builds a normalized PMF from a Hist object.

    hist: Hist object
    label: string label; defaults to the histogram's label

    Returns: Pmf object
    """
    if label is None:
        label = hist.label
    pmf = Pmf(hist, label=label)
    return pmf
def MakeMixture(metapmf, label='mix'):
    """Builds a mixture distribution.

    metapmf: Pmf that maps from Pmfs to probs.
    label: string label for the new Pmf.

    Returns: Pmf object.
    """
    mix = Pmf(label=label)
    for pmf, weight in metapmf.Items():
        for x, prob in pmf.Items():
            # each component contributes its probability scaled by
            # the component's weight in the meta-Pmf
            mix.Incr(x, weight * prob)
    return mix
def MakeUniformPmf(low, high, n):
    """Builds a uniform Pmf over n equally spaced values.

    low: lowest value (inclusive)
    high: highest value (inclusive)
    n: number of values
    """
    pmf = Pmf()
    for x in np.linspace(low, high, n):
        # equal unnormalized weight for every point
        pmf.Set(x, 1)
    pmf.Normalize()
    return pmf
class Cdf(object):
    """Represents a cumulative distribution function.

    Attributes:
        xs: sequence of values
        ps: sequence of cumulative probabilities
        label: string used as a graph label.
    """

    def __init__(self, obj=None, ps=None, label=None):
        """Initializes.

        If ps is provided, obj must be the corresponding list of values.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        ps: list of cumulative probabilities
        label: string label
        """
        self.label = label if label is not None else '_nolegend_'
        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            if not label:
                self.label = label if label is not None else obj.label
        if obj is None:
            # caller does not provide obj, make an empty Cdf
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            if ps is not None:
                logging.warning("Cdf: can't pass ps without also passing xs.")
            return
        else:
            # if the caller provides xs and ps, just store them
            if ps is not None:
                if isinstance(ps, str):
                    logging.warning("Cdf: ps can't be a string")
                self.xs = np.asarray(obj)
                self.ps = np.asarray(ps)
                return
        # caller has provided just obj, not ps
        if isinstance(obj, Cdf):
            self.xs = copy.copy(obj.xs)
            self.ps = copy.copy(obj.ps)
            return
        if isinstance(obj, _DictWrapper):
            dw = obj
        else:
            # treat obj as a sequence/dict of values and count them
            dw = Hist(obj)
        if len(dw) == 0:
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            return
        xs, freqs = zip(*sorted(dw.Items()))
        self.xs = np.asarray(xs)
        # use builtin float: the np.float alias was removed in NumPy 1.20
        self.ps = np.cumsum(freqs, dtype=float)
        self.ps /= self.ps[-1]

    def __str__(self):
        return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))

    __repr__ = __str__

    def __len__(self):
        return len(self.xs)

    def __getitem__(self, x):
        return self.Prob(x)

    def __setitem__(self, x, value):
        # Cdfs are immutable by item assignment; accept the arguments so
        # callers get UnimplementedMethodException rather than TypeError
        raise UnimplementedMethodException()

    def __delitem__(self, x):
        # see __setitem__: accept the argument so the intended exception
        # is raised
        raise UnimplementedMethodException()

    def __eq__(self, other):
        return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)

    def Copy(self, label=None):
        """Returns a copy of this Cdf.

        label: string label for the new Cdf
        """
        if label is None:
            label = self.label
        return Cdf(list(self.xs), list(self.ps), label=label)

    def MakePmf(self, label=None):
        """Makes a Pmf."""
        if label is None:
            label = self.label
        return Pmf(self, label=label)

    def Values(self):
        """Returns a sorted list of values.
        """
        return self.xs

    def Items(self):
        """Returns a sorted sequence of (value, probability) pairs.

        Note: in Python3, returns an iterator.
        """
        # TODO: rethink this function: should it just iterate
        # over xs and ps (cumulative probabilities) and not compute
        # differences?
        a = self.ps
        b = np.roll(a, 1)
        b[0] = 0
        # a-b turns the cumulative probs back into point masses
        return zip(self.xs, a-b)

    def Shift(self, term):
        """Adds a term to the xs.

        term: how much to add
        """
        new = self.Copy()
        # don't use +=, or else an int array + float yields int array
        new.xs = new.xs + term
        return new

    def Scale(self, factor):
        """Multiplies the xs by a factor.

        factor: what to multiply by
        """
        new = self.Copy()
        # don't use *=, or else an int array * float yields int array
        new.xs = new.xs * factor
        return new

    def Prob(self, x):
        """Returns CDF(x), the probability that corresponds to value x.

        Args:
            x: number

        Returns:
            float probability
        """
        if x < self.xs[0]:
            return 0.0
        index = bisect.bisect(self.xs, x)
        p = self.ps[index-1]
        return p

    def Probs(self, xs):
        """Gets probabilities for a sequence of values.

        xs: any sequence that can be converted to NumPy array

        returns: NumPy array of cumulative probabilities
        """
        xs = np.asarray(xs)
        index = np.searchsorted(self.xs, xs, side='right')
        ps = self.ps[index-1]
        # values below the support get probability 0
        ps[xs < self.xs[0]] = 0.0
        return ps

    ProbArray = Probs

    def Value(self, p):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        Args:
            p: number in the range [0, 1]

        Returns:
            number value

        Raises:
            ValueError: if p is outside [0, 1]
        """
        if p < 0 or p > 1:
            raise ValueError('Probability p must be in range [0, 1]')
        index = bisect.bisect_left(self.ps, p)
        return self.xs[index]

    def ValueArray(self, ps):
        """Returns InverseCDF(p), the value that corresponds to probability p.

        Args:
            ps: NumPy array of numbers in the range [0, 1]

        Returns:
            NumPy array of values

        Raises:
            ValueError: if any p is outside [0, 1]
        """
        ps = np.asarray(ps)
        if np.any(ps < 0) or np.any(ps > 1):
            raise ValueError('Probability p must be in range [0, 1]')
        index = np.searchsorted(self.ps, ps, side='left')
        return self.xs[index]

    def Percentile(self, p):
        """Returns the value that corresponds to percentile p.

        Args:
            p: number in the range [0, 100]

        Returns:
            number value
        """
        return self.Value(p / 100.0)

    def PercentileRank(self, x):
        """Returns the percentile rank of the value x.

        x: potential value in the CDF

        returns: percentile rank in the range 0 to 100
        """
        return self.Prob(x) * 100.0

    def Random(self):
        """Chooses a random value from this distribution."""
        return self.Value(random.random())

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int length of the sample

        returns: NumPy array
        """
        ps = np.random.random(n)
        return self.ValueArray(ps)

    def Mean(self):
        """Computes the mean of a CDF.

        Returns:
            float mean
        """
        old_p = 0
        total = 0.0
        # successive differences of the cumulative probs recover the
        # point masses
        for x, new_p in zip(self.xs, self.ps):
            p = new_p - old_p
            total += p * x
            old_p = new_p
        return total

    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.

        If percentage=90, computes the 90% CI.

        Args:
            percentage: float between 0 and 100

        Returns:
            sequence of two floats, low and high
        """
        prob = (1 - percentage / 100.0) / 2
        interval = self.Value(prob), self.Value(1 - prob)
        return interval

    ConfidenceInterval = CredibleInterval

    def _Round(self, multiplier=1000.0):
        """
        An entry is added to the cdf only if the percentile differs
        from the previous value in a significant digit, where the number
        of significant digits is determined by multiplier.  The
        default is 1000, which keeps log10(1000) = 3 significant digits.
        """
        # TODO(write this method)
        raise UnimplementedMethodException()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        An empirical CDF is a step function; linear interpolation
        can be misleading.

        Note: options are ignored

        Returns:
            tuple of (xs, ps)
        """
        def interleave(a, b):
            # merge a and b element-wise: a supplies the even slots,
            # b the odd ones, producing the step-function corners
            c = np.empty(a.shape[0] + b.shape[0])
            c[::2] = a
            c[1::2] = b
            return c

        a = np.array(self.xs)
        xs = interleave(a, a)
        shift_ps = np.roll(self.ps, 1)
        shift_ps[0] = 0
        ps = interleave(shift_ps, self.ps)
        return xs, ps

    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.

        k: int

        returns: new Cdf
        """
        cdf = self.Copy()
        # P(max of k draws <= x) = P(X <= x)**k for independent draws
        cdf.ps **= k
        return cdf
def MakeCdfFromItems(items, label=None):
    """Builds a Cdf from an unsorted sequence of (value, frequency) pairs.

    items: unsorted sequence of (value, frequency) pairs
    label: string label for this CDF

    Returns: Cdf object
    """
    cdf = Cdf(dict(items), label=label)
    return cdf
def MakeCdfFromDict(d, label=None):
    """Builds a CDF from a dictionary that maps values to frequencies.

    d: dictionary that maps values to frequencies.
    label: string label for the data.

    Returns: Cdf object
    """
    cdf = Cdf(d, label=label)
    return cdf
def MakeCdfFromList(seq, label=None):
    """Builds a CDF from an unsorted sequence.

    seq: unsorted sequence of sortable values
    label: string label for the cdf

    Returns: Cdf object
    """
    cdf = Cdf(seq, label=label)
    return cdf
def MakeCdfFromHist(hist, label=None):
    """Builds a CDF from a Hist object.

    hist: Pmf.Hist object
    label: string label for the data; defaults to the histogram's label

    Returns: Cdf object
    """
    if label is None:
        label = hist.label
    cdf = Cdf(hist, label=label)
    return cdf
def MakeCdfFromPmf(pmf, label=None):
    """Builds a CDF from a Pmf object.

    pmf: Pmf.Pmf object
    label: string label for the data; defaults to the Pmf's label

    Returns: Cdf object
    """
    if label is None:
        label = pmf.label
    cdf = Cdf(pmf, label=label)
    return cdf
class UnimplementedMethodException(Exception):
    """Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
    """Represents a suite of hypotheses and their probabilities."""

    def Update(self, data):
        """Updates each hypothesis based on the data.

        data: any representation of the data

        returns: the normalizing constant
        """
        # Mult only rewrites existing keys, so iterating the live view
        # is safe here
        for hypo in self.Values():
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdate(self, data):
        """Updates a suite of hypotheses based on new data.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        Note: unlike Update, LogUpdate does not normalize.

        Args:
            data: any representation of the data
        """
        for hypo in self.Values():
            like = self.LogLikelihood(data, hypo)
            self.Incr(hypo, like)

    def UpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        This is more efficient than calling Update repeatedly because
        it waits until the end to Normalize.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: the normalizing constant
        """
        for data in dataset:
            for hypo in self.Values():
                like = self.Likelihood(data, hypo)
                self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: None
        """
        for data in dataset:
            self.LogUpdate(data)

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        Subclasses must override this.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def LogLikelihood(self, data, hypo):
        """Computes the log likelihood of the data under the hypothesis.

        Subclasses must override this.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def Print(self):
        """Prints the hypotheses and their probabilities."""
        for hypo, prob in sorted(self.Items()):
            print(hypo, prob)

    def MakeOdds(self):
        """Transforms from probabilities to odds.

        Values with prob=0 are removed.
        """
        # iterate over a snapshot: Remove mutates the underlying dict,
        # which raises RuntimeError in Python 3 if the live items() view
        # is being iterated
        for hypo, prob in list(self.Items()):
            if prob:
                self.Set(hypo, Odds(prob))
            else:
                self.Remove(hypo)

    def MakeProbs(self):
        """Transforms from odds to probabilities."""
        # Set only rewrites existing keys, so the live view is safe
        for hypo, odds in self.Items():
            self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
    """Builds a suite from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this suite

    Returns: Suite object
    """
    # count the values, then normalize the counts into a Suite
    hist = MakeHistFromList(t, label=label)
    return MakeSuiteFromDict(hist.GetDict())
def MakeSuiteFromHist(hist, label=None):
    """Builds a normalized suite from a Hist object.

    hist: Hist object
    label: string label; defaults to the histogram's label

    Returns: Suite object
    """
    if label is None:
        label = hist.label
    # copy the dictionary so the suite does not share state with the hist
    return MakeSuiteFromDict(dict(hist.GetDict()), label)
def MakeSuiteFromDict(d, label=None):
    """Builds a suite from a map from values to probabilities.

    d: dictionary that maps values to probabilities
    label: string label for this suite

    Returns: Suite object (normalized)
    """
    suite = Suite(label=label)
    suite.SetDict(d)
    suite.Normalize()
    return suite
class Pdf(object):
    """Base class for continuous probability density functions."""

    def Density(self, x):
        """Evaluates this Pdf at x.

        Subclasses must override this.

        Returns: float or NumPy array of probability density
        """
        raise UnimplementedMethodException()

    def GetLinspace(self):
        """Returns a default array of locations for plotting.

        Not all subclasses of Pdf implement this.

        Returns: numpy array
        """
        raise UnimplementedMethodException()

    def MakePmf(self, **options):
        """Builds a discrete approximation of this Pdf.

        options can include
            label: string
            low: low end of range
            high: high end of range
            n: number of places to evaluate

        Returns: new Pmf
        """
        label = options.pop('label', '')
        xs, ds = self.Render(**options)
        return Pmf(dict(zip(xs, ds)), label=label)

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        If options includes low and high, the density is evaluated at
        n locations between them, inclusive (n defaults to 101).
        Otherwise, if options includes xs, the density is evaluated at
        those locations; failing that, self.GetLinspace is invoked to
        provide the locations.

        Returns:
            tuple of (xs, densities)
        """
        low = options.pop('low', None)
        high = options.pop('high', None)
        if low is not None and high is not None:
            n = options.pop('n', 101)
            xs = np.linspace(low, high, n)
        else:
            xs = options.pop('xs', None)
            if xs is None:
                xs = self.GetLinspace()
        return xs, self.Density(xs)

    def Items(self):
        """Generates a sequence of (value, probability) pairs.
        """
        return zip(*self.Render())
class NormalPdf(Pdf):
    """PDF of a Normal (Gaussian) distribution."""

    def __init__(self, mu=0, sigma=1, label=None):
        """Constructs a Normal Pdf with given mu and sigma.

        mu: mean
        sigma: standard deviation
        label: string
        """
        self.mu = mu
        self.sigma = sigma
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)

    def GetLinspace(self):
        """Returns 101 points covering mu +/- 3 sigma.

        Returns: numpy array
        """
        spread = 3 * self.sigma
        return np.linspace(self.mu - spread, self.mu + spread, 101)

    def Density(self, xs):
        """Evaluates this Pdf at xs.

        xs: scalar or sequence of floats

        returns: float or NumPy array of probability density
        """
        return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
    """PDF of an exponential distribution."""

    def __init__(self, lam=1, label=None):
        """Constructs an exponential Pdf with given parameter.

        lam: rate parameter
        label: string
        """
        self.lam = lam
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'ExponentialPdf(%f)' % (self.lam)

    def GetLinspace(self):
        """Returns 101 points from 0 to 5 mean lifetimes.

        Returns: numpy array
        """
        return np.linspace(0, 5.0 / self.lam, 101)

    def Density(self, xs):
        """Evaluates this Pdf at xs.

        xs: scalar or sequence of floats

        returns: float or NumPy array of probability density
        """
        return stats.expon.pdf(xs, scale=1.0 / self.lam)
class EstimatedPdf(Pdf):
    """PDF estimated from a sample by Gaussian kernel density estimation."""

    def __init__(self, sample, label=None):
        """Fits a KDE to the given sample.

        sample: sequence of data
        label: string
        """
        self.label = '_nolegend_' if label is None else label
        self.kde = stats.gaussian_kde(sample)
        # default plotting range spans the observed data
        self.linspace = np.linspace(min(sample), max(sample), 101)

    def __str__(self):
        return 'EstimatedPdf(label=%s)' % str(self.label)

    def GetLinspace(self):
        """Returns the precomputed linspace for plotting.

        Returns: numpy array
        """
        return self.linspace

    def Density(self, xs):
        """Evaluates this Pdf at xs.

        returns: float or NumPy array of probability density
        """
        return self.kde.evaluate(xs)

    def Sample(self, n):
        """Generates a random sample from the estimated Pdf.

        n: size of sample
        """
        # resample returns a 2-D array for some reason; flatten to 1-D
        return self.kde.resample(n).flatten()
def CredibleInterval(pmf, percentage=90):
    """Computes the central credible interval of a distribution.

    If percentage=90, computes the 90% CI.

    pmf: Pmf object representing a posterior distribution
    percentage: float between 0 and 100

    Returns: sequence of two floats, low and high
    """
    cdf = pmf.MakeCdf()
    # split the excluded probability evenly between the two tails
    tail = (1 - percentage / 100.0) / 2
    return cdf.Value(tail), cdf.Value(1 - tail)
def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 < v2), 0.0)
def PmfProbGreater(pmf1, pmf2):
    """Probability that a value from pmf1 is greater than a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 > v2), 0.0)
def PmfProbEqual(pmf1, pmf2):
    """Probability that a value from pmf1 equals a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    Returns: float probability
    """
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 == v2), 0.0)
def RandomSum(dists):
    """Draws one random value from each dist and returns the sum.

    dists: sequence of Pmf or Cdf objects

    returns: numerical sum
    """
    return sum(dist.Random() for dist in dists)
def SampleSum(dists, n):
    """Draws a sample of sums from a list of distributions.

    dists: sequence of Pmf or Cdf objects
    n: sample size

    returns: new Pmf of sums
    """
    # build the Pmf directly from a generator of n random sums
    return Pmf(RandomSum(dists) for i in range(n))
def EvalNormalPdf(x, mu, sigma):
    """Computes the PDF of the normal distribution.

    (The old docstring said "unnormalized", but stats.norm.pdf returns the
    properly normalized density.)

    x: value
    mu: mean
    sigma: standard deviation

    returns: float probability density
    """
    return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
    """Makes a PMF discrete approx to a Normal distribution.

    mu: float mean
    sigma: float standard deviation
    num_sigmas: how many sigmas to extend in each direction
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    low = mu - num_sigmas * sigma
    high = mu + num_sigmas * sigma

    # evaluate the density on an evenly spaced grid, then normalize
    # so the masses sum to 1
    for x in np.linspace(low, high, n):
        p = EvalNormalPdf(x, mu, sigma)
        pmf.Set(x, p)
    pmf.Normalize()
    return pmf
def EvalBinomialPmf(k, n, p):
    """Evaluates the binomial PMF.

    Returns the probability of k successes in n trials with probability p.
    """
    return stats.binom.pmf(k, n, p)
def MakeBinomialPmf(n, p):
    """Evaluates the binomial PMF for all k in [0, n].

    Returns the distribution of successes in n trials with probability p.
    """
    pmf = Pmf()
    # the PMF over 0..n successes already sums to 1, so no Normalize needed
    for k in range(n+1):
        pmf[k] = stats.binom.pmf(k, n, p)
    return pmf
def EvalHypergeomPmf(k, N, K, n):
    """Evaluates the hypergeometric PMF.

    Returns the probability of k successes in a sample of size n, drawn
    without replacement from a population of size N that contains K
    successes.
    """
    return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
    """Computes the Poisson PMF.

    k: number of events
    lam: parameter lambda in events per unit time

    returns: float probability
    """
    # Hand-rolled instead of stats.poisson.pmf, which returns NaN for
    # lam=0 where this formula correctly yields 0 (and 1 at k=0).
    numerator = lam ** k * math.exp(-lam)
    return numerator / special.gamma(k + 1)
def MakePoissonPmf(lam, high, step=1):
    """Makes a PMF discrete approx to a Poisson distribution.

    lam: parameter lambda in events per unit time
    high: upper bound of the Pmf
    step: spacing between successive values of k

    returns: normalized Pmf
    """
    pmf = Pmf()
    # truncate the support at `high`, then renormalize the lost tail mass
    for k in range(0, high + 1, step):
        p = EvalPoissonPmf(k, lam)
        pmf.Set(k, p)
    pmf.Normalize()
    return pmf
def EvalExponentialPdf(x, lam):
    """Computes the exponential PDF.

    x: value
    lam: parameter lambda in events per unit time

    returns: float probability density
    """
    density = math.exp(-lam * x) * lam
    return density
def EvalExponentialCdf(x, lam):
    """Evaluates CDF of the exponential distribution with parameter lam."""
    survival = math.exp(-lam * x)
    return 1 - survival
def MakeExponentialPmf(lam, high, n=200):
    """Makes a PMF discrete approx to an exponential distribution.

    lam: parameter lambda in events per unit time
    high: upper bound
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    # sample the density on a grid over [0, high], then normalize
    for x in np.linspace(0, high, n):
        p = EvalExponentialPdf(x, lam)
        pmf.Set(x, p)
    pmf.Normalize()
    return pmf
def StandardNormalCdf(x):
    """Evaluates the CDF of the standard Normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution
    #Cumulative_distribution_function

    Args:
        x: float

    Returns:
        float
    """
    # erf-based closed form: Phi(x) = (erf(x / sqrt(2)) + 1) / 2.
    # ROOT2 is a module-level constant defined elsewhere in this file
    # (presumably sqrt(2) -- confirm).
    return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the normal distribution.

    Args:
        x: float
        mu: mean parameter
        sigma: standard deviation parameter

    Returns:
        float
    """
    dist = stats.norm(loc=mu, scale=sigma)
    return dist.cdf(x)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
    """Evaluates the inverse CDF of the normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function

    Args:
        p: float
        mu: mean parameter
        sigma: standard deviation parameter

    Returns:
        float
    """
    dist = stats.norm(loc=mu, scale=sigma)
    return dist.ppf(p)
def EvalLognormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the lognormal distribution.

    x: float or sequence
    mu: mean of the underlying normal (log scale)
    sigma: standard deviation of the underlying normal (log scale)

    Returns: float or sequence
    """
    # Bug fix: scipy's lognorm requires the shape parameter s=sigma.
    # The old call stats.lognorm.cdf(x, loc=mu, scale=sigma) omitted it
    # and raised a TypeError.  The standard parameterization for a
    # lognormal with log-mean mu and log-std sigma is s=sigma,
    # scale=exp(mu).
    return stats.lognorm.cdf(x, sigma, scale=np.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
    """Generates sequences of xs and ps for an exponential CDF.

    lam: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    # CDF(x) = 1 - exp(-lam x); equivalent to stats.expon.cdf(xs, scale=1/lam)
    survival = np.exp(-lam * xs)
    ps = 1 - survival
    return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
    """Generates sequences of xs and ps for a Normal CDF.

    mu: parameter
    sigma: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    ps = stats.norm(mu, sigma).cdf(xs)
    return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
    """Generates sequences of xs and ps for a Pareto CDF.

    xmin: parameter
    alpha: parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    # the CDF is zero below xmin, so clamp the range
    low = max(low, xmin)
    xs = np.linspace(low, high, n)
    # CDF(x) = 1 - (x / xmin)^-alpha; same as stats.pareto.cdf(xs, scale=xmin, b=alpha)
    ps = 1 - (xs / xmin) ** -alpha
    return xs, ps
class Beta(object):
    """Represents a Beta distribution.

    See http://en.wikipedia.org/wiki/Beta_distribution
    """

    def __init__(self, alpha=1, beta=1, label=None):
        """Initializes a Beta distribution."""
        self.alpha = alpha
        self.beta = beta
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Beta distribution.

        data: pair of int (heads, tails)
        """
        heads, tails = data
        self.alpha += heads
        self.beta += tails

    def Mean(self):
        """Computes the mean of this distribution."""
        # float() guards against integer truncation if this module runs
        # under Python 2 without `from __future__ import division`
        # (alpha and beta are typically ints).
        return self.alpha / float(self.alpha + self.beta)

    def Random(self):
        """Generates a random variate from this distribution."""
        return random.betavariate(self.alpha, self.beta)

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int sample size
        """
        size = n,
        return np.random.beta(self.alpha, self.beta, size)

    def EvalPdf(self, x):
        """Evaluates the (unnormalized) PDF at x."""
        return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)

    def MakePmf(self, steps=101, label=None):
        """Returns a Pmf of this distribution.

        Note: Normally, we just evaluate the PDF at a sequence
        of points and treat the probability density as a probability
        mass.

        But if alpha or beta is less than one, we have to be
        more careful because the PDF goes to infinity at x=0
        and x=1.  In that case we evaluate the CDF and compute
        differences.

        The result is a little funny, because the values at 0 and 1
        are not symmetric.  Nevertheless, it is a reasonable discrete
        model of the continuous distribution, and behaves well as
        the number of values increases.
        """
        if self.alpha < 1 or self.beta < 1:
            # PDF diverges at the endpoints; difference the CDF instead
            cdf = self.MakeCdf()
            pmf = cdf.MakePmf()
            return pmf

        xs = [i / (steps - 1.0) for i in range(steps)]
        probs = [self.EvalPdf(x) for x in xs]
        pmf = Pmf(dict(zip(xs, probs)), label=label)
        return pmf

    def MakeCdf(self, steps=101):
        """Returns the CDF of this distribution."""
        xs = [i / (steps - 1.0) for i in range(steps)]
        ps = special.betainc(self.alpha, self.beta, xs)
        cdf = Cdf(xs, ps)
        return cdf

    def Percentile(self, ps):
        """Returns the given percentiles from this distribution.

        ps: scalar, array, or list of [0-100]
        """
        # 100.0 (not 100) avoids integer floor-division of an int array
        # under Python 2 / old NumPy semantics.
        ps = np.asarray(ps) / 100.0
        xs = special.betaincinv(self.alpha, self.beta, ps)
        return xs
class Dirichlet(object):
    """Represents a Dirichlet distribution.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution
    """

    def __init__(self, n, conc=1, label=None):
        """Initializes a Dirichlet distribution.

        n: number of dimensions
        conc: concentration parameter (smaller yields more concentration)
        label: string label
        """
        if n < 2:
            raise ValueError('A Dirichlet distribution with '
                             'n<2 makes no sense')

        self.n = n
        # np.float was removed in NumPy 1.24; the builtin float is the
        # documented replacement and is equivalent.
        self.params = np.ones(n, dtype=float) * conc
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Dirichlet distribution.

        data: sequence of observations, in order corresponding to params
        """
        m = len(data)
        self.params[:m] += data

    def Random(self):
        """Generates a random variate from this distribution.

        Returns: normalized vector of fractions
        """
        # a normalized vector of Gamma variates is Dirichlet-distributed
        p = np.random.gamma(self.params)
        return p / p.sum()

    def Likelihood(self, data):
        """Computes the likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float probability
        """
        m = len(data)
        if self.n < m:
            return 0

        x = data
        p = self.Random()
        q = p[:m] ** x
        return q.prod()

    def LogLikelihood(self, data):
        """Computes the log likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float log probability
        """
        m = len(data)
        if self.n < m:
            return float('-inf')

        x = self.Random()
        y = np.log(x[:m]) * data
        return y.sum()

    def MarginalBeta(self, i):
        """Computes the marginal distribution of the ith element.

        See http://en.wikipedia.org/wiki/Dirichlet_distribution
        #Marginal_distributions

        i: int

        Returns: Beta object
        """
        alpha0 = self.params.sum()
        alpha = self.params[i]
        return Beta(alpha, alpha0 - alpha)

    def PredictivePmf(self, xs, label=None):
        """Makes a predictive distribution.

        xs: values to go into the Pmf

        Returns: Pmf that maps from x to the mean prevalence of x
        """
        alpha0 = self.params.sum()
        ps = self.params / alpha0
        return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
    """Compute the binomial coefficient "n choose k".

    n: number of trials
    k: number of successes

    Returns: float
    """
    # scipy.misc.comb was deprecated in SciPy 1.0 and removed in 1.3;
    # scipy.special.comb is the drop-in replacement (the `special` module
    # is already used elsewhere in this file).
    return special.comb(n, k)
def LogBinomialCoef(n, k):
    """Computes the log of the binomial coefficient.

    http://math.stackexchange.com/questions/64716/
    approximating-the-logarithm-of-the-binomial-coefficient

    n: number of trials
    k: number of successes

    Returns: float
    """
    # Edge cases: C(n, 0) == C(n, n) == 1, whose log is 0.  The formula
    # below would otherwise evaluate log(0) and raise ValueError.
    if k == 0 or k == n:
        return 0.0
    return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
    """Generates data for a normal probability plot.

    ys: sequence of values
    jitter: float magnitude of jitter added to the ys

    returns: numpy arrays xs, ys (both sorted)
    """
    # sorted standard normal variates for the horizontal axis
    xs = np.random.normal(0, 1, len(ys))
    xs.sort()

    ys = Jitter(ys, jitter) if jitter else np.array(ys)
    ys.sort()
    return xs, ys
def Jitter(values, jitter=0.5):
    """Jitters the values by adding Gaussian noise.

    Note: despite what the old docstring said ("a uniform variate in
    (-jitter, jitter)"), the code draws Normal(0, jitter) noise.

    values: sequence
    jitter: scalar standard deviation of the added noise

    returns: new numpy array
    """
    n = len(values)
    return np.random.normal(0, jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
    """Makes a normal probability plot with a fitted line.

    sample: sequence of numbers
    fit_color: color string for the fitted line
    options: passed along to Plot
    """
    # first call is used only for its random normal xs (to draw the fit
    # line); the sorted sample values it returns are discarded
    xs, ys = NormalProbability(sample)
    mean, var = MeanVar(sample)
    std = math.sqrt(var)

    fit = FitLine(xs, mean, std)
    thinkplot.Plot(*fit, color=fit_color, label='model')

    # second call generates the actual data points to plot
    xs, ys = NormalProbability(sample)
    thinkplot.Plot(xs, ys, **options)
def Mean(xs):
    """Computes mean.

    xs: sequence of values

    returns: float mean
    """
    return np.asarray(xs).mean()
def Var(xs, mu=None, ddof=0):
    """Computes variance.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    xs = np.asarray(xs)
    mu = xs.mean() if mu is None else mu

    deviations = xs - mu
    # sum of squared deviations over (n - ddof)
    return np.dot(deviations, deviations) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
    """Computes standard deviation.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    return math.sqrt(Var(xs, mu, ddof))
def MeanVar(xs, ddof=0):
    """Computes mean and variance.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, mean and var
    """
    arr = np.asarray(xs)
    mean = arr.mean()
    # reuse the mean so the data is only traversed twice, not three times
    return mean, Var(arr, mean, ddof)
def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        sorted list of the remaining values
    """
    n = int(p * len(t))
    t = sorted(t)
    # Bug fix: the old slice t[n:-n] returned an EMPTY list when n == 0
    # (because -0 == 0); slicing with an explicit upper bound keeps the
    # whole (sorted) sequence in that case.
    return t[n:len(t) - n]
def TrimmedMean(t, p=0.01):
    """Computes the trimmed mean of a sequence of numbers.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        float
    """
    return Mean(Trim(t, p))
def TrimmedMeanVar(t, p=0.01):
    """Computes the trimmed mean and variance of a sequence of numbers.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        pair of float, (mean, variance)
    """
    trimmed = Trim(t, p)
    return MeanVar(trimmed)
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    n1, n2 = len(group1), len(group2)
    # pool the variances, weighted by group size
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)

    diff = group1.mean() - group2.mean()
    return diff / math.sqrt(pooled_var)
def Cov(xs, ys, meanx=None, meany=None):
    """Computes Cov(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values
        meanx: optional float mean of xs
        meany: optional float mean of ys

    Returns:
        Cov(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    meanx = np.mean(xs) if meanx is None else meanx
    meany = np.mean(ys) if meany is None else meany

    # mean of the product of deviations (population covariance)
    return np.dot(xs - meanx, ys - meany) / len(xs)
def Corr(xs, ys):
    """Computes Corr(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        Corr(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    meanx, varx = MeanVar(xs)
    meany, vary = MeanVar(ys)

    # Pearson correlation: covariance normalized by the std devs
    return Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
def SerialCorr(series, lag=1):
    """Computes the serial correlation of a series.

    series: Series
    lag: integer number of intervals to shift

    returns: float correlation
    """
    # correlate the series with a lagged copy of itself, dropping the
    # first `lag` elements (which are NaN after the shift)
    shifted = series.shift(lag)
    return Corr(series[lag:], shifted[lag:])
def SpearmanCorr(xs, ys):
    """Computes Spearman's rank correlation.

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        float Spearman's correlation
    """
    # Spearman's correlation is the Pearson correlation of the ranks
    return Corr(pandas.Series(xs).rank(), pandas.Series(ys).rank())
def MapToRanks(t):
    """Returns a list of ranks corresponding to the elements in t.

    Args:
        t: sequence of numbers

    Returns:
        list of integer ranks, starting at 1
    """
    # indices of t, ordered by the values they point at (stable sort,
    # so ties keep their original order)
    order = sorted(range(len(t)), key=lambda i: t[i])

    # scatter 1-based ranks back to the original positions
    ranks = [0] * len(t)
    for rank, index in enumerate(order, start=1):
        ranks[index] = rank
    return ranks
def LeastSquares(xs, ys):
    """Computes a linear least squares fit for ys as a function of xs.

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        tuple of (intercept, slope)
    """
    meanx, varx = MeanVar(xs)
    meany = Mean(ys)

    # slope = Cov(x, y) / Var(x); the intercept makes the fitted line
    # pass through the point of means
    slope = Cov(xs, ys, meanx, meany) / varx
    inter = meany - slope * meanx
    return inter, slope
def FitLine(xs, inter, slope):
    """Fits a line to the given data.

    xs: sequence of x
    inter: float intercept
    slope: float slope

    returns: tuple of numpy arrays (sorted xs, fit ys)
    """
    sorted_xs = np.sort(xs)
    return sorted_xs, slope * sorted_xs + inter
def Residuals(xs, ys, inter, slope):
    """Computes residuals for a linear fit with parameters inter and slope.

    Args:
        xs: independent variable
        ys: dependent variable
        inter: float intercept
        slope: float slope

    Returns:
        NumPy array of residuals (observed minus predicted)
    """
    predicted = inter + slope * np.asarray(xs)
    return np.asarray(ys) - predicted
def CoefDetermination(ys, res):
    """Computes the coefficient of determination (R^2) for given residuals.

    Args:
        ys: dependent variable
        res: residuals

    Returns:
        float coefficient of determination
    """
    # R^2 = 1 - (residual variance / total variance)
    return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
    """Generates standard normal variates with serial correlation.

    rho: target coefficient of correlation

    Returns: iterable (infinite generator)
    """
    # first variate is an unconditional standard normal
    x = random.gauss(0, 1)
    yield x

    # conditional std dev chosen so the marginal variance stays 1
    sigma = math.sqrt(1 - rho**2)

    while True:
        # AR(1) process: each value regresses toward rho * previous
        x = random.gauss(x * rho, sigma)
        yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
    """Generates normal variates with serial correlation.

    mu: mean of variate
    sigma: standard deviation of variate
    rho: target coefficient of correlation

    Returns: iterable (infinite generator)
    """
    # scale and shift standard correlated variates to N(mu, sigma)
    for z in CorrelatedGenerator(rho):
        yield z * sigma + mu
def RawMoment(xs, k):
    """Computes the kth raw moment of xs.

    xs: sequence of values
    k: int moment order

    returns: float
    """
    total = sum(x ** k for x in xs)
    return total / len(xs)
def CentralMoment(xs, k):
    """Computes the kth central moment of xs.

    xs: sequence of values
    k: int moment order

    returns: float
    """
    # the first raw moment is the mean
    mean = RawMoment(xs, 1)
    return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
    """Computes the kth standardized moment of xs.

    xs: sequence of values
    k: int moment order

    returns: float
    """
    # normalize the central moment by std**k so the result is unitless
    var = CentralMoment(xs, 2)
    std = math.sqrt(var)
    return CentralMoment(xs, k) / std**k
def Skewness(xs):
    """Computes skewness.

    xs: sequence of values

    returns: float (the third standardized moment)
    """
    return StandardizedMoment(xs, 3)
def Median(xs):
    """Computes the median (50th percentile) of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: float
    """
    # the median is the value at cumulative probability 0.5
    cdf = Cdf(xs)
    return cdf.Value(0.5)
def IQR(xs):
    """Computes the interquartile range of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: pair of floats (25th and 75th percentiles)
    """
    cdf = Cdf(xs)
    return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
    """Computes the Pearson median skewness.

    xs: sequence of values

    returns: float, 3 * (mean - median) / std
    """
    median = Median(xs)
    mean = RawMoment(xs, 1)
    var = CentralMoment(xs, 2)
    std = math.sqrt(var)
    gp = 3 * (mean - median) / std
    return gp
class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame with 'name', 'start' and 'end' columns
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of (start, end) index tuples
        names: list of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs
        self.colspecs = variables[['start', 'end']] - index_base

        # convert colspecs to a list of pair of int.
        # np.int was removed in NumPy 1.24; the builtin int is the
        # documented, equivalent replacement.
        self.colspecs = self.colspecs.astype(int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename
        options: passed along to pandas.read_fwf

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df
def ReadStataDct(dct_file, **options):
    """Reads a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    # map Stata type names to Python types
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    for line in open(dct_file, **options):
        # variable definitions contain "_column(<start>)"
        match = re.search( r'_column\(([^)]*)\)', line)
        if match:
            start = int(match.group(1))
            t = line.split()
            vtype, name, fstring = t[1:4]
            name = name.lower()
            # Stata string types look like "str12"
            if vtype.startswith('str'):
                vtype = str
            else:
                vtype = type_map[vtype]
            # the rest of the line is a quoted human-readable description
            long_desc = ' '.join(t[4:]).strip('"')
            var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column
    variables['end'] = variables.start.shift(-1)
    # NOTE(review): the last variable's end is set to 0, which looks like
    # a "read to end of line" sentinel -- confirm downstream handling.
    variables.loc[len(variables)-1, 'end'] = 0

    dct = FixedWidthVariables(variables, index_base=1)
    return dct
def Resample(xs, n=None):
    """Draw a sample from xs with the same length as xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    size = len(xs) if n is None else n
    return np.random.choice(xs, size, replace=True)
def SampleRows(df, nrows, replace=False):
    """Choose a sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    chosen = np.random.choice(df.index, nrows, replace=replace)
    return df.loc[chosen]
def ResampleRows(df):
    """Resamples rows from a DataFrame.

    Draws len(df) rows with replacement (a bootstrap sample).

    df: DataFrame

    returns: DataFrame
    """
    return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    # normalize the weight column to a probability vector
    weights = df[column].copy()
    weights /= sum(weights)

    chosen = np.random.choice(df.index, len(df), replace=True, p=weights)
    return df.loc[chosen]
def PercentileRow(array, p):
    """Selects the row from a sorted array that maps to percentile p.

    array: NumPy 2-D array, sorted along axis 0
    p: float 0--100

    returns: NumPy array (one row)
    """
    nrows, _ = array.shape
    row = int(nrows * p / 100)
    return array[row, ]
def PercentileRows(ys_seq, percents):
    """Given a collection of lines, selects percentiles along vertical axis.

    For example, if ys_seq contains simulation results like ys as a
    function of time, and percents contains (5, 95), the result would
    be a 90% CI for each vertical slice of the simulation results.

    ys_seq: sequence of lines (y values)
    percents: list of percentiles (0-100) to select

    returns: list of NumPy arrays, one for each percentile
    """
    # stack the lines into a matrix and sort each column independently
    stacked = np.asarray(list(ys_seq), dtype=float)
    stacked = np.sort(stacked, axis=0)

    return [PercentileRow(stacked, p) for p in percents]
def Smooth(xs, sigma=2, **options):
    """Smooths a NumPy array with a Gaussian filter.

    xs: sequence
    sigma: standard deviation of the filter
    options: passed along to gaussian_filter1d

    returns: NumPy array of smoothed values
    """
    # scipy.ndimage.filters was deprecated and later removed; the same
    # function is available directly as scipy.ndimage.gaussian_filter1d.
    return ndimage.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
    """Represents a hypothesis test.

    Subclasses must override TestStatistic and RunModel, and may override
    MakeModel.
    """

    def __init__(self, data):
        """Initializes.

        data: data in whatever form is relevant
        """
        self.data = data
        self.MakeModel()
        # test statistic computed from the observed data
        self.actual = self.TestStatistic(data)
        self.test_stats = None
        self.test_cdf = None

    def PValue(self, iters=1000):
        """Computes the distribution of the test statistic and p-value.

        iters: number of iterations

        returns: float p-value
        """
        self.test_stats = [self.TestStatistic(self.RunModel())
                           for _ in range(iters)]
        self.test_cdf = Cdf(self.test_stats)

        # one-sided p-value: fraction of simulated stats at least as
        # extreme as the observed one.
        # NOTE(review): count / iters is integer division under Python 2
        # unless this module imports division from __future__ -- confirm.
        count = sum(1 for x in self.test_stats if x >= self.actual)
        return count / iters

    def MaxTestStat(self):
        """Returns the largest test statistic seen during simulations.
        """
        return max(self.test_stats)

    def PlotCdf(self, label=None):
        """Draws a Cdf with vertical lines at the observed test stat.
        """
        def VertLine(x):
            """Draws a vertical line at x."""
            thinkplot.Plot([x, x], [0, 1], color='0.8')

        VertLine(self.actual)
        thinkplot.Cdf(self.test_cdf, label=label)

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: data in whatever form is relevant

        Subclasses must override this.
        """
        raise UnimplementedMethodException()

    def MakeModel(self):
        """Build a model of the null hypothesis.
        """
        pass

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data

        Subclasses must override this.
        """
        raise UnimplementedMethodException()
def main():
    # placeholder entry point; this module is intended to be imported
    pass


if __name__ == '__main__':
    main()
| qrsforever/workspace | python/learn/thinkbayes/thinkstats2.py | Python | mit | 70,069 | [
"Gaussian"
] | 042f9405db46ca42d3d18d26e4424f4edbe55d0487c1579a8c1bdeadabebea4e |
##
# title: BreadInterface.Prototype.py
# by: Brian Kim
# description: the main BreadInterface app that help end-developers design their
# button layouts
#
import pygtk
pygtk.require('2.0')
import gtk
from App import App
from Controller import Controller
from Lifecycle import Lifecycle
from BreadInterface.views.UnicodePickerView import UnicodePickerView
#
# a function that will generate a pygtk BI Controller
#
def generate_code( info ):
    """Renders the source text of a template BreadInterface Controller.

    info: dict with keys 'title', '<pos>_label' (button captions) and
          '<pos>' (button descriptions) for positions tl, tr, bl, bm, br

    returns: string of Python source code for a Controller subclass
    """
    # gogogogo
    # header and imports of the generated file
    y = '#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'
    y += '##\n# @file %s.py\n# @author <your name here>\n# @brief <Controller description>\n#\n\n' % info['title']
    y += 'import pygtk\npygtk.require(\'2.0\')\nimport gtk\n\n'
    y += 'from BreadInterface import *\n\n'
    # generated Controller class with an empty inner view
    y += 'class %s( Controller ):\n' % info['title']
    y += ' class MyView( Lifecycle, gtk.HBox ):\n'
    y += ' def __init__( self ):\n'
    y += ' gtk.HBox.__init__( self )\n'
    # stub methods returning the chosen button captions
    y += ' #\n # Button Labels\n #\n'
    y += ' def tl_label( self ):\n return \'%s\'\n' % info['tl_label']
    y += ' def tr_label( self ):\n return \'%s\'\n' % info['tr_label']
    y += ' def bl_label( self ):\n return \'%s\'\n' % info['bl_label']
    y += ' def bm_label( self ):\n return \'%s\'\n' % info['bm_label']
    y += ' def br_label( self ):\n return \'%s\'\n' % info['br_label']
    y += '\n'
    # empty click handlers, annotated with the user's descriptions
    y += ' #\n # Button Clicks\n #\n'
    y += ' def tl_clicked( self, b ):\n # %s\n pass\n' % info['tl']
    y += ' def tr_clicked( self, b ):\n # %s\n pass\n' % info['tr']
    y += ' def bl_clicked( self, b ):\n # %s\n pass\n' % info['bl']
    y += ' def bm_clicked( self, b ):\n # %s\n pass\n' % info['bm']
    y += ' def br_clicked( self, b ):\n # %s\n pass\n' % info['br']
    y += '\n'
    # info methods returning the user's descriptions verbatim
    y += ' #\n # Button Descriptions\n #\n'
    y += ' def tl_info( self ):\n return \'%s\'\n' % info['tl']
    y += ' def tr_info( self ):\n return \'%s\'\n' % info['tr']
    y += ' def bl_info( self ):\n return \'%s\'\n' % info['bl']
    y += ' def bm_info( self ):\n return \'%s\'\n' % info['bm']
    y += ' def br_info( self ):\n return \'%s\'\n\n' % info['br']
    # lifecycle pass-throughs to the Controller base class
    y += ' #\n # Lifecycle methods\n #\n'
    y += ' def __init__( self ):\n Controller.__init__( self, view=%s.MyView(), title=\'%s\' )\n' % (info['title'],info['title'])
    y += ' def start( self ):\n Controller.start(self)\n'
    y += ' def update( self ):\n Controller.update(self)\n'
    y += ' def stop( self ):\n Controller.stop(self)\n'
    y += ' def cleanup( self ):\n Controller.cleanup(self)\n\n'
    # script entry point for the generated module
    y += 'if __name__ == \"__main__\":\n'
    y += ' App(root=%s()).start()' % info['title']
    return y
class PrototypeController( Controller ):
    """Interactive Controller used to prototype BreadInterface button
    layouts and generate template Controller source code."""

    #
    # breadinterface buttons definition: each *_label method returns the
    # current caption for the corresponding button position
    #
    def tl_label( self ):
        return self._tl_label
    def tm_label( self ):
        return self._tm_label
    def tr_label( self ):
        return self._tr_label
    def bl_label( self ):
        return self._bl_label
    def bm_label( self ):
        return self._bm_label
    def br_label( self ):
        return self._br_label

    #
    # button clicks: copy the text-field contents into the clicked
    # button's label, then refresh the view
    def tl_clicked( self, widget ):
        self._tl_label = self.view.entry.get_text()
        self.update()
    def tm_clicked( self, widget ):
        # tm also invokes the base-class behavior first
        Controller.tm_clicked( self, widget )
        self._tm_label = self.view.entry.get_text()
        self.update()
    def tr_clicked( self, widget ):
        self._tr_label = self.view.entry.get_text()
        self.update()
    def bl_clicked( self, widget ):
        self._bl_label = self.view.entry.get_text()
        self.update()
    def bm_clicked( self, widget ):
        self._bm_label = self.view.entry.get_text()
        self.update()
    def br_clicked( self, widget ):
        self._br_label = self.view.entry.get_text()
        self.update()

    #
    # button information (shown as help text for each button)
    def tl_info( self ):
        return 'change button label to text field value'
    def tr_info( self ):
        return 'change button label to text field value'
    def bl_info( self ):
        return 'change button label to text field value'
    def bm_info( self ):
        return 'change button label to text field value'
    def br_info( self ):
        return 'change button label to text field value'

    def update( self ):
        """Refreshes the view and clears the text entry."""
        Controller.update(self)
        self.view.clear()

    #
    # generate code (prototype view delegate)
    def generate_code( self, info ):
        """Writes the generated Controller source to <title>.py and
        notifies the user with a modal dialog.

        info: dict of title/descriptions collected by the view
        """
        # fall back to a default class name if the user left it blank
        if len(info['title']) == 0:
            info['title'] = "MyController"
        info['tl_label'] = self._tl_label
        info['tr_label'] = self._tr_label
        info['bl_label'] = self._bl_label
        info['bm_label'] = self._bm_label
        info['br_label'] = self._br_label
        # delegate to the module-level generate_code renderer
        y = generate_code( info )
        with open('%s.py'%info['title'],'w') as fp:
            fp.write(y)
            # redundant: the with-block closes fp on exit
            fp.close()
        msg = gtk.MessageDialog(type=gtk.MESSAGE_INFO,flags=gtk.DIALOG_MODAL,buttons=gtk.BUTTONS_OK)
        msg.set_markup('The file \'%s.py\' has been successfully created'%info['title'])
        msg.run()
        self.nav.stop()

    ##
    # custom constructor
    #
    def __init__( self ):
        # ui: default captions for the six buttons
        self._tl_label = "tl"
        self._tm_label = "tm"
        self._tr_label = "tr"
        self._bl_label = "bl"
        self._bm_label = "bm"
        self._br_label = "br"
        Controller.__init__( self, view=PrototypeController.PrototypeView(self) )

    ##
    # custom view class definition
    #
    class PrototypeView( gtk.VBox, Lifecycle, UnicodePickerView.Delegate ):
        """Main view: a label-entry page (vbox1), a code-generation form
        (vbox2), and an embedded Unicode picker, swapped in and out of
        this VBox."""

        class Delegate():
            """Interface expected of this view's delegate."""
            def generate_code( self, info ):
                pass

        def gen1_clicked( self, view ):
            # swap to the code-generation form
            self.remove(self.vbox1)
            self.add(self.vbox2)
            # such a hack...
            self.title_entry.set_text( self.delegate._tm_label )
        def go2gen1( self, view ):
            # swap back to the first page
            self.remove(self.vbox2)
            self.add(self.vbox1)
        def gen2_clicked( self, view ):
            # collect the form fields and hand them to the delegate
            x = {}
            x['title'] = self.title_entry.get_text()
            x['tl'] = self.tl_entry.get_text()
            x['tr'] = self.tr_entry.get_text()
            x['bl'] = self.bl_entry.get_text()
            x['bm'] = self.bm_entry.get_text()
            x['br'] = self.br_entry.get_text()
            self.delegate.generate_code( x )
        def uni_clicked( self, view ):
            # swap the views
            self.remove( self.vbox1 )
            self.add( self.unicode_view )

        #
        # unicode picker delegate
        def did_select_unicode( self, val ):
            # append the picked character to the entry field
            old = self.entry.get_text()
            old += val
            self.entry.set_text(old)
            # swap the views
            self.remove(self.unicode_view)
            self.add(self.vbox1)

        def __init__( self, delegate=Delegate() ):
            # NOTE(review): the default Delegate() is evaluated once at
            # class-definition time and shared by all instances that use
            # the default -- confirm this is intended (it is stateless).
            gtk.VBox.__init__( self, 25 )
            self.set_border_width( 25 )

            #
            # model
            self.delegate = delegate
            self.instr_s = PrototypeController.PrototypeView.instructions()
            self.instr_buf = gtk.TextBuffer()
            self.instr_buf.set_text(self.instr_s)

            #
            # ui
            self.vbox1 = gtk.VBox()

            #
            # vbox1
            # top: instructions
            # middle: entry
            # bottom: generate code+unicode picker
            #
            #
            # top
            self.instr = gtk.TextView( self.instr_buf )
            self.instr.set_editable( False )
            self.instr.set_wrap_mode( gtk.WRAP_WORD )
            #
            # middle
            hbox = gtk.HBox()
            label = gtk.Label("Desired Button Label: ")
            self.entry = gtk.Entry()
            hbox.pack_start( label, False )
            hbox.pack_start( self.entry )
            #
            # bottom
            self.gen1button = gtk.Button( "Generate Code" )
            self.unibutton = gtk.Button( "Unicode Picker" )
            # hook up button clicks
            self.gen1button.connect( "clicked", self.gen1_clicked )
            self.unibutton.connect( "clicked", self.uni_clicked )
            # add components to vbox
            self.vbox1.pack_start( self.instr, False )
            self.vbox1.pack_start( hbox )
            self.vbox1.pack_start( self.unibutton, False )
            self.vbox1.pack_start( self.gen1button, False )
            self.vbox1.show()
            self.add( self.vbox1 )

            #
            # vbox2
            # name field
            # button layout info field
            #
            self.vbox2 = gtk.VBox()
            hbox1 = gtk.HBox()
            # title
            title_label = gtk.Label( 'Controller Title: ' )
            self.title_entry = gtk.Entry()
            hbox1.pack_start(title_label,False)
            hbox1.pack_start(self.title_entry)
            # tl
            hbox2 = gtk.HBox()
            tl_label = gtk.Label( 'Top-Left Description: ' )
            self.tl_entry = gtk.Entry()
            hbox2.pack_start(tl_label,False)
            hbox2.pack_start(self.tl_entry)
            # tr
            hbox3 = gtk.HBox()
            tr_label = gtk.Label( 'Top-Right Description: ' )
            self.tr_entry = gtk.Entry()
            hbox3.pack_start(tr_label,False)
            hbox3.pack_start(self.tr_entry)
            # bl
            hbox4 = gtk.HBox()
            bl_label = gtk.Label( 'Bottom-Left Description: ' )
            self.bl_entry = gtk.Entry()
            hbox4.pack_start(bl_label,False)
            hbox4.pack_start(self.bl_entry)
            # bm
            hbox5 = gtk.HBox()
            bm_label = gtk.Label( 'Bottom-Middle Description: ' )
            self.bm_entry = gtk.Entry()
            hbox5.pack_start(bm_label,False)
            hbox5.pack_start(self.bm_entry)
            # br
            hbox6 = gtk.HBox()
            br_label = gtk.Label( 'Bottom-Right Description: ' )
            self.br_entry = gtk.Entry()
            hbox6.pack_start(br_label,False)
            hbox6.pack_start(self.br_entry)
            # buttons
            self.back = gtk.Button('Go Back')
            self.gen2button = gtk.Button('Generate Code')
            # hook em up
            self.back.connect( 'clicked', self.go2gen1 )
            self.gen2button.connect( 'clicked', self.gen2_clicked )
            self.vbox2.pack_start( hbox1 )
            self.vbox2.pack_start( hbox2 )
            self.vbox2.pack_start( hbox3 )
            self.vbox2.pack_start( hbox4 )
            self.vbox2.pack_start( hbox5 )
            self.vbox2.pack_start( hbox6 )
            self.vbox2.pack_start( self.back, False )
            self.vbox2.pack_start( self.gen2button, False )
            self.vbox2.show_all()

            #
            # embed the unicode picker in the view
            self.unicode_view = UnicodePickerView(self)
            self.unicode_view.show_all()

        def update( self ):
            pass
        def clear( self ):
            # empty the label text field
            self.entry.set_text("")

        @staticmethod
        def instructions():
            """Returns the help text shown at the top of the view."""
            y = 'Instructions:\n'
            y += '- insert text in the \'Desired Button Label\' field below and click a corner button to fill with that text.\n'
            y += '- clicking the \'Unicode Picker\' button will display a table of clickable Unicode characters\n'
            y += '- clicking the \'Generate Code\' button will display an interface to create a template Controller class '
            y += 'with the specified button labels'
            return y
def main():
    # launch the prototype controller in an 800x600 App window
    app = App(root=PrototypeController(),dim=(800,600))
    app.start()

if __name__ == "__main__":
    main()
| briansan/BreadInterface | py/BreadInterface/Prototype.py | Python | bsd-2-clause | 10,927 | [
"Brian"
] | 39edff96cc6a66b2a0f9edede47d37fe5399ce1c2b385ef0cd8a2af820f76d0a |
"""
Acceptance tests for Studio.
"""
from unittest import skip
from bok_choy.web_app_test import WebAppTest
from nose.plugins.attrib import attr
from ..pages.studio.asset_index import AssetIndexPage
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.checklists import ChecklistsPage
from ..pages.studio.course_import import ImportPage
from ..pages.studio.course_info import CourseUpdatesPage
from ..pages.studio.edit_tabs import PagesPage
from ..pages.studio.export import ExportPage
from ..pages.studio.howitworks import HowitworksPage
from ..pages.studio.index import DashboardPage
from ..pages.studio.login import LoginPage
from ..pages.studio.manage_users import CourseTeamPage
from ..pages.studio.overview import CourseOutlinePage
from ..pages.studio.settings import SettingsPage
from ..pages.studio.settings_advanced import AdvancedSettingsPage
from ..pages.studio.settings_graders import GradingPage
from ..pages.studio.signup import SignupPage
from ..pages.studio.textbooks import TextbooksPage
from ..fixtures.course import XBlockFixtureDesc
from acceptance.tests.base_studio_test import StudioCourseTest
@attr('shard_1')
class LoggedOutTest(WebAppTest):
    """
    Smoke test for pages in Studio that are visible when logged out.
    """

    def setUp(self):
        super(LoggedOutTest, self).setUp()
        # These pages must be reachable without an authenticated session.
        page_classes = (LoginPage, HowitworksPage, SignupPage)
        self.pages = [page_class(self.browser) for page_class in page_classes]

    def test_page_existence(self):
        """
        Make sure that all the pages are accessible.
        Rather than fire up the browser just to check each url,
        do them all sequentially in this testcase.
        """
        for page in self.pages:
            page.visit()
@attr('shard_1')
class LoggedInPagesTest(WebAppTest):
    """
    Tests that verify the pages in Studio that you can get to when logged
    in and do not have a course yet.
    """

    def setUp(self):
        super(LoggedInPagesTest, self).setUp()
        # Dashboard must be visitable after an auto-auth login as staff.
        self.dashboard_page = DashboardPage(self.browser)
        self.auth_page = AutoAuthPage(self.browser, staff=True)

    def test_dashboard_no_courses(self):
        """
        Make sure that you can get to the dashboard page without a course.
        """
        self.auth_page.visit()
        self.dashboard_page.visit()
@attr('shard_1')
class CoursePagesTest(StudioCourseTest):
    """
    Tests that verify the pages in Studio that you can get to when logged
    in and have a course.
    """

    COURSE_ID_SEPARATOR = "."

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CoursePagesTest, self).setUp()
        # One page object per Studio section, all parameterized with the
        # same org/number/run as the fixture course.
        self.pages = [
            clz(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'])
            for clz in [
                AssetIndexPage, ChecklistsPage, ImportPage, CourseUpdatesPage,
                PagesPage, ExportPage, CourseTeamPage, CourseOutlinePage, SettingsPage,
                AdvancedSettingsPage, GradingPage, TextbooksPage
            ]
        ]

    @skip('Intermittently failing with Page not found error for Assets. TE-418')
    def test_page_existence(self):
        """
        Make sure that all these pages are accessible once you have a course.
        Rather than fire up the browser just to check each url,
        do them all sequentially in this testcase.
        """
        # In the real workflow you will be at the dashboard page
        # after you log in. This test was intermittently failing on the
        # first (asset) page load with a 404.
        # Not exactly sure why, so adding in a visit
        # to the dashboard page here to replicate the usual flow.
        self.dashboard_page = DashboardPage(self.browser)
        self.dashboard_page.visit()

        # Verify that each page is available
        for page in self.pages:
            page.visit()
@attr('shard_1')
class CourseSectionTest(StudioCourseTest):
    """
    Tests that verify the sections name editable only inside headers in Studio Course Outline that you can get to
    when logged in and have a course.
    """

    COURSE_ID_SEPARATOR = "."

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CourseSectionTest, self).setUp()
        self.course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        self.course_outline_page.visit()

    def populate_course_fixture(self, course_fixture):
        """ Populates the course fixture with a test section """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section')
        )

    def test_section_name_editable_in_course_outline(self):
        """
        Check that section name is editable on course outline page.
        """
        section_name = self.course_outline_page.get_section_name()[0]
        self.assertEqual(section_name, "Test Section")
        self.course_outline_page.change_section_name("Test Section New")
        # Re-read after a page refresh to prove the rename was persisted.
        section_name = self.course_outline_page.get_section_name(page_refresh=True)[0]
        self.assertEqual(section_name, "Test Section New")

    def test_section_name_not_editable_inside_modal(self):
        """
        Check that section name is not editable inside "Section Release Date" modal on course outline page.
        """
        parent_css='div.modal-window'
        self.course_outline_page.click_release_date()
        # The modal shows the name quoted; clicking it must NOT open the
        # inline edit form.
        section_name = self.course_outline_page.get_section_name(parent_css)[0]
        self.assertEqual(section_name, '"Test Section"')
        self.course_outline_page.click_section_name(parent_css)
        section_name_edit_form = self.course_outline_page.section_name_edit_form_present(parent_css)
        self.assertFalse(section_name_edit_form)
@attr('shard_1')
class DiscussionPreviewTest(StudioCourseTest):
    """
    Tests that Inline Discussions are rendered with a custom preview in Studio
    """

    def setUp(self):
        super(DiscussionPreviewTest, self).setUp()
        # Navigate down the outline to the unit that holds the discussion
        # component created by populate_course_fixture.
        cop = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        cop.visit()
        self.unit = cop.section('Test Section').subsection('Test Subsection').toggle_expand().unit('Test Unit')
        self.unit.go_to()

    def populate_course_fixture(self, course_fixture):
        """
        Return a test course fixture containing a discussion component.
        """
        course_fixture.add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                        )
                    )
                )
            )
        )

    def test_is_preview(self):
        """
        Ensure that the preview version of the discussion is rendered.
        """
        # Studio should show the static preview, not the live discussion.
        self.assertTrue(self.unit.q(css=".discussion-preview").present)
        self.assertFalse(self.unit.q(css=".discussion-show").present)
| TangXT/edx-platform | common/test/acceptance/tests/test_studio_general.py | Python | agpl-3.0 | 7,371 | [
"VisIt"
] | 61de5f1f2d6f6502e3a462c133af446209e8b510ad2855a84e5f7dd317ee5be6 |
#!/usr/bin/env python
import sys
from distutils.core import setup
# this affects the names of all the directories we do stuff with
# Make the in-tree package importable so VERSION can be read without
# installing first.
sys.path.insert(0, './')

from layman.version import VERSION

setup(name = 'layman',
      version = VERSION,
      description = 'Python script for retrieving gentoo overlays',
      author = 'Brian Dolbec, Gunnar Wrobel (original author retired)',
      author_email = 'dolsen@gentoo',
      url = 'http://layman.sourceforge.net/, ' +\
      'http://git.overlays.gentoo.org/gitweb/?p=proj/layman.git;a=summary',
      packages = ['layman', 'layman.overlays'],
      scripts = ['bin/layman', 'bin/layman-updater'],
      license = 'GPL',
      )
| jmesmon/layman | setup.py | Python | gpl-2.0 | 735 | [
"Brian"
] | 41eb1c3b64ce50d2728ffae8dd16ce30fd7891bb14b3f01721eacdd30bba453e |
#This program will output permanent red blob counts
#Hematoxylin (Basic blue)= binds to nuclei
#CD3 (T cells) are DAB
import numpy as np
from numpy import linalg
from math import sqrt
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import separate_stains, rgb2grey
from skimage.exposure import rescale_intensity
from skimage.feature import blob_dog

# Color deconvolution matrix: row 0 = DAB, row 1 = permanent red.
# Row 2 is filled with the cross product of the first two rows so the
# matrix is invertible (needed to map RGB back into stain space).
rgb_from_drx = np.array([[0.270, 0.562, 0.781],
                         [0.0326, 0.873, 0.487],
                         [0.0, 0.0, 0.0]])
rgb_from_drx[2, :] = np.cross(rgb_from_drx[0, :], rgb_from_drx[1, :])
drx_from_rgb = linalg.inv(rgb_from_drx)

# Import picture
ihc_rgb = data.imread(r'TestImage.jpg')

# Stain space conversion; channel 1 of the result is the permanent red signal
ihc_drx = separate_stains(ihc_rgb, drx_from_rgb)

# Rescale the permanent red channel to [0, 1] and build an RGB-like stack
permred_rescale = rescale_intensity(ihc_drx[:, :, 1], out_range=(0, 1))
permred_array = np.dstack((np.zeros_like(permred_rescale), permred_rescale, permred_rescale))

# Blob detection using Difference of Gaussian (blob_dog)
image2d = rgb2grey(permred_array)
blobs_dog = blob_dog(image2d, min_sigma=1, max_sigma=40, threshold=.5, overlap=0.1)
# blob_dog returns sigma in column 2; blob radius is approximately sqrt(2)*sigma
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)

blobs = [blobs_dog]
colors = ['red']
# Fixed label: blob_dog implements *Difference* of Gaussian (the previous
# "Determinant of Gaussian" label described a different detector, blob_doh).
titles = ['Difference of Gaussian: IFN-$\gamma$']
sequence = zip(blobs, colors, titles)

for blobs, color, title in sequence:
    fig, ax = plt.subplots(1, 1)
    ax.set_title(title)
    ax.imshow(image2d, interpolation='nearest', cmap=plt.cm.gray)
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r, color=color, linewidth=1, fill=False)
        ax.add_patch(c)

num_blobs = len(blobs_dog)
print('----------')
print('Number of blobs detected: ' + str(num_blobs))

plt.show()
| griffincalme/MicroDeconvolution | ScriptsUsedInPaper/BlobCounterRedPaper.py | Python | apache-2.0 | 1,813 | [
"Gaussian"
] | 457f65f66cac9aa35713d7e0bb03393d1a2949d95fb194bd5c214f7c714ae5f4 |
#!/usr/bin/env python
"""Oink is a Python to Javascript compiler.
It differs from Pyjamas in that it tries to be as thin a layer as possible
between Python and Javascript.
"""
import ast
import optparse
import os
import sys
from textwrap import dedent
from contextlib import contextmanager
class Error(Exception):
    """Base Oink exception...boink!

    Carries the AST node that triggered the problem so the message can be
    located in the original source as ``lineno:col_offset: message``.
    """

    def __init__(self, node, message):
        # Initialize the Exception base too, so that exc.args, repr() and
        # pickling behave like a normal exception instead of being empty.
        Exception.__init__(self, message)
        self.node = node
        self.msg = message

    def __str__(self):
        return '%s:%s: %s' % (self.node.lineno, self.node.col_offset, self.msg)
class CompileError(Error):
    """A generic compilation error."""

class NotImplemented(Error):
    """A feature is not implemented.

    NOTE(review): this class shadows the builtin ``NotImplemented``
    singleton within this module -- confirm nothing here relies on the
    builtin meaning.
    """
class Scope(object):
    """One lexical scope (module, function or class).

    Wraps the AST node that introduced the scope and records which names
    have been assigned in it.  A scope compares equal to the AST *class*
    of its node, so callers can write ``self.scope == ast.ClassDef``.
    """

    def __init__(self, node, parent=None):
        self._ast_node = node
        self._enclosing = parent
        self._names = set()

    def add_symbol(self, name):
        """Record *name* as defined in this scope."""
        self._names.add(name)

    def __eq__(self, ast):
        return ast is type(self._ast_node)

    def __contains__(self, name):
        return name in self._names
class Compiler(ast.NodeVisitor):
    """Translates a Python AST into JavaScript source text.

    Each ``visit_*`` method returns the JavaScript string for its node;
    unsupported constructs raise the module's :class:`NotImplemented`.
    """

    # Dunder methods allowed on compiled classes.
    SUPPORTED_META_FUNCTIONS = ('__init__', '__iter__')
    # Python builtins mapped to their JavaScript / Oink-runtime equivalents.
    BUILTINS = {'None': 'null', 'True': 'true', 'False': 'false',
                'xrange': 'Oink.range', 'range': 'Oink.range',
                'sum': 'Oink.sum', 'str': 'Oink.str', 'repr': 'Oink.repr'}

    def __init__(self, *args, **kwargs):
        super(Compiler, self).__init__(*args, **kwargs)
        self.scopes = []   # stack of Scope objects, innermost last
        self.classes = {}

    def compile(self, source, filename=None):
        """Parse *source* and return the generated JavaScript text."""
        tree = ast.parse(source, filename or '<undefined>')
        return self.visit(tree)

    def generic_visit(self, node):
        # Anything without an explicit visit_* handler is unsupported.
        raise NotImplemented(node, ast.dump(node))

    def visit_Module(self, node):
        """Emit the whole module as ';'-terminated top-level statements."""
        with self.scoped(node):
            return ';\n\n'.join(self.visit_list(node.body)) + ';'
def visit_BinOp(self, node):
left = self.visit(node.left)
right = self.visit(node.right)
if isinstance(node.op, ast.Pow):
return 'Math.pow(%s, %s)' % (left, right)
return '%s %s %s' % (left, self.visit(node.op), right)
def visit_If(self, node):
    """Emit a JavaScript if (or if/else) block."""
    if not node.orelse:
        format = """
        if ({test}) {{
        {body}
        }}
        """
    else:
        format = """
        if ({test}) {{
        {body}
        }} else {{
        {orelse}
        }}
        """
    return self.format(format, test=self.visit(node.test),
                       body=self.body(node),
                       orelse=self.body(node, attr='orelse'))

def visit_Compare(self, node):
    """Emit a comparison expression.

    Only single comparisons (``a < b``) are supported; ``in``/``not in``
    are rewritten as calls to the Oink.in_ runtime helper, which is
    passed the container first.
    """
    if len(node.ops) != 1:
        raise NotImplemented(node, 'multi-expression comparison not supported')
    op_type = type(node.ops[0])
    lhs = self.visit(node.left)
    rhs = self.visit(node.comparators[0])
    if op_type is ast.In:
        return 'Oink.in_(%s, %s)' % (rhs, lhs)
    if op_type is ast.NotIn:
        return '!Oink.in_(%s, %s)' % (rhs, lhs)
    return '%s %s %s' % (lhs, self.visit(node.ops[0]), rhs)

def visit_Yield(self, node):
    # Generators cannot be expressed in a plain JavaScript function.
    raise NotImplemented(node, 'yield is not supported')

def visit_Import(self, node):
    # Only the runtime may be imported, and only via the from-form.
    raise NotImplemented(node, 'use "from oink import x"')

def visit_ImportFrom(self, node):
    """Accept only 'from oink import ...'; it compiles to nothing."""
    if node.module != 'oink':
        raise CompileError(node, 'can only import oink runtime')
    return None

def visit_FunctionDef(self, node):
    """Emit a function definition.

    At class scope the function becomes an object property with a
    'var self = this' prelude; otherwise a plain JS function.  Functions
    named 'new' are skipped, dunder methods outside
    SUPPORTED_META_FUNCTIONS are rejected, *args maps onto the JS
    'arguments' object, and a leading docstring becomes a /** */ comment.
    """
    if node.name == 'new':
        return ''
    if node.name not in self.SUPPORTED_META_FUNCTIONS and \
       node.name.startswith('__'):
        raise NotImplemented(node, 'meta function %r is not supported'
                             % node.name)
    if self.scope == ast.ClassDef:
        prelude = 'var self = this'
        function_format = """
        {name}: function ({args}) {{
        {body}
        }}
        """
    else:
        prelude = ''
        function_format = """
        {comment}function {name}({args}) {{
        {body}
        }}
        """
    with self.scoped(node):
        name = node.name
        args = self.visit(node.args)
        if node.args.vararg:
            prelude += ';\nvar %s = arguments' % node.args.vararg
        comment = ''
        first = node.body[0]
        if isinstance(first, ast.Expr) and \
           isinstance(first.value, ast.Str):
            comment = '/** %s */\n' % first.value.s.replace('*/', '*\/')
        body = self.body(node, prelude=prelude)
        return self.format(function_format, name=name, body=body,
                           args=args, comment=comment)

def visit_arguments(self, node):
    """Emit the argument list; the leading 'self' is dropped for methods."""
    if node.kwarg:
        raise NotImplemented(node, 'keyword arguments not supported')
    args = self.visit_list(node.args)
    if args and args[0] == 'self' and self.parent_scope == ast.ClassDef:
        args.pop(0)
    return ', '.join(args)

def visit_For(self, node):
    """Emit a for loop as an Oink.each(iterable, callback) call.

    NOTE(review): the body runs inside a JS callback, so any emitted
    'break'/'continue' will not behave like the Python statements --
    confirm this limitation is intended.
    """
    if isinstance(node.target, ast.Tuple):
        raise NotImplemented(
            node, 'loop iteration unpacking is not supported')
    iter = self.visit(node.iter)
    target = self.visit(node.target)
    body = self.body(node)
    return self.format("""
    Oink.each({iter}, function ({target}) {{
    {body}
    }});
    """, target=target, body=body, iter=iter)

def visit_While(self, node):
    """Emit a JavaScript while loop."""
    return self.format("""
    while ({test}) {{
    {body}
    }}
    """, test=self.visit(node.test), body=self.body(node))
def visit_List(self, node):
    """Emit a JavaScript array literal."""
    return '[%s]' % ', '.join(self.visit_list(node.elts))

def visit_Assign(self, node):
    """Emit an assignment, declaring new local names with 'var'.

    Supports a single target, or a tuple target unpacked element-wise.
    NOTE(review): tuple unpacking indexes node.value.elts, so it assumes
    the RHS is a literal tuple/list -- confirm other RHS forms cannot
    reach this point.
    """
    out = []
    if len(node.targets) != 1:
        raise NotImplemented(
            node, 'only single-level assignment is supported')
    # Assume unpacking assignment. Is this a valid assumption?
    if isinstance(node.targets[0], ast.Tuple):
        for i, target in enumerate(node.targets[0].elts):
            out.append('%s = %s' % (self.visit(target),
                                    self.visit(node.value.elts[i])))
    else:
        lhs = self.visit(node.targets[0])
        # XXX This...is hackish.
        if lhs.startswith('Oink.'):
            raise CompileError(node, 'can not override builtin %r' % lhs)
        # Names not yet in the current scope get a 'var' declaration.
        if '.' not in lhs and lhs not in self.scope:
            prefix = 'var '
            self.scope.add_symbol(lhs)
        else:
            prefix = ''
        out.append('%s%s = %s' % (prefix, lhs, self.visit(node.value)))
    return ';\n'.join(out)

def visit_AugAssign(self, node):
    """Emit an augmented assignment (+=, -=, ...)."""
    lhs = self.visit(node.target)
    if lhs.startswith('Oink.'):
        raise CompileError(node, 'can not override builtin %r' % lhs)
    if '.' not in lhs and lhs not in self.scope:
        prefix = 'var '
        self.scope.add_symbol(lhs)
    else:
        prefix = ''
    return '%s%s %s= %s' % (prefix, lhs, self.visit(node.op),
                            self.visit(node.value))

def visit_Tuple(self, node):
    """Tuples are emitted as JavaScript arrays."""
    values = ', '.join(self.visit_list(node.elts))
    return '[%s]' % values

def visit_Dict(self, node):
    """Emit a JavaScript object literal."""
    keys = self.visit_list(node.keys)
    values = self.visit_list(node.values)
    return '{%s}' % ', '.join('%s: %s' % i for i in zip(keys, values))

def visit_Attribute(self, node):
    return '%s.%s' % (self.visit(node.value), node.attr)

def visit_Num(self, node):
    return str(node.n)

def visit_Str(self, node):
    # NOTE(review): relies on Python repr() producing JS-compatible
    # quoting/escapes -- confirm for strings with unusual characters.
    return '%r' % node.s

def visit_Return(self, node):
    return 'return %s' % self.visit(node.value) if node.value else 'return'

def visit_BoolOp(self, node):
    """Join operands with the mapped '&&' / '||' operator."""
    op = ' %s ' % self.visit(node.op)
    return op.join(self.visit_list(node.values))

def visit_UnaryOp(self, node):
    return '%s(%s)' % (self.visit(node.op), self.visit(node.operand))

def visit_Pass(self, node):
    return ''

def visit_ClassDef(self, node):
    """Emit a class via the Oink.Class.extend() runtime mechanism.

    Only single inheritance is supported; 'object' (or no base) maps to
    Oink.Class.  A class nested inside another class is emitted as an
    object property rather than a 'var' declaration.
    """
    # FIXME(aat) This is not ideal
    if self.scope == ast.ClassDef:
        class_format = """
        {name}: {super}.extend({{
        {body}
        }})
        """
    else:
        class_format = """
        var {name} = {super}.extend({{
        {body}
        }})
        """
    with self.scoped(node):
        if len(node.bases) > 1:
            raise NotImplemented(node,
                                 'multi-inheritance is not supported')
        name = node.name
        super = node.bases[0].id if node.bases else 'Oink.Class'
        if super == 'object':
            super = 'Oink.Class'
        body = self.indent(',\n\n'.join(self.visit_list(node.body)))
        return self.format(class_format, name=name, super=super, body=body)
def visit_Print(self, node):
    """Map print to console.log, joining values with a space."""
    text = ' + " " + '.join(self.visit_list(node.values))
    return 'console.log(%s)' % (text or '""')

def visit_ListComp(self, node):
    """Emit a list comprehension as an Oink.listComprehension call.

    Only a single generator with at most one condition is supported.
    Generator expressions are compiled identically, i.e. materialized
    eagerly.
    """
    if len(node.generators) != 1:
        raise NotImplemented(node,
                             'only single-level generators are supported')
    if len(node.generators[0].ifs) > 1:
        raise NotImplemented(node,
                             'only single-level conditions are supported')
    target = self.visit(node.generators[0].target)
    iter = self.visit(node.generators[0].iter)
    expr = self.visit(node.elt)
    if node.generators[0].ifs:
        test = self.visit(node.generators[0].ifs[0])
        return self.format("""
        Oink.listComprehension({iter}, function ({target}) {{
        return {expr};
        }}, function ({target}) {{
        return {test};
        }})
        """, target=target, iter=iter, expr=expr, test=test)
    else:
        return self.format("""
        Oink.listComprehension({iter}, function ({target}) {{
        return {expr};
        }})
        """, target=target, iter=iter, expr=expr)

# Generator expressions share the (eager) list-comprehension lowering.
visit_GeneratorExp = visit_ListComp

def visit_Expr(self, node):
    # Bare strings (docstrings) compile to nothing.
    if isinstance(node.value, ast.Str):
        return ''
    return self.visit(node.value)

def visit_Call(self, node):
    """Emit a call expression.

    super(...) becomes self._super; *args becomes Function.apply
    (FIXME below); a call to 'new' is the escape hatch for explicit
    constructor invocation.
    """
    if node.args and node.starargs:
        raise NotImplemented(node, 'simultaneous use of *args and normal '
                             'args not supported')
    if node.kwargs:
        raise NotImplemented(node, 'keywords args are not supported')
    name = self.visit(node.func)
    if name == 'super':
        return 'self._super'
    if node.starargs:
        starargs = self.visit(node.starargs)
        # FIXME(aat) "self" is being hardcoded here...this is not good, but
        # the parent name is only available from the caller of visit_Call()
        return '%s.apply(self, %s)' % (name, starargs)
    args = ', '.join(self.visit_list(node.args))
    # XXX Hack to explicitly call constructor if type inference fails
    if name == 'new':
        return '%s %s' % (name, args)
    return '%s(%s)' % (name, args)

def visit_Subscript(self, node):
    return '%s[%s]' % (self.visit(node.value), self.visit(node.slice))

def visit_Index(self, node):
    return self.visit(node.value)

def visit_Slice(self, node):
    raise NotImplemented(node, 'Slicing is not implemented')

def visit_Name(self, node):
    # Builtins are remapped (None -> null, range -> Oink.range, ...).
    return self.BUILTINS.get(node.id, node.id)

def visit_Lambda(self, node):
    """Emit a lambda as an anonymous single-expression function."""
    args = self.visit(node.args)
    body = self.visit(node.body)
    return self.format("""
    function ({args}) {{ return {body}; }}
    """, args=args, body=body)
# Internal methods

def format(self, text, **args):
    """Dedent a template, substitute {placeholders} and strip whitespace."""
    return dedent(text).format(**args).strip()

def visit_list(self, l):
    """Visit each node in *l*, dropping empty results (Python 2 file:
    filter() returns a list here)."""
    return filter(None, map(self.visit, l))

def indent(self, text):
    """Indent every line of *text* by two spaces."""
    return '\n'.join('  ' + line for line in text.splitlines())

def body(self, node, newline='\n', prelude=None, attr='body'):
    """Emit a statement list (node.<attr>) as an indented, ';'-terminated
    block, optionally preceded by *prelude*."""
    body = self.visit_list(getattr(node, attr))
    # Lines that end with a block comment must not get a semicolon.
    body = [line if line.endswith('*/') else line + ';'
            for line in body]
    body = newline.join(body)
    text = self.indent((prelude + ';' + newline if prelude else '')
                       + body)
    return text

@contextmanager
def scoped(self, scope):
    """Push a Scope for *scope* (an AST node) for the duration of the
    with-block."""
    if self.scopes:
        parent = self.scopes[-1]
    else:
        parent = None
    self.scopes.append(Scope(scope, parent=parent))
    yield
    self.scopes.pop()

@property
def scope(self):
    """Innermost scope, or None outside any scope."""
    return self.scopes[-1] if self.scopes else None

@property
def parent_scope(self):
    """Scope enclosing the innermost one, or None."""
    return self.scopes[-2] if len(self.scopes) > 1 else None
# hardcoded these Nodes to return string argument when visited.
def strmap(show):
    """Hardcode a particular ast Node to string representation 'show'."""
    return lambda self, node=None: show

visit_Add = strmap('+')
visit_Break = strmap('break')
visit_Continue = strmap('continue')
visit_Sub = strmap('-')
visit_Mult = strmap('*')
visit_Div = strmap('/')
# NOTE(review): JS '%' is remainder, which differs from Python '%' for
# negative operands -- confirm acceptable.
visit_Mod = strmap('%')
visit_LShift = strmap('<<')
visit_RShift = strmap('>>')
# NOTE(review): '//' opens a *line comment* in JavaScript; floor division
# emitted through this mapping produces broken output.
visit_FloorDiv = strmap('//')
visit_Not = strmap('!')
visit_And = strmap('&&')
visit_Or = strmap('||')
visit_Eq = strmap('==')
visit_NotEq = strmap('!=')
visit_Lt = strmap('<')
visit_LtE = strmap('<=')
visit_Gt = strmap('>')
visit_GtE = strmap('>=')
visit_Is = strmap('===')
visit_IsNot = strmap('!==')
def run_script(js):
    """Execute generated JavaScript under PyV8, wiring console.log to stdout.

    (Python 2 module: note the print statement below.)
    """
    import PyV8

    class Console(PyV8.JSClass):
        def log(self, text):
            print text

    class Global(PyV8.JSClass):
        console = Console()

    context = PyV8.JSContext(Global())
    context.enter()
    context.eval(js)
if __name__ == '__main__':
    # Command-line entry point: compile a Python file and either print the
    # JavaScript or run it under PyV8 together with the Oink runtime.
    parser = optparse.OptionParser(usage='%prog [<flags>] <file>')
    parser.add_option('-I', '--include', default='.',
                      help='path to runtime [%default]')
    parser.add_option('--runtime', default='oink.js',
                      help='files to include in runtime [%default]')
    parser.add_option('--run', action='store_true',
                      help='attempt to run the compiled script')
    options, args = parser.parse_args()
    if not args:
        parser.error('python source file required')
    filename = args[0]
    source = open(filename).read()
    compiler = Compiler()
    try:
        script = compiler.compile(source, filename)
    except Error, e:
        # Python 2 except syntax; errors carry lineno:col via Error.__str__.
        print >> sys.stderr, 'error: %s:%s' % (filename, e)
        sys.exit(1)
    if options.run:
        # Concatenate the runtime file(s) before the compiled script.
        runtime = [os.path.join(options.include, p)
                   for p in options.runtime.split(',')]
        runtime = '\n'.join(open(f).read() for f in runtime)
        run_script(runtime + '\n' + script)
    else:
        print script
| alecthomas/oink | oink.py | Python | mit | 15,506 | [
"VisIt"
] | 956f3e5b7f944a5e34b78ee6c34e83751c00417af0f271d945f466047352a372 |
import moose
def deleteSolver(modelRoot):
    """Remove every stoich (and its attached ksolve) under *modelRoot*.

    Walks all chemical compartments and deletes their solver objects so a
    different solver can be installed afterwards.
    """
    for compartment in moose.wildcardFind(modelRoot+'/##[ISA=ChemCompt]'):
        stoich_path = compartment.path+'/stoich'
        if not moose.exists(stoich_path):
            continue
        stoich = moose.element(stoich_path)
        ksolve = stoich.ksolve
        moose.delete(stoich)
        # The ksolve may survive its stoich; delete it too if still present.
        if moose.exists(ksolve.path):
            moose.delete(ksolve)
def addSolver(modelRoot,solver):
    """Install the numerical solver named by *solver* (a UI label) on the
    model under *modelRoot*.

    Returns True when a solver was (re)installed, False when nothing
    needed to change or no chemical compartment exists.
    """
    compt = moose.wildcardFind(modelRoot+'/##[ISA=ChemCompt]')
    if compt:
        comptinfo = moose.Annotator(moose.element(compt[0]).path+'/info')
        previousSolver = comptinfo.solver
        currentSolver = previousSolver
        # Map the UI label to the internal solver name.
        if solver == "Gillespie":
            currentSolver = "gssa"
        elif solver == "Runge Kutta":
            currentSolver = "gsl"
        elif solver == "Exponential Euler":
            currentSolver = "ee"
        if previousSolver != currentSolver:
            # if previousSolver != currentSolver
            comptinfo.solver = currentSolver
            if (moose.exists(compt[0].path+'/stoich')):
                # "A: and stoich exists then delete the stoich add solver"
                deleteSolver(modelRoot)
                setCompartmentSolver(modelRoot,currentSolver)
                return True
            else:
                # " B: stoich doesn't exists then addSolver, this is when object is deleted which delete's the solver "
                # " and solver is also changed, then add addsolver "
                setCompartmentSolver(modelRoot,currentSolver)
                return True
        else:
            if moose.exists(compt[0].path+'/stoich'):
                # " stoich exist, doing nothing"
                return False
            else:
                # "but stoich doesn't exist,this is when object is deleted which deletes the solver
                # " but solver are not changed, then also call addSolver"
                setCompartmentSolver(modelRoot,currentSolver)
                return True
    return False
def positionCompt( compt, side, shiftUp ):
    """Reposition a compartment vertically for stacked display.

    When *shiftUp* is true, translate the compartment upward by *side*;
    otherwise collapse it downward so its top sits at (old y0 - old y1)
    and its bottom at 0.
    """
    old_y0, old_y1 = compt.y0, compt.y1
    if shiftUp:
        compt.y0 = old_y0 + side
        compt.y1 = old_y1 + side
    else:
        compt.y0 = old_y0 - old_y1
        compt.y1 = 0
def setCompartmentSolver(modelRoot,solver):
compts = moose.wildcardFind(modelRoot+'/##[ISA=ChemCompt]')
if ( len(compts) > 3 ):
print "Warning: setSolverOnCompt Cannot handle " ,
len(compts) , " chemical compartments\n"
return;
if ( len(compts) == 2 ):
positionCompt( compts[0], compts[1].dy, True )
if ( len(compts) == 3 ):
positionCompt( compts[0], compts[1].dy, True )
positionCompt( compts[2], compts[1].dy, False )
for compt in compts:
if ( solver == 'gsl' ) or (solver == 'Runge Kutta'):
ksolve = moose.Ksolve( compt.path+'/ksolve' )
if ( solver == 'gssa' ) or (solver == 'Gillespie'):
ksolve = moose.Gsolve( compt.path+'/gsolve' )
if ( solver != 'ee' ):
stoich = moose.Stoich( compt.path+'/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
if moose.exists(compt.path):
stoich.path = compt.path+"/##"
stoichList = moose.wildcardFind(modelRoot+'/##[ISA=Stoich]')
if len( stoichList ) == 2:
stoichList[1].buildXreacs( stoichList[0] )
if len( stoichList ) == 3:
stoichList[1].buildXreacs (stoichList [0])
stoichList[1].buildXreacs (stoichList [2])
for i in stoichList:
i.filterXreacs() | BhallaLab/moose | moose-gui/plugins/setsolver.py | Python | gpl-3.0 | 3,007 | [
"MOOSE"
] | e1d9ef1fa061fd7671582d12764a5ed81c55db41e796526d6730eb59473cfe56 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os import path
import urllib
import ocw.data_source.local as local
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# One Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "knmi_temporal_std"

# Download necessary NetCDF file if needed
if path.exists(FILE_1):
    pass
else:
    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)

""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
print "Loading %s into an OCW Dataset Object" % (FILE_1,)
# 'tasmax' is variable name of values
knmi_dataset = local.load_file(FILE_1, "tasmax")
print "KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,)

# Accessing latitudes and longitudes of the netCDF file
lats = knmi_dataset.lats
lons = knmi_dataset.lons

""" Step 2: Build a Metric to use for Evaluation - Temporal STD for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print "Setting up a Temporal STD metric to use for evaluation"
std = metrics.TemporalStdDev()

""" Step 3: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists. Evaluation will iterate over the lists
print "Making the Evaluation definition"
# Temporal STD Metric gets one target dataset then reference dataset should be None
std_evaluation = evaluation.Evaluation(None, [knmi_dataset], [std])
print "Executing the Evaluation using the object's run() method"
std_evaluation.run()

""" Step 4: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print "Accessing the Results of the Evaluation run"
results = std_evaluation.unary_results[0][0]
print "The results are of type: %s" % type(results)

# From the temporal std output I want to make a Contour Map of the region
print "Generating a contour map using ocw.plotter.draw_contour_map()"
fname = OUTPUT_PLOT
gridshape = (4, 5)  # 20 years of plots arranged as 4 rows x 5 columns
plot_title = "TASMAX Temporal Standard Deviation (1989 - 2008)"
sub_titles = range(1989, 2009, 1)

plotter.draw_contour_map(results, lats, lons, fname,
                         gridshape=gridshape, ptitle=plot_title,
                         subtitles=sub_titles)
| MJJoyce/climate | examples/simple_model_tstd.py | Python | apache-2.0 | 3,639 | [
"NetCDF"
] | 99643b3f289172b177934c2b26a6a60f6d4d32f1f18fba6a2c7e0f6316ccd8b7 |
# $HeadURL $
""" PublisherHandler
This service has been built to provide the RSS web views with all the information
they need. NO OTHER COMPONENT THAN Web controllers should make use of it.
"""
from datetime import datetime
from types import NoneType
# DIRAC
from DIRAC import gLogger, S_OK, gConfig, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers, Utils
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ),'ResourceManagementClient')
__RCSID__ = '$Id: PublisherHandler.py 65921 2013-05-14 13:05:43Z ubeda $'
# RSS Clients
rsClient = None
rmClient = None
def initializePublisherHandler( _serviceInfo ):
  """
  Handler initialization in the usual horrible way.
  """
  # The RSS clients are module-level singletons shared by every request
  # served by this handler process.
  global rsClient
  rsClient = ResourceStatusClient()

  global rmClient
  rmClient = ResourceManagementClient()

  return S_OK()
class PublisherHandler( RequestHandler ):
  """
  RPCServer used to deliver data to the web portal.

  Exposes read-only views over the ResourceStatus and ResourceManagement
  databases; no other component than the web controllers should use it.
  """

  def __init__( self, *args, **kwargs ):
    """
    Constructor
    """
    super( PublisherHandler, self ).__init__( *args, **kwargs )
# ResourceStatusClient .......................................................
  types_getSites = []
  def export_getSites( self ):
    """
    Returns list of all sites considered by RSS
    :return: S_OK( [ sites ] ) | S_ERROR
    """
    gLogger.info( 'getSites' )
    return CSHelpers.getSites()

  types_getSitesResources = [ ( str, list, NoneType ) ]
  def export_getSitesResources( self, siteNames ):
    """
    Returns dictionary with SEs and CEs for the given site(s). If siteNames is
    None, all sites are taken into account.
    :return: S_OK( { site1 : { ces : [ ces ], 'ses' : [ ses ] },... } ) | S_ERROR
    """
    gLogger.info( 'getSitesResources' )

    # No selection means: every site known to the CS.
    if siteNames is None:
      siteNames = CSHelpers.getSites()
      if not siteNames[ 'OK' ]:
        return siteNames
      siteNames = siteNames[ 'Value' ]

    if isinstance( siteNames, str ):
      siteNames = [ siteNames ]

    sitesRes = {}

    for siteName in siteNames:

      res = {}
      res[ 'ces' ] = CSHelpers.getSiteComputingElements( siteName )
      # Convert StorageElements to host names
      ses = CSHelpers.getSiteStorageElements( siteName )
      sesHosts = CSHelpers.getStorageElementsHosts( ses )
      if not sesHosts[ 'OK' ]:
        return sesHosts
      # Remove duplicates
      res[ 'ses' ] = list( set( sesHosts[ 'Value' ] ) )

      sitesRes[ siteName ] = res

    return S_OK( sitesRes )
  types_getElementStatuses = [ str, ( str, list, NoneType ), ( str, list, NoneType ),
                               ( str, list, NoneType ), ( str, list, NoneType ),
                               ( str, list, NoneType ) ]
  def export_getElementStatuses( self, element, name, elementType, statusType, status, tokenOwner ):
    """
    Returns element statuses from the ResourceStatusDB
    """
    gLogger.info( 'getElementStatuses' )
    return rsClient.selectStatusElement( element, 'Status', name = name, elementType = elementType,
                                         statusType = statusType, status = status,
                                         tokenOwner = tokenOwner )

  types_getElementHistory = [ str, ( str, list, NoneType ), ( str, list, NoneType ),
                              ( str, list, NoneType ) ]
  def export_getElementHistory( self, element, name, elementType, statusType ):
    """
    Returns element history from ResourceStatusDB
    """
    gLogger.info( 'getElementHistory' )
    # Only these columns are needed by the portal history view.
    columns = [ 'Status', 'DateEffective', 'Reason' ]
    return rsClient.selectStatusElement( element, 'History', name = name, elementType = elementType,
                                         statusType = statusType,
                                         meta = { 'columns' : columns } )

  types_getElementPolicies = [ str, ( str, list, NoneType ), ( str, list, NoneType ) ]
  def export_getElementPolicies( self, element, name, statusType ):
    """
    Returns policies for a given element
    """
    gLogger.info( 'getElementPolicies' )
    columns = [ 'Status', 'PolicyName', 'DateEffective', 'LastCheckTime', 'Reason' ]
    return rmClient.selectPolicyResult( element = element, name = name,
                                        statusType = statusType,
                                        meta = { 'columns' : columns } )
  types_getTree = [ str, str, str ]
  def export_getTree( self, element, elementType, elementName ):
    """
    Given an element, finds its parent site and returns all descendants of that
    site: the site statuses plus the per-statusType statuses of its CEs and SEs.
    """
    gLogger.info( 'getTree' )

    site = self.getSite( element, elementType, elementName )
    if not site:
      return S_ERROR( 'No site' )

    siteStatus = rsClient.selectStatusElement( 'Site', 'Status', name = site,
                                               meta = { 'columns' : [ 'StatusType', 'Status' ] } )
    if not siteStatus[ 'OK' ]:
      return siteStatus

    tree = { site : { 'statusTypes' : dict( siteStatus[ 'Value' ] ) } }

    ces = CSHelpers.getSiteComputingElements( site )
    cesStatus = rsClient.selectStatusElement( 'Resource', 'Status', name = ces,
                                              meta = { 'columns' : [ 'Name', 'StatusType', 'Status'] } )
    if not cesStatus[ 'OK' ]:
      return cesStatus

    ses = CSHelpers.getSiteStorageElements( site )
    sesStatus = rsClient.selectStatusElement( 'Resource', 'Status', name = ses,
                                              meta = { 'columns' : [ 'Name', 'StatusType', 'Status'] } )
    if not sesStatus[ 'OK' ]:
      return sesStatus

    def feedTree( elementsList ):
      # Fold (name, statusType, status) rows into {name: {statusType: status}}.
      elements = {}
      for elementTuple in elementsList[ 'Value' ]:
        name, statusType, status = elementTuple

        if not name in elements:
          elements[ name ] = {}
        elements[ name ][ statusType ] = status

      return elements

    tree[ site ][ 'ces' ] = feedTree( cesStatus )
    tree[ site ][ 'ses' ] = feedTree( sesStatus )

    return S_OK( tree )
#-----------------------------------------------------------------------------
def getSite( self, element, elementType, elementName ):
  """
  Scan the Resources/Sites section of the CS for the site that lists the
  given element under the matching element type.  Returns '' when no site
  matches; returns the failed S_ERROR dict if the CS lookup itself fails.
  """
  # the CS stores storage elements under the short key 'SE'
  lookupType = 'SE' if elementType == 'StorageElement' else elementType

  domainNames = gConfig.getSections( 'Resources/Sites' )
  if not domainNames[ 'OK' ]:
    return domainNames

  for domainName in domainNames[ 'Value' ]:

    sites = gConfig.getSections( 'Resources/Sites/%s' % domainName )
    if not sites[ 'OK' ]:
      continue

    for site in sites[ 'Value' ]:
      elements = gConfig.getValue( 'Resources/Sites/%s/%s/%s' % ( domainName, site, lookupType ), '' )
      if elementName in elements:
        return site

  return ''
# ResourceManagementClient ...................................................
types_getDowntimes = [ str, str, str ]
def export_getDowntimes( self, element, elementType, name ):
  """
  Return the cached downtimes for the given element.  Storage element
  names are first resolved to their host name.
  """
  if elementType == 'StorageElement':
    name = CSHelpers.getSEHost( name )
    if not name['OK']:
      return name
    name = name['Value']

  downtimeColumns = [ 'StartDate', 'EndDate', 'Link', 'Description', 'Severity' ]

  return rmClient.selectDowntimeCache( element = element, name = name,
                                       meta = { 'columns' : downtimeColumns } )
types_getCachedDowntimes = [ ( str, NoneType, list ), ( str, NoneType, list ), ( str, NoneType, list ),
                             ( str, NoneType, list ), datetime, datetime ]
def export_getCachedDowntimes( self, element, elementType, name, severity, startDate, endDate ):
  """
  Return the cached downtimes for the given element and severity whose
  [ StartDate, EndDate ] interval overlaps [ startDate, endDate ].
  Storage element names are first resolved to their host name.
  """
  if elementType == 'StorageElement':
    name = CSHelpers.getSEHost( name )
    if not name['OK']:
      return name
    name = name['Value']

  if startDate > endDate:
    return S_ERROR( 'startDate > endDate' )

  res = rmClient.selectDowntimeCache( element = element, name = name, severity = severity,
                                      meta = { 'columns' : [ 'Element', 'Name', 'StartDate',
                                                             'EndDate', 'Severity',
                                                             'Description', 'Link' ] } )
  if not res[ 'OK' ]:
    return res

  # keep only the rows overlapping the requested window
  overlapping = []
  for row in res[ 'Value' ]:
    rowDict = dict( zip( res[ 'Columns' ], row ) )
    if rowDict[ 'StartDate' ] < endDate and rowDict[ 'EndDate' ] > startDate:
      overlapping.append( row )

  result = S_OK( overlapping )
  result[ 'Columns' ] = res[ 'Columns' ]

  return result
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| vmendez/DIRAC | ResourceStatusSystem/Service/PublisherHandler.py | Python | gpl-3.0 | 9,452 | [
"DIRAC"
] | 08b9656084c40ab22b9297d12960074dd030db8912934eba05f016d0ebbf8c79 |
# -*- coding: utf-8 -*-
"""
====================
ZenTools
====================
* Tools for reading and writing files for Zen and processing software
* Tools for copying data from SD cards
* Tools for copying schedules to SD cards
Created on Tue Jun 11 10:53:23 2013
@author: jpeacock-pr
"""
#==============================================================================
import numpy as np
import scipy.signal as sps
import time
import datetime
import os
import struct
import string
import win32api
import shutil
from collections import Counter
import mtpy.utils.filehandling as mtfh
import mtpy.processing.birrp as birrp
import mtpy.utils.configfile as mtcfg
import mtpy.utils.exceptions as mtex
import mtpy.utils.configfile as mtcf
import matplotlib.pyplot as plt
import mtpy.imaging.plotspectrogram as plotspectrogram
import mtpy.imaging.plotnresponses as plotnresponses
import mtpy.imaging.plotresponse as plotresponse
from cStringIO import StringIO
import sys
import mtpy.processing.filter as mtfilt
try:
import mtpy.utils.mseed as mtmseed
except ImportError:
print ('Can not convert data to mini seed format need to install Obspy, '
'good luck! You can find information on Obspy at '
'https://github.com/obspy/obspy/wiki')
#==============================================================================
# time.strptime/strftime patterns used throughout this module:
# zen schedule strings are 'YYYY-MM-DD,HH:MM:SS' (comma separated);
# the second form uses a space separator instead
datetime_fmt = '%Y-%m-%d,%H:%M:%S'
datetime_sec = '%Y-%m-%d %H:%M:%S'
#==============================================================================
#
#==============================================================================
class Z3D_Header(object):
    """
    Container for the header block (first 512 bytes) of a Z3D file.

    Every ``key = value`` line found in the header is attached to the
    instance as a lower-case attribute, e.g. ``ad_rate``, ``lat``,
    ``channel`` ('A/D Rate' becomes ``ad_rate``).

    Arguments
    ------------
        **fn** : string
                 full path to Z3D file

        **fid** : file object
                  ie. open(Z3D_file, 'rb')

    Example
    --------------
        >>> import mtpy.usgs.zen as zen
        >>> z3d_fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
        >>> header_obj = zen.Z3D_Header()
        >>> header_obj.read_header(z3d_fn)
    """

    def __init__(self, fn=None, fid=None, **kwargs):
        self.fn = fn
        self.fid = fid

        # raw 512-byte header string, filled by read_header
        self.header_str = None
        self._header_len = 512

        # metadata entries default to None until read from a file
        self.ad_gain = None
        self.ad_rate = None
        self.alt = None
        self.attenchannelsmask = None
        self.box_number = None
        self.box_serial = None
        self.channel = None
        self.channelserial = None
        self.duty = None
        self.fpga_buildnum = None
        self.gpsweek = 1740
        self.lat = None
        self.logterminal = None
        self.long = None
        self.main_hex_buildnum = None
        self.numsats = None
        self.period = None
        self.tx_duty = None
        self.tx_freq = None
        self.version = None

        # allow any attribute to be preset through keywords
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def read_header(self, fn=None, fid=None):
        """
        Read the first _header_len bytes of the Z3D file (or of the given
        open file object) and set every 'key = value' line found there as
        an attribute.  Does nothing beyond printing a warning when neither
        a file name nor a file object is available.
        """
        if fn is not None:
            self.fn = fn
        if fid is not None:
            self.fid = fid

        if self.fn is None and self.fid is None:
            # BUGFIX: bail out early instead of crashing below on
            # header_str still being None
            print('no file to read')
            return

        if self.fn is None:
            # only an open file object was given -- rewind to the header
            self.fid.seek(0)
            self.header_str = self.fid.read(self._header_len)
        else:
            if self.fid is None:
                # open() instead of the removed builtin file()
                self.fid = open(self.fn, 'rb')
                self.header_str = self.fid.read(self._header_len)
            else:
                self.fid.seek(0)
                self.header_str = self.fid.read(self._header_len)

        for h_str in self.header_str.split('\n'):
            if h_str.find('=') > 0:
                h_list = h_str.split('=')
                # normalize keys like 'A/D Rate' -> 'ad_rate'
                h_key = h_list[0].strip().lower()
                h_key = h_key.replace(' ', '_').replace('/', '').replace('.', '_')
                h_value = self.convert_value(h_key, h_list[1].strip())
                setattr(self, h_key, h_value)

    def convert_value(self, key_string, value_string):
        """
        Convert a header value string to float where possible; non-numeric
        strings are returned unchanged.  Latitude and longitude are stored
        in radians in the file and are converted to degrees here.
        """
        try:
            return_value = float(value_string)
        except ValueError:
            return_value = value_string

        if key_string.lower() in ('lat', 'long'):
            return_value = np.rad2deg(float(value_string))

        return return_value
#==============================================================================
# meta data
#==============================================================================
class Z3D_Schedule_metadata(object):
    """
    Container for the schedule block (second 512 bytes) of a Z3D file.

    Every 'Schedule.Key=value' line is attached to the instance under the
    capitalized key of the Z3D file, e.g. ``Date``, ``Time``, ``SR``
    ('S/R' loses its slash and becomes ``SR``).

    Arguments
    ------------
        **fn** : string
                 full path to Z3D file

        **fid** : file object
                  ie. open(Z3D_file, 'rb')

    Example
    --------------
        >>> import mtpy.usgs.zen as zen
        >>> z3d_fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
        >>> schedule_obj = zen.Z3D_Schedule_metadata()
        >>> schedule_obj.read_schedule_metadata(z3d_fn)
    """

    def __init__(self, fn=None, fid=None, **kwargs):
        self.fn = fn
        # BUGFIX: the fid argument used to be silently discarded
        # (self.fid = None); store it like Z3D_Header does
        self.fid = fid

        # raw 512-byte schedule string, filled by read_schedule_metadata
        self.meta_string = None
        self._schedule_metadata_len = 512
        self._header_len = 512

        # schedule entries default to None until read from a file
        self.AutoGain = None
        self.Comment = None
        self.Date = None
        self.Duty = None
        self.FFTStacks = None
        self.Filename = None
        self.Gain = None
        self.Log = None
        self.NewFile = None
        self.Period = None
        self.RadioOn = None
        self.SR = None
        self.SamplesPerAcq = None
        self.Sleep = None
        self.Sync = None
        self.Time = None

        # allow any attribute to be preset through keywords
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def read_schedule_metadata(self, fn=None, fid=None):
        """
        Read the 512-byte schedule block that follows the header and set
        every 'Schedule.Key=value' entry found there as an attribute.
        Does nothing beyond printing a warning when neither a file name
        nor a file object is available.
        """
        if fn is not None:
            self.fn = fn
        if fid is not None:
            self.fid = fid

        if self.fn is None and self.fid is None:
            # BUGFIX: bail out early instead of crashing below on
            # meta_string still being None
            print('no file to read')
            return

        if self.fn is None:
            # only an open file object was given -- seek past the header
            self.fid.seek(self._header_len)
            self.meta_string = self.fid.read(self._schedule_metadata_len)
        else:
            if self.fid is None:
                # open() instead of the removed builtin file()
                self.fid = open(self.fn, 'rb')
            self.fid.seek(self._header_len)
            self.meta_string = self.fid.read(self._schedule_metadata_len)

        for m_str in self.meta_string.split('\n'):
            if m_str.find('=') > 0:
                m_list = m_str.split('=')
                # keep the part after 'Schedule.' and drop any '/'
                m_key = m_list[0].split('.')[1].strip()
                m_key = m_key.replace('/', '')
                m_value = m_list[1].strip()
                setattr(self, m_key, m_value)
#==============================================================================
# Meta data class
#==============================================================================
class Z3D_Metadata(object):
    """
    Container for the 512-byte 'metadata record' blocks of a Z3D file,
    which follow the header and schedule blocks.

    Each '|'-separated 'key=value' entry is attached to the instance in
    lower case with '.' replaced by '_' (e.g. ``ch_cmp``); board and coil
    calibration tables are collected into ``board_cal`` and ``coil_cal``
    and converted to numpy record arrays when present.  ``m_tell`` records
    the file position where the metadata records end so callers can read
    the time series from there.

    Arguments
    ------------
        **fn** : string
                 full path to Z3D file

        **fid** : file object
                  ie. open(Z3D_file, 'rb')

    Example
    --------------
        >>> import mtpy.usgs.zen as zen
        >>> z3d_fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
        >>> meta_obj = zen.Z3D_Metadata()
        >>> meta_obj.read_metadata(z3d_fn)
    """

    def __init__(self, fn=None, fid=None, **kwargs):
        self.fn = fn
        # BUGFIX: the fid argument used to be silently discarded
        # (self.fid = None); store it like Z3D_Header does
        self.fid = fid

        self.find_metadata = True
        self.board_cal = None
        self.coil_cal = None
        self._metadata_length = 512
        self._header_length = 512
        self._schedule_metadata_len = 512
        # file position just after the last metadata record
        self.m_tell = 0

        # metadata entries default to None until read from a file
        self.cal_ant = None
        self.cal_board = None
        self.cal_ver = None
        self.ch_azimuth = None
        self.ch_cmp = None
        self.ch_length = None
        self.ch_number = None
        self.ch_xyz1 = None
        self.ch_xyz2 = None
        self.gdp_operator = None
        self.gdp_progver = None
        self.job_by = None
        self.job_for = None
        self.job_name = None
        self.job_number = None
        self.rx_aspace = None
        self.rx_sspace = None
        self.rx_xazimuth = None
        self.rx_xyz0 = None
        self.rx_yazimuth = None
        self.survey_type = None
        self.unit_length = None

        # allow any attribute to be preset through keywords
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def read_metadata(self, fn=None, fid=None):
        """
        Read consecutive 512-byte blocks after the header and schedule
        blocks as long as they contain 'metadata record'; attach the
        'key=value' entries as attributes and collect calibration tables.
        Does nothing beyond printing a warning when neither a file name
        nor a file object is available.
        """
        if fn is not None:
            self.fn = fn
        if fid is not None:
            self.fid = fid

        if self.fn is None and self.fid is None:
            # BUGFIX: bail out early instead of crashing on a missing
            # file object below
            print('no file to read')
            return

        if self.fn is None:
            # only an open file object was given -- seek past the
            # header and schedule blocks
            self.fid.seek(self._header_length+self._schedule_metadata_len)
        else:
            if self.fid is None:
                # open() instead of the removed builtin file()
                self.fid = open(self.fn, 'rb')
            self.fid.seek(self._header_length+self._schedule_metadata_len)

        # read in calibration and meta data
        self.find_metadata = True
        self.board_cal = []
        self.coil_cal = []
        self.count = 0
        while self.find_metadata == True:
            test_str = self.fid.read(self._metadata_length)
            if test_str.lower().find('metadata record') > 0:
                self.count += 1
                cal_find = False
                # the payload is the second line of the record
                test_str = test_str.strip().split('\n')[1]
                if test_str.count('|') > 1:
                    # '|'-separated 'key=value' pairs
                    for t_str in test_str.split('|'):
                        if t_str.find('=') == -1:
                            pass
                        else:
                            t_list = t_str.split('=')
                            t_key = t_list[0].strip().replace('.', '_')
                            t_value = t_list[1].strip()
                            setattr(self, t_key.lower(), t_value)
                elif test_str.lower().find('cal.brd') >= 0:
                    # board calibration table, comma separated, with
                    # ':'-separated numeric tuples
                    t_list = test_str.split(',')
                    t_key = t_list[0].strip().replace('.', '_')
                    setattr(self, t_key.lower(), t_list[1])
                    for t_str in t_list[2:]:
                        t_str = t_str.replace('\x00', '').replace('|', '')
                        self.board_cal.append([float(tt.strip())
                                               for tt in t_str.strip().split(':')])
                # some times the coil calibration does not start on its own
                # line so need to parse the line up and I'm not sure what the
                # calibration version is for so I have named it odd
                elif test_str.lower().find('cal.ant') >= 0:
                    # check to see if the coil calibration exists
                    cal_find = True
                    if test_str.find('|') > 0:
                        odd_str = test_str.split('|')[0]
                        odd_list = odd_str.split(',')
                        odd_key = odd_list[0].strip().replace('.', '_')
                        setattr(self, odd_key.lower(), odd_list[1].strip())

                        # this may be for a specific case so should test this
                        test_str = test_str.split('|')[1]
                        test_list = test_str.split(',')
                        if test_list[0].lower().find('cal.ant') >= 0:
                            m_list = test_list[0].split('=')
                            m_key = m_list[0].strip().replace('.', '_')
                            setattr(self, m_key.lower(), m_list[1].strip())
                        else:
                            for t_str in test_list[1:]:
                                self.coil_cal.append([float(tt.strip())
                                                      for tt in t_str.split(':')])
                elif cal_find:
                    # continuation lines of a coil calibration table
                    t_list = test_str.split(',')
                    for t_str in t_list:
                        if t_str.find('\x00') >= 0:
                            pass
                        else:
                            self.coil_cal.append([float(tt.strip())
                                                  for tt in t_str.strip().split(':')])
            else:
                self.find_metadata = False
                # need to go back to where the meta data was found so
                # we don't skip a gps time stamp
                self.m_tell = self.fid.tell()-self._metadata_length

        # make coil calibration and board calibration structured arrays
        if len(self.coil_cal) > 0:
            self.coil_cal = np.core.records.fromrecords(self.coil_cal,
                                                        names='frequency, amplitude, phase',
                                                        formats='f8, f8, f8')
        if len(self.board_cal) > 0:
            self.board_cal = np.core.records.fromrecords(self.board_cal,
                                                         names='frequency, rate, amplitude, phase',
                                                         formats='f8, f8, f8, f8')
#==============================================================================
#
#==============================================================================
class Zen3D(object):
"""
Deals with the raw Z3D files output by zen.
Arguments
-----------
**fn** : string
full path to .Z3D file to be read in
======================== ================================ =================
Attributes Description Default Value
======================== ================================ =================
_block_len length of data block to read in 65536
as chunks faster reading
_counts_to_mv_conversion conversion factor to convert 9.53674316406e-10
counts to mv
_gps_bytes number of bytes for a gps stamp 16
_gps_dtype data type for a gps stamp see below
_gps_epoch starting date of GPS time
format is a tuple (1980, 1, 6, 0,
0, 0, -1, -1, 0)
_gps_f0 first gps flag in raw binary
_gps_f1 second gps flag in raw binary
_gps_flag_0 first gps flag as an int32 2147483647
_gps_flag_1 second gps flag as an int32 -2147483648
_gps_stamp_length bit length of gps stamp 64
_leap_seconds leap seconds, difference 16
between UTC time and GPS
time. GPS time is ahead
by this much
_week_len week length in seconds 604800
df sampling rate of the data 256
fn Z3D file name None
gps_flag full gps flag _gps_f0+_gps_f1
gps_stamps np.ndarray of gps stamps None
header Z3D_Header object Z3D_Header
metadata Z3D_Metadata Z3D_Metadata
schedule Z3D_Schedule_metadata Z3D_Schedule
time_series np.ndarra(len_data) None
units units in which the data is in counts
zen_schedule time when zen was set to None
run
======================== ================================ =================
* gps_dtype is formated as np.dtype([('flag0', np.int32),
('flag1', np.int32),
('time', np.int32),
('lat', np.float64),
('lon', np.float64),
('num_sat', np.int32),
('gps_sens', np.int32),
('temperature', np.float32),
('voltage', np.float32),
('num_fpga', np.int32),
('num_adc', np.int32),
('pps_count', np.int32),
('dac_tune', np.int32),
('block_len', np.int32)])
============================ ==============================================
Methods Description
============================ ==============================================
apply_addaptive_notch_filter apply a notch filter to the data, usually
to remove 60 Hz noise and harmonics
get_gps_time converts the gps counts to relative epoch
seconds according to gps week.
get_UTC_date_time converts gps seconds into the actual date and
time in UTC. Note this is different than GPS
time which is how the zen is scheduled, so
the time will be off by the current amount of
leap seconds.
plot_timeseries make a generic plot of the time series
plot_spectra plot a the spectra in loglog scales.
plot_spectrogram plot the spectragram of the data.
read_z3d read 3D file making sure all the time stamps
are correctly spaced. Returned time series
starts at the first stamp which has the
correct amount of data points between it and
the next time stamp. Note there are usually
a few seconds at the end and maybe beginning
that aren't correct because the internal
computer is busy switchin sampling rate.
read_header read just the header data from the Z3D file
read_metadata read just the metadata from the Z3D file
read_schedule read just the schedule info from the Z3D file
validate_gps_time make sure each time stamp is 1 second apart
validate_time_blocks make sure that the size of each time block
between stamps is equal to the sampling rate
write_ascii_mt_file write an mtpy ascii file of the data
============================ ==============================================
Example
----------------
>>> import mtpy.usgs.zen as zen
>>> zt = zen.Zen3D(r"/home/mt/mt00/mt00_20150522_080000_256_EX.Z3D")
>>> zt.read_z3d()
>>> ------- Reading /home/mt/mt00/mt00_20150522_080000_256_EX.Z3D -----
--> Reading data took: 0.322 seconds
Scheduled time was 2015-05-22,08:00:16 (GPS time)
1st good stamp was 2015-05-22,08:00:18 (GPS time)
difference of 2.00 seconds
found 6418 GPS time stamps
found 1642752 data points
>>> zt.plot_time_series()
"""
def __init__(self, fn=None, **kwargs):
    """
    Set up the sub-readers for the header, schedule and metadata blocks
    and initialize the constants needed to parse the binary time series.

    Arguments
    -----------
        **fn** : string
                 full path to .Z3D file to be read in
    """
    self.fn = fn

    # readers for the three 512-byte blocks at the start of the file
    self.header = Z3D_Header(fn)
    self.schedule = Z3D_Schedule_metadata(fn)
    self.metadata = Z3D_Metadata(fn)

    # a gps stamp is 64 bytes -> 16 int32 words (matches _gps_dtype below)
    self._gps_stamp_length = kwargs.pop('stamp_len', 64)
    self._gps_bytes = self._gps_stamp_length/4

    self.gps_stamps = None

    # the two int32 flags that mark the start of every gps stamp
    self._gps_flag_0 = np.int32(2147483647)
    self._gps_flag_1 = np.int32(-2147483648)
    # tobytes() replaces tostring(), which was deprecated and then removed
    # in NumPy 2.0; the output is identical
    self._gps_f0 = self._gps_flag_0.tobytes()
    self._gps_f1 = self._gps_flag_1.tobytes()
    self.gps_flag = self._gps_f0+self._gps_f1

    # binary layout of one gps stamp
    self._gps_dtype = np.dtype([('flag0', np.int32),
                                ('flag1', np.int32),
                                ('time', np.int32),
                                ('lat', np.float64),
                                ('lon', np.float64),
                                ('num_sat', np.int32),
                                ('gps_sens', np.int32),
                                ('temperature', np.float32),
                                ('voltage', np.float32),
                                ('num_fpga', np.int32),
                                ('num_adc', np.int32),
                                ('pps_count', np.int32),
                                ('dac_tune', np.int32),
                                ('block_len', np.int32)])

    # seconds in a gps week
    self._week_len = 604800
    # GPS epoch (1980-01-06) as a struct_time-style tuple
    self._gps_epoch = (1980, 1, 6, 0, 0, 0, -1, -1, 0)
    # GPS time is ahead of UTC by this many leap seconds
    self._leap_seconds = 16
    # chunk size used when reading the raw file
    self._block_len = 2**16
    self.zen_schedule = None
    self._counts_to_mv_conversion = 9.5367431640625e-10
    self.units = 'counts'
    self.df = None
    self.time_series = None
#======================================
def read_header(self, fn=None, fid=None):
    """
    Populate ``self.header`` from the Z3D file and set the sampling
    rate ``self.df`` from the header's ad_rate entry.

    Arguments
    ---------------
        **fn** : string
                 full path to Z3D file to read

        **fid** : file object
                  if the file is open give the file id object

    Outputs:
    ----------
        * fills the Zen3D.header object's attributes

    Example
    ------------
        >>> import mtpy.usgs.zen as zen
        >>> z3d_obj = zen.Zen3D()
        >>> z3d_obj.read_header(r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D")
    """
    if fn is not None:
        self.fn = fn

    self.header.read_header(fn=self.fn, fid=fid)
    # the sampling rate lives in the header as ad_rate
    self.df = self.header.ad_rate
#======================================
def read_schedule(self, fn=None, fid=None):
    """
    Populate ``self.schedule`` from the Z3D file and remember the
    scheduled start as ``self.zen_schedule`` in 'date,time' form.

    Arguments
    ---------------
        **fn** : string
                 full path to Z3D file to read

        **fid** : file object
                  if the file is open give the file id object

    Outputs:
    ----------
        * fills the Zen3D.schedule object's attributes

    Example
    ------------
        >>> import mtpy.usgs.zen as zen
        >>> z3d_obj = zen.Zen3D()
        >>> z3d_obj.read_schedule(r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D")
    """
    if fn is not None:
        self.fn = fn

    self.schedule.read_schedule_metadata(fn=self.fn, fid=fid)

    # set the zen schedule time
    self.zen_schedule = '%s,%s' % (self.schedule.Date, self.schedule.Time)
#======================================
def read_metadata(self, fn=None, fid=None):
    """
    Populate ``self.metadata`` from the Z3D file.

    Arguments
    ---------------
        **fn** : string
                 full path to Z3D file to read

        **fid** : file object
                  if the file is open give the file id object

    Outputs:
    ----------
        * fills the Zen3D.metadata object's attributes

    Example
    ------------
        >>> import mtpy.usgs.zen as zen
        >>> z3d_obj = zen.Zen3D()
        >>> z3d_obj.read_metadata(r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D")
    """
    if fn is not None:
        self.fn = fn

    self.metadata.read_metadata(fn=self.fn, fid=fid)
#======================================
def read_z3d(self):
    """
    Read in a Z3D file and populate attributes accordingly.

    Reads the entire file as np.int32 (after header/schedule/metadata),
    then extracts the gps stamps and converts them with _gps_dtype.
    Checks that gps time stamps are 1 second apart and incrementing, and
    that the number of data points between stamps matches the sampling
    rate.  Converts gps_stamps['time'] to seconds relative to
    header.gpsweek.

    The first couple of gps stamps are skipped because there is something
    wrong with the data there due to some type of buffering.  The first
    kept GPS time is when the time series starts, so
    gps_stamps[0]['block_len'] = 0 -- 'block_len' measures backwards from
    the corresponding time index and nothing precedes the first stamp.
    """
    print '------- Reading {0} ---------'.format(self.fn)
    st = time.time()

    # get the file size to get an estimate of how many data points there are
    file_size = os.path.getsize(self.fn)

    # using the with statement works in Python versions 2.7 or higher
    # the added benefit of the with statement is that it will close the
    # file object upon reading completion.
    with open(self.fn, 'rb') as file_id:
        self.read_header(fid=file_id)
        self.read_schedule(fid=file_id)
        self.read_metadata(fid=file_id)

        # move the read position to where the metadata ends
        file_id.seek(self.metadata.m_tell)

        # initialize a data array filled with zeros; everything goes into
        # this array then we parse later.  512*(2+count) accounts for the
        # header, schedule and metadata records already consumed.
        data = np.zeros((file_size-512*(2+self.metadata.count))/4,
                        dtype=np.int32)

        # read in _block_len chunks until the data count exceeds the
        # estimated array size
        data_count = 0
        while data_count+self.metadata.m_tell/4 < data.size:
            test_str = np.fromstring(file_id.read(self._block_len),
                                     dtype=np.int32)
            data[data_count:data_count+len(test_str)] = test_str
            data_count += test_str.size

    # find the gps stamps by their first flag word
    gps_stamp_find = np.where(data==self._gps_flag_0)[0]

    # skip the first stamps and trim data
    # NOTE(review): index 3 skips the first THREE stamps although the
    # docstring says two -- confirm against field data
    data = data[gps_stamp_find[3]:]
    gps_stamp_find = np.where(data==self._gps_flag_0)[0]

    self.gps_stamps = np.zeros(len(gps_stamp_find), dtype=self._gps_dtype)

    for ii, gps_find in enumerate(gps_stamp_find):
        # only accept a stamp when the second flag word follows the first
        if data[gps_find+1] == self._gps_flag_1:
            gps_str = struct.pack('<'+'i'*self._gps_bytes,
                                  *data[gps_find:gps_find+self._gps_bytes])
            self.gps_stamps[ii] = np.fromstring(gps_str,
                                                dtype=self._gps_dtype)
            if ii > 0:
                # points recorded since the previous stamp
                self.gps_stamps[ii]['block_len'] = gps_find-\
                                                   gps_stamp_find[ii-1]-self._gps_bytes
            elif ii == 0:
                self.gps_stamps[ii]['block_len'] = 0
            # zero the stamp words so np.nonzero drops them below
            data[gps_find:gps_find+self._gps_bytes] = 0

    # trim the data after taking out the gps stamps
    self.time_series = data[np.nonzero(data)]

    # time it
    et = time.time()
    print '--> Reading data took: {0:.3f} seconds'.format(et-st)

    self.validate_time_blocks()
    self.convert_gps_time()
    self.check_start_time()

    print ' found {0} GPS time stamps'.format(self.gps_stamps.shape[0])
    print ' found {0} data points'.format(self.time_series.size)
#=======================================
def read_z3d_slow(self):
    """
    Read a Z3D file output by Zen by string-searching for every gps stamp.

    This is a slow method, but if you want to be sure the data is read in
    correctly it is the most conservative way.  It will be deprecated as
    soon as the read_z3d method is field tested.
    """
    print '------- Reading {0} ---------'.format(self.fn)
    st = time.time()

    file_id = file(self.fn, 'rb')

    self.read_header(fid=file_id)
    self.read_schedule(fid=file_id)
    self.read_metadata(fid=file_id)

    # jump to the first byte after the metadata records
    file_id.seek(self.metadata.m_tell)

    # read the remainder of the file as one binary string
    f_str = file_id.read()

    # find number of gps stamps from the data string
    num_gps_stamps = int(f_str.count(self.gps_flag))

    #num_data = int(num_gps_stamps*header.ad_rate)
    #df = int(header.ad_rate)

    # make an empty array for gps time stamps in the appropriate format
    self.gps_stamps = np.zeros(num_gps_stamps, dtype=self._gps_dtype)

    #--> find first time stamp
    find_0 = f_str.find(self.gps_flag)

    gps_stamp_str = f_str[find_0:find_0+self._gps_stamp_length]
    gps_stamp = np.fromstring(gps_stamp_str, dtype=self._gps_dtype)

    # nothing precedes the first stamp, so its block length is zero
    gps_stamp['block_len'] = 0
    self.gps_stamps[0] = gps_stamp

    # make the input string start from the end of the first GPS stamp
    f_str = f_str[find_0+self._gps_stamp_length:]

    # make an empty list to append each time block; this is relatively quick
    # though might get caught up in large data files.  But this is the safest
    # way to avoid misplacing the time series data into a predefined array,
    # and it also insures sequential time series data
    ts_string_list = []

    # this loop starts from the end of the first time stamp and searches for
    # the next one.  It converts each stamp and appends the binary time
    # series string between stamps to ts_string_list to keep the series
    # sequential.
    stamp_count = 1
    while stamp_count < num_gps_stamps:
        # get the index of the next gps stamp
        stamp_index = f_str.find(self.gps_flag)

        # get the gps string and convert it according to the format
        gps_stamp_str = f_str[stamp_index:stamp_index+self._gps_stamp_length]
        gps_stamp = np.fromstring(gps_stamp_str, dtype=self._gps_dtype)

        # get the length between time stamps (in int32 words) and put it
        # in the empty slot of the gps time stamp
        gps_stamp['block_len'] = len(f_str[0:stamp_index])/4
        self.gps_stamps[stamp_count] = gps_stamp

        # append the time series binary string to the list; this is faster
        # than string concatenation
        ts_string_list.append(f_str[0:stamp_index])

        # remove the found time stamp and time series data and restart the
        # binary string from the end of the found time stamp
        f_str = f_str[stamp_index+self._gps_stamp_length:]
        stamp_count += 1

    # convert the time series data into int32 format
    self.time_series = np.fromstring(''.join(ts_string_list),
                                     dtype=np.int32)

    # time it
    et = time.time()
    print '--> Reading data took: {0:.3f} seconds'.format(et-st)

    self.trim_data()
    self.validate_time_blocks()
    self.convert_gps_time()
    self.check_start_time()

    print ' found {0} GPS time stamps'.format(self.gps_stamps.shape[0])
    print ' found {0} data points'.format(self.time_series.size)
#=================================================
def trim_data(self):
    """
    Drop the first few seconds of data, which are unreliable because of
    the SD-card buffer, so the series starts at the first kept stamp.

    This method will be deprecated after field testing.
    """
    # points recorded before the stamp we keep first; block_len[0] is
    # always 0, so this sums the first two real blocks
    n_skip = self.gps_stamps['block_len'][:3].sum()

    self.gps_stamps = self.gps_stamps[2:]
    # nothing precedes the first remaining stamp any more
    self.gps_stamps[0]['block_len'] = 0

    self.time_series = self.time_series[n_skip:]
#=================================================
def check_start_time(self):
    """
    Compare the scheduled start time against the first good gps stamp,
    reset the schedule date/time to that stamp so downstream code agrees,
    and report the difference.
    """
    # first stamp expressed in GPS time (UTC plus the leap seconds)
    zen_start = self.get_UTC_date_time(self.header.gpsweek,
                                       self.gps_stamps['time'][0] + self._leap_seconds)

    # set the zen schedule to the first gps stamp
    self.zen_schedule = zen_start
    zen_time = time.strptime(zen_start, datetime_fmt)

    # the originally scheduled start time
    s_start = '{0},{1}'.format(self.schedule.Date, self.schedule.Time)
    schedule_time = time.strptime(s_start, datetime_fmt)

    # overwrite the schedule metadata so there is no confusion about when
    # the time series actually starts
    self.schedule.Date = zen_start.split(',')[0]
    self.schedule.Time = zen_start.split(',')[1]

    time_diff = time.mktime(zen_time) - time.mktime(schedule_time)

    print(' Scheduled time was {0} (GPS time)'.format(s_start))
    print(' 1st good stamp was {0} (GPS time)'.format(zen_start))
    print(' difference of {0:.2f} seconds'.format(time_diff))
#==================================================
def validate_gps_time(self):
"""
make sure each time stamp is 1 second apart
"""
t_diff = np.zeros_like(self.gps_stamps['time'])
for ii in range(len(t_diff)-1):
t_diff[ii] = self.gps_stamps['time'][ii]-self.gps_stamps['time'][ii+1]
bad_times = np.where(abs(t_diff) > 0.5)[0]
if len(bad_times) > 0:
print '-'*50
for bb in bad_times:
print 'bad time at index {0} > 0.5 s'.format(bb)
#==================================================
def validate_time_blocks(self):
"""
validate gps time stamps and make sure each block is the proper length
"""
# first check if the gps stamp blocks are of the correct length
bad_blocks = np.where(self.gps_stamps['block_len'][1:] !=
self.header.ad_rate)[0]
if len(bad_blocks) > 0:
if bad_blocks.max() < 5:
ts_skip = self.gps_stamps['block_len'][0:bad_blocks[-1]+1].sum()
self.gps_stamps = self.gps_stamps[bad_blocks[-1]:]
self.time_series = self.time_series[ts_skip:]
print '{0}Skipped the first {1} seconds'.format(' '*4,
bad_blocks[-1])
print '{0}Skipped first {1} poins in time series'.format(' '*4,
ts_skip)
#==================================================
def convert_gps_time(self):
"""
convert gps time integer to relative seconds from gps_week
"""
# need to convert gps_time to type float from int
dt = self._gps_dtype.descr
dt[2] = ('time', np.float32)
self.gps_stamps = self.gps_stamps.astype(np.dtype(dt))
# convert to seconds
# these are seconds relative to the gps week
time_conv = self.gps_stamps['time'].copy()/1024.
time_ms = (time_conv-np.floor(time_conv))*1.024
time_conv = np.floor(time_conv)+time_ms
self.gps_stamps['time'][:] = time_conv
#==================================================
def convert_counts(self):
"""
convert the time series from counts to millivolts
"""
return self.time_series*self._counts_to_mv_conversion
#==================================================
def convert_mV(self):
"""
convert millivolts to counts assuming no other scaling has been applied
"""
return self.time_series/self._counts_to_mv_conversion
#==================================================
def get_gps_time(self, gps_int, gps_week=0):
"""
from the gps integer get the time in seconds.
Arguments
-------------
**gps_int**: int
integer from the gps time stamp line
**gps_week**: int
relative gps week, if the number of seconds is
larger than a week then a week is subtracted from
the seconds and computed from gps_week += 1
Returns
---------
**gps_time**: int
number of seconds from the beginning of the relative
gps week.
"""
gps_seconds = gps_int/1024.
gps_ms = (gps_seconds-np.floor(gps_int/1024.))*(1.024)
cc = 0
if gps_seconds > self._week_len:
gps_week += 1
cc = gps_week*self._week_len
gps_seconds -= self._week_len
gps_time = np.floor(gps_seconds)+gps_ms+cc
return gps_time, gps_week
#==================================================
def get_UTC_date_time(self, gps_week, gps_time):
"""
get the actual date and time of measurement as UTC.
Note that GPS time is curently off by 16 seconds from actual UTC time.
Arguments
-------------
**gps_week**: int
integer value of gps_week that the data was collected
**gps_time**: int
number of seconds from beginning of gps_week
**leap_seconds**: int
number of seconds gps time is off from UTC time.
It is currently off by 16 seconds.
Returns
------------
**date_time**: YYYY-MM-DD,HH:MM:SS
formated date and time from gps seconds.
"""
# need to check to see if the time in seconds is more than a gps week
# if it is add 1 to the gps week and reduce the gps time by a week
if gps_time > self._week_len:
gps_week += 1
gps_time -= self._week_len
mseconds = gps_time % 1
#make epoch in seconds, mktime computes local time, need to subtract
#time zone to get UTC
epoch_seconds = time.mktime(self._gps_epoch)-time.timezone
#gps time is 14 seconds ahead of GTC time, but I think that the zen
#receiver accounts for that so we will leave leap seconds to be 0
gps_seconds = epoch_seconds+(gps_week*self._week_len)+gps_time-\
self._leap_seconds
#compute date and time from seconds
(year, month, day, hour, minutes, seconds, dow, jday, dls) = \
time.gmtime(gps_seconds)
date_time = time.strftime(datetime_fmt ,(year,
month,
day,
hour,
minutes,
int(seconds+mseconds),
0, 0, 0))
return date_time
#==================================================
def apply_adaptive_notch_filter(self, notch_dict):
"""
apply notch filter to the data that finds the peak around each
frequency.
see mtpy.processing.filter.adaptive_notch_filter
Arguments
-------------
**notch_dict** : dictionary
dictionary of filter parameters.
if an empty dictionary is input the filter looks
for 60 Hz and harmonics to filter out.
"""
try:
self.time_series
except AttributeError:
self.read_3d()
notches = notch_dict.pop('notches', list(np.arange(60, 2048, 60)))
notchradius = notch_dict.pop('notchradius', 0.5)
freqrad = notch_dict.pop('freqrad', 0.5)
rp = notch_dict.pop('rp', 0.1)
kwargs = {'df':self.df, 'notches':notches, 'notchradius':notchradius,
'freqrad':freqrad, 'rp':rp}
self.time_series, self.filt_list = \
mtfilt.adaptive_notch_filter(self.time_series, **kwargs)
#==================================================
def write_ascii_mt_file(self, save_fn=None, save_station='mb', fmt='%.8e',
ex=100., ey=100., notch_dict=None):
"""
write an mtpy time series data file
Arguments
-------------
**save_fn** : full path to save file, if None file is saved as:
station_YYYYMMDD_hhmmss_df.component
ex. mt01_20130206_120000_256.HX
**save_station** : string
prefix string to add to station number as only
integers can be input into metadata of the zen
boxes. ex. mb001
**fmt** : string format
format of data numbers output to ascii file.
*default* is '%.8e' for 8 significan figures in
scientific notation.
**ex** : float
scaling parameter of ex line, the length of the dipole
be careful to not scale when creating an .edi file
*default* is 1
**ey** : float
scaling parameter of ey line, the length of the dipole
be careful to not scale when creating an .edi file
*default* is 1
**notch_dict** : dictionary
dictionary of notch filter parameters
*default* is None
if an empty dictionary is input then the
filter looks for 60 Hz and harmonics to filter
Output
-------------
**fn_mt_ascii** : full path to saved file
Example
------------
>>> import mtpy.usgs.zen as zen
>>> fn = r"/home/mt/mt01/mt01_20150522_080000_256_EX.Z3D"
>>> z3d_obj = zen.Zen3D(fn)
>>> asc_fn = z3d.write_ascii_mt_file(save_station='mt',
notch_dict={})
"""
if self.time_series is None:
self.read_3d()
time_series = self.convert_counts()
if save_fn is None:
svfn_directory = os.path.join(os.path.dirname(self.fn), 'TS')
if not os.path.exists(svfn_directory):
os.mkdir(svfn_directory)
svfn_date = ''.join(self.schedule.Date.split('-'))
svfn_time = ''.join(self.schedule.Time.split(':'))
svfn_station = save_station+self.metadata.rx_xyz0.split(':')[0]
save_fn = os.path.join(svfn_directory,
'{0}_{1}_{2}_{3}.{4}'.format(svfn_station,
svfn_date,
svfn_time,
int(self.df),
self.metadata.ch_cmp.upper()))
#calibrate electric channels
if self.metadata.ch_cmp == 'ex':
time_series /= ex
elif self.metadata.ch_cmp == 'ey':
time_series /= ey
#apply notch filter if desired
if notch_dict is not None:
self.apply_adaptive_notch_filter(notch_dict)
print 'Filtered notches: '
for nfilt in self.filt_list:
if type(nfilt[0]) != str:
print '{0}{1:.2f} Hz'.format(' '*4, nfilt[0])
header_tuple = (save_station+self.metadata.rx_xyz0.split(':')[0],
self.metadata.ch_cmp.lower(),
self.df,
time.mktime(time.strptime(self.zen_schedule,
datetime_fmt )),
time_series.shape[0],
'mV',
'{0:.3f}'.format(np.median(np.rad2deg(self.gps_stamps['lat']))),
'{0:.3f}'.format(np.median(np.rad2deg(self.gps_stamps['lon']))),
0.0,
time_series)
self.fn_mt_ascii = mtfh.write_ts_file_from_tuple(save_fn, header_tuple,
fmt=fmt)
print 'Wrote mtpy timeseries file to {0}'.format(self.fn_mt_ascii)
#==================================================
def plot_time_series(self, fig_num=1):
"""
plots the time series
"""
time_series = self.convert_counts()
fig = plt.figure(fig_num )
ax = fig.add_subplot(1,1,1)
ax.plot(time_series)
#ax.xaxis.set_minor_locator(MultipleLocator(self.df))
#ax.xaxis.set_major_locator(MultipleLocator(self.df*15))
#ax.xaxis.set_ticklabels([self.date_time[ii]
# for ii in range(0,len(self.date_time), 15)])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Amplitude (mV)')
plt.show()
self.convert_mV()
return fig, ax
#==================================================
def plot_spectrogram(self, time_window=2**8, time_step=2**6, s_window=11,
frequency_window=1, n_freq_bins=2**9, sigma_L=None):
"""
plot the spectrogram of the data using the S-method
Arguments:
-----------
**s_window** : int (should be odd)
length of window for S-method calculation, higher numbers tend
toward WVD
**time_window** : int (should be power of 2)
window length for each time step
*default* is 2**8 = 256
**frequency_window** : int (should be odd)
length of smoothing window along frequency plane
**time_step** : int
number of sample between short windows
*default* is 2**7 = 128
**sigmaL** : float
full width half max of gaussian window for L
**n_freq_bins** : int
(should be power of 2 and equal or larger than nh)
number of frequency bins
Returns:
---------
**ptf** : mtpy.imaging.plotspectrogram.PlotTF object
"""
time_series = self.convert_counts()
kwargs = {'nh':time_window, 'tstep':time_step, 'L':s_window,
'ng':frequency_window, 'df':self.df, 'nfbins':n_freq_bins,
'sigmaL': sigma_L}
ptf = plotspectrogram.PlotTF(time_series, **kwargs)
return ptf
#==================================================
def plot_spectra(self, fig_num=2):
"""
plot the spectra of time series
"""
if self.time_series is None:
self.read_3d()
time_series = self.convert_counts()
spect = np.fft.fft(mtfilt.zero_pad(time_series))
plot_freq = np.fft.fftfreq(spect.shape[0], 1./self.df)
fig = plt.figure(fig_num, [4,4], dpi=200)
ax = fig.add_subplot(1,1,1)
ax.loglog(plot_freq, abs(spect)**2, lw=.5)
ax.grid(which='both', lw=.25)
ax.set_xlabel('Frequency (Hz)')
#ax.set_xlim(1./plot_freq.max(), 1./plot_freq.min())
ax.set_ylabel('Amplitude')
plt.show()
return fig, ax
#==============================================================================
# for older Z3d files
#==============================================================================
class Zen3D_old(object):
"""
Deal with the raw data output from the Zen box as Z3D files, which is in
a formatted binary file with GPS time stamps every second. Each time
stamp contains information about position, GPS lock, temperature and a
few other things. The stamp is 32 bytes long.
The read_file makes sure that there is the correct number of points
between time stamps, and the time stamp is correct. The data read from
the file begins on the first coherent time stamp and ends on the last one.
Usually the first coherent time stamp is a few seconds after scheduled
start time.
Arguments:
------------
**fn**: string
full path to .Z3D file to be manipulated
====================== ====================================================
Methods Description
====================== ====================================================
read_3d read 3D file making sure all the time stamps are
correctly spaced. Returned time series starts at
the first stamp which has the correct amount of data
points between it and the next time stamp. Note
there are usually a few seconds at the end and maybe
beginning that aren't correct because the internal
                           computer is busy switching sampling rate.
get_gps_stamp_location locates the gps stamp location
get_gps_time converts the gps counts to relative epoch seconds
according to gps week.
get_date_time converts gps seconds into the actual date and time
in UTC. Note this is different than GPS time which
is how the zen is scheduled, so the time will be
off by the current amount of leap seconds.
====================== ====================================================
=================== =======================================================
Attributes Description
=================== =======================================================
ch_adcard_sn serial number of a/d card in channel
ch_cmp MT component of channel
ch_length distance between electrodes for channel,
doesn't matter for magnetic channels
ch_number number of channel
date_time np.ndarray of date,time of gps stamps
df sampling rate
fn full path to file name read in
gps_diff difference between gps time stamps
gps_list list of gps stamps
gps_time np.ndarray of gps times from time stamps
gps_week gps week
header_dict dictionary of header parameters
log_lines list of information to write into a log file later
meta_dict dictionary of meta data parameters
rx_stn name of station
start_time starting time and date of first time stamp with
correct number of samples
temperature np.ndarray of temperature measurements at each time
stamp
time_series np.ndarray of time series data in counts
tx_id name of transmitter if used
units [ 'counts' | 'mv' ] units of time series *default* is
counts. Plotting will convert to mV.
verbose [ True | False ] for printing information to the
interpreter
_data_type np.dtype to convert binary formatted string
_data_types list of data types in binary formatted string
_gps_epoch gps_epoch in time.gmtime format.
_gps_stamp string of gps_stamp
_header_len length of header string in bytes. (512)
_meta_len length of meta data in bytes. (512)
_raw_data data in binary format
_seconds_diff difference in seconds from start time to look for
gps stamp. *default* is 5
_stamp_len length of gps time stamp in bits
_stamp_list list of gps time stamp variables
_week_len length of a gps week in seconds
=================== =======================================================
"""
def __init__(self, fn=None, **kwargs):
self.fn = fn
self._header_len = kwargs.pop('header_len', 512)
self._meta_len = kwargs.pop('meta_len', 512)
self._stamp_len = kwargs.pop('stamp_len', 36)
self._gps_stamp = kwargs.pop('gps_stamp', '\xff\xff\xff\xff')
self._stamp_list = ['gps', 'time', 'lat', 'lon', 'status',
'gps_accuracy', 'temperature']
self._data_types = [np.int32, np.int32, np.float64, np.float64,
np.uint32, np.int32, np.float32]
self._data_type = np.dtype([(st, dt) for st, dt in
zip(self._stamp_list, self._data_types)])
self._week_len = 604800
self._gps_epoch = (1980, 1, 6, 0, 0, 0, -1, -1, 0)
self._leap_seconds = 16
#seconds different between scheduling time and actual collection time
self._seconds_diff = 5
self.log_lines = []
self.verbose = True
self._skip_sample_tolerance = 5
self.sample_diff_list = []
self.counts_to_mv_conversion = 9.5367431640625e-10
self.units = 'counts'
self.gps_week = 1740
self.time_series = None
self.date_time = None
self.header_dict = None
self.df = None
self.gain = None
self.gps_week = None
self.schedule_date = None
self.schedule_time = None
self.start_dt = None
self.start_time = None
self.start_date = None
self.ch_adcard_sn = None
self.meta_dict = None
self.ch_number = None
self.ch_cmp = None
self.ch_length = None
self.rx_stn = None
self.tx_id = None
self.gps_diff = None
self.gps_time = None
self.gps_list = None
self.temperature = None
self.lat = None
self.lon = None
#==================================================
    def read_header(self, header_string):
        """
        Parse the 512-byte header string and fill attributes:

            * header_dict --> dictionary of header information
            * df --> sampling frequency in Hz
            * gain --> gain from within Zen box for that channel
            * gps_week --> current gps week
            * schedule_time --> schedule start time
            * schedule_date --> schedule start date
            * start_dt --> schedule start date and time (UTC)
            * ch_adcard_sn --> a/d card serial number

        **Note:** there are different versions of the header keywords from
        different generations of the Zen firmware, hence the multiple
        fallback branches below.
        """
        #----read in header information----------------------------------------
        # entries are separated by newlines and/or commas
        header_list = header_string.replace('\n', ',').split(',')

        header_dict = {}
        for hh in header_list:
            if hh != '' and hh.find('builddate') == -1:
                hkv = hh.split(':')
                if len(hkv) == 2:
                    if hkv[0].lower() == 'period' or \
                       hkv[0].lower() == 'duty':
                        # period/duty may appear more than once; concatenate
                        # the string values on repeat occurrences
                        try:
                            header_dict[hkv[0].strip().lower()] +=\
                                                                hkv[1].strip()
                        except KeyError:
                            header_dict[hkv[0].strip().lower()] =\
                                                                hkv[1].strip()
                    else:
                        header_dict[hkv[0].strip().lower()] = hkv[1].strip()
                elif len(hkv) == 3:
                    # three ':'-separated fields means an hh:mm:ss time
                    header_dict['start_time'] = hh.strip()
                else:
                    pass
            elif hh == '':
                pass
            else:
                # entries containing 'builddate' pack several ';'-separated
                # key:value groups, with '&'-separated pairs inside
                hline = hh.split(';')
                for ll in hline:
                    if ll.find('builddate') > 0:
                        hlist = ll.split('&')
                        for kk in hlist:
                            klist = kk.split(':')
                            header_dict[klist[0].strip().lower()] = klist[1].strip()
                    else:
                        hlist = ll.split(':')
                        try:
                            header_dict[hlist[0].strip().lower()] = hlist[1].strip()
                        except IndexError:
                            pass
        #make attributes that will be useful later
        self.header_dict = header_dict
        self.df = float(header_dict['a/d rate'])
        self.gain = float(header_dict['a/d gain'])
        self.gps_week = int(header_dict['gpsweek'])
        # key name differs between firmware generations
        try:
            self.schedule_date = header_dict['schedule for this file']
        except KeyError:
            self.schedule_date = header_dict['schedule']
        self.schedule_time = header_dict['start_time']

        #get the start date/time in UTC time
        self.start_dt = self.compute_schedule_start(self.schedule_date,
                                                    self.schedule_time)
        self.start_time = self.schedule_time
        self.start_date = self.schedule_date

        #--> get serial number of a/d board (key also firmware-dependent)
        try:
            self.ch_adcard_sn = header_dict['serial']
        except KeyError:
            self.ch_adcard_sn = header_dict['brd339 serial']
#==================================================
def read_metadata(self, meta_data_string):
"""
read in meta data and make important information attributes
Fills attributes:
* meta_dict --> dictionary of metadata
* ch_number --> channel number
* ch_cmp --> channel component
* ch_length --> length of dipole
* rx_stn --> station name (can only be an integer)
* tx.id --> name of transmitter if used
"""
meta_list = meta_data_string.replace('\n','|').split('|')
meta_dict = {}
for mm in meta_list:
mlist = mm.split(',')
if len(mlist) == 2:
meta_dict[mlist[0].strip().lower()] = mlist[1].strip().lower()
else:
pass
self.meta_dict = meta_dict
self.ch_number = meta_dict['ch.number']
self.ch_cmp = meta_dict['ch.cmp'].replace('b','h')
self.ch_length = meta_dict['ch.varasp']
self.rx_stn = meta_dict['rx.stn']
self.tx_id = meta_dict['tx.id']
#==================================================
def get_info(self):
"""
read header and meta data
"""
#beginning index of data blocks
ds = self._header_len+self._meta_len
#read in as a binary file.
rfid = open(self.fn, 'rb')
raw_data = rfid.read(ds+4)
self._raw_data = raw_data
rfid.close()
if len(raw_data) < ds:
print 'Data file is not complete cannot read header information'
return
try:
self.log_lines[0] != '-'*72+'\n'
except IndexError:
self.log_lines.append('-'*72+'\n')
self.log_lines.append('--> Reading File: {0}\n'.format(self.fn))
#----read in header information----------------------------------------
header_string = raw_data[0:self._header_len]
self.read_header(header_string)
print('-'*40)
print(' ad card sn = {0}'.format(self.ch_adcard_sn))
print(' sampling rate = {0:.0f}'.format(self.df))
print(' gain = {0:.1f}'.format(self.gain))
print(' gps_week = {0:.0f}'.format(self.gps_week))
print(' schedule date = {0}'.format(self.schedule_date))
print(' schedule time = {0}'.format(self.schedule_time))
#---read in meta raw_data----------------------------------------------
meta_string = raw_data[self._header_len-1:ds]
self.read_metadata(meta_string)
print(' channel no = {0}'.format(self.ch_number))
print(' channel comp = {0}'.format(self.ch_cmp))
print(' channel len = {0}'.format(self.ch_length))
print(' rx station = {0}'.format(self.rx_stn))
print(' tx id = {0}'.format(self.tx_id))
print('-'*40)
#==================================================
    def read_3d(self):
        """
        Read in the time series and gps time stamps from the Z3D file.

        Makes sure that the number of samples between each time stamp is
        the sampling rate.  If it is not, the interval is dropped when the
        difference is more than _skip_sample_tolerance.

        Creates a time series that starts at the time where the first gps
        time stamp has the correct number of points, and stops where the
        first incorrect number of points occurs.  A corresponding time,date
        array is created in ``self.date_time``.
        """
        #read in as a binary file.
        raw_data = open(self.fn, 'rb').read()
        self._raw_data = raw_data

        # start a new log section for this file if one isn't open yet
        # (the bare comparison is only used to trigger IndexError on an
        # empty log list)
        try:
            self.log_lines[0] != '-'*72+'\n'
        except IndexError:
            self.log_lines.append('-'*72+'\n')
            self.log_lines.append('--> Reading File: {0}\n'.format(self.fn))

        #number of bytes in the file
        num_bytes = len(raw_data)

        #beginning index of data blocks
        ds = self._header_len+self._meta_len

        #----read in header information----------------------------------------
        header_string = raw_data[0:self._header_len]
        self.read_header(header_string)

        #---read in meta raw_data----------------------------------------------
        # NOTE(review): slice starts at header_len-1 -- same offset as in
        # get_info; confirm against the file format
        meta_string = raw_data[self._header_len-1:ds]
        self.read_metadata(meta_string)

        #---read in gps raw_data-----------------------------------------------
        #sampling rate times 4 bytes for 32 bit measurement
        df = int(self.df)
        dt = df*4

        #length of data block plus gps stamp
        block_len = self._stamp_len+dt

        #number of data blocks
        num_blocks = int(np.ceil(num_bytes/float(block_len)))

        #get position of gps stamps
        gps_list = np.zeros(num_blocks, dtype=np.int)

        # one array per stamp field, pre-allocated to num_blocks entries
        gps_dict = dict([(key, np.zeros(num_blocks, dtype=dtp))
                         for key, dtp in zip(self._stamp_list,
                                             self._data_types)])
        #make the time array floats instead of ints so can get the decimal
        #place if it isn't 0.
        gps_dict['time'] = gps_dict['time'].astype(np.float32)

        #get gps information from the data
        #get first time stamp that matches the starting time
        s1 = 0
        gps_list[0] = self.get_gps_stamp_location()
        gps_info = np.fromstring(raw_data[gps_list[0]:gps_list[0]+self._stamp_len],
                                 dtype=self._data_type)
        gps_info['time'] = gps_info['time'].astype(np.float32)
        gps_info['time'] = self.get_gps_time(gps_info['time'])[0]
        start_test = self.get_date_time(self.gps_week, gps_info['time'])

        #--> test to make sure the first time corresponds to the scheduled
        #start time; scan forward stamp by stamp, bumping the expected start
        #by one second each time a full scan fails
        time_stop = 0
        while start_test != self.start_dt and s1 <= self._seconds_diff and \
              time_stop <= self._seconds_diff:
            s1 += 1
            gps_list[0] = self.get_gps_stamp_location(gps_list[0]+7)
            gps_info = np.fromstring(raw_data[gps_list[0]:gps_list[0]+\
                                                        self._stamp_len],
                                     dtype=self._data_type)

            gps_info['time'] = gps_info['time'].astype(np.float32)
            gps_info['time'], gps_dweek = self.get_gps_time(gps_info['time'])

            start_test = self.get_date_time(self.gps_week+gps_dweek,
                                            gps_info['time'])
            if s1 == self._seconds_diff:
                # full scan failed: reset and try one second later
                s1 = 0
                self.start_dt = self.start_dt[:-2]+\
                                    '{0:02}'.format(int(self.start_dt[-2:])+1)
                gps_list[0] = self.get_gps_stamp_location()
                time_stop += 1

        #----Raise an error if the first gps stamp is more than allowed time
        #    difference.
        if time_stop >= self._seconds_diff:
            print ('GPS start time is more than '+\
                   '{0} '.format(self._seconds_diff)+\
                   'seconds different than scheduled start time of '+\
                   '{0}. \n '.format(self.start_dt)+\
                   'Estimated start time is {0} +/- {1} sec'.format(
                   start_test, self._seconds_diff))

        #put the information into the correct arrays via dictionary
        for jj, key in enumerate(self._stamp_list):
            gps_dict[key][0] = gps_info[0][jj]

        #find the next time stamp
        for ii in range(s1,num_blocks-1):
            sfind = self.get_gps_stamp_location(gps_list[ii-1]+7)
            #make sure it isn't the same time stamp as before
            if sfind != gps_list[ii-1] and sfind != -1:
                gps_info, gps_index, gps_week = self.get_gps_stamp(sfind)
                gps_list[ii] = gps_index

                if gps_info is not None:
                    for jj, key in enumerate(self._stamp_list):
                        gps_dict[key][ii] = gps_info[0][jj]

        #get only the values that are non zero
        gps_dict['time'] = gps_dict['time'][np.nonzero(gps_dict['time'])]

        num_samples = len(gps_dict['time'])

        #calculate the difference between time stamps
        gps_diff = np.array([gps_dict['time'][ii+1]-gps_dict['time'][ii]
                             for ii in range(num_samples-1)])

        #check for any spots where gps was not locked or missed a sampling interval
        bad_lock = np.where(gps_diff[np.nonzero(gps_diff)] != 1.0)[0]

        if len(bad_lock) > 0:
            for bb in bad_lock:
                if gps_diff[bb] > 5:
                    self.log_lines.append(' '*4+\
                                          'point {0:^15},'.format(gps_list[bb])+\
                                          'gps diff {0:^15}\n'.format(gps_diff[bb]))

            self.log_lines.append(' '*4+'*'*52+'\n')

        #need to be sure that the number of data points between time stamps is
        #equal to the sampling rate, if it is not then remove that interval.
        #Most likely it is at the beginning or end of time series.
        dsamples = np.array([(gps_list[nn+1]-gps_list[nn]-self._stamp_len-df*4)/4
                             for nn in range(num_samples)])

        bad_interval = np.where(abs(dsamples)>self._skip_sample_tolerance)[0]
        bmin = 0
        bmax = num_samples
        if len(bad_interval) > 0:
            #need to locate the bad interval numbers; only intervals near the
            #start or end of the series are trimmed
            for bb in bad_interval:
                if bb <= 10:
                    bmin = bb+1
                if bb > num_samples-10:
                    bmax = bb

            gps_list = gps_list[bmin:bmax]

        num_samples = len(gps_list)
        if self.verbose:
            print 'Found {0} gps time stamps, '.format(num_samples)+\
                  'with equal intervals of {0} samples'.format(int(self.df))

        self.log_lines.append(' '*4+\
                              'Found {0} gps time stamps, '.format(num_samples)+\
                              'with equal intervals of {0} samples\n'.format(int(self.df)))

        #read in data, one second (df samples) per gps block
        data_array = np.zeros((num_samples+1)*df, dtype=np.float32)
        for ll, kk in enumerate(gps_list[0:-1]):
            pdiff = ((gps_list[ll+1]-(kk+self._stamp_len))-(df*4))/4
            self.sample_diff_list.append(pdiff)
            dblock = raw_data[kk+self._stamp_len:gps_list[ll+1]]
            try:
                data_array[ll*df:(ll+1)*df+pdiff] = np.fromstring(dblock,
                                                                  dtype=np.int32)
            except ValueError:
                print 'samples between time step {0} is off by {1} samples'.format(ll,
                                                                                   abs(pdiff))

        if sum(self.sample_diff_list) != 0:
            if self.verbose:
                print 'time series is off by {0} seconds'.format(
                      float(sum(self.sample_diff_list))/df)
                self.log_lines.append('time series is off by {0} seconds'.format(
                                      float(sum(self.sample_diff_list))/df))

        #get only the non-zero data bits, this is dangerous if there is
        #actually an exact 0 in the data, but rarely happens
        self.time_series = data_array[np.nonzero(data_array)]

        #need to cut all the data arrays to have the same length and corresponding
        #data points
        for key in gps_dict.keys():
            gps_dict[key] = gps_dict[key][bmin:bmax]

        #make attributes of important information
        self.gps_diff = gps_diff[bmin:bmax]
        self.gps_time = gps_dict['time']
        self.gps_list = gps_list
        self.temperature = gps_dict['temperature']
        self.lat = gps_dict['lat']
        self.lon = gps_dict['lon']

        self.date_time = np.zeros_like(gps_dict['time'], dtype='|S24')

        for gg, gtime in enumerate(gps_dict['time']):
            self.date_time[gg]= self.get_date_time(self.gps_week, gtime)

        try:
            self.start_dt = self.date_time[0]
            self.start_date = self.date_time[0].split(',')[0]
            self.start_time = self.date_time[0].split(',')[1]
            if self.verbose:
                print 'Starting time of time series is '+\
                        '{0} UTC'.format(self.date_time[0])
            self.log_lines.append(' '*4+'Starting time of time series is '+\
                                  '{0} UTC\n'.format(self.date_time[0]))
        except IndexError:
            print 'No quality data was collected'
            self.log_lines.append(' '*4+'No quality data was collected\n')
            self.start_dt = None
            self.start_date = None
            self.start_time = None

        if self.units == 'mv':
            self.time_series = self.convert_counts()
#==================================================
def convert_counts(self):
"""
convert the time series from counts to millivolts
"""
return self.time_series*self.counts_to_mv_conversion
#==================================================
def convert_mV(self):
"""
convert millivolts to counts assuming no other scaling has been applied
"""
return self.time_series/self.counts_to_mv_conversion
#==================================================
def compute_schedule_start(self, start_date, start_time,
leap_seconds=None):
"""
compute the GMT time for scheduling from start time of the gps
according to the leap seconds.
Arguments:
-----------
**start_date**: YYYY-MM-DD
schedule start date
**start_time**: hh:mm:ss
time of schedule start on a 24 hour basis
**leap_seconds**: int
number of seconds that GPS is off from UTC time.
as of 2013 GPS is ahead by 16 seconds.
Returns:
--------
**ndate_time**: YYYY-MM-DD,hh:mm:ss
calibrated date and time in UTC time.
"""
month_dict = {1:31, 2:28, 3:31, 4:30, 5:31, 6:30, 7:31, 8:31, 9:30,
10:31, 11:30, 12:31}
if leap_seconds is not None:
self._leap_seconds = leap_seconds
year, month, day = start_date.split('-')
hour, minutes, seconds = start_time.split(':')
new_year = int(year)
new_month = int(month)
new_day = int(day)
new_hour = int(hour)
new_minutes = int(minutes)
new_seconds = int(seconds)-self._leap_seconds
if new_seconds < 0:
new_seconds = (int(seconds)-self._leap_seconds)%60
new_minutes = int(minutes)-1
if new_minutes < 0:
new_minutes = (int(minutes)-1)%60
new_hour = int(hour)-1
if new_hour < 0:
new_hour = (int(hour)-1)%24
new_day = int(day)-1
if new_day <= 0:
new_day = (int(day)-1)%30
new_month = int(month)-1
if new_month <= 0:
new_month = (12-new_month)
new_day = month_dict[new_month]-int(day)+1
print 'need to check date, have not implemented '+\
'leap years yet'
ndate_time = time.strftime(datetime_fmt ,
(new_year,
new_month,
new_day,
new_hour,
new_minutes,
new_seconds, 0, 0, 0))
return ndate_time
#==================================================
def get_gps_stamp_location(self, start_index=None):
"""
get the location in the data file where there is a gps stamp. Makes
sure that the time stamp is what it should be.
Arguments:
-----------
**start_index**: int
starting point to look for the time stamp within
the file.
Returns:
---------
**gps_index**: int
the index in the file where the start of the
time stamp is.
"""
gps_index = self._raw_data.find(self._gps_stamp, start_index)
if self._raw_data[gps_index+4] == '\xff':
gps_index += 1
if self._raw_data[gps_index+4] == '\xff':
gps_index += 1
if self._raw_data[gps_index+4] == '\xff':
gps_index += 1
if self._raw_data[gps_index+4] == '\xff':
gps_index += 1
return gps_index
#==================================================
    def get_gps_stamp(self, gps_index):
        """
        Decode the gps stamp at ``gps_index`` from the raw binary data.

        If the decoded stamp is implausible (negative time or status,
        |temperature| > 80, |lat| > pi radians) the search is re-run from
        just past the current index until a sane stamp is found.

        Returns (gps_info, gps_index, gps_week); (None, gps_index, 0) when
        the stamp runs past the end of the file.
        """
        #get numbers from binary format
        try:

            gps_info = np.fromstring(self._raw_data[gps_index:gps_index+self._stamp_len],
                                     dtype=self._data_type)
            # re-scan past obviously corrupt stamps: negative time
            while gps_info['time'] < 0:
                gps_index = self.get_gps_stamp_location(start_index=gps_index+7)
                print 'time ', gps_index
                gps_info = np.fromstring(self._raw_data[gps_index:gps_index+self._stamp_len],
                                         dtype=self._data_type)

            # negative status
            while gps_info['status'] < 0:
                gps_index = self.get_gps_stamp_location(start_index=gps_index+7)
                print 'status ', gps_index
                gps_info = np.fromstring(self._raw_data[gps_index:gps_index+self._stamp_len],
                                         dtype=self._data_type)

            # implausible temperature
            while abs(gps_info['temperature']) > 80:
                gps_index = self.get_gps_stamp_location(start_index=gps_index+7)
                print 'temperature ', gps_index
                gps_info = np.fromstring(self._raw_data[gps_index:gps_index+self._stamp_len],
                                         dtype=self._data_type)

            # latitude outside +/- pi radians
            while abs(gps_info['lat']) > np.pi:
                gps_index = self.get_gps_stamp_location(start_index=gps_index+7)
                print 'lat ', gps_index
                gps_info = np.fromstring(self._raw_data[gps_index:gps_index+self._stamp_len],
                                         dtype=self._data_type)

            #convert lat and lon into decimal degrees
            gps_info['lat'] = self.get_degrees(gps_info['lat'])
            gps_info['lon'] = self.get_degrees(gps_info['lon'])
            gps_info['time'] = gps_info['time'].astype(np.float32)
            gps_info['time'], gps_week = self.get_gps_time(gps_info['time'])

            if gps_info == []:
                print gps_index
                raise ZenGPSError('Something is fucked')
            if gps_index == -1:
                print gps_info
                raise ZenGPSError('Something is fucked')

            return gps_info, gps_index, gps_week
        except ValueError:
            # stamp straddles the end of the file
            print 'Ran into end of file, gps stamp not complete.'+\
                  ' Only {0} points.'.format(len(self._raw_data[gps_index:]))
            return None, gps_index, 0
#==================================================
def get_gps_time(self, gps_int, gps_week=0):
"""
from the gps integer get the time in seconds.
Arguments:
----------
**gps_int**: int
integer from the gps time stamp line
**gps_week**: int
relative gps week, if the number of seconds is
larger than a week then a week is subtracted from
the seconds and computed from gps_week += 1
Returns:
---------
**gps_time**: int
number of seconds from the beginning of the relative
gps week.
"""
gps_seconds = gps_int/1024.
gps_ms = (gps_seconds-np.floor(gps_int/1024.))*(1.024)
cc = 0
if gps_seconds > self._week_len:
gps_week += 1
cc = gps_week*self._week_len
gps_seconds -= self._week_len
gps_time = np.floor(gps_seconds)+gps_ms+cc
return gps_time, gps_week
#==================================================
def get_date_time(self, gps_week, gps_time):
    """
    get the actual date and time of measurement as UTC.
    Note that GPS time is curently off by 16 seconds from actual UTC time.

    Arguments:
    ----------
        **gps_week**: int
                      integer value of gps_week that the data was collected
        **gps_time**: int
                      number of seconds from beginning of gps_week

    Returns:
    --------
        **date_time**: YYYY-MM-DD,HH:MM:SS
                       formated date and time from gps seconds.
    """
    # keep the sub-second remainder; it is added back in before the
    # integer conversion of seconds below
    mseconds = gps_time % 1
    #make epoch in seconds, mktime computes local time, need to subtract
    #time zone to get UTC
    epoch_seconds = time.mktime(self._gps_epoch)-time.timezone
    #gps time is 14 seconds ahead of GTC time, but I think that the zen
    #receiver accounts for that so we will leave leap seconds to be 0
    gps_seconds = epoch_seconds+(gps_week*self._week_len)+gps_time-\
                                                    self._leap_seconds
    #compute date and time from seconds (gmtime gives a UTC struct_time)
    (year, month, day, hour, minutes, seconds, dow, jday, dls) = \
                                                time.gmtime(gps_seconds)
    # NOTE(review): datetime_fmt is a module-level format string defined
    # elsewhere in this file -- presumably 'YYYY-MM-DD,hh:mm:ss'; confirm
    date_time = time.strftime(datetime_fmt ,(year,
                                             month,
                                             day,
                                             hour,
                                             minutes,
                                             int(seconds+mseconds),
                                             0, 0, 0))
    return date_time
#==================================================
def get_degrees(self, radian_value):
    """
    Convert an angle given in radians (latitude or longitude) into
    decimal degrees.
    """
    return radian_value * 180 / np.pi
#==================================================
def apply_adaptive_notch_filter(self, notch_dict):
    """
    Apply a notch filter to the data that finds the peak around each
    frequency.

    see mtpy.processing.filter.adaptive_notch_filter

    Arguments:
    -----------
        **notch_dict** : dictionary of filter parameters; recognised
                         keys are 'notches', 'notchradius', 'freqrad'
                         and 'rp'.  Note that recognised keys are
                         popped (removed) from the caller's dictionary.
    """
    # make sure the time series has been read in
    try:
        self.time_series
    except AttributeError:
        self.read_3d()
    # pull parameters out of the dictionary, falling back to defaults;
    # pop intentionally mutates the caller's dict (original behaviour)
    filt_kwargs = {
        'df': self.df,
        'notches': notch_dict.pop('notches', list(np.arange(60, 2048, 60))),
        'notchradius': notch_dict.pop('notchradius', 0.5),
        'freqrad': notch_dict.pop('freqrad', 0.5),
        'rp': notch_dict.pop('rp', 0.1),
    }
    self.time_series, self.filt_list = \
        mtfilt.adaptive_notch_filter(self.time_series, **filt_kwargs)
#==================================================
def write_ascii_mt_file(self, save_fn=None, save_station='mb', fmt='%.8e',
                        ex=1, ey=1, notch_dict=None):
    """
    write an mtpy time series data file

    Arguments:
    -----------
        **save_fn** : full path to save file, if None file is saved as:
                      station_YYYYMMDD_hhmmss_df.component
                      ex. mt01_20130206_120000_256.HX
        **save_station** : string
                           prefix string to add to station number as only
                           integers can be input into metadata of the zen
                           boxes. ex. mb001
        **fmt** : string format
                  format of data numbers output to ascii file.
                  *default* is '%.8e' for 8 significan figures in
                  scientific notation.
        **ex** : float
                 scale factor divided out of the 'ex' channel
        **ey** : float
                 scale factor divided out of the 'ey' channel
        **notch_dict** : dict or None
                         if not None, parameters passed on to
                         apply_adaptive_notch_filter

    Output:
    --------
        **fn_mt_ascii** : full path to saved file
    """
    # make sure the data file has been read in
    try:
        self.start_date
    except AttributeError:
        self.read_3d()
    time_series = self.convert_counts()
    # build the default file name station_YYYYMMDD_hhmmss_df.CMP inside
    # a 'TS' directory next to the raw file
    if save_fn is None:
        svfn_directory = os.path.join(os.path.dirname(self.fn), 'TS')
        if not os.path.exists(svfn_directory):
            os.mkdir(svfn_directory)
        svfn_date = ''.join(self.start_date.split('-'))
        svfn_time = ''.join(self.start_time.split(':'))
        svfn_station = save_station+self.rx_stn
        save_fn = os.path.join(svfn_directory,
                               '{0}_{1}_{2}_{3}.{4}'.format(svfn_station,
                                                            svfn_date,
                                                            svfn_time,
                                                            int(self.df),
                                                            self.ch_cmp.upper()))
    #calibrate electric channels
    if self.ch_cmp == 'ex':
        time_series /= ex
    elif self.ch_cmp == 'ey':
        time_series /= ey
    #apply notch filter if desired
    # NOTE(review): the filter modifies self.time_series, but the local
    # time_series written below was captured before filtering -- it
    # looks like the filtered data never reaches the output file; verify
    if notch_dict is not None:
        self.apply_adaptive_notch_filter(notch_dict)
        print 'Filtered notches: '
        for nfilt in self.filt_list:
            if type(nfilt[0]) != str:
                print '{0}{1:.2f} Hz'.format(' '*4, nfilt[0])
    # header: station, component, sampling rate, start epoch, n points,
    # units, median lat/lon, elevation placeholder, then the data
    header_tuple = (save_station+self.rx_stn,
                    self.ch_cmp,
                    self.df,
                    time.mktime(time.strptime(self.start_dt,
                                              datetime_fmt )),
                    time_series.shape[0],
                    'mV',
                    np.median(self.lat),
                    np.median(self.lon),
                    0.0,
                    time_series)
    self.fn_mt_ascii = mtfh.write_ts_file_from_tuple(save_fn, header_tuple,
                                                     fmt=fmt)
    print 'Wrote mtpy timeseries file to {0}'.format(self.fn_mt_ascii)
#==================================================
def write_mseed_mt_file(self, save_fn=None, save_station='mb',
                        location='Mono Basin', network='USGS'):
    """
    write a miniseed file, note need to have Obspy installed.  This proves
    to be difficult under windows

    Arguments:
    ----------
        **save_fn** : full path to file to save data to
        **save_station** : string
                           prefix to add onto station name
        **location** : string
                       description of where the data was collected
        **network** : string
                      network or company that collected the data

    Outputs:
    --------
        **save_fn** : string
                      full path to file where data was saved.
    """
    # make sure the data file has been read in
    try:
        self.start_date
    except AttributeError:
        self.read_3d()
    time_series = self.convert_counts()
    # pieces for the default file name station_YYYYMMDD_hhmmss_df.mseed
    svfn_date = ''.join(self.start_date.split('-'))
    svfn_time = ''.join(self.start_time.split(':'))
    svfn_station = save_station+self.rx_stn
    svfn_chn = self.ch_cmp.upper()
    # sample interval in seconds
    delta_t = 1./self.df
    # obspy wants ISO style 'date T time'
    t0 = self.start_dt.replace(',','T')
    if save_fn is None:
        save_fn = os.path.join(os.path.dirname(self.fn),
                               '{0}_{1}_{2}_{3}_{4}.mseed'.format(svfn_station,
                                                                  svfn_date,
                                                                  svfn_time,
                                                                  int(self.df),
                                                                  svfn_chn))
    self.fn_mt_mseed = mtmseed.writefile_obspy_singletrace(save_fn,
                                                           svfn_station,
                                                           svfn_chn,
                                                           network,
                                                           location,
                                                           delta_t,
                                                           t0,
                                                           time_series)
    return save_fn
#==================================================
def plot_time_series(self, fig_num=1):
    """
    plots the time series

    Arguments:
    -----------
        **fig_num** : int
                      matplotlib figure number

    Returns:
    --------
        **fig**, **ax** : matplotlib figure and axes of the plot
    """
    time_series = self.convert_counts()
    fig = plt.figure(fig_num, dpi=300)
    ax = fig.add_subplot(1,1,1)
    ax.plot(time_series)
    #ax.xaxis.set_minor_locator(MultipleLocator(self.df))
    #ax.xaxis.set_major_locator(MultipleLocator(self.df*15))
    #ax.xaxis.set_ticklabels([self.date_time[ii]
    #                        for ii in range(0,len(self.date_time), 15)])
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Amplitude (mV)')
    plt.show()
    # NOTE(review): presumably undoes an in-place unit conversion made by
    # convert_counts() above -- confirm against those two methods
    self.convert_mV()
    return fig, ax
#==================================================
def plot_spectrogram(self, time_window=2**8, time_step=2**6, s_window=11,
                     frequency_window=1, n_freq_bins=2**9, sigma_L=None):
    """
    Plot a spectrogram of the data using the S-method.

    Arguments:
    -----------
        **s_window** : int (should be odd)
                       length of window for S-method calculation, higher
                       numbers tend toward WVD
        **time_window** : int (should be power of 2)
                          window length for each time step
                          *default* is 2**8 = 256
        **frequency_window** : int (should be odd)
                               length of smoothing window along frequency
                               plane
        **time_step** : int
                        number of samples between short windows
                        *default* is 2**6 = 64
        **sigma_L** : float
                      full width half max of gaussian window for L
        **n_freq_bins** : int
                          (should be power of 2 and equal or larger than
                          time_window) number of frequency bins

    Returns:
    ---------
        **ptf** : mtpy.imaging.plotspectrogram.PlotTF object
    """
    counts = self.convert_counts()
    tf_kwargs = dict(nh=time_window,
                     tstep=time_step,
                     L=s_window,
                     ng=frequency_window,
                     df=self.df,
                     nfbins=n_freq_bins,
                     sigmaL=sigma_L)
    return plotspectrogram.PlotTF(counts, **tf_kwargs)
#==================================================
def plot_spectra(self, fig_num=2):
    """
    Plot the power spectrum of the time series on log-log axes.

    Arguments:
    -----------
        **fig_num** : int
                      matplotlib figure number

    Returns:
    --------
        **fig**, **ax** : matplotlib figure and axes of the plot
    """
    if self.time_series is None:
        self.read_3d()
    counts = self.convert_counts()
    # FFT of the zero-padded series with its matching frequency axis
    spectrum = np.fft.fft(mtfilt.zero_pad(counts))
    freqs = np.fft.fftfreq(spectrum.shape[0], 1./self.df)
    fig = plt.figure(fig_num, [4,4], dpi=200)
    axes = fig.add_subplot(1,1,1)
    axes.loglog(freqs, abs(spectrum)**2, lw=.5)
    axes.grid(which='both', lw=.25)
    axes.set_xlabel('Frequency (Hz)')
    axes.set_ylabel('Amplitude')
    plt.show()
    return fig, axes
#==============================================================================
# Cache files
#==============================================================================
class Cache_Metadata(object):
    """
    Container for the metadata block of a Zonge .cac cache file.

    Attribute names mirror the keys found in the metadata block with
    '.' replaced by '_' and lower-cased (e.g. 'CH.CMP' --> ch_cmp).
    All values default to None and are filled in by read_meta_string();
    anything passed through **kwargs overrides a default.
    """
    def __init__(self, fn=None, **kwargs):
        # full path to the .cac file the metadata came from
        self.fn = fn
        # channel (CH.*) entries
        self.ch_adcardsn = None
        self.ch_azimuth = None
        self.ch_cmp = None
        self.ch_cres = None
        self.ch_factor = None
        self.ch_gain = None
        self.ch_gainfactor = None
        self.ch_gdpslot = None
        self.ch_length = None
        self.ch_lowpass = None
        self.ch_number = None
        self.ch_numon = None
        # data/GDP (logger) entries
        self.data_version = None
        self.gdp_cardtype = None
        self.gdp_date = None
        self.gdp_operator = None
        self.gdp_progver = None
        self.gdp_time = None
        self.gdp_type = None
        # GPS (GPS.*) entries
        self.gps_alt = None
        self.gps_lat = None
        self.gps_lon = None
        self.gps_numsat = None
        self.gps_sec = None
        self.gps_utmzone = None
        self.gps_week = None
        # job / survey bookkeeping entries
        self.header_type = None
        self.job_by = None
        self.job_for = None
        self.job_name = None
        self.job_number = None
        self.line_name = None
        # receiver (RX.*) entries
        self.rx_aspace = None
        self.rx_sspace = None
        self.rx_utm0 = None
        self.rx_utm1 = None
        self.rx_utm2 = None
        self.rx_xyz0 = None
        self.rx_xyz1 = None
        self.rx_xyz2 = None
        self.survey_acqmethod = None
        self.survey_type = None
        # time-series (TS.*) entries
        self.ts_adfreq = None
        self.ts_npnt = None
        self.unit_length = None
        # derived in _get_station_number()
        self.station_number = None
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])

    def read_meta_string(self, meta_string=None):
        """
        read in a meta from the raw string

        Arguments:
        -----------
            **meta_string** : string, optional
                              raw metadata block; if given it replaces
                              the stored string before parsing

        Each 'KEY,value[,value...]' line becomes an attribute; single
        values are converted to float when possible, multi-value lines
        stay as lists of strings.
        """
        if meta_string is not None:
            self._meta_string = meta_string
        meta_list = self._meta_string.split('\n')
        for m_str in meta_list:
            line_list = m_str.strip().split(',')
            # 'CH.CMP' --> 'ch_cmp' so it is a valid attribute name
            l_key = line_list[0].replace('.', '_').lower()
            l_value = line_list[1:]
            if len(l_value) == 1:
                try:
                    l_value = float(l_value[0])
                except ValueError:
                    l_value = l_value[0]
            # NOTE(review): a blank line yields l_key == '' and an empty
            # list value, which still gets setattr'd -- harmless but odd
            setattr(self, l_key, l_value)
        self._get_station_number()

    def _get_station_number(self):
        """
        get station name from metadata from all versions of .cac files

        Tries the RX.STN entry first, falling back on the station part
        of RX.XYZ0; leaves station_number unset if neither exists.
        """
        try:
            self.station_number = str(int(self.rx_stn))
        except AttributeError:
            try:
                self.station_number = self.rx_xyz0.split(':')[0]
            except AttributeError:
                print ('Could not find station number in rx.stn or rx.xyz0'
                       ' setting station_number to 000')
class Board_Calibration(object):
    """
    Deal with board calibration data from Zonge .cac files.

    Calibration lines arrive as comma separated strings.  Lines with a
    single value become plain attributes on the instance; lines with
    multiple ``frequency:amplitude:phase`` triplets are parsed into
    structured numpy arrays stored in the ``cal_sys``/``cal_ant``
    dictionaries, keyed by the card identifier.

    Arguments:
    -----------
        **board_cal_str** : string
                            raw calibration block read from a .cac file
    """
    def __init__(self, board_cal_str=None, **kwargs):
        self.board_cal_str = board_cal_str
        # calibration arrays keyed by card id, filled by
        # read_board_cal_str()
        self.cal_sys = {}
        self.cal_ant = {}
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])

    def read_board_cal_str(self, board_cal_str=None):
        """
        Parse the raw calibration string into attributes and structured
        calibration arrays.

        Arguments:
        -----------
            **board_cal_str** : string, optional
                                raw calibration block; if given it
                                replaces ``self.board_cal_str``
        """
        if board_cal_str is not None:
            self.board_cal_str = board_cal_str
        for c_str in self.board_cal_str.split('\n'):
            c_list = c_str.split(',')
            # 'CAL.SYS' --> 'cal_sys' so it is a valid attribute name
            c_key = c_list[0].replace('.', '_').lower()
            if len(c_list) == 2:
                # simple key,value pair --> instance attribute
                setattr(self, c_key, c_list[1])
            elif len(c_list) > 2:
                # key, card id, then frequency:amplitude:phase triplets
                c_key2 = c_list[1]
                # was np.float (removed in numpy >= 1.24); the builtin
                # float gives the same float64 field dtype
                c_arr = np.zeros(len(c_list[2:]),
                                 dtype=[('frequency', float),
                                        ('amplitude', float),
                                        ('phase', float)])
                for ii, cc in enumerate(c_list[2:]):
                    # assign as a tuple: tuple assignment into a
                    # structured element is supported on all numpy
                    # versions, unlike assigning a plain ndarray
                    c_arr[ii] = tuple(float(kk) for kk in cc.split(':'))
                self.__dict__[c_key][c_key2] = c_arr
class Cache(object):
    """
    deal with Zonge .cac files

    A .cac file is a sequence of length-delimited binary blocks.  Each
    block starts with a 10-byte header (int32 length, int32 flag, int16
    type), is followed by the payload, and ends with an int32 repeat of
    the length for validation.  The block type selects the parser:
    514 = metadata, 768 = calibration, 16 = time series, 15 = other.
    """
    def __init__(self, fn=None, **kwargs):
        # full path to the .cac file
        self.fn = fn
        # parsed results, filled in by read_cache_file()
        self.metadata = None
        self.time_series = None
        self.other = None
        self.calibration = None
        # block header is 10 bytes, trailing length check is 4 bytes
        self._flag_len = 10
        self._len_bytes = 4
        self._flag_dtype = [('length', np.int32),
                            ('flag', np.int32),
                            ('type', np.int16)]
        # block type code --> attribute name to store the result under
        self._type_dict = {4:'navigation',
                           514:'metadata',
                           768:'calibration',
                           16:'time_series',
                           15:'other',
                           640:'status'}
        # file position after the last successfully read block
        self._f_tell = 0

    def _read_file_block(self, file_id):
        """
        read a cache block

        Reads the 10-byte header, the payload and the trailing length
        check from the open file object, validates the two lengths
        agree and returns (header_array, payload_string).
        """
        file_pointer = np.fromstring(file_id.read(self._flag_len),
                                     dtype=self._flag_dtype)
        # payload length includes the 2-byte type field already consumed
        f_str = file_id.read(file_pointer['length']-2)
        end_len = np.fromstring(file_id.read(self._len_bytes),
                                dtype=np.int32)
        if self._validate_block_len(file_pointer, end_len) is True:
            self._f_tell = file_id.tell()
            return file_pointer, f_str

    def _validate_block_len(self, file_pointer, end_length):
        """
        validate that the block lengths as defined at the beginning and
        the end are the same

        Raises ValueError on mismatch, returns True otherwise.
        """
        try:
            assert file_pointer['length'] == end_length
            return True
        except AssertionError:
            raise ValueError('File pointer length {0} != end length {1}'.format(
                             file_pointer['length'], end_length))

    def read_cache_metadata(self, fn=None):
        """
        read .cac file

        Reads blocks until the metadata block (type 514) is found,
        parses it into a Cache_Metadata object stored on self.metadata,
        then returns.  Blocks before the metadata block are skipped.
        """
        if fn is not None:
            self.fn = fn
        f_pointer = True
        with open(self.fn, 'rb') as fid:
            while f_pointer:
                # read in first pointer
                f_pointer, f_str = self._read_file_block(fid)
                # if the data type is the meta data
                if int(f_pointer['type']) == 514:
                    meta_obj = Cache_Metadata()
                    meta_obj.read_meta_string(f_str)
                    key = self._type_dict[int(f_pointer['type'])]
                    setattr(self, key, meta_obj)
                    print 'Read in metadata'
                    return

    def read_cache_file(self, fn=None):
        """
        read .cac file

        Walks the file block by block, dispatching on the block type:
        metadata (514) and calibration (768) are parsed and stored, the
        time series block (16) is reshaped per channel into a structured
        array and ends the read, type 15 is stored raw under 'other'.
        Reading the time series requires the metadata block to appear
        first (ts_npnt and ch_cmp are needed for the reshape).
        """
        if fn is not None:
            self.fn = fn
        f_pointer = True
        with open(self.fn, 'rb') as fid:
            while f_pointer:
                # read in first pointer
                f_pointer, f_str = self._read_file_block(fid)
                # if the data type is the meta data
                if int(f_pointer['type']) == 514:
                    meta_obj = Cache_Metadata()
                    meta_obj.read_meta_string(f_str)
                    key = self._type_dict[int(f_pointer['type'])]
                    setattr(self, key, meta_obj)
                    print 'Read in metadata'
                    continue
                # if the data type is calibration
                elif int(f_pointer['type']) == 768:
                    cal_obj = Board_Calibration(f_str)
                    cal_obj.read_board_cal_str()
                    key = self._type_dict[int(f_pointer['type'])]
                    setattr(self, key, cal_obj)
                    print 'Read in calibration'
                    continue
                # if the data type is time series
                elif int(f_pointer['type']) == 16:
                    ts_arr = np.fromstring(f_str, dtype=np.int32)
                    # samples are interleaved across channels; reshape
                    # to (n_points, n_channels)
                    ts_arr = np.resize(ts_arr, (int(self.metadata.ts_npnt),
                                                len(self.metadata.ch_cmp)))
                    # one structured record with a field per channel
                    ts = np.zeros(1,
                                  dtype=[(cc.lower(), np.int32,
                                         (int(self.metadata.ts_npnt),)) for
                                         cc in self.metadata.ch_cmp])
                    for ii, cc in enumerate(self.metadata.ch_cmp):
                        ts[cc.lower()][:] = ts_arr[:, ii]
                    key = self._type_dict[int(f_pointer['type'])]
                    setattr(self, key, ts)
                    print 'Read in time series,  # points = {0}'.format(
                                                        self.metadata.ts_npnt)
                    return
                # if the data type is time series
                elif int(f_pointer['type']) == 15:
                    ts = np.fromstring(f_str, dtype=np.int32)
                    key = self._type_dict[int(f_pointer['type'])]
                    setattr(self, key, ts)
                    print 'Read in other'
                    continue
#==============================================================================
#
#==============================================================================
class ZenCache(object):
    """
    deals with cache files or combined time series files.

    This will combine all coincident files into a .cac file for use in the
    Zonge processing software.  It will start at the first coherent time
    stamp and go to the longest coherent time stamp, such that each channel
    will have the same start time and end time and same number of data points.

    ================== ========================================================
    Attributes         Description
    ================== ========================================================
    cal_data           list of calibrations, as is from file
    fn_list            list of filenames merged together
    log_lines          list of information to put into a log file
    meta_data          dictionary of meta data key words and values
    nav_data           list of navigation data, as is from file
    save_fn            file to save merged file to
    ts                 np.ndarray(len(ts), num_channels) of time series
    verbose            [ True | False ] True prints information to console
    zt_list            list of class: Zen3D objects
    _ch_factor         scaling factor for the channels, got this from Zonge
    _ch_gain           gain on channel, not sure of the format
    _ch_lowpass_dict   dictionary of values for lowpass filter, not sure how
                       they get the values
    _data_type         np.dtype of data type for cache block
    _flag              flag for new data block
    _nav_len           length of navigation information in bytes
    _stamp_len         length of new block stamp in bytes
    _type_dict         dictionary of cache block types, from Zonge.
    ================== ========================================================

    Methods:
    ---------
        * *check_sampling_rate* : makes sure sampling rate is the same for all
                                  files being merged.
        * *check_time_series* : makes sure all time series start at the same
                                time and have the same length.
        * *write_cache_file* : writes a cache file for given filenames.
        * *read_cache* : reads in a cache file.

    :Example: ::

        >>> import ZenTools as zen
        >>> zc = zen.ZenCache()
        >>> # read a cache file
        >>> zc.read_cache(fn=r"/home/MT/mt01_20130601_190001_4096.cac")
        >>> # write a cache file
        >>> import os
        >>> file_path = r"/home/MT/Station1"
        >>> fn_list = [os.path.join(file_path, fn)
        >>> ...        for fn in os.listdir(file_path)
        >>> ...        if fn.find('.Z3D')>0]
        >>> zc.write_cache_file(fn_list, r"/home/MT/Station1", station='s1')
        >>> Saved File to: /home/MT/Station1/Merged/s1_20130601_190001_4096.cac
    """
    def __init__(self):
        self.fn_list = None
        self.zt_list = None
        self.save_fn = None
        # channel scaling factor, from Zonge
        self._ch_factor = '9.5367431640625e-10'
        self._ch_gain = '01-0'
        # sampling rate --> lowpass filter value
        self._ch_lowpass_dict = {'256':'112',
                                 '1024':'576',
                                 '4096':'1792'}
        # flag marking the start of a new data block
        self._flag = -1
        self._ts_dtype = np.int32
        # cache block type codes, from Zonge
        self._type_dict = {'nav' : 4,
                           'meta' : 514,
                           'cal' : 768,
                           'ts' : 16}
        # block header: int32 length, int32 flag, int16 type (10 bytes)
        self._data_type = np.dtype([('len',np.int32),
                                    ('flag', np.int32),
                                    ('input_type', np.int16)])
        self._stamp_len = 10
        self._nav_len = 43
        self.nav_data = None
        self.cal_data = None
        self.ts = None
        self.verbose = True
        self.log_lines = []
        # channel ordering used when sorting input files
        self.chn_order = ['hx','hy','hz','ex','ey']
        # metadata keys written to the cache file; values that start
        # with ',' are placeholders that get appended to per channel
        self.meta_data = {'SURVEY.ACQMETHOD' : ',timeseries',
                           'SURVEY.TYPE' : ',',
                           'LENGTH.UNITS' : ',m',
                           'DATA.DATE0' : '',
                           'DATA.TIME0' : '',
                           'TS.ADFREQ' : '',
                           'TS.NPNT': '',
                           'CH.NUNOM' : ',',
                           'CH.FACTOR' : '',
                           'CH.GAIN' : '',
                           'CH.NUMBER' : '',
                           'CH.CMP' : '',
                           'CH.LENGTH' : '',
                           'CH.EXTGAIN' : '',
                           'CH.NOTCH' : '',
                           'CH.HIGHPASS' : '',
                           'CH.LOWPASS' : '',
                           'CH.ADCARDSN' : '',
                           'CH.STATUS' : ',',
                           'CH.SP' : ',',
                           'CH.GDPSLOT' : ',',
                           'RX.STN' : '',
                           'RX.AZIMUTH' : ',',
                           'LINE.NAME' : ',',
                           'LINE.NUMBER' : ',',
                           'LINE.DIRECTION' : ',',
                           'LINE.SPREAD' : ',',
                           'JOB.NAME' : ',',
                           'JOB.FOR' : ',',
                           'JOB.BY' : ',',
                           'JOB.NUMBER' : ',',
                           'GDP.File' : ',',
                           'GDP.SN' : ',',
                           'GDP.TCARDSN' : ',',
                           'GDP.NUMCARD' : ',',
                           'GDP.ADCARDSN' : ',',
                           'GDP.ADCARDSND' : ',',
                           'GDP.CARDTYPE' : ',',
                           'GDP.BAT' : ',',
                           'GDP.TEMP' : ',',
                           'GDP.HUMID' : ',',
                           'TS.NCYCLE' : ',',
                           'TS.NWAVEFORM' : ',',
                           'TS.DECFAC' : ',',
                           'TX.SN,NONE' : ',',
                           'TX.STN' : ',',
                           'TX.FREQ' : ',',
                           'TX.DUTY' : ',',
                           'TX.AMP' : ',',
                           'TX.SHUNT' : ','}

    #==================================================
    def check_sampling_rate(self, zt_list):
        """
        check to make sure the sampling rate is the same for all channels

        Arguments:
        -----------
            **zt_list** : list of Zen3D instances

        Outputs:
        --------
            **None** : raises an error if sampling rates are not all the same
        """
        nz = len(zt_list)

        df_list = np.zeros(nz)
        for ii, zt in enumerate(zt_list):
            df_list[ii] = zt.df

        # pairwise comparison matrix: entry (j, i) is True when channel
        # i has the same sampling rate as channel j
        tf_array = np.zeros((nz, nz))

        for jj in range(nz):
            tf_array[jj] = np.in1d(df_list, [df_list[jj]])

        false_test = np.where(tf_array==False)

        if len(false_test[0]) != 0:
            # NOTE(review): this error path looks broken -- indexing a
            # python list with a numpy array and concatenating a Zen3D
            # object onto a string will both raise; verify intent
            raise IOError('Sampling rates are not the same for all channels '+\
                          'Check file(s)'+zt_list[false_test[0]])

    #==================================================
    def check_time_series(self, zt_list, decimate=1):
        """
        check to make sure timeseries line up with eachother.

        Aligns each channel to the latest common gps start time, trims
        all channels to the shortest common length and returns the data
        stacked into one (n_points, n_channels) array.
        """
        n_fn = len(zt_list)

        #test start time
        #st_list = np.array([int(zt.date_time[0][-2:]) for zt in zt_list])
        st_list = np.array([int(zt.gps_time[0]) for zt in zt_list])
        time_max = max(st_list)
        #time_max = np.where(st_list==st_list.max())[0]

        #get the number of seconds each time series is off by
        skip_dict = {}
        for ii, zt in enumerate(list(zt_list)):
            try:
                skip_dict[ii] = np.where(zt.gps_time==time_max)[0][0]
            except IndexError:
                # NOTE(review): removing from zt_list while keeping the
                # original enumeration index in skip_dict shifts later
                # indices -- verify when more than one file is skipped
                zt_list.remove(zt_list[ii])
                print '***SKIPPING {0} '.format(zt.fn)
                print '   because it does not contain correct gps time'
                print '   {0} --> {1}'.format(time_max,
                                              zt.get_date_time(zt.gps_week,
                                              time_max))

        #change data by amount needed
        for ii, zt in zip(skip_dict.keys(), zt_list):
            if skip_dict[ii] != 0:
                skip_points = skip_dict[ii]*zt.df
                print 'Skipping {0} points for {1}'.format(skip_points,
                                                           zt.ch_cmp)
                zt.time_series = zt.time_series[skip_points:]
                zt.gps_diff = zt.gps_diff[skip_dict[ii]:]
                zt.gps_list = zt.gps_list[skip_dict[ii]:]
                zt.date_time = zt.date_time[skip_dict[ii]:]
                zt.gps_time = zt.gps_time[skip_dict[ii]:]

        #test length of time series
        ts_len_list = np.array([len(zt.time_series) for zt in zt_list])

        #get the smallest number of points in the time series
        ts_min = ts_len_list.min()

        #make a time series array for easy access
        ts_min /= decimate

        ts_array = np.zeros((ts_min, n_fn))

        #trim the time series if needed
        for ii, zt in enumerate(zt_list):
            if decimate > 1:
                zt.time_series = sps.resample(zt.time_series,
                                              zt.time_series.shape[0]/decimate,
                                              window='hanning')
            if len(zt.time_series) != ts_min:
                ts_trim = zt.time_series[:ts_min]
            else:
                ts_trim = zt.time_series
            zt.time_series = ts_trim

            ts_array[:, ii] = ts_trim

            if self.verbose:
                print 'TS length for channel {0} '.format(zt.ch_number)+\
                      '({0}) '.format(zt.ch_cmp)+\
                      '= {0}'.format(len(ts_trim))
                print '    T0 = {0}\n'.format(zt.date_time[0])
            self.log_lines.append(' '*4+\
                                  'TS length for channel {0} '.format(zt.ch_number)+\
                                  '({0}) '.format(zt.ch_cmp)+\
                                  '= {0}'.format(len(ts_trim)))
            self.log_lines.append(', T0 = {0}\n'.format(zt.date_time[0]))

        # NOTE(review): 'is not 1' relies on CPython small-int caching;
        # should be '!= 1'.  Also ts_min was already divided by decimate
        # above and each channel was resampled in the loop, so this
        # second resample looks like a double decimation -- verify
        if decimate is not 1:
            ts_array = sps.resample(ts_array, ts_min/decimate,
                                    window='hanning')
            ts_min = ts_array.shape[0]

        return ts_array, ts_min

    #==================================================
    def write_cache_file(self, fn_list, save_fn, station='ZEN', decimate=1):
        """
        write a cache file from given filenames

        Reads each .Z3D file, aligns the channels, fills in the metadata
        dictionary and writes the navigation, metadata, calibration and
        time-series blocks in Zonge cache-file binary format.
        """
        #sort the files so they are in order
        fn_sort_list = []
        for cs in self.chn_order:
            for fn in fn_list:
                if cs in fn.lower():
                    fn_sort_list.append(fn)

        fn_list = fn_sort_list
        print fn_list

        n_fn = len(fn_list)
        self.zt_list = []
        for fn in fn_list:
            zt1 = Zen3D(fn=fn)
            zt1.verbose = self.verbose
            try:
                zt1.read_3d()
            except ZenGPSError:
                # retry with a looser gps-time tolerance
                zt1._seconds_diff = 59
                zt1.read_3d()
            self.zt_list.append(zt1)

            #fill in meta data from the time series file
            self.meta_data['DATA.DATE0'] = ','+zt1.date_time[0].split(',')[0]
            self.meta_data['DATA.TIME0'] = ','+zt1.date_time[0].split(',')[1]
            self.meta_data['TS.ADFREQ'] = ',{0}'.format(int(zt1.df))
            self.meta_data['CH.FACTOR'] += ','+self._ch_factor
            self.meta_data['CH.GAIN'] += ','+self._ch_gain
            self.meta_data['CH.CMP'] += ','+zt1.ch_cmp.upper()
            self.meta_data['CH.LENGTH'] += ','+zt1.ch_length
            self.meta_data['CH.EXTGAIN'] += ',1'
            self.meta_data['CH.NOTCH'] += ',NONE'
            self.meta_data['CH.HIGHPASS'] += ',NONE'
            self.meta_data['CH.LOWPASS'] += ','+\
                                       self._ch_lowpass_dict[str(int(zt1.df))]
            self.meta_data['CH.ADCARDSN'] += ','+zt1.ch_adcard_sn
            self.meta_data['CH.NUMBER'] += ',{0}'.format(zt1.ch_number)
            self.meta_data['RX.STN'] += ','+zt1.rx_stn

        #make sure all files have the same sampling rate
        self.check_sampling_rate(self.zt_list)

        #make sure the length of time series is the same for all channels
        self.ts, ts_len = self.check_time_series(self.zt_list,
                                                 decimate=decimate)

        self.meta_data['TS.NPNT'] = ',{0}'.format(ts_len)

        #get the file name to save to
        if save_fn[-4:] == '.cac':
            self.save_fn = save_fn
        elif save_fn[-4] == '.':
            raise ZenInputFileError('File extension needs to be .cac, not'+\
                                    save_fn[-4:])
        else:
            # default name: station_YYYYMMDD_hhmmss_df.cac in a 'Merged'
            # subdirectory of save_fn
            general_fn = station+'_'+\
                         self.meta_data['DATA.DATE0'][1:].replace('-','')+\
                         '_'+self.meta_data['DATA.TIME0'][1:].replace(':','')+\
                         '_'+self.meta_data['TS.ADFREQ'][1:]+'.cac'

            if os.path.basename(save_fn) != 'Merged':
                save_fn = os.path.join(save_fn, 'Merged')
                if not os.path.exists(save_fn):
                    os.mkdir(save_fn)
            self.save_fn = os.path.join(save_fn, general_fn)

        # NOTE(review): file() is the python 2 builtin; open() would be
        # needed under python 3
        cfid = file(self.save_fn, 'wb+')
        #--> write navigation records first
        cfid.write(struct.pack('<i', self._nav_len))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['nav']))
        # zero-filled navigation payload
        for nd in range(self._nav_len-2):
            cfid.write(struct.pack('<b', 0))
        cfid.write(struct.pack('<i', self._nav_len))

        #--> write meta data (keys written in sorted order)
        meta_str = ''.join([key+self.meta_data[key]+'\n'
                             for key in np.sort(self.meta_data.keys())])

        meta_len = len(meta_str)

        cfid.write(struct.pack('<i', meta_len+2))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['meta']))
        cfid.write(meta_str)
        cfid.write(struct.pack('<i', meta_len+2))

        #--> write calibrations (all-zero placeholder calibration)
        cal_data1 = 'HEADER.TYPE,Calibrate\nCAL.VER,019\nCAL.SYS,0000,'+\
                   ''.join([' 0.000000: '+'0.000000      0.000000,'*3]*27)
        cal_data2 = '\nCAL.SYS,0000,'+\
                    ''.join([' 0.000000: '+'0.000000      0.000000,'*3]*27)

        cal_data = cal_data1+(cal_data2*(n_fn-1))
        cal_len = len(cal_data)

        cfid.write(struct.pack('<i', cal_len+2))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['cal']))
        cfid.write(cal_data[:-1]+'\n')
        cfid.write(struct.pack('<i', cal_len+2))

        #--> write data
        # payload bytes = points * channels * 4 bytes + 2 (type field)
        ts_block_len = int(ts_len)*n_fn*4+2

        #--> Need to scale the time series into counts cause that is apparently
        #    what MTFT24 expects
        self.ts = self.ts.astype(np.int32)

        #--> make sure none of the data is above the allowed level
        # (clamp to just under the int32 limit)
        self.ts[np.where(self.ts>2.14e9)] = 2.14e9
        self.ts[np.where(self.ts<-2.14e9)] = -2.14e9

        #--> write time series block
        cfid.write(struct.pack('<i', ts_block_len))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['ts']))

        #--> need to pack the data as signed integers
        for zz in range(ts_len):
            cfid.write(struct.pack('<'+'i'*n_fn, *self.ts[zz]))

        cfid.write(struct.pack('<i', ts_block_len))

        cfid.close()

        if self.verbose:
            print 'Saved File to: ', self.save_fn
        self.log_lines.append('='*72+'\n')
        self.log_lines.append('Saved File to: \n')
        self.log_lines.append(' '*4+'{0}\n'.format(self.save_fn))
        self.log_lines.append('='*72+'\n')

    #==================================================
    def rewrite_cache_file(self):
        """
        rewrite a cache file if parameters changed

        assuming data that was read in is in counts.  Writes to a new
        unique filename derived from self.save_fn; the block layout is
        the same as write_cache_file().
        """
        self.save_fn_rw = mtfh.make_unique_filename(self.save_fn)

        cfid = file(self.save_fn_rw, 'wb+')

        n_fn = self.ts.shape[1]

        #--> write navigation records first
        cfid.write(struct.pack('<i', self._nav_len))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['nav']))
        for nd in range(self._nav_len-2):
            cfid.write(struct.pack('<b', 0))
        cfid.write(struct.pack('<i', self._nav_len))

        #--> write meta data
        # NOTE(review): here meta_data values are assumed to be lists
        # (as produced by read_cache), unlike the strings used in
        # write_cache_file -- verify before mixing the two workflows
        meta_str = ''.join([key+','+','.join(self.meta_data[key])+'\n'
                             for key in np.sort(self.meta_data.keys())
                             if key != ''])

        meta_len = len(meta_str)
        cfid.write(struct.pack('<i', meta_len+2))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['meta']))
        cfid.write(meta_str)
        cfid.write(struct.pack('<i', meta_len+2))

        #--> write calibrations
        cal_data1 = 'HEADER.TYPE,Calibrate\nCAL.VER,019\nCAL.SYS,0000,'+\
                   ''.join([' 0.000000: '+'0.000000      0.000000,'*3]*1)
        cal_data2 = '\nCAL.SYS,0000,'+\
                    ''.join([' 0.000000: '+'0.000000      0.000000,'*3]*1)

        cal_data = cal_data1+(cal_data2*(self.ts.shape[1]-1))
        cal_len = len(cal_data)

        cfid.write(struct.pack('<i', cal_len+2))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['cal']))
        cfid.write(cal_data[:-1]+'\n')
        cfid.write(struct.pack('<i', cal_len+2))

        #--> write data
        ts_block_len = self.ts.shape[0]*n_fn*4+2

        #--> make sure none of the data is above the allowed level
        self.ts[np.where(self.ts>2.14e9)] = 2.14e9
        self.ts[np.where(self.ts<-2.14e9)] = -2.14e9

        #--> write time series block
        cfid.write(struct.pack('<i', ts_block_len))
        cfid.write(struct.pack('<i', self._flag))
        cfid.write(struct.pack('<h', self._type_dict['ts']))
        for zz in range(self.ts.shape[0]):
            cfid.write(struct.pack('<'+'i'*n_fn, *self.ts[zz]))

        cfid.write(struct.pack('<i', ts_block_len))

        cfid.close()

        print 'Rewrote {0}\n to {1}'.format(self.save_fn, self.save_fn_rw)

    #==================================================
    def read_cache_metadata(self, cache_fn):
        """
        read only the meta data from the cache file

        Reads just the first 1050 bytes, which covers the navigation
        block and the metadata block, validating each block's leading
        and trailing lengths along the way.  Fills self.nav_data and
        self.meta_data (values stored as lists of strings).
        """
        self.save_fn = cache_fn
        #open cache file to read in as a binary file
        cfid = file(cache_fn, 'rb')

        #read into a long string
        cdata = cfid.read(1050)

        #--> read navigation data
        nav_block = np.fromstring(cdata[0:self._stamp_len],
                                  dtype=self._data_type)

        #get starting and ending indices for navigation block
        ii = int(self._stamp_len)
        jj = self._stamp_len+nav_block['len']-2
        self.nav_data = np.fromstring(cdata[ii:jj], dtype=np.int8)

        #get indicies for length of block
        ii = int(jj)
        jj = ii+4
        nav_len_check = np.fromstring(cdata[ii:jj], np.int32)
        if nav_len_check != nav_block['len']:
            if self.verbose:
                print 'Index for second navigation length is {0}'.format(ii)
            raise CacheNavigationError('Navigation length in data block are'
                                       'not equal: {0} != {1}'.format(
                                       nav_block['len'], nav_len_check))

        #--> read meta data
        ii = int(jj)
        jj = ii+self._stamp_len

        meta_block = np.fromstring(cdata[ii:jj], dtype=self._data_type)
        ii = int(jj)
        jj = ii+meta_block['len']-2
        self.meta_data = {}
        meta_list = cdata[ii:jj].split('\n')
        for mm in meta_list:
            mfind = mm.find(',')
            self.meta_data[mm[0:mfind]] = [ms.strip() for ms in
                                            mm[mfind+1:].split(',')]

        #get index for second length test
        ii = int(jj)
        jj = ii+4
        meta_len_check = np.fromstring(cdata[ii:jj], dtype=np.int32)
        if meta_len_check != meta_block['len']:
            if self.verbose:
                print 'Index for second meta length is {0}'.format(ii)
            raise CacheMetaDataError('Meta length in data blocks are not '
                                     'equal: {0} != {1}'.format(
                                     meta_block['len'], meta_len_check))
        cfid.close()

    #==================================================
    def read_cache(self, cache_fn):
        """
        read a cache file

        Reads the whole file and walks the navigation, metadata,
        calibration and time-series blocks in order, validating each
        block's leading/trailing length pair.  Fills self.nav_data,
        self.meta_data, self.cal_data and self.ts (reshaped to
        (n_points, n_channels) using the CH.CMP metadata entry).
        """
        self.save_fn = cache_fn
        #open cache file to read in as a binary file
        cfid = file(cache_fn, 'rb')

        #read into a long string
        cdata = cfid.read()

        #--> read navigation data
        nav_block = np.fromstring(cdata[0:self._stamp_len],
                                  dtype=self._data_type)

        #get starting and ending indices for navigation block
        ii = int(self._stamp_len)
        jj = self._stamp_len+nav_block['len']-2
        self.nav_data = np.fromstring(cdata[ii:jj], dtype=np.int8)

        #get indicies for length of block
        ii = int(jj)
        jj = ii+4
        nav_len_check = np.fromstring(cdata[ii:jj], np.int32)
        if nav_len_check != nav_block['len']:
            if self.verbose:
                print 'Index for second navigation length is {0}'.format(ii)
            raise CacheNavigationError('Navigation length in data block are'
                                       'not equal: {0} != {1}'.format(
                                       nav_block['len'], nav_len_check))

        #--> read meta data
        ii = int(jj)
        jj = ii+self._stamp_len

        meta_block = np.fromstring(cdata[ii:jj], dtype=self._data_type)
        ii = int(jj)
        jj = ii+meta_block['len']-2
        self.meta_data = {}
        meta_list = cdata[ii:jj].split('\n')
        for mm in meta_list:
            mfind = mm.find(',')
            self.meta_data[mm[0:mfind]] = mm[mfind+1:].split(',')

        #get index for second length test
        ii = int(jj)
        jj = ii+4
        meta_len_check = np.fromstring(cdata[ii:jj], dtype=np.int32)
        if meta_len_check != meta_block['len']:
            if self.verbose:
                print 'Index for second meta length is {0}'.format(ii)
            raise CacheMetaDataError('Meta length in data blocks are not'
                                     'equal: {0} != {1}'.format(
                                     meta_block['len'], meta_len_check))

        #--> read calibrations
        ii = int(jj)
        jj = ii+self._stamp_len
        cal_block = np.fromstring(cdata[ii:jj], dtype=self._data_type)

        ii = int(jj)
        jj = ii+cal_block['len']-2
        self.cal_data = cdata[ii:jj]

        ii = int(jj)
        jj = ii+4
        cal_len_check = np.fromstring(cdata[ii:jj], dtype=np.int32)
        if cal_len_check != cal_block['len']:
            if self.verbose:
                print 'Index for second cal length is {0}'.format(ii)
            raise CacheCalibrationError('Cal length in data blocks are not'
                                        'equal: {0} != {1}'.format(
                                        cal_block['len'], cal_len_check))

        #--> read data
        ii = int(jj)
        jj = ii+self._stamp_len

        ts_block = np.fromstring(cdata[ii:jj], dtype=self._data_type)

        #get time series data
        ii = int(jj)
        jj = ii+ts_block['len']-2
        self.ts = np.fromstring(cdata[ii:jj], dtype=self._ts_dtype)
        #resize time series to be length of each channel
        num_chn = len(self.meta_data['ch.cmp'.upper()])
        if self.ts.shape[0]%num_chn != 0:
            print 'Trimming TS by {0} points'.format(self.ts.shape[0]%num_chn)
        self.ts = np.resize(self.ts, (int(self.ts.shape[0]/num_chn), num_chn))

        ii = int(jj)
        jj = ii+4
        ts_len_check = np.fromstring(cdata[ii:jj], dtype=np.int32)
        if ts_len_check != ts_block['len']:
            if self.verbose:
                print 'Index for second ts length is {0}'.format(ii)
            raise CacheTimeSeriesError('ts length in data blocks are not'
                                       'equal: {0} != {1}'.format(
                                       ts_block['len'], ts_len_check))
#==============================================================================
# read and write a zen schedule
#==============================================================================
class ZenSchedule(object):
    """
    deals with reading, writing and copying schedule
    Creates a repeating schedule based on the master_schedule. It will
    then change the first scheduling action to coincide with the master
    schedule, such that all deployed boxes will have the same schedule.
    :Example: ::
        >>> import mtpy.usgs.zen as zen
        >>> zs = zen.ZenSchedule()
        >>> zs.write_schedule('MT01', dt_offset='2013-06-23,04:00:00')
    ====================== ====================================================
    Attributes             Description
    ====================== ====================================================
    ch_cmp_dict            dictionary for channel components with keys being
                           the channel number and values being the channel
                           label
    ch_num_dict            dictionary for channel components whith keys
                           being channel label and values being channel number
    df_list                sequential list of sampling rates to repeat in
                           schedule
    df_time_list           sequential list of time intervals to measure for
                           each corresponding sampling rate
    dt_format              date and time format. *default* is
                           YYY-MM-DD,hh:mm:ss
    dt_offset              start date and time of schedule in dt_format
    gain_dict              dictionary of gain values for channel number
    initial_dt             initial date, or dummy zero date for scheduling
    light_dict             dictionary of light color values for schedule
    master_schedule        the schedule that all data loggers should schedule
                           at. Will taylor the schedule to match the master
                           schedule according to dt_offset
    meta_dict              dictionary for meta data
    meta_keys              keys for meta data dictionary
    sa_keys                keys for schedule actions
    sa_list                list of schedule actions including time and df
    sr_dict                dictionary of sampling rate values
    verbose                [ True | False ] True to print information to
                           console
    ====================== ====================================================
    """
    def __init__(self):
        self.verbose = True
        # sampling-rate code table: keys are rates in Hz (as strings),
        # values are the Zen internal code written to the schedule file
        self.sr_dict = {'256':'0', '512':'1', '1024':'2', '2048':'3',
                        '4096':'4'}
        # gain setting mm corresponds to an amplification of 2**mm
        self.gain_dict = dict([(mm, 2**mm) for mm in range(7)])
        # field order of a 'scheduleaction' line in the .cfg file
        self.sa_keys = ['date', 'time', 'resync_yn', 'log_yn', 'tx_duty',
                        'tx_period', 'sr', 'gain', 'nf_yn']
        self.sa_list = []
        # channel number --> component label
        # NOTE(review): '3' and '6' both map to 'hz'; ch_num_dict below
        # therefore ends with 'hz' -> '6' (later duplicate wins) -- confirm
        # '6' is really a second hz channel and not a typo.
        self.ch_cmp_dict = {'1':'hx', '2':'hy', '3':'hz', '4':'ex', '5':'ey',
                            '6':'hz'}
        # inverse mapping: component label --> channel number
        self.ch_num_dict = dict([(self.ch_cmp_dict[key], key)
                                 for key in self.ch_cmp_dict])
        # metadata keys/defaults written as 'METADATA key,value|...' lines
        self.meta_keys = ['TX.ID', 'RX.STN', 'Ch.Cmp', 'Ch.Number',
                          'Ch.varAsp']
        self.meta_dict = {'TX.ID':'none', 'RX.STN':'01', 'Ch.Cmp':'HX',
                          'Ch.Number':'1', 'Ch.varAsp':50}
        # status-light settings written at the end of each .cfg file
        self.light_dict = {'YellowLight':0,
                           'BlueLight':1,
                           'RedLight':0,
                           'GreenLight':1}
        # datetime_fmt is a module-level format constant
        self.dt_format = datetime_fmt
        # dummy zero date used as the origin for relative scheduling
        self.initial_dt = '2000-01-01,00:00:00'
        # default start of schedule is "now" in UTC
        self.dt_offset = time.strftime(datetime_fmt ,time.gmtime())
        # default cycle of sampling rates and recording durations
        self.df_list = (4096, 1024, 256)
        self.df_time_list = ('00:05:00','00:15:00','05:40:00')
        self.master_schedule = self.make_schedule(self.df_list,
                                                  self.df_time_list,
                                                  repeat=21)
    #==================================================
    def read_schedule(self, fn):
        """
        read zen schedule file

        Parses scheduleaction, METADATA, offset and *Light lines from the
        .cfg file *fn*, filling sa_list, meta_dict, offset and light_dict.
        """
        # Python 2 builtin file(); NOTE(review): handle is never closed
        sfid = file(fn, 'r')
        lines = sfid.readlines()
        for line in lines:
            if line.find('scheduleaction') == 0:
                # 'scheduleaction v1,v2,...' -> dict keyed by sa_keys order
                line_list = line.strip().split(' ')[1].split(',')
                sa_dict = {}
                for ii, key in enumerate(self.sa_keys):
                    sa_dict[key] = line_list[ii]
                self.sa_list.append(sa_dict)
            elif line.find('metadata'.upper()) == 0:
                # 'METADATA key,value|key,value|...' (trailing empty field
                # after the last '|' is skipped by [:-1])
                line_list = line.strip().split(' ')[1].split('|')
                for md in line_list[:-1]:
                    md_list = md.strip().split(',')
                    self.meta_dict[md_list[0]] = md_list[1]
            elif line.find('offset') == 0:
                line_str = line.strip().split(' ')
                self.offset = line_str[1]
            elif line.find('Light') > 0:
                line_list = line.strip().split(' ')
                try:
                    # only overwrite lights whose name is already known
                    self.light_dict[line_list[0]]
                    self.light_dict[line_list[0]] = line_list[1]
                except KeyError:
                    pass
    #==================================================
    def add_time(self, date_time, add_minutes=0, add_seconds=0, add_hours=0,
                 add_days=0):
        """
        add time to a time string
        assuming date_time is in the format YYYY-MM-DD,HH:MM:SS

        Returns a datetime.datetime object (not a string).
        """
        fulldate = datetime.datetime.strptime(date_time, self.dt_format)
        fulldate = fulldate + datetime.timedelta(days=add_days,
                                                 hours=add_hours,
                                                 minutes=add_minutes,
                                                 seconds=add_seconds)
        return fulldate
    #==================================================
    def make_schedule(self, df_list, df_length_list, repeat=5, t1_dict=None):
        """
        make a repeated schedule given list of sampling frequencies and
        duration for each.
        Arguments:
        -----------
            **df_list** : list
                          list of sampling frequencies in Hz, note needs to be
                          powers of 2 starting at 256
            **df_length_list** : list
                                 list of durations in hh:mm:ss format
            **repeat** : int
                         number of times to repeat the sequence
            **t1_dict** : dictionary
                          dictionary returned from get_schedule_offset
        Returns:
        --------
            **time_list**: list of dictionaries with keys:
                           * 'dt' --> date and time of schedule event
                           * 'df' --> sampling rate for that event
        """
        df_list = np.array(df_list)
        df_length_list = np.array(df_length_list)
        ndf = len(df_list)
        if t1_dict is not None:
            # anchor the dummy zero date at the offset's sampling rate
            time_list = [{'dt':self.initial_dt,'df':t1_dict['df']}]
            # rotate df_list so the cycle continues from t1_dict['df']
            kk = np.where(np.array(df_list)==t1_dict['df'])[0]-ndf+1
            df_list = np.append(df_list[kk:], df_list[:kk])
            df_length_list = np.append(df_length_list[kk:], df_length_list[:kk])
            time_list.append(dict([('dt',t1_dict['dt']), ('df',df_list[0])]))
            ii = 1
        else:
            time_list = [{'dt':self.initial_dt,'df':df_list[0]}]
            ii = 0
        for rr in range(1,repeat+1):
            for df, df_length, jj in zip(df_list, df_length_list, range(ndf)):
                dtime = time.strptime(df_length, '%H:%M:%S')
                # next event starts when the previous one ends
                ndt = self.add_time(time_list[ii]['dt'],
                                    add_hours=dtime.tm_hour,
                                    add_minutes=dtime.tm_min,
                                    add_seconds=dtime.tm_sec)
                # jj-ndf+1 wraps around so the event is tagged with the
                # rate of the *next* entry in the cycle
                time_list.append({'dt':ndt.strftime(self.dt_format),
                                  'df':df_list[jj-ndf+1]})
                ii += 1
        # fill in the remaining schedule-action fields for every event
        for nn, ns in enumerate(time_list):
            sdate, stime = ns['dt'].split(',')
            ns['date'] = sdate
            ns['time'] = stime
            ns['log_yn'] = 'Y'
            ns['nf_yn'] = 'Y'
            ns['sr'] = self.sr_dict[str(ns['df'])]
            ns['tx_duty'] = '0'
            ns['tx_period'] = '0'
            ns['resync_yn'] = 'Y'
            ns['gain'] = '0'
        return time_list
    #==================================================
    def get_schedule_offset(self, time_offset, schedule_time_list):
        """
        gets the offset in time from master schedule list and time_offset so
        that all schedules will record at the same time according to master
        schedule list schedule_time_list
        Attributes:
        -----------
            **time_offset** : hh:mm:ss
                              the time offset given to the zen reciever
            **schedule_time_list** : list
                                     list of actual schedule times returned
                                     from make_schedule
        Returns:
        --------
            **s1** : dictionary
                     dictionary with keys:
                         * 'dt' --> date and time of offset from next schedule
                                    event from schedule_time_list
                         * 'df' --> sampling rate of that event
        """
        dt_offset = '{0},{1}'.format('2000-01-01', time_offset)
        t0 = time.mktime(time.strptime('2000-01-01,00:00:00', self.dt_format))
        for ii, tt in enumerate(schedule_time_list):
            ssec = time.mktime(time.strptime(tt['dt'], self.dt_format))
            osec = time.mktime(time.strptime(dt_offset, self.dt_format))
            if ssec > osec:
                # time remaining until the next master-schedule event
                sdiff = time.localtime(t0+(ssec-osec))
                t1 = self.add_time('2000-01-01,00:00:00',
                                   add_hours=sdiff.tm_hour,
                                   add_minutes=sdiff.tm_min,
                                   add_seconds=sdiff.tm_sec)
                # df of the event that is in progress at the offset time
                s1 = {'dt':t1.strftime(self.dt_format),
                      'df':schedule_time_list[ii-1]['df']}
                return s1
        # NOTE(review): implicitly returns None when the offset is beyond
        # every event in schedule_time_list -- callers do not check for this.
    #==================================================
    def write_schedule(self, station, clear_schedule=True,
                       clear_metadata=True, varaspace=100,
                       savename=0, dt_offset=None,
                       df_list=None,
                       df_time_list=None,
                       repeat=8, gain=0):
        """
        write a zen schedule file
        **Note**: for the older boxes use 'Zeus3Ini.cfg' for the savename
        Arguments:
        ----------
            **station** : int
                          station name must be an integer for the Zen, can
                          be changed later
            **clear_schedule** : [ True | False ]
                                 write the line clearschedule in .cfg file
            **clear_metadata** : [ True | False ]
                                 write the line metadata clear in .cfg file
            **varaspace** : electrode spacing in meters, can be changed later
            **savename** : [ 0 | 1 | 2 | string]
                           * 0 --> saves as zenini.cfg
                           * 1 --> saves as Zeus2Ini.cfg
                           * 2 --> saves as ZEN.cfg
                           * string --> saves as the string, note the zen
                                        boxes look for either 0 or 1, so this
                                        option is useless
            **dt_offset** : YYYY-MM-DD,hh:mm:ss
                            date and time off offset to start the scheduling.
                            if this is none then current time on computer is
                            used. **In UTC Time**
                            **Note**: this will shift the starting point to
                                      match the master schedule, so that all
                                      stations have the same schedule.
            **df_list** : list
                          list of sampling rates in Hz
            **df_time_list** : list
                               list of time intervals corresponding to df_list
                               in hh:mm:ss format
            **repeat** : int
                         number of time to repeat the cycle of df_list
            **gain** : int
                       gain on instrument, 2 raised to this number.
        Returns:
        --------
            * writes .cfg files to any connected SD card according to channel
              number and ch_num_dict
        """
        if dt_offset is not None:
            self.dt_offset = dt_offset
        # shift the first schedule action so this box lines up with the
        # master schedule
        s1_dict = self.get_schedule_offset(self.dt_offset.split(',')[1],
                                           self.master_schedule)
        if df_list is not None:
            self.df_list = df_list
        if df_time_list is not None:
            self.df_time_list = df_time_list
        self.master_schedule = self.make_schedule(self.df_list,
                                                  self.df_time_list,
                                                  repeat=repeat*3)
        self.sa_list = self.make_schedule(self.df_list,
                                          self.df_time_list,
                                          t1_dict=s1_dict, repeat=repeat)
        # get_drive_names is a module-level helper; presumably returns a
        # {drive letter: name} mapping for connected SD cards -- confirm
        drive_names = get_drive_names()
        self.meta_dict['RX.STN'] = station
        self.meta_dict['Ch.varAsp'] = '{0}'.format(varaspace)
        if savename == 0:
            save_name = 'zenini.cfg'
        elif savename == 1:
            # NOTE(review): the docstring says option 1 saves as
            # Zeus2Ini.cfg but the code writes Zeus3Ini.cfg -- confirm.
            save_name = 'Zeus3Ini.cfg'
        elif savename == 2:
            save_name = 'ZEN.cfg'
            # write a master copy to c:\MT first
            sfid = file(os.path.normpath(os.path.join('c:\\MT', save_name)),
                        'w')
            for sa_dict in self.sa_list:
                # convert each relative event time into an absolute one
                new_time = self.add_time(self.dt_offset,
                                         add_hours=int(sa_dict['time'][0:2]),
                                         add_minutes=int(sa_dict['time'][3:5]),
                                         add_seconds=int(sa_dict['time'][6:]))
                sa_line = ','.join([new_time.strftime(self.dt_format),
                                    sa_dict['resync_yn'],
                                    sa_dict['log_yn'],
                                    '2047',
                                    '1999999999',
                                    sa_dict['sr'],
                                    '0','0','0','y','n','n','n'])
                # NOTE(review): [:-1] drops the final character (the last
                # 'n' flag) of the joined line -- verify against the
                # expected ZEN.cfg format.
                sfid.write('scheduleaction '.upper()+sa_line[:-1]+'\n')
            meta_line = ''.join(['{0},{1}|'.format(key,self.meta_dict[key])
                                 for key in self.meta_keys])
            sfid.write('METADATA '+meta_line+'\n')
            for lkey in self.light_dict.keys():
                sfid.write('{0} {1}\n'.format(lkey, self.light_dict[lkey]))
            sfid.close()
            #print 'Wrote {0}:\{1} to {2} as {3}'.format(dd, save_name, dname,
            #                                 self.ch_cmp_dict[dname[-1]])
            # write the same schedule to every connected SD drive, tagging
            # each card's metadata with its own channel component/number
            for dd in drive_names.keys():
                dname = drive_names[dd]
                sfid = file(os.path.normpath(os.path.join(dd+':\\', save_name)),
                            'w')
                for sa_dict in self.sa_list:
                    new_time = self.add_time(self.dt_offset,
                                             add_hours=int(sa_dict['time'][0:2]),
                                             add_minutes=int(sa_dict['time'][3:5]),
                                             add_seconds=int(sa_dict['time'][6:]))
                    sa_line = ','.join([new_time.strftime(self.dt_format),
                                        sa_dict['resync_yn'],
                                        sa_dict['log_yn'],
                                        '2047',
                                        '1999999999',
                                        sa_dict['sr'],
                                        '0','0','0','y','n','n','n'])
                    sfid.write('scheduleaction '.upper()+sa_line[:-1]+'\n')
                # last character of the drive name selects the component
                self.meta_dict['Ch.Cmp'] = self.ch_cmp_dict[dname[-1]]
                self.meta_dict['Ch.Number'] = dname[-1]
                meta_line = ''.join(['{0},{1}|'.format(key,self.meta_dict[key])
                                     for key in self.meta_keys])
                sfid.write('METADATA '+meta_line+'\n')
                for lkey in self.light_dict.keys():
                    sfid.write('{0} {1}\n'.format(lkey, self.light_dict[lkey]))
                sfid.close()
                print 'Wrote {0}:\{1} to {2} as {3}'.format(dd, save_name, dname,
                                             self.ch_cmp_dict[dname[-1]])
            return
        else:
            save_name = savename
        # default path (savename 0, 1 or a string): write schedule-action
        # lines in sa_keys order plus an offsetschedule line
        for dd in drive_names.keys():
            dname = drive_names[dd]
            sfid = file(os.path.normpath(os.path.join(dd+':\\', save_name)),
                        'w')
            if clear_schedule:
                sfid.write('clearschedule\n')
            if clear_metadata:
                sfid.write('metadata clear\n')
            for sa_dict in self.sa_list:
                if gain != 0:
                    sa_dict['gain'] = gain
                # join with trailing commas then chop the final comma
                sa_line = ''.join([sa_dict[key]+',' for key in self.sa_keys])
                sfid.write('scheduleaction '+sa_line[:-1]+'\n')
            sfid.write('offsetschedule {0}\n'.format(self.dt_offset))
            self.meta_dict['Ch.Cmp'] = self.ch_cmp_dict[dname[-1]]
            self.meta_dict['Ch.Number'] = dname[-1]
            meta_line = ''.join(['{0},{1}|'.format(key,self.meta_dict[key])
                                 for key in self.meta_keys])
            sfid.write('METADATA '+meta_line+'\n')
            for lkey in self.light_dict.keys():
                sfid.write('{0} {1}\n'.format(lkey, self.light_dict[lkey]))
            sfid.close()
            print 'Wrote {0}:\{1} to {2} as {3}'.format(dd, save_name, dname,
                                             self.ch_cmp_dict[dname[-1]])
    def write_schedule_for_gui(self, zen_start=None, df_list=None,
                               df_time_list=None, repeat=8, gain=0,
                               save_path=None,
                               schedule_fn='zen_schedule.MTsch'):
        """
        write a zen schedule file
        **Note**: for the older boxes use 'Zeus3Ini.cfg' for the savename
        Arguments:
        ----------
            **zen_start** : hh:mm:ss
                            start time you want the zen to start collecting
                            data.
                            if this is none then current time on computer is
                            used. **In UTC Time**
                            **Note**: this will shift the starting point to
                                      match the master schedule, so that all
                                      stations have the same schedule.
            **df_list** : list
                          list of sampling rates in Hz
            **df_time_list** : list
                               list of time intervals corresponding to df_list
                               in hh:mm:ss format
            **repeat** : int
                         number of time to repeat the cycle of df_list
            **gain** : int
                       gain on instrument, 2 raised to this number.
        Returns:
        --------
            * writes a schedule file to input into the ZenAcq Gui
        """
        if df_list is not None:
            self.df_list = df_list
        if df_time_list is not None:
            self.df_time_list = df_time_list
        if save_path is None:
            save_path = os.getcwd()
        # make a master schedule first
        self.master_schedule = self.make_schedule(self.df_list,
                                                  self.df_time_list,
                                                  repeat=repeat*3)
        # estimate the first off set time
        t_offset_dict = self.get_schedule_offset(zen_start,
                                                 self.master_schedule)
        # make the schedule with the offset of the first schedule action
        self.sa_list = self.make_schedule(self.df_list,
                                          self.df_time_list,
                                          t1_dict=t_offset_dict,
                                          repeat=repeat)
        # make a list of lines to write to a file for ZenAcq
        zacq_list = []
        for ii, ss in enumerate(self.sa_list[:-1]):
            # duration of each event = next event's start - this start
            t0 = self._convert_time_to_seconds(ss['time'])
            t1 = self._convert_time_to_seconds(self.sa_list[ii+1]['time'])
            # events straddling midnight need a day added to the end time
            if ss['date'] != self.sa_list[ii+1]['date']:
                t1 += 24*3600
            t_diff = t1-t0
            zacq_list.append('$schline{0:.0f} = {1:.0f},{2:.0f},{3:.0f}\n'.format(
                             ii+1,
                             t_diff,
                             int(self.sr_dict[str(ss['df'])]),
                             1))
        fn = os.path.join(save_path, schedule_fn)
        fid = file(fn, 'w')
        # only the first 16 schedule lines are written
        fid.writelines(zacq_list[0:16])
        fid.close()
        print 'Wrote schedule file to {0}'.format(fn)
        print '+--------------------------------------+'
        print '| SET ZEN START TIME TO: {0} |'.format(zen_start)
        print '+--------------------------------------+'
    def _convert_time_to_seconds(self, time_string):
        """
        convert a time string given as hh:mm:ss into seconds
        """
        t_list = [float(tt) for tt in time_string.split(':')]
        t_seconds = t_list[0]*3600+t_list[1]*60+t_list[2]
        return t_seconds
#==============================================================================
# interface with birrp
#==============================================================================
class ZenBIRRP():
    """
    class to deal with Birrp from Zen outputs
    survey file is .cfg file
    Need to create a processing file which has information on how to
    process the data. See read_processing_file and BIRRP documentation
    for details.
    The program will run BIRRP from python and convert the outputs into
    an .edi file.
    Arguments:
    ------------
        **station_path** : string
                           full path to station data that will be processed
        **station** : string
                      name of station to be processes.
                      *default* is os.path.basename(station_path)
        **birrp_exe** : string
                        full path to BIRRP executable.
        **calibration_path** : string
                               full path to calibration file directory
                               In this directory should be the calibration
                               files for the coils named by the coil number.
                               You need to make these files from the
                               amtant.cal, basically it into individual files
                               which are seperated by commas (csv) files.
                               ex: Ant2344_cal.csv
        **processing_fn** : string
                            full path to processing file, see BIRRP
                            documentation and mtpy.zen.read_processing_file
                            for more details on the structure and key words.
        **survey_config_fn** : string
                               full path to survey configuration file.
                               This file contains all the important information
                               on how the data was collected. For more see
                               mtpy.utils.configfile.read_survey_configfile
        **df** : float
                 sampling rate in Hz of the data being processed.
        **rr_path** : string
                      full path to remote reference data.
        **rr_station** : string
                         name of remote reference station
                         *default* is os.path.basename(rr_path)
    ======================== ==================================================
    Attributes               Description
    ======================== ==================================================
    birrp_config_fn          configuration file written once BIRRP runs for
                             convenience if you want to rember what you did.
    birrp_exe                full path to the BIRRP executable
    birrp_dict               dictionary of birrp parameters, *default* is None
    calibration_path         full path to where calibration files exist
    calibration_list         list of coils numbers used in the measurement
    calibration_dict         dictionary of calibration values with keys
                             as coil numbers and values as calbration values.
    df                       sampling frequency (Hz))
    output_path              path to put BIRRP output files
                             *default* is station_path/BF
    processing_dict          dictionary of porcessing information from
                             processin_fn
    processing_fn            full path to processing file. This contains the
                             the information BIRRP needs to process the
                             station. For more details on what key words and
                             values would be useful see BIRRP documentation and
                             mtpy.zen.read_processing_file
    rr_path                  full path to remote reference station /home/mt/rr
    rr_station               name of remote reference station
    rr_survey_dict           dictionary of survey parameters from
                             survey_config_fn for remote reference
    script_file              full path to script file used to process BIRRP
    station                  name of station to process
    station_path             full path to station directory ex. /home/mt/mt01
    survey_config_fn         full path to survey cofiguration file which
                             contains all the important information about how
                             the data was collected. For more details see on
                             what key words and values to put in see
                             mtpy.utils.configfile.read_survey_configfile
    survey_dict              dictionary with information about survey
                             parameters from survey_config_fn
    ======================== ==================================================
    ======================== ==================================================
    Methods                  Description
    ======================== ==================================================
    get_birrp_parameters     gets the birrp parameters from processing_fn
    get_calibrations         reads in the files in calibration_path and gets
                             data for coil numbers in calibration_list
    get_survey_parameters    get the survey info from survey_config_fn
    set_remote_reference_path set the remote refernce station and get
                             survey information
    get_fn_list              get filenames of data files to process
    run_birrp                writes a script file, run's BIRRP from Python
                             and then converts the outputs of BIRRP to .edi
    write_edi_file           writes and .edi file from the outputs of BIRRP
    write_script_file        writes a script file to control how BIRRP
                             processes the data
    ======================== ==================================================
    :Example: ::
        >>> import mtpy.usgs.zen as zen
        >>> zen_bp = zen.ZenBIRRP(r"/home/mt/mt01")
        >>> zen.processing_fn = r"/home/mt/mt01/processing.txt"
        >>> zen.survey_config_fn = r"/home/mt/survey.cfg"
        >>> zen.df = 256
        >>> zen_bp.birrp_exe = r"/home/bin/birrp.exe"
        >>> zen.calibration_list = ['2234', '2244', '2254']
        >>> zen.calibration_path = r"/home/zonge/ant_calibrations"
        >>> zen.rr_path = r"/home/mt/rr01"
        >>> zen.run_birrp()
    """
    def __init__(self, station_path, **kwargs):
        self.station_path = station_path
        self.rr_path = kwargs.pop('rr_path', None)
        self.survey_config_fn = kwargs.pop('survey_config_fn', None)
        self.processing_fn = kwargs.pop('processing_fn', None)
        # hard-coded Windows default for the calibration directory
        self.calibration_path = kwargs.pop('calibration_path',
                                        r"d:\Peacock\MTData\Ant_calibrations")
        self.calibration_list = ['2254', '2264', '2274', '2284', '2294',
                                 '2304', '2314', '2324', '2334', '2344']
        self.birrp_dict = kwargs.pop('birrp_dict', None)
        # default station name is the parent directory of station_path
        self.station = kwargs.pop('station',
                        os.path.basename(os.path.dirname(self.station_path)))
        self.rr_station = None
        self.rr_survey_dict = None
        self.df = kwargs.pop('df', 256)
        self.processing_dict = kwargs.pop('processing_dict', None)
        self.survey_dict = kwargs.pop('survey_dict', None)
        # hard-coded Windows default for the BIRRP executable
        self.birrp_exe = r"c:\MinGW32-xy\Peacock\birrp52\birrp52_3pcs6e9pts_big.exe"
        self.script_file = None
        self.output_path = None
        self.birrp_config_fn = None
        self.calibration_dict = {}
    def write_processing_fn(self, station_path=None, **kwargs):
        """
        write a processing station file from the data files
        """
        # not implemented yet
        pass
    def get_calibrations(self):
        """
        get coil calibrations

        Maps each coil number in calibration_list to the first file in
        calibration_path whose name contains that number, filling
        self.calibration_dict.
        """
        for cal_fn in os.listdir(self.calibration_path):
            for cal_num in self.calibration_list:
                if cal_num in cal_fn:
                    self.calibration_dict[cal_num] = \
                                    os.path.join(self.calibration_path, cal_fn)
                    break
    def get_birrp_parameters(self, processing_fn=None):
        """
        get parameters to put into birrp from file

        Returns the processing dictionary matching self.station and self.df,
        or implicitly None when no entry matches.
        """
        if processing_fn is not None:
            self.processing_fn = processing_fn
        if self.processing_fn is None:
            raise IOError('Need to input a processing file')
        # read_processing_fn is a module-level helper defined elsewhere
        processing_list = read_processing_fn(self.processing_fn)
        for pdict in processing_list:
            if pdict['station'] == self.station and \
                float(pdict['df']) == self.df:
                return pdict
    def get_survey_parameters(self, survey_config_fn=None, rr_station=None):
        """
        get survey parameters from file

        NOTE(review): the rr_station argument is accepted but never used;
        the remote-reference lookup below reads self.rr_station instead.
        """
        if survey_config_fn is not None:
            self.survey_config_fn = survey_config_fn
        if self.survey_config_fn is None:
            raise IOError('Need to input a survey config file')
        survey_dict_list = mtcf.read_survey_configfile(self.survey_config_fn)
        try:
            self.survey_dict = survey_dict_list[self.station.upper()]
        except KeyError:
            # best effort: warn but keep going without survey info
            print 'Did not find station information in {0}'.format(
                                                        self.survey_config_fn)
        if self.rr_station is not None:
            try:
                self.rr_survey_dict = survey_dict_list[self.rr_station.upper()]
            except KeyError:
                print 'Did not find remote station information in {0}'.format(
                                                        self.survey_config_fn)
    def set_remote_reference_path(self, rr_station, rr_path=None):
        """
        set remote reference station and find survey information and filenames
        """
        self.rr_station = rr_station
        if rr_path is not None:
            self.rr_path = rr_path
            return
        # look for path if none is given
        # NOTE(review): identity comparison ('is not') between strings --
        # this relies on interning; '!=' is probably what is meant.
        if self.rr_station is not self.station:
            # walk up from station_path until the station directory is found
            rr_path = self.station_path
            kk = 0
            while os.path.basename(rr_path) != self.station and kk < 5:
                rr_path = os.path.dirname(rr_path)
                kk += 1
            # assume the remote reference lives beside the station directory
            # in <parent>/<rr_station>/TS
            self.rr_path = os.path.join(os.path.dirname(rr_path),
                                        self.rr_station, 'TS')
            if not os.path.exists(self.rr_path):
                raise IOError('Need to input rrpath, could not find it')
        else:
            self.rr_path = self.station_path
        self.get_survey_parameters()
    def get_fn_list(self, df=None, start_dt=None, end_dt=None, ncomps=5):
        """
        get the file name list to process

        Groups station files into structured arrays of ncomps channels and
        remote-reference files into pairs (hx, hy); returns at most three
        blocks of each.
        """
        if df is not None:
            self.df = df
        # component name --> row index in the structured array
        comp_dict = dict([(cc, ii)
                          for ii, cc in enumerate(['ex','ey','hz','hx','hy'])])
        rrcomp_dict = dict([(cc, ii)
                            for ii, cc in enumerate(['hx','hy'])])
        # convert the optional date-time window to epoch seconds
        if start_dt is not None:
            start_seconds = time.mktime(time.strptime(start_dt, datetime_fmt))
        else:
            start_seconds = 0
        if end_dt is not None:
            end_seconds = time.mktime(time.strptime(end_dt, datetime_fmt))
        else:
            end_seconds = 10E11
        if self.rr_path is None:
            self.rr_path = self.station_path
        fn_list = []
        ii = 0
        for fn in os.listdir(self.station_path):
            try:
                # start a fresh structured array for each group of ncomps
                # channels; NOTE(review): np.int is deprecated in newer
                # numpy -- would need np.int_ or int there.
                if np.remainder(ii, ncomps) == 0:
                    tarr = np.zeros(ncomps, dtype=[('fn','|S100'),
                                                   ('npts',np.int),
                                                   ('start_dt','|S19'),
                                                   ('end_dt','|S19')])
                header_dict = \
                        mtfh.read_ts_header(os.path.join(self.station_path,fn))
                # keep files inside the window that match the sampling rate
                if header_dict['t_min'] >= start_seconds and \
                   header_dict['t_min'] <= end_seconds and \
                   header_dict['samplingrate'] == float(self.df):
                    kk = comp_dict[header_dict['channel'].lower()]
                    tarr[kk]['fn'] = os.path.join(self.station_path,fn)
                    tarr[kk]['npts'] = int(header_dict['nsamples'])
                    ts_start_dt = time.strftime(datetime_fmt.replace(',',' '),
                                        time.localtime(header_dict['t_min']))
                    tarr[kk]['start_dt'] = ts_start_dt
                    # NOTE(review): under Python 2 this division truncates
                    # when both header values are ints -- confirm types.
                    ts_end_seconds = header_dict['t_min']+\
                        float(header_dict['nsamples']/header_dict['samplingrate'])
                    tarr[kk]['end_dt'] = time.strftime(datetime_fmt.replace(',',' '),
                                              time.localtime(ts_end_seconds))
                    ii += 1
                if ii == ncomps:
                    fn_list.append(tarr)
                    ii = 0
            except mtex.MTpyError_ts_data:
                # skip files that are not readable time-series
                pass
            except mtex.MTpyError_inputarguments:
                pass
        #get remote reference time series
        rrfn_list = []
        ii = 0
        for fn in os.listdir(self.rr_path):
            try:
                if np.remainder(ii, 2) == 0:
                    tarr = np.zeros(2, dtype=[('fn','|S100'),
                                              ('npts',np.int),
                                              ('start_dt','|S19'),
                                              ('end_dt','|S19')])
                header_dict = \
                        mtfh.read_ts_header(os.path.join(self.rr_path,fn))
                if header_dict['t_min'] >= start_seconds and \
                   header_dict['t_min'] <= end_seconds and \
                   header_dict['samplingrate'] == float(self.df):
                    try:
                        kk = rrcomp_dict[header_dict['channel'].lower()]
                        tarr[kk]['fn'] = os.path.join(self.rr_path,fn)
                        tarr[kk]['npts'] = int(header_dict['nsamples'])
                        ts_start_dt = time.strftime(datetime_fmt.replace(',',' '),
                                        time.localtime(header_dict['t_min']))
                        tarr[kk]['start_dt'] = ts_start_dt
                        ts_end_seconds = header_dict['t_min']+\
                            float(header_dict['nsamples']/header_dict['samplingrate'])
                        tarr[kk]['end_dt'] = time.strftime(datetime_fmt.replace(',',' '),
                                                time.localtime(ts_end_seconds))
                        ii += 1
                    except KeyError:
                        # only hx/hy are used for the remote reference
                        pass
                if ii == 2:
                    rrfn_list.append(tarr)
                    ii = 0
            except mtex.MTpyError_ts_data:
                print 'MTpyError_ts_data'
            except mtex.MTpyError_inputarguments:
                print 'MTpyError_inputarguments'
        # limit processing to at most 3 schedule blocks per side
        if len(fn_list) > 3:
            fn_list = fn_list[0:3]
        if len(rrfn_list) > 3:
            rrfn_list = rrfn_list[0:3]
        return fn_list, rrfn_list
    def write_script_file(self, df=None, processing_fn=None,
                          processing_dict=None, start_dt=None, end_dt=None,
                          ncomps=5, jmode=0, survey_config_fn=None):
        """
        write a script file to guide birrp

        Collects calibrations, survey and processing parameters, the file
        lists to process, then delegates to birrp.write_script_file and
        stores script_file/birrp_dict/birrp_config_fn on self.
        """
        self.get_calibrations()
        if df is not None:
            self.df = df
        #--> get survey parameters
        if survey_config_fn is not None:
            self.survey_config_fn = survey_config_fn
            self.get_survey_parameters()
        elif self.survey_dict is None:
            self.get_survey_parameters()
        #--> get processing dictionary
        if processing_fn is not None:
            self.processing_fn = processing_fn
            self.processing_dict = self.get_birrp_parameters()
        if processing_dict is not None:
            self.processing_dict = processing_dict
        if self.processing_fn is None and self.processing_dict is None:
            raise IOError('Need to input a processing file')
        #--> set jmode (how files are read in) as points
        try:
            self.processing_dict['jmode']
        except KeyError:
            self.processing_dict['jmode'] = jmode
        #make sure that deltat is set to sampling rate
        # negative value signals a sampling rate (Hz) rather than a dt
        self.processing_dict['deltat'] = -self.df
        #get start and end date and time if available
        try:
            start_dt = self.processing_dict['start_dt']
        except KeyError:
            pass
        try:
            end_dt = self.processing_dict['stop_dt']
        except KeyError:
            pass
        try:
            self.set_remote_reference_path(self.processing_dict['rrstation'])
        except KeyError:
            # no remote reference given: fall back to the station itself
            self.set_remote_reference_path(self.station)
        #get list of files to process from the station folder
        fn_list, rrfn_list = self.get_fn_list(self.df,
                                              start_dt=start_dt,
                                              end_dt=end_dt,
                                              ncomps=ncomps)
        self.processing_dict['fn_list'] = [fnlist['fn'] for fnlist in fn_list]
        self.processing_dict['rrfn_list'] = [rrfnlist['fn']
                                             for rrfnlist in rrfn_list]
        #need to skip the header string
        try:
            self.processing_dict['nskip']
        except KeyError:
            self.processing_dict['nskip'] = 1
        try:
            self.processing_dict['nskipr']
        except KeyError:
            self.processing_dict['nskipr'] = 1
        #if jmode == 0 for number of points
        if self.processing_dict['jmode'] == 0:
            # read only as many points as the shortest channel in each block
            self.processing_dict['nread'] = [fnlist['npts'].min()
                                             for fnlist in fn_list]
        #if jmode == 1 for entering start and end times
        elif self.processing_dict['jmode'] == 1:
            self.processing_dict['dstim'] = [fnlist['start_dt']
                                             for fnlist in fn_list]
            self.processing_dict['wstim'] = [fnlist['start_dt']
                                             for fnlist in fn_list]
            self.processing_dict['wetim'] = [fnlist['end_dt']
                                             for fnlist in fn_list]
        #get calibration files
        #--> HX
        try:
            self.processing_dict['hx_cal'] = \
                                self.calibration_dict[self.survey_dict['hx']]
        except KeyError:
            print 'Did not find HX calibration in {0}'.format(
                                                       self.survey_config_fn)
            self.processing_dict['hx_cal'] = self.calibration_dict['2284']
            print 'Setting calibration coil number to 2284 as default.'
        #--> HY
        try:
            self.processing_dict['hy_cal'] = \
                                self.calibration_dict[self.survey_dict['hy']]
        except KeyError:
            # NOTE(review): message says HZ but this is the HY lookup
            print 'Did not find HZ calibration in {0}'.format(
                                                       self.survey_config_fn)
            self.processing_dict['hy_cal'] = self.calibration_dict['2284']
            print 'Setting calibration coil number to 2284 as default.'
        #--> HZ
        try:
            self.processing_dict['hz_cal'] = \
                                self.calibration_dict[self.survey_dict['hz']]
        except KeyError:
            print 'Did not find HZ calibration in {0}'.format(
                                                       self.survey_config_fn)
            self.processing_dict['hz_cal'] = self.calibration_dict['2284']
            print 'Setting calibration coil number to 2284 as default.'
        if self.rr_survey_dict is not None:
            try:
                self.processing_dict['rrhx_cal'] = \
                            self.calibration_dict[self.rr_survey_dict['hx']]
            except KeyError:
                print 'Did not find RRHX calibration in {0}'.format(
                                                       self.survey_config_fn)
                self.processing_dict['rrhx_cal'] = \
                                                self.calibration_dict['2284']
                print 'Setting calibration coil number to 2284 as default.'
            try:
                self.processing_dict['rrhy_cal'] = \
                            self.calibration_dict[self.rr_survey_dict['hy']]
            except KeyError:
                print 'Did not find RRHY calibration in {0}'.format(
                                                       self.survey_config_fn)
                self.processing_dict['rrhy_cal'] = \
                                                self.calibration_dict['2284']
                print 'Setting calibration coil number to 2284 as default.'
        #set the save path to include the sampling rate
        self.output_path = os.path.join(os.path.dirname(
                                        self.processing_dict['fn_list'][0][0]),
                                        'BF_{0}'.format(self.df))
        #write script file using mtpy.processing.birrp
        script_file, birrp_dict = birrp.write_script_file(dict(self.processing_dict),
                                                  save_path=self.output_path)
        # keep the parameters used alongside the outputs for edi writing
        cfg_fn = mtfh.make_unique_filename('{0}_birrp_params.cfg'.format(
                                                          script_file[:-7]))
        mtcf.write_dict_to_configfile(birrp_dict, cfg_fn)
        print 'Wrote BIRRP config file for edi file to {0}'.format(cfg_fn)
        self.birrp_config_fn = cfg_fn
        self.script_file = script_file
        self.birrp_dict = birrp_dict
    def run_birrp(self, script_file=None, birrp_exe=None):
        """
        run birrp given the specified files

        Writes a script file if one does not exist, runs the BIRRP
        executable, then converts the outputs into an .edi file
        (stored as self.edi_fn).
        """
        if script_file is not None:
            self.script_file = script_file
        if self.script_file is None:
            self.write_script_file()
        if birrp_exe is not None:
            self.birrp_exe = birrp_exe
        birrp.run(self.birrp_exe, self.script_file)
        self.edi_fn = self.write_edi_file(self.output_path,
                                          self.survey_config_fn,
                                          self.birrp_config_fn)
    def write_edi_file(self, birrp_output_path=None, survey_config_fn=None,
                       birrp_config_fn=None):
        """
        write an edi file from outputs of birrp

        Returns the full path of the written .edi file.
        """
        # NOTE(review): when an output path is given but self.output_path is
        # unset, this re-runs BIRRP entirely -- confirm that is intended.
        if birrp_output_path is not None and self.output_path is None:
            self.run_birrp()
        elif birrp_output_path is not None:
            self.output_path = birrp_output_path
        if survey_config_fn is None and self.survey_config_fn is None:
            self.get_survey_parameters()
        elif survey_config_fn is not None:
            self.survey_config_fn = survey_config_fn
        if self.birrp_config_fn is None and birrp_config_fn is None:
            self.write_script_file()
        elif birrp_config_fn is not None:
            self.birrp_config_fn = birrp_config_fn
        edi_fn = birrp.convert2edi(self.station,
                                   self.output_path,
                                   self.survey_config_fn,
                                   self.birrp_config_fn)
        return edi_fn
#==============================================================================
# Error instances for Zen
#==============================================================================
class ZenGPSError(Exception):
    """Raised when GPS timing in Zen data is inconsistent or missing."""
class ZenSamplingRateError(Exception):
    """Raised when sampling rates between channels or files disagree."""
class ZenInputFileError(Exception):
    """Raised for problems with Zen input files."""
class CacheNavigationError(Exception):
    """Raised when the navigation block of a cache file is invalid."""
class CacheMetaDataError(Exception):
    """Raised when the meta data block of a cache file is invalid."""
class CacheCalibrationError(Exception):
    """Raised when the calibration block of a cache file is invalid."""
class CacheTimeSeriesError(Exception):
    """Raised when the time series block of a cache file is invalid."""
#==============================================================================
# make a class to go from Z3d to .edi
#==============================================================================
class BIRRP_processing(object):
    """
    Parameter container for a BIRRP processing run.

    Holds default BIRRP input parameters (see the BIRRP manual for their
    meanings), locates coil calibration files by serial number, and builds
    the processing dictionary handed to ``birrp.write_script_file``.

    Keyword arguments other than ``calibration_path`` are ignored.
    """

    def __init__(self, **kwargs):
        # basic BIRRP input parameters; see the BIRRP manual
        self.jmode = 0
        self.nskip = 1
        self.nskipr = 1
        self.calibration_path = kwargs.pop('calibration_path',
                                         r"d:\Peacock\MTData\Ant_calibrations")
        # ANT coil serial numbers for which calibration files may exist
        self.calibration_list = ['2254', '2264', '2274', '2284', '2294',
                                 '2304', '2314', '2324', '2334', '2344']

        self.mcomps = 5
        self.elecori = "EX,EY"
        self.tbw = 2
        self.ainuin = .9999
        self.magtype = 'bb'
        self.nfft = 2**18
        self.nsctmax = 14
        self.ilev = 0
        self.nar = 5
        self.nrr = 0
        self.c2thresb = 0.45

    def get_calibrations(self, calibration_path=None):
        """
        Return {coil_serial_number: calibration_file_path} for every coil
        serial number in self.calibration_list whose number appears in a
        file name inside self.calibration_path.
        """
        if calibration_path is not None:
            self.calibration_path = calibration_path

        calibration_dict = {}
        for cal_fn in os.listdir(self.calibration_path):
            for cal_num in self.calibration_list:
                if cal_num in cal_fn:
                    calibration_dict[cal_num] = \
                                    os.path.join(self.calibration_path, cal_fn)
                    # each file matches at most one serial number
                    break
        return calibration_dict

    def get_processing_dict(self, fn_birrp_list, hx=2284, hy=2284, hz=2284):
        """
        Build a processing dictionary for birrp.write_script_file.

        Arguments:
        -----------
            **fn_birrp_list** : list of structured arrays (one per schedule
                                block) with at least 'fn' and 'npts' fields.
            **hx, hy, hz** : coil serial numbers used for each magnetic
                             channel; falls back to 2284 if no calibration
                             file is found.

        Returns the instance __dict__ so it can be fed straight into the
        script writer.
        """
        comp_dict = {4:{'EX':0, 'EY':1, 'HX':2, 'HY':3},
                     5:{'EX':0, 'EY':1, 'HZ':2, 'HX':3, 'HY':4}}
        rr_comp_dict = {'HX':0, 'HY':1}

        self.fn_list = [fn_list['fn'] for fn_list in fn_birrp_list]

        # need to sort the fn list so that the files are in the correct
        # order for input and output as defined by birrp
        for ii, f_list in enumerate(self.fn_list):
            sort_list = list(f_list)
            num_comps = len(f_list)
            for fn in f_list:
                # component is encoded in the last two characters of the name
                key = fn[-2:]
                sort_list[comp_dict[num_comps][key.upper()]] = fn
            self.fn_list[ii] = sort_list

        # get remote reference file names, same as input, just hx and hy
        self.rrfn_list = []
        for fn_list in fn_birrp_list:
            rr_list = [1, 2]
            for fn in fn_list['fn']:
                key = fn[-2:].upper()
                if key == 'HX' or key == 'HY':
                    rr_list[rr_comp_dict[key]] = fn
            self.rrfn_list.append(rr_list)

        # read only as many points as the shortest channel in each block
        self.nread = [fn_list['npts'].min() for fn_list in fn_birrp_list]
        self.mcomps = len(fn_birrp_list[0])

        if self.mcomps == 5:
            self.magori = "HZ,HX,HY"
        elif self.mcomps == 4:
            self.magori = "HX,HY"
        else:
            raise IOError('Number of components is {0}'.format(self.mcomps))

        # get calibrations for coil responses
        cal_dict = self.get_calibrations()

        #get calibration files
        #--> HX
        try:
            self.hx_cal = cal_dict[str(hx)]
            self.rrhx_cal = cal_dict[str(hx)]
        except KeyError:
            print('Did not find HX calibration for {0}'.format(hx))
            self.hx_cal = cal_dict['2284']
            self.rrhx_cal = cal_dict['2284']
            print('Setting calibration coil number to 2284 as default.')
        #--> HY
        try:
            self.hy_cal = cal_dict[str(hy)]
            self.rrhy_cal = cal_dict[str(hy)]
        except KeyError:
            # bug fix: message said 'HX' in the original (copy-paste)
            print('Did not find HY calibration for {0}'.format(hy))
            self.hy_cal = cal_dict['2284']
            self.rrhy_cal = cal_dict['2284']
            print('Setting calibration coil number to 2284 as default.')
        #--> HZ
        try:
            self.hz_cal = cal_dict[str(hz)]
        except KeyError:
            # bug fix: message said 'HX' in the original (copy-paste)
            print('Did not find HZ calibration for {0}'.format(hz))
            self.hz_cal = cal_dict['2284']
            print('Setting calibration coil number to 2284 as default.')

        return self.__dict__
class Survey_Config(object):
    """
    Container for survey-wide configuration values (location, electrode
    and coil setup, logger settings) that end up in the station .cfg file.

    Any keyword argument overrides the attribute of the same name.
    """

    def __init__(self, **kwargs):
        # magnetic (b) channel instrument settings
        self.b_instrument_amplification = 1
        self.b_instrument_type = 'coil'
        self.b_logger_gain = 1
        self.b_logger_type = 'zen'
        self.b_xaxis_azimuth = 0
        self.b_yaxis_azimuth = 90
        self.box = 24
        self.date = '01/01/00'
        # electric (e) channel instrument settings
        self.e_instrument_amplification = 1
        self.e_instrument_type = 'Cu-CuSO4 electrodes'
        self.e_logger_gain = 1
        self.e_logger_type = 'zen'
        self.e_xaxis_azimuth = 0
        self.e_xaxis_length = 100
        self.e_yaxis_azimuth = 90
        self.e_yaxis_length = 100
        # station location and identification
        self.elevation = 2113.2
        self.hx = 2324
        self.hy = 2314
        self.hz = 2334
        self.lat = 37.8861
        self.location = 'Earth'
        self.lon = -119.05417
        self.network = 'USGS'
        self.notes = 'Generic config file'
        self.sampling_interval = 'all'
        self.station = 'mb000'
        self.station_type = 'mt'
        self.save_path = None

        # user-supplied values win over the defaults above
        for key, value in kwargs.items():
            setattr(self, key, value)

    def write_survey_config_file(self, save_path=None):
        """
        Write all attributes to <save_path>/<station>.cfg and return the
        written file's path.
        """
        if save_path is not None:
            self.save_path = save_path

        cfg_fn = os.path.join(self.save_path, '{0}.cfg'.format(self.station))
        mtcfg.write_dict_to_configfile({self.station: self.__dict__}, cfg_fn)
        print('Wrote survey config file to {0}'.format(cfg_fn))

        return cfg_fn
class Z3D_to_edi(object):
    """
    Convert a station's .Z3D files into an .edi file.

    Pipeline (see process_data): .Z3D --> mtpy ascii time series -->
    BIRRP script files --> run BIRRP --> .edi --> response plots.
    """

    def __init__(self, station_dir=None, **kwargs):
        # directory containing the station's .Z3D files
        self.station_dir = station_dir
        #ZenBIRRP.__init__(self, self.station_dir)
        self.survey_config = Survey_Config(save_path=self.station_dir)
        self.survey_config_fn = None
        self.birrp_config_fn = None
        self.birrp_exe = r"c:\MinGW32-xy\Peacock\birrp52\birrp52_3pcs6e9pts.exe"
        self.coil_cal_path = r"c:\MT\Ant_calibrations"
        self.num_comp = 5

    def make_survey_config_file(self, survey_config_dict=None):
        """
        Write a survey configuration file from self.survey_config and
        store its path in self.survey_config_fn.
        """
        self.survey_config_fn = self.survey_config.write_survey_config_file()

    def get_schedules_fn_from_dir(self, station_ts_dir):
        """
        Scan a directory of mtpy TS files and group them into schedule
        blocks (see get_schedules_fn).
        """
        self.station_dir = station_ts_dir

        fn_arr = np.zeros(len(os.listdir(station_ts_dir)),
                          dtype=[('fn','|S100'),
                                 ('npts',np.int),
                                 ('start_dt','|S19'),
                                 ('end_dt','|S19'),
                                 ('df', np.float)])
        fn_count = 0
        for fn in os.listdir(station_ts_dir):
            fn = os.path.join(station_ts_dir, fn)
            try:
                header_dict = mtfh.read_ts_header(fn)
                fn_arr[fn_count]['fn'] = fn
                fn_arr[fn_count]['npts'] = header_dict['nsamples']
                fn_arr[fn_count]['df'] = header_dict['samplingrate']
                start_sec = header_dict['t_min']
                num_sec = float(header_dict['nsamples'])/\
                                                   header_dict['samplingrate']
                fn_arr[fn_count]['start_dt'] = time.strftime(datetime_fmt,
                                                time.localtime(start_sec))
                fn_arr[fn_count]['end_dt'] = time.strftime(datetime_fmt,
                                                time.localtime(start_sec+\
                                                num_sec))
                fn_count += 1
            except mtex.MTpyError_ts_data:
                print(' Skipped {0}'.format(fn))
            except mtex.MTpyError_inputarguments:
                print(' Skipped {0}'.format(fn))

        # be sure to trim the array of entries left empty by skipped files
        fn_arr = fn_arr[np.nonzero(fn_arr['npts'])]

        return self.get_schedules_fn(fn_arr)

    def get_schedules_fn(self, fn_arr):
        """
        Separate out the different schedule blocks and frequencies so
        they can be processed independently.

        Returns
        ---------
            **schedule_fn_dict** : dictionary
                                   keys are sampling rates and values are
                                   lists of structured arrays (fn, npts,
                                   start_dt, end_dt), one per schedule
                                   block.
        """
        # get the sampling rates used
        s_keys = set(fn_arr['df'])

        # make a dictionary with keys as the sampling rates
        s_dict = dict([(skey, []) for skey in s_keys])

        # loop over the sampling rates and find the schedule blocks
        for df in s_keys:
            # find starting dates for sampling rate
            s_dates = set(fn_arr['start_dt'][np.where(fn_arr['df']==df)])
            for sdate in s_dates:
                s_fn_arr = fn_arr[np.where(fn_arr['start_dt']==sdate)]
                s_fn_birrp_arr = np.zeros(len(s_fn_arr),
                                          dtype=[('fn','|S100'),
                                                 ('npts',np.int),
                                                 ('start_dt','|S19'),
                                                 ('end_dt','|S19')])
                s_fn_birrp_arr['fn'] = s_fn_arr['fn']
                # truncate every channel to the shortest one in the block
                s_fn_birrp_arr['npts'][:] = s_fn_arr['npts'].min()
                s_fn_birrp_arr['start_dt'][:] = sdate
                start_seconds = time.mktime(time.strptime(sdate,
                                                          datetime_fmt))

                end_seconds = start_seconds+s_fn_arr['npts'].min()/float(df)
                # NOTE(review): start_dt uses datetime_fmt but end_dt uses
                # datetime_sec -- confirm the two module constants agree
                s_fn_birrp_arr['end_dt'][:] = time.strftime(datetime_sec,
                                                time.localtime(end_seconds))
                s_dict[df].append(s_fn_birrp_arr)

        return s_dict

    def make_mtpy_ascii_files(self, station_dir=None, fmt='%.8',
                              station_name='mb', notch_dict=None,
                              df_list=None, max_blocks=3, ex=100., ey=100.,):
        """
        makes mtpy_mt files from .Z3D files

        Arguments:
        -----------
            **station_dir** : full path to .Z3D files (overrides
                              self.station_dir)
            **station_name** : prefix for station names
            **fmt** : format of data numbers for mt_files
                      (NOTE(review): never forwarded to the writer and the
                      default '%.8' is not a valid format -- confirm intent)
            **notch_dict** : notch filter parameters passed through to
                             write_ascii_mt_file (default empty)
            **df_list** : list of sampling rates to keep; None keeps all
            **max_blocks** : maximum schedule blocks per sampling rate

        Outputs:
        --------
            **fn_arr** : structured array (station, npts, df, start_dt,
                         comp, fn), trimmed of empty entries
            **fn_lines** : log lines describing each converted file
        """
        if station_dir is not None:
            self.station_dir = station_dir

        # avoid the shared mutable default the original used
        if notch_dict is None:
            notch_dict = {}

        fn_list = [os.path.join(self.station_dir, fn)
                    for fn in os.listdir(self.station_dir)
                    if fn[-4:] == '.Z3D']
        if len(fn_list) == 0:
            raise IOError('Could not find any .Z3D files in {0}'.format(
                           self.station_dir))

        # make an array that has all the information about each file
        fn_arr = np.zeros(len(fn_list),
                          dtype=[('station','|S6'),
                                 ('npts',np.int),
                                 ('df', np.int),
                                 ('start_dt', '|S22'),
                                 ('comp','|S2'),
                                 ('fn','|S100')])
        fn_lines = []
        z3d_count = 0
        for ii, fn in enumerate(fn_list):
            # bug fix: guard df_list against None before len(); the
            # original raised TypeError when called with the default
            if df_list is not None and \
               z3d_count > len(df_list)*self.num_comp*max_blocks-1:
                break
            if df_list is not None:
                zd = Zen3D(fn)
                zd.read_header()
                # only keep requested sampling rates
                if zd.header.ad_rate in df_list:
                    zd.read_z3d()
                    z3d_count += 1
                else:
                    continue
            else:
                zd = Zen3D(fn)
                zd.read_z3d()

            # record which coil/dipole was connected to each channel
            if zd.metadata.ch_cmp.lower() == 'hx':
                self.survey_config.hx = zd.metadata.ch_number
            if zd.metadata.ch_cmp.lower() == 'hy':
                self.survey_config.hy = zd.metadata.ch_number
            if zd.metadata.ch_cmp.lower() == 'hz':
                self.survey_config.hz = zd.metadata.ch_number
            if zd.metadata.ch_cmp.lower() == 'ex':
                self.survey_config.e_xaxis_length = zd.metadata.ch_length
            if zd.metadata.ch_cmp.lower() == 'ey':
                self.survey_config.e_yaxis_length = zd.metadata.ch_length

            # get station configuration from the first Z3D file
            if ii == 0:
                self.survey_config.lat = zd.header.lat
                self.survey_config.lon = zd.header.long
                self.survey_config.date = zd.schedule.Date.replace('-','/')
                self.survey_config.box = int(zd.header.box_number)

            #write mtpy mt file
            zd.write_ascii_mt_file(notch_dict=notch_dict, ex=ex, ey=ey)

            #create lines to write to a log file
            station = zd.metadata.rx_xyz0.split(':')[0]
            fn_arr[ii]['station'] = '{0}{1}'.format(station_name, station)
            fn_arr[ii]['npts'] = zd.time_series.shape[0]
            fn_arr[ii]['df'] = zd.df
            fn_arr[ii]['start_dt'] = zd.zen_schedule
            fn_arr[ii]['comp'] = zd.metadata.ch_cmp.lower()
            fn_arr[ii]['fn'] = zd.fn_mt_ascii
            fn_lines.append(''.join(['--> station: {0}{1}\n'.format(station_name, station),
                                     ' ts_len = {0}\n'.format(zd.time_series.shape[0]),
                                     ' df = {0}\n'.format(zd.df),
                                     ' start_dt = {0}\n'.format(zd.zen_schedule),
                                     ' comp = {0}\n'.format(zd.metadata.ch_cmp),
                                     ' fn = {0}\n'.format(zd.fn)]))

        # ascii files are written into a TS sub-folder
        self.station_dir = os.path.join(self.station_dir, 'TS')
        self.survey_config.save_path = self.station_dir
        # write survey configuration file
        self.survey_config.write_survey_config_file()

        return fn_arr[np.nonzero(fn_arr['npts'])], fn_lines

    def write_script_files(self, fn_birrp_dict, save_path=None):
        """
        Write one BIRRP script file per sampling rate in fn_birrp_dict
        and return the list of script file paths.  A BIRRP parameter
        .cfg file is written next to each script; the last one written
        is stored in self.birrp_config_fn.
        """
        if save_path is None:
            save_path = os.path.join(self.station_dir, 'BF')
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        s_keys = fn_birrp_dict.keys()
        script_fn_list = []
        for skey in s_keys:
            # one sub-folder per sampling rate
            bf_path = os.path.join(save_path, '{0:.0f}'.format(skey))
            fn_birrp_arr = fn_birrp_dict[skey]
            pro_obj = BIRRP_processing()
            pro_obj.calibration_path = self.coil_cal_path
            pro_obj.station = self.survey_config.station
            # deltat is negative for sampling rates given in Hz
            pro_obj.deltat = -float(skey)
            pro_dict = pro_obj.get_processing_dict(fn_birrp_arr,
                                                   hx=self.survey_config.hx,
                                                   hy=self.survey_config.hy,
                                                   hz=self.survey_config.hz)

            #write script file using mtpy.processing.birrp
            script_fn, birrp_dict = birrp.write_script_file(pro_dict,
                                                            save_path=bf_path)
            script_fn_list.append(script_fn)

            cfg_fn = mtfh.make_unique_filename('{0}_birrp_params.cfg'.format(
                                                             script_fn[:-7]))
            mtcfg.write_dict_to_configfile(birrp_dict, cfg_fn)
            print('Wrote BIRRP config file for edi file to {0}'.format(cfg_fn))

            self.birrp_config_fn = cfg_fn

        return script_fn_list

    def run_birrp(self, script_fn_list=None, birrp_exe=None):
        """
        Run BIRRP for every script file given and convert the outputs to
        .edi; the resulting edi path(s) are stored in self.edi_fn.
        """
        if script_fn_list is None:
            raise IOError('Need to input a script file or list of script files')

        if birrp_exe is not None:
            self.birrp_exe = birrp_exe

        if type(script_fn_list) is list:
            self.edi_fn = []
            for script_fn in script_fn_list:
                birrp.run(self.birrp_exe, script_fn)

                output_path = os.path.dirname(script_fn)
                self.edi_fn.append(self.write_edi_file(output_path,
                                      survey_config_fn=self.survey_config_fn,
                                      birrp_config_fn=self.birrp_config_fn))
        elif type(script_fn_list) is str:
            birrp.run(self.birrp_exe, script_fn_list)

            # bug fix: the original referenced `script_fn`, which is not
            # defined in this branch (NameError)
            output_path = os.path.dirname(script_fn_list)
            self.edi_fn = self.write_edi_file(output_path,
                                      survey_config_fn=self.survey_config_fn,
                                      birrp_config_fn=self.birrp_config_fn)

    def write_edi_file(self, birrp_output_path, survey_config_fn=None,
                       birrp_config_fn=None):
        """
        Convert BIRRP outputs in birrp_output_path into an .edi file and
        return its path.  If no survey config file is known, one is
        searched for in the parent TS directory.
        """
        # bug fix: the original tested `self.survey_config_fn is not None`,
        # which clobbered an already-found config file with None
        if survey_config_fn is not None:
            self.survey_config_fn = survey_config_fn
        # bug fix: the original silently ignored birrp_config_fn
        if birrp_config_fn is not None:
            self.birrp_config_fn = birrp_config_fn

        if self.survey_config_fn is None:
            # look for a .cfg file in the TS directory above the outputs
            ts_find = birrp_output_path.find('TS')
            if ts_find > 0:
                ts_dir = birrp_output_path[0:ts_find+2]
                for fn in os.listdir(ts_dir):
                    if fn[-4:] == '.cfg':
                        self.survey_config_fn = os.path.join(ts_dir, fn)

        edi_fn = birrp.convert2edi(self.survey_config.station,
                                   birrp_output_path,
                                   self.survey_config_fn,
                                   self.birrp_config_fn)

        return edi_fn

    def plot_responses(self, edi_fn_list=None):
        """
        Plot all the edi files that were created, skipping files too
        small (< 3000 bytes) to contain real results.
        """
        if edi_fn_list is not None:
            self.edi_fn = edi_fn_list

        if type(self.edi_fn) is list:
            # bug fix: the original removed entries from self.edi_fn while
            # iterating over it, which skips elements; build a filtered
            # list instead
            self.edi_fn = [edi_fn for edi_fn in self.edi_fn
                           if os.path.getsize(edi_fn) >= 3000]
            if len(self.edi_fn) == 0:
                raise ValueError('No good .edi files where produced')
            resp_plot = plotnresponses.PlotMultipleResponses(fn_list=self.edi_fn,
                                                         plot_style='compare',
                                                         plot_tipper='yri')
        elif type(self.edi_fn) is str:
            if os.path.getsize(self.edi_fn) < 3000:
                raise ValueError('No good .edi files where produced')
            resp_plot = plotresponse.PlotResponse(fn=self.edi_fn,
                                                  plot_tipper='yri')

        return resp_plot

    def process_data(self, df_list=None, max_blocks=2, num_comp=5):
        """
        From the input station directory, convert files to ascii, run
        through BIRRP, convert to .edi files and plot.  Returns the
        response plot object.
        """
        st = time.time()

        self.num_comp = num_comp

        # allow a single sampling rate to be given as a scalar
        if df_list is not None:
            if type(df_list) is float or type(df_list) is int or\
               type(df_list) is str:
                df_list = [df_list]

        # make files into mtpy files
        z3d_fn_list, log_lines = self.make_mtpy_ascii_files(df_list=df_list,
                                                       max_blocks=max_blocks)

        # get all information from mtpy files
        schedule_dict = self.get_schedules_fn(z3d_fn_list)

        # write script files for birrp
        sfn_list = self.write_script_files(schedule_dict)

        # run birrp
        self.run_birrp(sfn_list)

        # plot the output
        r_plot = self.plot_responses()

        et = time.time()
        print('took {0} seconds'.format(et-st))

        return r_plot
#==============================================================================
# read processing file
#==============================================================================
def read_processing_fn(processing_fn, delimiter='\t'):
    """
    Read a delimited processing-parameter file and return its rows as a
    list of dictionaries.

    The first line is a header of parameter names; each following
    non-empty line holds one station's processing parameters.  Keys are
    the lower-cased header names; values are the raw strings with any
    double quotes stripped.

    Typical columns include station, df, start_dt, stop, rrstation,
    mcomps, magori, elecori, tbw, ainuin, magtype, nfft, nsctmax, ilev,
    nar, nrr, c2thresb, ... — see the BIRRP manual for the meaning of
    each parameter, and A. D. Chave and D. J. Thomson [1989, 2003, 2004]
    for bounded-influence robust processing.

    Arguments:
    -----------
        **processing_fn** : string (full path to file)
                            delimited text file with appropriate
                            information.

        **delimiter** : string
                        column delimiter, default tab.

    Outputs:
    ----------
        **plist** : list of dictionaries with keywords taken from the
                    header line of the file.

    :Example File: ::

        station	df	start_dt	stop	rrstation ...
        mb037	256	2013-06-28,00:00:00	2013-06-28,18:00:00	mbrr ...
    """
    with open(processing_fn, 'r') as pfid:
        plines = pfid.readlines()

    # bug fix: the header was split with a hard-coded '\t' while the data
    # rows used `delimiter`, breaking any non-tab delimiter
    pkeys = plines[0].rstrip().split(delimiter)

    plist = []
    for pline in plines[1:]:
        pstr = pline.rstrip().split(delimiter)
        # skip blank/short lines
        if len(pstr) > 1:
            pdict = {}
            for kk, pkey in enumerate(pkeys):
                pdict[pkey.lower()] = pstr[kk].replace('"', '')
            plist.append(pdict)

    return plist
#==============================================================================
# get the external drives for SD cards
#==============================================================================
def get_drives():
    """
    Return a list of the logical drive letters present on this machine.

    Windows only (uses win32api).

    Outputs:
    ----------
        **drives** : list of drives as letters

    :Example: ::

        >>> import mtpy.usgs.zen as zen
        >>> zen.get_drives()
    """
    bitmask = win32api.GetLogicalDrives()
    # bit ii of the mask is set when the drive letter at index ii exists
    return [letter
            for ii, letter in enumerate(string.uppercase)
            if bitmask & (1 << ii)]
#==============================================================================
# get the names of the drives which should correspond to channels
#==============================================================================
def get_drive_names():
    """
    Get a mapping of drive letters to volume names, assuming the SD
    cards are named by box and channel (volume name contains 'CH').

    Outputs:
    ----------
        **drive_dict** : dictionary
                         keys are the drive letters and values are the
                         drive names; None when no matching drive is
                         found.

    :Example: ::

        >>> import mtpy.usgs.zen as zen
        >>> zen.get_drives_names()
    """
    drives = get_drives()

    drive_dict = {}
    for drive in drives:
        try:
            drive_name = win32api.GetVolumeInformation(drive+':\\')[0]
            # NOTE(review): find('CH') > 0 skips names that *start* with
            # 'CH' -- presumably names are '<box>CH<n>'; confirm
            if drive_name.find('CH') > 0:
                drive_dict[drive] = drive_name
        except Exception:
            # unreadable volume (e.g. empty card-reader slot) -- skip
            pass

    # bug fix: the original tested `drives == {}` (a list compared to a
    # dict, never true), so an empty result was returned as {} and the
    # "no drives" message never printed
    if len(drive_dict) == 0:
        print('No external drives detected, check the connections.')
        return None
    return drive_dict
#==============================================================================
# copy files from SD cards
#==============================================================================
def copy_from_sd(station, save_path=r"d:\Peacock\MTData",
                 channel_dict={'1':'HX', '2':'HY', '3':'HZ',
                               '4':'EX', '5':'EY', '6':'HZ'},
                 copy_date=None, copy_type='all'):
    """
    copy files from sd cards into a common folder (save_path)

    do not put an underscore in station, causes problems at the moment

    Arguments:
    -----------
        **station** : string
                      full name of station from which data is being saved

        **save_path** : string
                       full path to save data to

        **channel_dict** : dictionary
                           keys are the channel numbers as strings and the
                           values are the component that corresponds to that
                           channel, values are placed in upper case in the
                           code
                           (NOTE(review): currently unused inside this
                           function -- the component is read from each
                           file's metadata instead)

        **copy_date** : YYYY-MM-DD
                        date to copy from depending on copy_type

        **copy_type** : [ 'all' | 'before' | 'after' | 'on' ]
                        * 'all' --> copy all files on the SD card
                        * 'before' --> copy files before and on this date
                        * 'after' --> copy files on and after this date
                        * 'on' --> copy files on this date only

    Outputs:
    -----------
        **fn_list** : list
                     list of filenames copied to save_path

    :Example: ::

        >>> import mtpy.usgs.zen as zen
        >>> fn_list = zen.copy_from_sd('mt01', save_path=r"/home/mt/survey_1")
    """
    # find the SD-card drives; each card is one channel
    drive_names = get_drive_names()
    if drive_names is None:
        raise IOError('No drives to copy from.')
    save_path = os.path.join(save_path,station)
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    # log everything copied for later auditing
    log_fid = file(os.path.join(save_path,'copy_from_sd.log'),'w')
    st_test = time.ctime()
    fn_list = []
    for key in drive_names.keys():
        dr = r"{0}:\\".format(key)
        print '='*25+drive_names[key]+'='*25
        log_fid.write('='*25+drive_names[key]+'='*25+'\n')
        for fn in os.listdir(dr):
            full_path_fn = os.path.normpath(os.path.join(dr, fn))
            # always keep a copy of the zen configuration file
            if fn[-4:] == '.cfg':
                shutil.copy(full_path_fn, os.path.join(save_path, fn))
            try:
                # index 6 of os.stat is st_size; files smaller than 1600
                # bytes contain no usable data
                file_size = os.stat(full_path_fn)[6]
                if file_size >= 1600L and fn.find('.cfg') == -1:
                    # read only the headers, not the whole time series,
                    # to decide whether to copy
                    zt = Zen3D(fn=full_path_fn)
                    #zt.get_info()
                    zt.read_header()
                    zt.read_schedule()
                    zt.read_metadata()
                    schedule_date = '{0}'.format(zt.schedule.Date)

                    # only copy files recorded by this station (the
                    # numeric part of the station name, station[2:])
                    if zt.metadata.rx_xyz0.find(station[2:]) >= 0:
                        fn_find = True
                        if copy_date is not None:
                            # compare dates as integers YYYYMMDD
                            cp_date = int(''.join(copy_date.split('-')))
                            fn_find = False
                            zt_date = int(''.join(schedule_date.split('-')))
                            if copy_type == 'before':
                                if zt_date <= cp_date:
                                    fn_find = True
                            elif copy_type == 'after':
                                if zt_date >= cp_date:
                                    fn_find = True
                            elif copy_type == 'on':
                                if zt_date == cp_date:
                                    fn_find = True

                        if fn_find:
                            # rename to station_date_time_df_channel.Z3D
                            channel = zt.metadata.ch_cmp.upper()
                            st = zt.schedule.Time.replace(':','')
                            sd = zt.schedule.Date.replace('-','')
                            sv_fn = '{0}_{1}_{2}_{3}_{4}.Z3D'.format(station,
                                                                     sd,
                                                                     st,
                                                                     int(zt.df),
                                                                     channel)

                            full_path_sv = os.path.join(save_path, sv_fn)
                            fn_list.append(full_path_sv)

                            shutil.copy(full_path_fn, full_path_sv)

                            print 'copied {0} to {1}\n'.format(full_path_fn,
                                                               full_path_sv)

                            #log_fid.writelines(zt.log_lines)
                            log_fid.write('copied {0} to \n'.format(full_path_fn)+\
                                          ' {0}\n'.format(full_path_sv))
                        else:
                            pass
#                            print '+++ SKIPPED {0}+++\n'.format(zt.fn)
#                            log_fid.write(' '*4+\
#                                          '+++ SKIPPED {0}+++\n'.format(zt.fn))

                    else:
                        pass
#                        print '{0} '.format(full_path_fn)+\
#                               'not copied due to bad data.'
#
#                        log_fid.write(' '*4+'***{0} '.format(full_path_fn)+\
#                                      'not copied due to bad data.\n\n')
            except WindowsError:
                # unreadable file on the card (bad sector etc.) -- log
                # and continue with the next file
                print 'Faulty file at {0}'.format(full_path_fn)
                log_fid.write('---Faulty file at {0}\n\n'.format(full_path_fn))
    log_fid.close()
    et_test = time.ctime()
    print 'Started at: {0}'.format(st_test)
    print 'Ended at: {0}'.format(et_test)
    return fn_list
#==============================================================================
# merge files into cache files for each sample block
#==============================================================================
def merge_3d_files(fn_list, save_path=None, verbose=False,
                   calibration_fn=r"c:\MT\amtant.cal"):
    """
    merge .Z3D files into cache files.  Looks through the file list and
    Combines files with the same start time and sampling rate into a
    cache file.  The calibration file is copied to the merged path for
    later use with mtft24.exe processing code.

    Arguments:
    ----------
        **fn_list** : list
                     list of files to be merged

        **save_path** : directory to save cach files to

        **verbose** : [ True | False ]
                      * True --> prints out information about the merging
                      * False--> surpresses print statements

        **calibration_fn** : string
                             full path to calibration file for ANT's

    Outputs:
    --------
        **merged_fn_list** : nested list of files that were merged together

        A log file is written to save_path\station_merged_log.log that contains
        information about the files that were merged together.

    :Example: ::

        >>> import mtpy.usgs.zen as zen
        >>> fn_list = zen.copy_from_sd('mt01', save_path=r"/home/mt/survey_1")
        >>> zen.merge_3d_files(fn_list, calibration_fn=r"/home/mt/amtant.cal")
    """
    start_time = time.ctime()
    # build one row per file: [full_path, station, date, time, df, channel]
    # (file names are station_date_time_df_channel.Z3D, see copy_from_sd)
    merge_list = np.array([[fn]+\
                          os.path.basename(fn)[:-4].split('_')
                          for fn in fn_list if fn[-4:]=='.Z3D'])

    # collapse date+time into a single start-datetime column so files that
    # started at the same moment can be grouped together
    merge_list = np.array([merge_list[:,0],
                          merge_list[:,1],
                          np.core.defchararray.add(merge_list[:,2],
                                                   merge_list[:,3]),
                          merge_list[:,4],
                          merge_list[:,5]])

    merge_list = merge_list.T

    # unique start datetimes = schedule blocks to merge
    time_counts = Counter(merge_list[:,2])
    time_list = time_counts.keys()

    log_lines = []

    merged_fn_list = []
    for tt in time_list:
        log_lines.append('+'*72+'\n')
        log_lines.append('Files Being Merged: \n')
        # all files that share this start datetime
        cache_fn_list = merge_list[np.where(merge_list==tt)[0],0].tolist()

        for cfn in cache_fn_list:
            log_lines.append(' '*4+cfn+'\n')
        if save_path is None:
            save_path = os.path.dirname(cache_fn_list[0])
            station_name = merge_list[np.where(merge_list==tt)[0][0],1]
        else:
            save_path = save_path
            station_name = 'ZEN'

        # ZenCache does the actual combining and writes the cache file
        zc = ZenCache()
        zc.verbose = verbose
        zc.write_cache_file(cache_fn_list, save_path, station=station_name)

        for zt in zc.zt_list:
            log_lines.append(zt.log_lines)

        merged_fn_list.append(zc.save_fn)
        log_lines.append('\n---> Merged Time Series Lengths and Start Time \n')
        log_lines.append(zc.log_lines)
        log_lines.append('\n')

    end_time = time.ctime()

    #copy the calibration file into the merged folder for mtft24
    try:
        copy_cal_fn = os.path.join(save_path, 'Merged',
                                    os.path.basename(calibration_fn))
    except:
        copy_cal_fn = os.path.join(save_path, os.path.basename(calibration_fn))

    shutil.copy(calibration_fn, copy_cal_fn)
    print 'copied {0} to {1}'.format(calibration_fn, copy_cal_fn)

    print 'Start time: {0}'.format(start_time)
    print 'End time: {0}'.format(end_time)

    # write the merge log next to the cache files
    if os.path.basename(save_path) != 'Merged':
        log_fid = file(os.path.join(save_path, 'Merged',
                                    station_name+'_Merged.log'), 'w')
    else:
        log_fid = file(os.path.join(save_path, station_name+'_Merged.log'),
                       'w')
    for line in log_lines:
        log_fid.writelines(line)
    log_fid.close()

    return merged_fn_list
#==============================================================================
# delete files from sd cards
#==============================================================================
def delete_files_from_sd(delete_date=None, delete_type=None,
                         delete_folder=r"d:\Peacock\MTData\Deleted",
                         verbose=True):
    """
    Delete .Z3D files from the SD cards (zenini.cfg is left in place).

    Agruments:
    -----------
        **delete_date** : YYYY-MM-DD
                         date to delete files from

        **delete_type** : [ 'all' | 'before' | 'after' | 'on' ]
                          * 'all' --> delete all files on sd card
                          * 'before' --> delete files on and before delete_date
                          * 'after' --> delete files on and after delete_date
                          * 'on' --> delete files on delete_date
                          (any other value with a delete_date deletes
                          nothing)

        **delete_folder** : string
                            full path to a folder where files will be
                            moved to just in case.  If None, files will
                            be deleted for ever.

        **verbose** : [ True | False ]
                      print the log lines when done.

    Returns:
    ---------
        **delete_fn_list** : list
                            list of deleted files.

    :Example: ::

        >>> import mtpy.usgs.zen as zen
        >>> # Delete all files before given date, forever.
        >>> zen.delete_files_from_sd(delete_date='2004/04/20',
                                     delete_type='before',
                                     delete_folder=None)
        >>> # Delete all files into a folder just in case
        >>> zen.delete_files_from_sd(delete_type='all',
                                     delete_folder=r"/home/mt/deleted_files")
    """
    drive_names = get_drive_names()
    if drive_names is None:
        raise IOError('No drives to copy from.')

    log_lines = []
    if delete_folder is not None:
        if not os.path.exists(delete_folder):
            os.mkdir(delete_folder)
        # bug fix: the original also opened a 'Log_file.log' handle here
        # that was never written to or closed (leaked handle, stray file);
        # the real log is written at the end to 'Delete_log.log'

    if delete_date is not None:
        # compare dates as integers YYYYMMDD
        delete_date = int(delete_date.replace('-', ''))

    delete_fn_list = []

    def _dispose(full_path_fn):
        # delete the file, or move it to delete_folder when one is given;
        # record the action in the shared log
        if delete_folder is None:
            os.remove(full_path_fn)
            log_lines.append('Deleted {0}\n'.format(full_path_fn))
        else:
            shutil.move(full_path_fn,
                        os.path.join(delete_folder,
                                     os.path.basename(full_path_fn)))
            log_lines.append('Moved {0} '.format(full_path_fn) +
                             'to {0}\n'.format(delete_folder))
        delete_fn_list.append(full_path_fn)

    for key in drive_names.keys():
        dr = r"{0}:\\".format(key)
        log_lines.append('='*25+drive_names[key]+'='*25+'\n')
        for fn in os.listdir(dr):
            # only .Z3D data files are candidates; zenini.cfg stays
            if fn[-4:].lower() != '.z3d':
                continue
            full_path_fn = os.path.normpath(os.path.join(dr, fn))
            zt = Zen3D(full_path_fn)
            #zt.get_info()
            if delete_type == 'all' or delete_date is None:
                _dispose(full_path_fn)
            else:
                zt_date = int(zt.schedule_date.replace('-', ''))
                if delete_type == 'before':
                    if zt_date <= delete_date:
                        _dispose(full_path_fn)
                elif delete_type == 'after':
                    if zt_date >= delete_date:
                        _dispose(full_path_fn)
                elif delete_type == 'on':
                    if zt_date == delete_date:
                        _dispose(full_path_fn)

    if delete_folder is not None:
        log_fid = open(os.path.join(delete_folder, 'Delete_log.log'), 'w')
        log_fid.writelines(log_lines)
        log_fid.close()
    if verbose:
        for lline in log_lines:
            print(lline)

    return delete_fn_list
#==============================================================================
# copy and merge Z3D files from SD cards
#==============================================================================
def copy_and_merge(station, z3d_save_path=None, merge_save_path=None,
                   channel_dict=None,
                   copy_date=None, copy_type='all'):
    """
    copy files from sd card then merge them together and run mtft24.exe

    Arguments:
    ----------
        **station** : string
                      full station name

        **z3d_save_path** : string
                          full path to save .Z3D files

        **merge_save_path** : string
                             full path to save merged cache files.  If None
                             saved to z3d_save_path\Merged

        **channel_dict** : dictionary
                           keys are the channel numbers as strings and the
                           values are the component that corresponds to that
                           channel, values are placed in upper case in the
                           code.  Defaults to
                           {'1':'HX', '2':'HY', '3':'HZ',
                            '4':'EX', '5':'EY', '6':'HZ'}

        **copy_date** : YYYY-MM-DD
                        date to copy from depending on copy_type

        **copy_type** : [ 'all' | 'before' | 'after' | 'on' ]
                        * 'all' --> copy all files on the SD card
                        * 'before' --> copy files before and on this date
                        * 'after' --> copy files on and after this date
                        * 'on' --> copy files on this date only

    Returns:
    ------------
        **mfn_list** : list
                      list of merged file names

    :Example: ::

        >>> import mpty.usgs.zen as zen
        >>> mfn_list = zen.copy_and_merge('mt01', z3d_save_path=r"/home/mt")
        >>> #copy only after a certain date
        >>> mfn_list = zen.copy_and_merge('mt01', z3d_save_path=r"/home/mt",\
                                          copy_date='2014/04/20', \
                                          copy_type='after')
    """
    # bug fix: the original used a shared mutable dict as the default
    # argument; build the default fresh on each call instead
    if channel_dict is None:
        channel_dict = {'1':'HX', '2':'HY', '3':'HZ', '4':'EX',
                        '5':'EY', '6':'HZ'}

    #--> copy files from sd cards
    cpkwargs = {}
    cpkwargs['channel_dict'] = channel_dict
    cpkwargs['copy_date'] = copy_date
    cpkwargs['copy_type'] = copy_type
    if z3d_save_path is not None:
        cpkwargs['save_path'] = z3d_save_path

    fn_list = copy_from_sd(station, **cpkwargs)

    #--> merge files into cache files
    mfn_list = merge_3d_files(fn_list, save_path=merge_save_path)

    return mfn_list
#==============================================================================
# Make mtpy_mt files
#==============================================================================
def make_mtpy_mt_files(fn_list, station_name='mb', fmt='%.8e',
                       ex=1, ey=1, notch_dict=None, ey_skip=False):
    """
    makes mtpy_mt files from .Z3D files

    Arguments:
    -----------
        **fn_list** : list of full paths to the .Z3D files to convert
        **station_name** : prefix for station names
        **fmt** : format of data numbers for mt_files
        **ex** : electric dipole length scaling for the EX channel
        **ey** : electric dipole length scaling for the EY channel
        **notch_dict** : dictionary of notch filter parameters, or None
        **ey_skip** : if True do not write a file for the 'ey' component

    Outputs:
    --------
        **fn_arr** : np.ndarray(file, length, df, start_dt)
        **fn_lines** : list of strings suitable for writing to a log file,
                       one entry per input file

    :Example: ::
        >>> import mtpy.usgs.zen as zen
        >>> fn_list = zen.copy_from_sd('mt01')
        >>> mtpy_fn = zen.make_mtpy_files(fn_list, station_name='mt')
    """
    # Use the builtin int for the dtype -- np.int was merely a deprecated
    # alias of it and has been removed in numpy >= 1.24.
    fn_arr = np.zeros(len(fn_list),
                      dtype=[('station', '|S6'), ('len', int), ('df', int),
                             ('start_dt', '|S22'), ('comp', '|S2'),
                             ('fn', '|S100')])
    fn_lines = []

    for ii, fn in enumerate(fn_list):
        zd = Zen3D(fn)

        #read in Z3D data
        try:
            zd.read_3d()
        except ZenGPSError:
            # retry once with a relaxed GPS second tolerance before giving up
            try:
                zd._seconds_diff = 59
                zd.read_3d()
            except ZenGPSError:
                pass

        if ey_skip and zd.ch_cmp == 'ey':
            pass
        else:
            #write mtpy mt file
            zd.write_ascii_mt_file(save_station=station_name,
                                   fmt=fmt,
                                   ex=ex,
                                   ey=ey,
                                   notch_dict=notch_dict)

        #create lines to write to a log file
        fn_arr[ii]['station'] = '{0}{1}'.format(station_name, zd.rx_stn)
        fn_arr[ii]['len'] = zd.time_series.shape[0]
        fn_arr[ii]['df'] = zd.df
        fn_arr[ii]['start_dt'] = zd.start_dt
        fn_arr[ii]['comp'] = zd.ch_cmp
        fn_arr[ii]['fn'] = zd.fn
        fn_lines.append(''.join(['--> station: {0}{1}\n'.format(station_name,
                                                                zd.rx_stn),
                                 ' ts_len = {0}\n'.format(zd.time_series.shape[0]),
                                 ' df = {0}\n'.format(zd.df),
                                 ' start_dt = {0}\n'.format(zd.start_dt),
                                 ' comp = {0}\n'.format(zd.ch_cmp),
                                 ' fn = {0}\n'.format(zd.fn)]))

    return fn_arr, fn_lines
#==============================================================================
# make time series loop
#==============================================================================
def make_mtpy_ts_loop(station_path, station_list, survey_file=None,
                      station_name='mb', fmt='%.8e', notch_dict=None,
                      ey_skip=False):
    """
    loop over station folder to write mtpy time series

    Arguments:
    ----------
        **station_path** : directory of station folders
        **station_list** : list of stations to process
        **survey_file** : string
                          full path to survey_config file created by
                          mtpy.utils.configfile
        **station_name** : string
                           prefix to append to station name from Z3D files
        **fmt** : string format of how the numbers are formated in new file
        **notch_dict** : dictionary
                         if the data has noise at single frequencies, such
                         as power line noise input a dictionary with keys:
                             * df --> float sampling frequency in Hz
                             * notches --> list of frequencies (Hz) to filter
                             * notchradius --> float radius of the notch in
                                               frequency domain (Hz)
                             * freqrad --> float radius to searching for peak about
                                           notch from notches
                             * rp --> float ripple of Chebyshev type 1 filter,
                                      lower numbers means less ripples
                             * dbstop_limit --> float (in decibels) limits the
                                                difference between the peak at the
                                                notch and surrounding spectra.
                                                Any difference above dbstop_limit
                                                will be filtered, anything
                                                less will not
        **ey_skip** : if True do not write files for the 'ey' component
    """
    # open() replaces the Python-2-only file() builtin; the with-block also
    # guarantees the log is closed even when a station fails to process.
    with open(os.path.join(station_path, 'TS_log.log'), 'a') as log_fid:
        if survey_file is not None:
            survey_dict = mtcf.read_survey_configfile(survey_file)

        for station in station_list:
            spath = os.path.join(station_path, station)
            # electric dipole lengths default to 1.0 when no survey
            # configuration entry exists for this station
            if survey_file is not None:
                try:
                    sdict = survey_dict[station.upper()]
                    ex = float(sdict['e_xaxis_length'])
                    ey = float(sdict['e_yaxis_length'])
                except KeyError:
                    ex = 1.
                    ey = 1.
            else:
                ex = 1.
                ey = 1.

            log_fid.write('-'*72+'\n')
            fn_list = [os.path.join(spath, fn) for fn in os.listdir(spath)
                       if fn[-3:] == 'Z3D']
            sfn_arr, sfn_lines = make_mtpy_mt_files(fn_list,
                                                    station_name=station_name,
                                                    fmt=fmt,
                                                    ex=ex,
                                                    ey=ey,
                                                    notch_dict=notch_dict,
                                                    ey_skip=ey_skip)
            log_fid.writelines(sfn_lines)
#==============================================================================
# this should capture all the print statements from zen
class Capturing(list):
    """Context manager that collects everything printed to stdout.

    While the ``with`` block is active, ``sys.stdout`` is swapped for an
    in-memory buffer; on exit the captured text is split into lines and
    appended to this list, and the real stdout is restored.
    """

    def __enter__(self):
        # Remember the real stdout, then redirect into an in-memory buffer.
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        return self

    def __exit__(self, *exc_info):
        # Harvest the buffered text line by line, then restore stdout.
        for line in self._stringio.getvalue().splitlines():
            self.append(line)
        sys.stdout = self._stdout
#==============================================================================
def compute_mt_response(survey_dir, station='mt000', copy_date=None,
birrp_exe=r"c:\MinGW32-xy\Peacock\birrp52\birrp52_3pcs6e9pts.exe",
ant_calibrations=r"c:\MT\Ant_calibrations",
process_df_list=[256],
num_comp=5):
"""
This code will down load Z3D files from a Zen that is in SD Mode,
convert the Z3D files to ascii format, then process them for each
sampling rate using Alan Chave's BIRRP code. The outputs are then
converted to .edi files and plotted.
You need 2 things to run this code:
* mtpy --> a Python package for MT and can be found at
https://github.com/geophysics/mtpy
* BIRRP executable --> you can get this from Alan Chave at WHOI
if you are using it for non-commercial projects.
..note:: This code is quite specific to my setup, so let me know what
doesn't work for you so I can generalize it.
Arguments
----------------
**survey_dir** : string
full path to the directory where you are storing
the station data. ie. /home/MT/Survey_00
**station** : string
name of the station you are down loading.
*default* is 'mt000'
**copy_date** : string
copy all files on and after this date
format is YYYY-MM-DD
*default* is None, which copies all files on the SD
cards.
**birrp_exe** : string
full path to the BIRRP executable on your machine
*default* is the location on my machine
**ant_calibrations** : string
full path to a folder that contains the coil
calibration data. These must be in seperate
.csv files for each coil named by corresponding
coil name. If you're coil is 2884, then you
need a calibration file named Ant2884_cal.csv
in which the data is freq,real,imaginary
**process_df_list** : list
list of sampling rates to process
Returns
-----------------
**rp_plot** : mtpy.imaging.plotnresponses object
ploting object of data, if you want to change how the
output is plot change the attributes of rp_plot
Outputs
-----------------
**copy_from_sd.log** : file
contains information on how files were copied
from the SD cards.
**processing.log** : file
a log file of how the program ran
**survey_dir/station/TS** : directory
contains the time series data in .ascii
format
**survey_dir/station/TS/BF** : directory
contains the processing results from
BIRRP for each sampling rate in the
data in subsequent directories
**survey_dir/station/TS/station.cfg** : file
configuration file of the
station parameters
Example
------------------------
>>> import zen_processing_data as zpd
>>> zpd.compute_mt_response(r"/home/mt/survey_00",
station='mt010',
copy_date='2015-05-22',
birrp_exe=r"/home/bin/birrp52.exe",
ant_calibrations=r"/home/ant_calibrations",
process_df_list=[1024, 256])
"""
station_dir = os.path.join(survey_dir, station)
st = time.time()
#--> Copy data from files
try:
if copy_date is None:
copy_from_sd(station, save_path=survey_dir)
else:
copy_from_sd(station, save_path=survey_dir,
copy_date=copy_date, copy_type='after')
except IOError:
print 'No files copied from SD cards'
print 'Looking in {0} for Z3D files'.format(station_dir)
#--> process data
with Capturing() as output:
z2edi = Z3D_to_edi(station_dir)
z2edi.birrp_exe = birrp_exe
z2edi.coil_cal_path = ant_calibrations
try:
rp = z2edi.process_data(df_list=process_df_list, num_comp=num_comp)
except mtex.MTpyError_inputarguments:
print '==> Data not good!! Did not produce a proper .edi file'
et = time.time()
print '--> took {0} seconds'.format(et-st)
rp = None
#--> write log file
log_fid = open(os.path.join(station_dir, 'Processing.log'), 'w')
log_fid.write('\n'.join(output))
log_fid.close()
return rp
#==============================================================================
def rename_cac_files(station_dir, station='mb'):
    """
    rename and move .cac files to something more useful

    Looks for files ending in '.cac' in *station_dir*, reads each file's
    cache metadata, and moves it into a 'Merged' subdirectory renamed to
    <station><station_number>_<gdp_date>_<gdp_time>_<sampling rate>.cac

    Raises IOError if no .cac files are found in *station_dir*.
    """
    # collect every .cac file in the directory (case-insensitive extension)
    fn_list = [os.path.join(station_dir, fn) for fn in os.listdir(station_dir)
               if fn[-4:].lower() == '.cac']

    if len(fn_list) == 0:
        raise IOError('Could not find any .cac files')

    save_path = os.path.join(station_dir, 'Merged')
    if not os.path.exists(save_path) :
        os.mkdir(save_path)

    for fn in fn_list:
        # only the metadata is needed to build the new file name
        cac_obj = Cache(fn)
        cac_obj.read_cache_metadata()
        station_name = '{0}{1}'.format(station,
                                       cac_obj.metadata.station_number)
        # strip separators so date/time are compact in the file name
        station_date = cac_obj.metadata.gdp_date.replace('-', '')
        station_time = cac_obj.metadata.gdp_time.replace(':', '')
        new_fn = '{0}_{1}_{2}_{3:.0f}.cac'.format(station_name,
                                                  station_date,
                                                  station_time,
                                                  cac_obj.metadata.ts_adfreq)
        new_fn = os.path.join(save_path, new_fn)
        shutil.move(fn, new_fn)
        print 'moved {0} to {1}'.format(fn, new_fn)
| geophysics/mtpy | mtpy/usgs/zen.py | Python | gpl-3.0 | 252,541 | [
"Gaussian"
] | fac3f6685cd4ecc37d281f2939decf4f231abb71b0a5b54b41c566d80111ff54 |
""" The TimeLeft utility allows to calculate the amount of CPU time
left for a given batch system slot. This is essential for the 'Filling
Mode' where several VO jobs may be executed in the same allocated slot.
The prerequisites for the utility to run are:
- Plugin for extracting information from local batch system
- Scale factor for the local site.
With this information the utility can calculate in normalized units the
CPU time remaining for a given slot.
"""
import os
import DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
__RCSID__ = "$Id$"
class TimeLeft( object ):
  """ Calculates the normalized CPU time left in the current batch slot.

      This generally does not run alone
  """

  #############################################################################
  def __init__( self ):
    """ Standard constructor
    """
    self.log = gLogger.getSubLogger( 'TimeLeft' )
    # This is the ratio SpecInt published by the site over 250 (the reference used for Matching)
    self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 0.0 )
    if not self.scaleFactor:
      self.log.warn( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )

    self.normFactor = gConfig.getValue( '/LocalSite/CPUNormalizationFactor', 0.0 )
    if not self.normFactor:
      self.log.warn( '/LocalSite/CPUNormalizationFactor not defined for site %s' % DIRAC.siteName() )

    # CPU and wall clock margins, which don't seem to be set anywhere
    self.cpuMargin = gConfig.getValue('/LocalSite/CPUMargin', 2)  # percent
    self.wallClockMargin = gConfig.getValue('/LocalSite/wallClockMargin', 8)  # percent

    result = self.__getBatchSystemPlugin()
    if result['OK']:
      self.batchPlugin = result['Value']
    else:
      self.batchPlugin = None
      self.batchError = result['Message']

  def getScaledCPU( self, processors = 1 ):
    """Returns the current CPU Time spend (according to batch system) scaled according
       to /LocalSite/CPUScalingFactor
    """
    # Quit if no scale factor available
    if not self.scaleFactor:
      return 0

    # Quit if Plugin is not available
    if not self.batchPlugin:
      return 0

    resourceDict = self.batchPlugin.getResourceUsage()
    if 'Value' in resourceDict:
      if resourceDict['Value']['CPU']:
        return resourceDict['Value']['CPU'] * self.scaleFactor
      elif resourceDict['Value']['WallClock']:
        # When CPU value missing, guess from WallClock and number of processors
        return resourceDict['Value']['WallClock'] * self.scaleFactor * processors

    return 0

  #############################################################################
  def getTimeLeft( self, cpuConsumed = 0.0, processors = 1 ):
    """Returns the CPU Time Left for supported batch systems. The CPUConsumed
       is the current raw total CPU.
    """
    # Quit if no scale factor available
    if not self.scaleFactor:
      return S_ERROR( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )

    if not self.batchPlugin:
      return S_ERROR( self.batchError )

    resourceDict = self.batchPlugin.getResourceUsage()
    if not resourceDict['OK']:
      self.log.warn( 'Could not determine timeleft for batch system at site %s' % DIRAC.siteName() )
      return resourceDict

    resources = resourceDict['Value']
    self.log.debug( "self.batchPlugin.getResourceUsage(): %s" % str( resources ) )
    if not resources['CPULimit'] and not resources['WallClockLimit']:
      # This should never happen
      return S_ERROR( 'No CPU or WallClock limit obtained' )

    # if one of CPULimit or WallClockLimit is missing, compute a reasonable value
    if not resources['CPULimit']:
      resources['CPULimit'] = resources['WallClockLimit'] * processors
    elif not resources['WallClockLimit']:
      resources['WallClockLimit'] = resources['CPULimit']

    # if one of CPU or WallClock is missing, compute a reasonable value
    if not resources['CPU']:
      resources['CPU'] = resources['WallClock'] * processors
    elif not resources['WallClock']:
      resources['WallClock'] = resources['CPU']

    timeLeft = 0.
    cpu = float( resources['CPU'] )
    cpuLimit = float( resources['CPULimit'] )
    wallClock = float( resources['WallClock'] )
    wallClockLimit = float( resources['WallClockLimit'] )

    validTimeLeft = enoughTimeLeft(cpu, cpuLimit, wallClock, wallClockLimit, self.cpuMargin, self.wallClockMargin)
    if validTimeLeft:
      if cpu and cpuConsumed > 3600. and self.normFactor:
        # If there has been more than 1 hour of consumed CPU and
        # there is a Normalization set for the current CPU
        # use that value to renormalize the values returned by the batch system
        # NOTE: cpuConsumed is non-zero for call by the JobAgent and 0 for call by the watchdog
        # cpuLimit and cpu may be in the units of the batch system, not real seconds... (in this case the other case won't work)
        # therefore renormalise it using cpuConsumed (which is in real seconds)
        timeLeft = ( cpuLimit - cpu ) * self.normFactor * cpuConsumed / cpu
      elif self.normFactor:
        # FIXME: this is always used by the watchdog... Also used by the JobAgent
        #        if consumed less than 1 hour of CPU
        # It was using self.scaleFactor but this is inconsistent: use the same as above
        # In case the returned cpu and cpuLimit are not in real seconds, this is however rubbish
        timeLeft = ( cpuLimit - cpu ) * self.normFactor
      else:
        # Last resort recovery...
        timeLeft = ( cpuLimit - cpu ) * self.scaleFactor

      self.log.verbose( 'Remaining CPU in normalized units is: %.02f' % timeLeft )
      return S_OK( timeLeft )
    else:
      return S_ERROR( 'No time left for slot' )

  #############################################################################
  def __getBatchSystemPlugin( self ):
    """ Using the name of the batch system plugin, will return an instance of the plugin class.
    """
    batchSystems = {'LSF':'LSB_JOBID', 'PBS':'PBS_JOBID', 'BQS':'QSUB_REQNAME', 'SGE':'SGE_TASK_ID'} # more to be added later
    name = None
    for batchSystem, envVar in batchSystems.items():
      if envVar in os.environ:
        name = batchSystem
        break
    if name is None and 'MACHINEFEATURES' in os.environ and 'JOBFEATURES' in os.environ:
      # Only use MJF if legacy batch system information not available for now
      name = 'MJF'

    if name is None:
      self.log.warn( 'Batch system type for site %s is not currently supported' % DIRAC.siteName() )
      return S_ERROR( 'Current batch system is not supported' )

    self.log.debug( 'Creating plugin for %s batch system' % ( name ) )
    batchSystemName = "%sTimeLeft" % ( name )
    try:
      batchPlugin = __import__( 'DIRAC.Core.Utilities.TimeLeft.%s' % #pylint: disable=unused-variable
                                batchSystemName, globals(), locals(), [batchSystemName] )
    except ImportError as x:
      msg = 'Could not import DIRAC.Core.Utilities.TimeLeft.%s' % ( batchSystemName )
      self.log.warn( x )
      self.log.warn( msg )
      return S_ERROR( msg )

    try:
      # Instantiate the class of the same name defined inside the plugin
      # module.  getattr() replaces the former eval() call -- identical
      # behaviour without dynamic code evaluation.
      batchInstance = getattr( batchPlugin, batchSystemName )()
    except Exception as x: #pylint: disable=broad-except
      msg = 'Could not instantiate %s()' % ( batchSystemName )
      self.log.warn( x )
      self.log.warn( msg )
      return S_ERROR( msg )

    return S_OK( batchInstance )
#############################################################################
def runCommand( cmd, timeout = 120 ):
  """Wrapper around shellCall.

  Executes *cmd* with the given *timeout* and returns S_OK(stdout) on
  success.  On a non-zero exit status the problem is logged and the most
  informative output available (stdout, stderr or the status itself) is
  returned as S_ERROR.
  """
  result = shellCall( timeout, cmd )
  if not result['OK']:
    return result

  status, stdout, stderr = result['Value'][:3]
  if not status:
    return S_OK( str( stdout ) )

  gLogger.warn( 'Status %s while executing %s' % ( status, cmd ) )
  gLogger.warn( stderr )
  # prefer stdout, then stderr, then a generic status message
  for output in ( stdout, stderr ):
    if output:
      return S_ERROR( output )
  return S_ERROR( 'Status %s while executing %s' % ( status, cmd ) )
def enoughTimeLeft(cpu, cpuLimit, wallClock, wallClockLimit, cpuMargin, wallClockMargin):
  """ Is there enough time?

      Compares the percentage of the CPU and wall-clock budgets still unused
      against the configured margins (all margins in percent).

  :returns: True/False
  """
  cpuLeft = 100 * (1. - cpu / cpuLimit)
  wallClockLeft = 100 * (1. - wallClock / wallClockLimit)
  fractionTuple = ( cpuLeft, wallClockLeft, cpuMargin, wallClockMargin )

  gLogger.verbose( 'Used CPU is %.1f s out of %.1f, Used WallClock is %.1f s out of %.1f.' % ( cpu,
                                                                                              cpuLimit,
                                                                                              wallClock,
                                                                                              wallClockLimit ) )
  gLogger.verbose( 'Remaining CPU %.02f%%, Remaining WallClock %.02f%%, margin CPU %s%%, margin WC %s%%' % fractionTuple )

  timeIsEnough = cpuLeft > cpuMargin and wallClockLeft > wallClockMargin
  if timeIsEnough:
    gLogger.verbose( 'Remaining CPU %.02f%% < Remaining WallClock %.02f%% and margins respected (%s%% and %s%%)' % fractionTuple )
  else:
    gLogger.verbose( 'Remaining CPU %.02f%% or WallClock %.02f%% fractions < margin (%s%% and %s%%) so no time left' % fractionTuple )
  return timeIsEnough
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Andrew-McNab-UK/DIRAC | Core/Utilities/TimeLeft/TimeLeft.py | Python | gpl-3.0 | 9,585 | [
"DIRAC"
] | a3829a0705f3e5cb8f0cf122b0124fc11e75c5f10e009859fdf16df87448cc19 |
# Test the Brenner potential with some hydrocarbons. -*- encoding: utf-8 -*-
from ase.structure import molecule
from ase.optimize import BFGS
from asap3 import BrennerPotential
from asap3.testtools import ReportTest
import ase.data.g2
import numpy as np
C60_data = {
'C60': {
'description': "Buckminsterfullerene, I*h symm.",
'name': "C_{60}",
# The Buckyball has two degrees of freedom, the C-C bond, and the C=C bond.
# This is an LDA-gpaw relaxed structure with bond lengths 1.437 and 1.385.
# Experimentally, the two bond lengths are 1.45 and 1.40 Angstrom.
'symbols': 'C60',
'magmoms': None,
'positions': [[ 2.2101953, 0.5866631, 2.6669504],
[ 3.1076393, 0.1577008, 1.6300286],
[ 1.3284430, -0.3158939, 3.2363232],
[ 3.0908709, -1.1585005, 1.2014240],
[ 3.1879245, -1.4574599, -0.1997005],
[ 3.2214623, 1.2230966, 0.6739440],
[ 3.3161210, 0.9351586, -0.6765151],
[ 3.2984981, -0.4301142, -1.1204138],
[-0.4480842, 1.3591484, 3.2081020],
[ 0.4672056, 2.2949830, 2.6175264],
[-0.0256575, 0.0764219, 3.5086259],
[ 1.7727917, 1.9176584, 2.3529691],
[ 2.3954623, 2.3095689, 1.1189539],
[-0.2610195, 3.0820935, 1.6623117],
[ 0.3407726, 3.4592388, 0.4745968],
[ 1.6951171, 3.0692446, 0.1976623],
[-2.1258394, -0.8458853, 2.6700963],
[-2.5620990, 0.4855202, 2.3531715],
[-0.8781521, -1.0461985, 3.2367302],
[-1.7415096, 1.5679963, 2.6197333],
[-1.6262468, 2.6357030, 1.6641811],
[-3.2984810, 0.4301871, 1.1204208],
[-3.1879469, 1.4573895, 0.1996030],
[-2.3360261, 2.5813627, 0.4760912],
[-0.5005210, -2.9797771, 1.7940308],
[-1.7944338, -2.7729087, 1.2047891],
[-0.0514245, -2.1328841, 2.7938830],
[-2.5891471, -1.7225828, 1.6329715],
[-3.3160705, -0.9350636, 0.6765268],
[-1.6951919, -3.0692581, -0.1976564],
[-2.3954901, -2.3096853, -1.1189862],
[-3.2214182, -1.2231835, -0.6739581],
[ 2.1758234, -2.0946263, 1.7922529],
[ 1.7118619, -2.9749681, 0.7557198],
[ 1.3130656, -1.6829416, 2.7943892],
[ 0.3959024, -3.4051395, 0.7557638],
[-0.3408219, -3.4591883, -0.4745610],
[ 2.3360057, -2.5814499, -0.4761050],
[ 1.6263757, -2.6357349, -1.6642309],
[ 0.2611352, -3.0821271, -1.6622618],
[-2.2100844, -0.5868636, -2.6670300],
[-1.7726970, -1.9178969, -2.3530466],
[-0.4670723, -2.2950509, -2.6175105],
[-1.3283500, 0.3157683, -3.2362375],
[-2.1759882, 2.0945383, -1.7923294],
[-3.0909663, 1.1583472, -1.2015749],
[-3.1076090, -0.1578453, -1.6301627],
[-1.3131365, 1.6828292, -2.7943639],
[ 0.5003224, 2.9799637, -1.7940203],
[-0.3961148, 3.4052817, -0.7557272],
[-1.7120629, 2.9749122, -0.7557988],
[ 0.0512824, 2.1329478, -2.7937450],
[ 2.1258630, 0.8460809, -2.6700534],
[ 2.5891853, 1.7227742, -1.6329562],
[ 1.7943010, 2.7730684, -1.2048262],
[ 0.8781323, 1.0463514, -3.2365313],
[ 0.4482452, -1.3591061, -3.2080510],
[ 1.7416948, -1.5679557, -2.6197714],
[ 2.5621724, -0.4853529, -2.3532026],
[ 0.0257904, -0.0763567, -3.5084446]]}
}
# Build a molecule database that adds the Buckyball to the standard G2 set.
my_data = ase.data.g2.data.copy()
my_data.update(C60_data)

# Molecules used to exercise the Brenner potential.
tests = ['C60', 'trans-butane', 'C2H6', 'C2H4', 'C2H2' ]
for mol in tests:
    atoms = molecule(mol, data=my_data)
    oldpos = atoms.get_positions()
    atoms.set_calculator(BrennerPotential())
    # Relax the structure with BFGS; the step budget scales with the number
    # of atoms so larger molecules get more iterations.
    dyn = BFGS(atoms, logfile=None)
    maxsteps = 20 + 3*len(atoms)
    dyn.run(steps=maxsteps, fmax=0.01)
    # Largest per-atom displacement during relaxation; expected to stay
    # small (< 0.1 A) if the starting geometry is close to the Brenner
    # minimum.
    dp = atoms.get_positions() - oldpos
    dpmax = np.sqrt((dp * dp).sum(axis=1)).max()
    print "%13s: Max movement is %.2f Å" % (mol, dpmax)
    ReportTest(mol, dpmax, 0.0, 0.1, silent=True)

ReportTest.Summary()
| auag92/n2dm | Asap-3.8.4/Test/Hydrocarbons.py | Python | mit | 4,854 | [
"ASE",
"GPAW"
] | a26fbc8675ed82faca4d9b93c45a2f44b1cc5ed2b780d82b8579a8f6134512fe |
import numpy as np
import math
import sys
import random
# Star position in parsecs
class Star:
    """A point-mass star belonging to a galaxy.

    Positions are in parsecs and masses in solar masses; *origin* is the
    Galaxy object this star was created by.
    """

    def __init__(self, mass, x, y, z, origin):
        self.mass = mass
        self.pos = np.array([x, y, z])
        self.origin = origin
        # velocity/force used to be *class* attributes, which made every
        # Star instance share the same mutable numpy arrays -- an in-place
        # update of one star's force would silently change them all.  Give
        # each instance its own arrays instead.
        self.velocity = np.array([0., 0., 0.])
        self.force = np.array([0., 0., 0.])
class Galaxy:
    """A disk galaxy: a central supermassive black hole plus a population of
    stars distributed in an exponential disk with a central bulge.

    Distances are in parsecs, masses in solar masses, velocities in pc/s.
    """

    smbh_mass = 4.2e6 ###Roughly Sagittarius A* mass in solar masses###
    # bulge dimensions in parsecs
    galaxy_bulge_width = 1000
    galaxy_bulge_height = 1000
    # disk orbital speed: 220 km/s expressed in pc/s (see comment in
    # set_star_velocity)
    main_disk_vel = 7.129e-12

    def __init__(self, width, height, x, y, z, numstars, color):
        # bulk velocity of the whole galaxy
        self.vel = np.array([0., 0., 0.])
        self.galaxy_stars = []
        self.width = width
        self.height = height
        self.pos = np.array([x, y, z])
        # orientation of the galactic plane, changed via set_pitch_roll()
        self.pitch = 0
        self.roll = 0
        self.numstars = numstars
        self.color = color
        self.set_rand_multiplier()

    def set_pitch_roll(self, pitch, roll):
        # Set the orientation (radians) of the galactic plane.
        self.pitch = pitch
        self.roll = roll

    # Updates the position of the galactic center
    def update(self, t):
        self.pos = self.pos + (self.vel * t)

    def setstardistribution(self):
        """Populate galaxy_stars with the central SMBH plus numstars-1
        randomly placed stars, each given a circular-orbit velocity."""
        smbh = Star(self.smbh_mass, self.pos[0], self.pos[1], self.pos[2], self)
        # NOTE(review): self.vel[:] on a numpy array is a *view*, not a
        # copy, so the SMBH velocity stays aliased to the galaxy velocity.
        # Presumably intended (the SMBH tracks the galaxy) -- confirm.
        smbh.velocity = self.vel[:]
        self.galaxy_stars.append(smbh)
        for i in range(1, int(self.numstars)):
            printProgress(i + 1, self.numstars, prefix="Setting Star Distributions:",
                          suffix="Completed ({}/{} stars distributed in galaxy {})".format((i + 1), int(self.numstars),
                                                                                           self.color), barLength=50)
            # Determines random x and y position for star
            dist = self.get_star_rand_num()
            angle = random.random() * 2 * math.pi
            x1 = dist * math.cos(angle)
            y1 = dist * math.sin(angle)
            # Determines z position for star
            if dist < self.galaxy_bulge_width:
                z1 = (self.galaxy_bulge_height * random.random()) - (self.galaxy_bulge_height / 2)
            else:
                z1 = (self.height * random.random()) - (self.height / 2)
            # Mass in solar masses
            mass = 1 * (0.8 + random.random() * 10)
            star_pos = np.array([self.pos[0] + x1, self.pos[1] + y1, self.pos[2] + z1])
            star_pos = self.apply_pitch_roll(star_pos)
            ts = Star(mass, star_pos[0], star_pos[1], star_pos[2], self)
            self.set_star_velocity(ts)
            self.galaxy_stars.append(ts)
        print("\n")

    # May need to add in color code things
    # Sets star velocity perpendicular to the center of the galaxy
    def set_star_velocity(self, star):
        xt = self.pos[0] - star.pos[0]
        yt = self.pos[1] - star.pos[1]
        a = np.array([xt, yt, 0])
        r = np.linalg.norm(a)
        # Initial velocity in pc/s
        # velo = 7.129e-12 #220 km/s in pc/s
        velo = self.main_disk_vel
        # Center of galaxy
        r1 = 1000
        if r < r1:
            # inside the central region the rotation speed rises linearly
            # from 50% to 100% of the disk speed
            velo *= (0.5 + (0.5 * r) / r1)
        # Set direction of velocity
        theta = math.atan(yt / xt)
        if xt < 0:
            velo *= -1
        vx = -velo * math.sin(theta)
        vy = velo * math.cos(theta)
        vz = 0
        v = self.apply_pitch_roll(np.array([vx, vy, vz]))
        star.velocity = v + self.vel

    # Used to determine density of stars in galaxy by radius
    @staticmethod
    def star_density(r):
        # exponential radial profile with a 3000 pc scale length
        return np.exp(-r / 3000)

    # Get the initial random multiplier to use for star distribution
    def set_rand_multiplier(self):
        # total (unnormalized) probability mass over all integer radii;
        # used by get_star_rand_num for inverse-transform sampling
        self.randommultiplier = 0.0
        for i in range(1, int(self.width)):
            self.randommultiplier += self.star_density(i)

    # For star distribution calculations
    def get_star_rand_num(self):
        # inverse-transform sample of star_density; returns an integer
        # radius (pc) drawn from the exponential disk profile
        n = 1
        r = random.random() * self.randommultiplier
        r -= self.star_density(n)
        while r >= 0:
            n += 1
            r -= self.star_density(n)
        return n

    def yaw_rot(self, alpha):
        # rotation matrix about the z axis
        return np.array([[math.cos(alpha), -math.sin(alpha), 0],
                         [math.sin(alpha), math.cos(alpha), 0],
                         [0, 0, 1]])

    def pitch_rot(self, beta):
        # rotation matrix about the y axis
        return np.array([[math.cos(beta), 0, math.sin(beta)],
                         [0, 1, 0],
                         [-math.sin(beta), 0, math.cos(beta)]])

    def roll_rot(self, gamma):
        # rotation matrix about the x axis
        return np.array([[1, 0, 0],
                         [0, math.cos(gamma), -math.sin(gamma)],
                         [0, math.sin(gamma), math.cos(gamma)]])

    def apply_pitch_roll(self, array):
        # apply the galaxy's pitch rotation followed by its roll rotation
        return array.dot(self.pitch_rot(self.pitch)).dot(self.roll_rot(self.roll))
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    # percent complete, formatted with the requested number of decimals
    percents = ('{0:.' + str(decimals) + 'f}').format(100 * (iteration / float(total)))
    filled = int(round(barLength * iteration / float(total)))
    bar = '=' * filled + '-' * (barLength - filled)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        # pin a final 100% line and move to a fresh line
        sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, 100, '%', suffix))
        sys.stdout.write('\n')
    sys.stdout.flush()
| samhollenbach/Galaxy2 | SimComponents.py | Python | mit | 5,780 | [
"Galaxy"
] | b9508e572c01f1c6a17c97bd67149c876859cf79361265ab6e4d4ea451453e01 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.symm import basis
from pyscf.symm import param
from pyscf import __config__
def label_orb_symm(mol, irrep_name, symm_orb, mo, s=None,
                   check=getattr(__config__, 'symm_addons_label_orb_symm_check', True),
                   tol=getattr(__config__, 'symm_addons_label_orb_symm_tol', 1e-9)):
    '''Label the symmetry of given orbitals

    irrep_name can be either the symbol or the ID of the irreducible
    representation.  If the ID is provided, it returns the numeric code
    associated with XOR operator, see :py:meth:`symm.param.IRREP_ID_TABLE`

    Args:
        mol : an instance of :class:`Mole`

        irrep_name : list of str or int
            A list of irrep ID or name,  it can be either mol.irrep_id or
            mol.irrep_name.  It can affect the return "label".
        symm_orb : list of 2d array
            the symmetry adapted basis
        mo : 2d array
            the orbitals to label

    Returns:
        list of symbols or integers to represent the irreps for the given
        orbitals

    Examples:

    >>> from pyscf import gto, scf, symm
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='ccpvdz',verbose=0, symmetry=1)
    >>> mf = scf.RHF(mol)
    >>> mf.kernel()
    >>> symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff)
    ['Ag', 'B1u', 'Ag', 'B1u', 'B2u', 'B3u', 'Ag', 'B2g', 'B3g', 'B1u']
    >>> symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, mf.mo_coeff)
    [0, 5, 0, 5, 6, 7, 0, 2, 3, 5]
    '''
    nmo = mo.shape[1]
    if s is None:
        s = mol.intor_symmetric('int1e_ovlp')
    s_mo = numpy.dot(s, mo)
    # norm[i,j] = weight of orbital j inside irrep i's symmetry-adapted basis
    norm = numpy.zeros((len(irrep_name), nmo))
    for i, csym in enumerate(symm_orb):
        moso = numpy.dot(csym.T, s_mo)
        ovlpso = reduce(numpy.dot, (csym.T, s, csym))
        try:
            s_moso = lib.cho_solve(ovlpso, moso)
        except Exception:
            # The Cholesky solve can fail for a numerically singular irrep
            # overlap; regularize the diagonal and retry.  (Catch Exception
            # rather than a bare except so KeyboardInterrupt/SystemExit
            # still propagate.)
            ovlpso[numpy.diag_indices(csym.shape[1])] += 1e-12
            s_moso = lib.cho_solve(ovlpso, moso)
        norm[i] = numpy.einsum('ki,ki->i', moso.conj(), s_moso).real
    norm /= numpy.sum(norm, axis=0)  # for orbitals which are not normalized
    iridx = numpy.argmax(norm, axis=0)
    orbsym = numpy.asarray([irrep_name[i] for i in iridx])
    logger.debug(mol, 'irreps of each MO %s', orbsym)

    if check:
        largest_norm = norm[iridx, numpy.arange(nmo)]
        orbidx = numpy.where(largest_norm < 1-tol)[0]
        if orbidx.size > 0:
            # hard failure only for badly mixed orbitals; mild symmetry
            # contamination just produces a warning
            idx = numpy.where(largest_norm < 1-tol*1e2)[0]
            if idx.size > 0:
                raise ValueError('orbitals %s not symmetrized, norm = %s' %
                                 (idx, largest_norm[idx]))
            else:
                logger.warn(mol, 'orbitals %s not strictly symmetrized.',
                            numpy.unique(orbidx))
                logger.warn(mol, 'They can be symmetrized with '
                            'pyscf.symm.symmetrize_space function.')
                logger.debug(mol, 'norm = %s', largest_norm[orbidx])
    return orbsym
def symmetrize_orb(mol, mo, orbsym=None, s=None,
                   check=getattr(__config__, 'symm_addons_symmetrize_orb_check', False)):
    '''Symmetrize the given orbitals.

    Each orbital is projected onto the symmetry-adapted basis of its own
    irreducible representation, removing the non-symmetric components.  This
    is different to :func:`symmetrize_space`, which mixes different orbitals
    to symmetrize the entire space.

    Note this function might return non-orthorgonal orbitals.
    Call :func:`symmetrize_space` to find the symmetrized orbitals that are
    close to the given orbitals.

    Args:
        mo : 2D float array
            The orbital space to symmetrize

    Kwargs:
        orbsym : integer list
            Irrep id for each orbital.  If not given, the irreps are guessed
            by calling :func:`label_orb_symm`.
        s : 2D float array
            Overlap matrix.  If given, use this overlap than the the overlap
            of the input mol.

    Returns:
        2D orbital coefficients

    Examples:

    >>> from pyscf import gto, symm, scf
    >>> mol = gto.M(atom = 'C  0  0  0; H  1  1  1; H -1 -1  1; H  1 -1 -1; H -1  1 -1',
    ...             basis = 'sto3g')
    >>> mf = scf.RHF(mol).run()
    >>> mol.build(0, 0, symmetry='D2')
    >>> mo = symm.symmetrize_orb(mol, mf.mo_coeff)
    >>> print(symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo))
    ['A', 'A', 'B1', 'B2', 'B3', 'A', 'B1', 'B2', 'B3']
    '''
    if s is None:
        s = mol.intor_symmetric('int1e_ovlp')
    if orbsym is None:
        orbsym = label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
                                mo, s=s, check=check)
    orbsym = numpy.asarray(orbsym)
    s_mo = numpy.dot(s, mo)
    mo1 = numpy.empty_like(mo)

    # orbsym entries may be irrep symbols or numeric irrep IDs; iterate
    # with whichever labelling they use
    if orbsym[0] in mol.irrep_name:
        irrep_labels = mol.irrep_name
    else:
        irrep_labels = mol.irrep_id

    for ir, csym in zip(irrep_labels, mol.symm_orb):
        match = orbsym == ir
        ovlpso = reduce(numpy.dot, (csym.T, s, csym))
        coeff = lib.cho_solve(ovlpso, numpy.dot(csym.T, s_mo[:, match]))
        mo1[:, match] = numpy.dot(csym, coeff)
    return mo1
def symmetrize_space(mol, mo, s=None,
                     check=getattr(__config__, 'symm_addons_symmetrize_space_check', True),
                     tol=getattr(__config__, 'symm_addons_symmetrize_space_tol', 1e-7)):
    '''Symmetrize the given orbital space.

    This function is different to the :func:`symmetrize_orb`:  In this function,
    the given orbitals are mixed to reveal the symmtery; :func:`symmetrize_orb`
    projects out non-symmetric components for each orbital.

    Args:
        mo : 2D float array
            The orbital space to symmetrize

    Kwargs:
        s : 2D float array
            Overlap matrix.  If not given, overlap is computed with the input mol.

    Returns:
        2D orbital coefficients

    Examples:

    >>> from pyscf import gto, symm, scf
    >>> mol = gto.M(atom = 'C  0  0  0; H  1  1  1; H -1 -1  1; H  1 -1 -1; H -1  1 -1',
    ...             basis = 'sto3g')
    >>> mf = scf.RHF(mol).run()
    >>> mol.build(0, 0, symmetry='D2')
    >>> mo = symm.symmetrize_space(mol, mf.mo_coeff)
    >>> print(symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo))
    ['A', 'A', 'A', 'B1', 'B1', 'B2', 'B2', 'B3', 'B3']
    '''
    from pyscf.tools import mo_mapping
    if s is None:
        s = mol.intor_symmetric('int1e_ovlp')
    nmo = mo.shape[1]
    s_mo = numpy.dot(s, mo)
    if check and abs(numpy.dot(mo.conj().T, s_mo) - numpy.eye(nmo)).max() > tol:
        raise ValueError('Orbitals are not orthogonalized')

    mo1 = []
    for i, csym in enumerate(mol.symm_orb):
        moso = numpy.dot(csym.T, s_mo)
        ovlpso = reduce(numpy.dot, (csym.T, s, csym))

        # excluding orbitals which are already symmetrized
        try:
            diag = numpy.einsum('ki,ki->i', moso.conj(), lib.cho_solve(ovlpso, moso))
        except Exception:
            # regularize a numerically singular irrep overlap and retry;
            # Exception (not a bare except) so KeyboardInterrupt/SystemExit
            # are not swallowed
            ovlpso[numpy.diag_indices(csym.shape[1])] += 1e-12
            diag = numpy.einsum('ki,ki->i', moso.conj(), lib.cho_solve(ovlpso, moso))
        idx = abs(1-diag) < 1e-8
        orb_exclude = mo[:,idx]
        mo1.append(orb_exclude)
        moso1 = moso[:,~idx]
        dm = numpy.dot(moso1, moso1.T.conj())

        if dm.trace() > 1e-8:
            # diagonalize the density projected into this irrep to extract
            # the symmetry-pure combinations of the remaining orbitals
            e, u = scipy.linalg.eigh(dm, ovlpso)
            mo1.append(numpy.dot(csym, u[:,abs(1-e) < 1e-6]))

    mo1 = numpy.hstack(mo1)
    if mo1.shape[1] != nmo:
        raise ValueError('The input orbital space is not symmetrized.\n One '
                         'possible reason is that the input mol and orbitals '
                         'are of different orientation.')
    if (check and
        abs(reduce(numpy.dot, (mo1.conj().T, s, mo1)) - numpy.eye(nmo)).max() > tol):
        raise ValueError('Orbitals are not orthogonalized')

    # reorder the symmetrized orbitals to best match the input ordering
    idx = mo_mapping.mo_1to1map(reduce(numpy.dot, (mo.T, s, mo1)))
    return mo1[:,idx]
def std_symb(gpname):
    '''Normalize a point-group symbol: first character upper-case, the rest
    lower-case.

    std_symb('d2h') returns D2h; std_symb('D2H') returns D2h
    '''
    normalized = '%s%s' % (gpname[0].upper(), gpname[1:].lower())
    return str(normalized)
def irrep_name2id(gpname, symb):
    '''Convert the irrep symbol to internal irrep ID

    Args:
        gpname : str
            The point group symbol
        symb : str
            Irrep symbol

    Returns:
        Irrep ID, int
    '''
    gpname = std_symb(gpname)
    symb = std_symb(symb)
    # Linear molecules use a dedicated mapping; everything else is tabulated.
    if gpname not in ('Dooh', 'Coov'):
        return param.IRREP_ID_TABLE[gpname][symb]
    return basis.linearmole_irrep_symb2id(gpname, symb)
def irrep_id2name(gpname, irrep_id):
    '''Convert the internal irrep ID to irrep symbol

    Args:
        gpname : str
            The point group symbol
        irrep_id : int
            See IRREP_ID_TABLE in pyscf/symm/param.py

    Returns:
        Irrep symbol, str
    '''
    gpname = std_symb(gpname)
    # Linear molecules use a dedicated mapping; everything else is tabulated.
    if gpname not in ('Dooh', 'Coov'):
        return param.CHARACTER_TABLE[gpname][irrep_id][0]
    return basis.linearmole_irrep_id2symb(gpname, irrep_id)
def irrep_name(pgname, irrep_id):
    '''Deprecated entry point, kept only to point callers at the replacement.'''
    raise RuntimeError('This function was obsoleted. Use irrep_id2name')
def route(target, nelec, orbsym):
    '''Pick orbitals to form a determinant which has the right symmetry.
    If solution is not found, return []
    '''
    def _search(goal, remaining, syms):
        # Base case: the last electron must sit on an orbital whose irrep
        # equals the remaining target symmetry.
        if remaining == 1:
            if goal in syms:
                return [syms.index(goal)]
            return []
        # Place one electron on orbital i, then recurse on the orbitals after
        # i so that the returned indices stay strictly increasing.
        for i, ir in enumerate(syms):
            tail = _search(goal ^ ir, remaining - 1, syms[i+1:])
            if tail:
                return [i] + [i + 1 + x for x in tail]
        return []

    if isinstance(orbsym, numpy.ndarray):
        orbsym = orbsym.tolist()
    return _search(target, nelec, orbsym)
def eigh(h, orbsym):
    '''Solve eigenvalue problem based on the symmetry information for basis.
    See also pyscf/lib/linalg_helper.py :func:`eigh_by_blocks`

    Args:
        h : 2D array
            Hermitian matrix expressed in a symmetry-adapted basis.
        orbsym : list
            Irrep label of each basis function; rows/columns sharing a label
            are diagonalized together as one block.

    Returns:
        Whatever :func:`lib.eigh_by_blocks` returns (eigenvalues and block
        eigenvectors).

    Examples:

    >>> from pyscf import gto, symm
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='ccpvdz', symmetry=True)
    >>> c = numpy.hstack(mol.symm_orb)
    >>> vnuc_so = reduce(numpy.dot, (c.T, mol.intor('int1e_nuc_sph'), c))
    >>> orbsym = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c)
    >>> symm.eigh(vnuc_so, orbsym)
    (array([-4.50766885, -1.80666351, -1.7808565 , -1.7808565 , -1.74189134,
            -0.98998583, -0.98998583, -0.40322226, -0.30242374, -0.07608981]),
    ...)
    '''
    # Thin wrapper: the block decomposition is done entirely by eigh_by_blocks.
    return lib.eigh_by_blocks(h, labels=orbsym)
if __name__ == "__main__":
    from pyscf import gto
    from pyscf import scf

    # Smoke test: H2 with point-group symmetry enabled.
    mol = gto.Mole()
    mol.build(
        atom = [['H', (0,0,0)], ['H', (0,0,1)]],
        basis = {'H': 'cc-pvdz'},
        symmetry = 1
    )
    mf = scf.RHF(mol)
    mf.scf()

    nao, nmo = mf.mo_coeff.shape
    print(label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff))

    # Mix the converged orbitals by a small random unitary, then check that
    # symmetrize_orb recovers clean symmetry labels.
    numpy.random.seed(1)
    u = numpy.random.random((nmo,nmo))*1e-2
    u = scipy.linalg.expm(u - u.T)
    mo = symmetrize_orb(mol, numpy.dot(mf.mo_coeff, u))
    print(label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo))

    # route() demo: the xor of the picked irreps should equal the target (7).
    orbsym = [0, 3, 0, 2, 5, 6]
    res = route(7, 3, orbsym)
    print(res, reduce(lambda x,y:x^y, [orbsym[i] for i in res]))
| gkc1000/pyscf | pyscf/symm/addons.py | Python | apache-2.0 | 12,136 | [
"PySCF"
] | 24e342d315ead8bc7d69d659de6842a95ffe2bd79a43d781df9dae21226d39d7 |
"""
BLAST+6 format (:mod:`skbio.io.format.blast6`)
==============================================
.. currentmodule:: skbio.io.format.blast6
The BLAST+6 format (``blast+6``) stores the results of a BLAST [1]_ database
search. The results are stored in a simple tabular format with no column
headers. Values are separated by the tab character.
An example BLAST+6-formatted file comparing two protein sequences, taken
from [2]_ (tab characters represented by ``<tab>``)::
moaC<tab>gi|15800534|ref|NP_286546.1|<tab>100.00<tab>161<tab>0<tab>0<tab>1\
<tab>161<tab>1<tab>161<tab>3e-114<tab>330
moaC<tab>gi|170768970|ref|ZP_02903423.1|<tab>99.38<tab>161<tab>1<tab>0\
<tab>1<tab>161<tab>1<tab>161<tab>9e-114<tab>329
Format Support
--------------
**Has Sniffer: No**
**State: Experimental as of 0.4.1.**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |No |:mod:`pandas.DataFrame` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
BLAST+6 format is a tabular text-based format produced by both BLAST+ output
format 6 (``-outfmt 6``) and legacy BLAST output format 8 (``-m 8``). It is
tab-separated and has no column headers. With BLAST+, users can specify the
columns that are present in their BLAST output file by specifying column names
(e.g., ``-outfmt "6 qseqid sseqid bitscore qstart sstart"``), if the default
columns output by BLAST are not desired.
BLAST Column Types
^^^^^^^^^^^^^^^^^^
The following column types are output by BLAST and supported by scikit-bio.
This information is taken from [3]_.
+-----------+------------------------------------+-----+
|Name |Description |Type |
+===========+====================================+=====+
|qseqid |Query Seq-id |str |
+-----------+------------------------------------+-----+
|qgi |Query GI |int |
+-----------+------------------------------------+-----+
|qacc       |Query accession                     |str  |
+-----------+------------------------------------+-----+
|qaccver    |Query accession.version             |str  |
+-----------+------------------------------------+-----+
|qlen |Query sequence length |int |
+-----------+------------------------------------+-----+
|sseqid |Subject Seq-id |str |
+-----------+------------------------------------+-----+
|sallseqid |All subject Seq-id(s), separated by |str |
| |a ';' | |
+-----------+------------------------------------+-----+
|sgi |Subject GI |int |
+-----------+------------------------------------+-----+
|sallgi |All subject GIs |int |
+-----------+------------------------------------+-----+
|sacc       |Subject accession                   |str  |
+-----------+------------------------------------+-----+
|saccver    |Subject accession.version           |str  |
+-----------+------------------------------------+-----+
|sallacc    |All subject accessions              |str  |
+-----------+------------------------------------+-----+
|slen |Subject sequence length |int |
+-----------+------------------------------------+-----+
|qstart |Start of alignment in query |int |
+-----------+------------------------------------+-----+
|qend |End of alignment in query |int |
+-----------+------------------------------------+-----+
|sstart |Start of alignment in subject |int |
+-----------+------------------------------------+-----+
|send |End of alignment in subject |int |
+-----------+------------------------------------+-----+
|qseq |Aligned part of query sequence |str |
+-----------+------------------------------------+-----+
|sseq |Aligned part of subject sequence |str |
+-----------+------------------------------------+-----+
|evalue |Expect value |float|
+-----------+------------------------------------+-----+
|bitscore |Bit score |float|
+-----------+------------------------------------+-----+
|score |Raw score |int |
+-----------+------------------------------------+-----+
|length |Alignment length |int |
+-----------+------------------------------------+-----+
|pident |Percent of identical matches |float|
+-----------+------------------------------------+-----+
|nident |Number of identical matches |int |
+-----------+------------------------------------+-----+
|mismatch |Number of mismatches |int |
+-----------+------------------------------------+-----+
|positive |Number of positive-scoring matches |int |
+-----------+------------------------------------+-----+
|gapopen |Number of gap openings |int |
+-----------+------------------------------------+-----+
|gaps |Total number of gaps |int |
+-----------+------------------------------------+-----+
|ppos |Percentage of positive-scoring |float|
| |matches | |
+-----------+------------------------------------+-----+
|frames |Query and subject frames separated |str |
| |by a '/' | |
+-----------+------------------------------------+-----+
|qframe |Query frame |int |
+-----------+------------------------------------+-----+
|sframe |Subject frame |int |
+-----------+------------------------------------+-----+
|btop |Blast traceback operations (BTOP) |int |
+-----------+------------------------------------+-----+
|staxids |Unique Subject Taxonomy ID(s), |str |
| |separated by a ';' (in numerical | |
| |order). | |
+-----------+------------------------------------+-----+
|sscinames |Unique Subject Scientific Name(s), |str |
| |separated by a ';' | |
+-----------+------------------------------------+-----+
|scomnames |Unique Subject Common Name(s), |str |
| |separated by a ';' | |
+-----------+------------------------------------+-----+
|sblastnames|unique Subject Blast Name(s), |str |
| |separated by a ';' (in alphabetical | |
| |order) | |
+-----------+------------------------------------+-----+
|sskingdoms |unique Subject Super Kingdom(s), |str |
| |separated by a ';' (in alphabetical | |
| |order) | |
+-----------+------------------------------------+-----+
|stitle |Subject Title |str |
+-----------+------------------------------------+-----+
|sstrand |Subject Strand |str |
+-----------+------------------------------------+-----+
|salltitles |All Subject Title(s), separated by |str |
| |a '<>' | |
+-----------+------------------------------------+-----+
|qcovs |Query Coverage Per Subject |int |
+-----------+------------------------------------+-----+
|qcovhsp |Query Coverage Per HSP |int |
+-----------+------------------------------------+-----+
.. note:: When a BLAST+6-formatted file contains ``N/A`` values, scikit-bio
will convert these values into ``np.nan``, matching pandas' convention for
representing missing data.
.. note:: scikit-bio stores columns of type ``int`` as type ``float`` in the
returned ``pd.DataFrame``. This is necessary in order to allow ``N/A``
values in integer columns (this is currently a limitation of pandas).
Format Parameters
-----------------
The following format parameters are available in ``blast+6`` format:
- ``default_columns``: ``False`` by default. If ``True``, will use the default
columns output by BLAST, which are qseqid, sseqid, pident, length, mismatch,
gapopen, qstart, qend, sstart, send, evalue, and bitscore.
.. warning:: When reading legacy BLAST files, you must pass
``default_columns=True`` because legacy BLAST does not allow users to
specify which columns are present in the output file.
- ``columns``: ``None`` by default. If provided, must be a list of column names
in the order they will appear in the file.
.. note:: Either ``default_columns`` or ``columns`` must be provided, as
``blast+6`` does not contain column headers.
Examples
--------
Suppose we have a ``blast+6`` file with default columns:
>>> from io import StringIO
>>> import skbio.io
>>> import pandas as pd
>>> fs = '\\n'.join([
... 'moaC\\tgi|15800534|ref|NP_286546.1|\\t100.00\\t161\\t0\\t0\\t1\\t161\
\\t1\\t161\\t3e-114\\t330',
... 'moaC\\tgi|170768970|ref|ZP_02903423.1|\\t99.38\\t161\\t1\\t0\\t1\\t\
161\\t1\\t161\\t9e-114\\t329'
... ])
>>> fh = StringIO(fs)
Read the file into a ``pd.DataFrame`` and specify that default columns should
be used:
>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
... default_columns=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
qseqid sseqid pident length mismatch gapopen \\
0 moaC gi|15800534|ref|NP_286546.1| 100.00 161.0 0.0 0.0
1 moaC gi|170768970|ref|ZP_02903423.1| 99.38 161.0 1.0 0.0
<BLANKLINE>
qstart qend sstart send evalue bitscore
0 1.0 161.0 1.0 161.0 3.000000e-114 330.0
1 1.0 161.0 1.0 161.0 9.000000e-114 329.0
Suppose we have a ``blast+6`` file with user-supplied (non-default) columns:
>>> from io import StringIO
>>> import skbio.io
>>> import pandas as pd
>>> fs = '\\n'.join([
... 'moaC\\t100.00\\t0\\t161\\t0\\t161\\t330\\t1',
... 'moaC\\t99.38\\t1\\t161\\t0\\t161\\t329\\t1'
... ])
>>> fh = StringIO(fs)
Read the file into a ``pd.DataFrame`` and specify which columns are present
in the file:
>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
... columns=['qseqid', 'pident', 'mismatch', 'length',
... 'gapopen', 'qend', 'bitscore', 'sstart'])
>>> df # doctest: +NORMALIZE_WHITESPACE
qseqid pident mismatch length gapopen qend bitscore sstart
0 moaC 100.00 0.0 161.0 0.0 161.0 330.0 1.0
1 moaC 99.38 1.0 161.0 0.0 161.0 329.0 1.0
References
----------
.. [1] Altschul, S.F., Gish, W., Miller, W., Myers, E.W. & Lipman, D.J. (1990)
"Basic local alignment search tool." J. Mol. Biol. 215:403-410.
.. [2] http://blastedbio.blogspot.com/2014/11/column-headers-in-blast-tabular-\
and-csv.html
.. [3] http://www.ncbi.nlm.nih.gov/books/NBK279675/
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
from skbio.io import create_format
from skbio.io.format._blast import _parse_blast_data, _possible_columns
# Format object under which the reader below is registered.
blast6 = create_format('blast+6')

# Default columns produced by BLAST+ ``-outfmt 6`` (and legacy ``-m 8``).
_default_columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
                    'gapopen', 'qstart', 'qend', 'sstart', 'send',
                    'evalue', 'bitscore']
@blast6.reader(pd.DataFrame, monkey_patch=False)
def _blast6_to_data_frame(fh, columns=None, default_columns=False):
    """Read a BLAST+6 file into a ``pd.DataFrame``.

    Exactly one of ``columns`` (explicit names, in file order) or
    ``default_columns=True`` must be supplied, because the format carries no
    header line of its own.
    """
    if default_columns and columns is not None:
        raise ValueError("`columns` and `default_columns` cannot both be"
                         " provided.")
    if not default_columns and columns is None:
        raise ValueError("Either `columns` or `default_columns` must be"
                         " provided.")
    if default_columns:
        columns = _default_columns
    else:
        # Reject the first column name that BLAST cannot produce.
        unknown = [name for name in columns if name not in _possible_columns]
        if unknown:
            raise ValueError("Unrecognized column (%r)."
                             " Supported columns:\n%r" %
                             (unknown[0], set(_possible_columns.keys())))
    return _parse_blast_data(fh, columns, ValueError,
                             "Specified number of columns (%r) does not equal"
                             " number of columns in file (%r).")
| gregcaporaso/scikit-bio | skbio/io/format/blast6.py | Python | bsd-3-clause | 12,815 | [
"BLAST",
"scikit-bio"
] | f04104ef8330d3980518d4c732dc81fb9c37dc910ff746107053f302d0ef9fe4 |
#!/usr/bin/env python
"""
dirac-rss-query-db
Script that dumps the DB information for the elements into the standard output.
It returns information concerning the StatusType and Status attributes.
Usage:
dirac-rss-query-db [option] <query> <element> <tableType>
Queries:
[select|add|modify|delete]
Elements:
[site|resource|component|node]
TableTypes:
[status|log|history]
Options:
--name= ElementName (it admits a comma-separated list of element names); None by default
--statusType= A valid StatusType argument (it admits a comma-separated list of statusTypes
e.g. ReadAccess, WriteAccess, RemoveAccess ); None by default
--status= A valid Status argument ( Active, Probing, Degraded, Banned, Unknown, Error );
None by default
--elementType= ElementType narrows the search (string, list); None by default
--reason= Decision that triggered the assigned status
--lastCheckTime= Time-stamp setting last time the status & status were checked
--tokenOwner= Owner of the token ( to specify only with select/delete queries )
Verbosity:
-o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE..
"""
import datetime
from DIRAC import gLogger, exit as DIRACExit, S_OK, version
from DIRAC.Core.Base import Script
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.PrettyPrint import printTable
__RCSID__ = '$Id:$'
subLogger = None
switchDict = {}
def registerSwitches():
  '''
  Registers all switches that can be used while calling the script from the
  command line interface.
  '''
  # ( long-option, help text ) pairs; none of them takes a short option.
  switches = (
    ( 'element=', 'Element family to be Synchronized ( Site, Resource, Node )' ),
    ( 'tableType=', 'A valid table type (Status, Log, History)' ),
    ( 'name=', 'ElementName; None if default' ),
    ( 'statusType=', 'A valid StatusType argument (it admits a comma-separated list of statusTypes); None if default' ),
    ( 'status=', 'A valid Status argument ( Active, Probing, Degraded, Banned, Unknown, Error ); None if default' ),
    ( 'elementType=', 'ElementType narrows the search; None if default' ),
    ( 'reason=', 'Decision that triggered the assigned status' ),
    ( 'lastCheckTime=', 'Time-stamp setting last time the status & status were checked' ),
    ( 'tokenOwner=', 'Owner of the token ( to specify only with select/delete queries' ),
  )
  for switchName, description in switches:
    Script.registerSwitch( '', switchName, description )
def registerUsageMessage():
  '''
  Takes the script __doc__ and adds the DIRAC version to it
  '''
  # Prepend the running DIRAC version so --help output identifies the release.
  usageMessage = 'DIRAC version: %s \n' % version
  usageMessage += __doc__

  Script.setUsageMessage( usageMessage )
def parseSwitches():
  '''
  Parses the arguments passed by the user.

  Returns the positional arguments ( query, element, tableType ) together
  with a dict of switch values; unspecified switches default to None.
  Invalid input never returns: error() prints the help and exits.
  '''

  Script.parseCommandLine( ignoreErrors = True )
  args = Script.getPositionalArgs()
  # Three positional arguments are mandatory.  error() terminates the script,
  # so reaching the else branch means all three validated.
  if len( args ) < 3:
    error( "Missing all mandatory 'query', 'element', 'tableType' arguments" )
  elif args[0].lower() not in ( 'select', 'add', 'modify', 'delete' ):
    error( "Incorrect 'query' argument" )
  elif args[1].lower() not in ( 'site', 'resource', 'component', 'node' ):
    error( "Incorrect 'element' argument" )
  elif args[2].lower() not in ( 'status', 'log', 'history' ):
    error( "Incorrect 'tableType' argument" )
  else:
    query = args[0].lower()

  switches = dict( Script.getUnprocessedSwitches() )

  # Default values
  switches.setdefault( 'name', None )
  switches.setdefault( 'statusType', None )
  switches.setdefault( 'status', None )
  switches.setdefault( 'elementType', None )
  switches.setdefault( 'reason', None )
  switches.setdefault( 'lastCheckTime', None )
  switches.setdefault( 'tokenOwner', None )

  # Normalize capitalization of the requested status and validate it.
  if 'status' in switches and switches[ 'status' ] is not None:
    switches[ 'status' ] = switches[ 'status' ].title()
    if switches[ 'status' ] not in ( 'Active', 'Probing', 'Degraded', 'Banned', 'Unknown', 'Error' ):
      error( "'%s' is an invalid argument for switch 'status'" % switches[ 'status' ] )

  # when it's a add/modify query and status/reason/statusType are not specified
  # then some specific defaults are set up
  if query == 'add' or query == 'modify':
    if 'status' not in switches or switches[ 'status' ] is None:
      switches[ 'status' ] = 'Unknown'
    if 'reason' not in switches or switches[ 'reason' ] is None:
      switches[ 'reason' ] = 'Unknown reason'
    if 'statusType' not in switches or switches[ 'statusType' ] is None:
      switches[ 'statusType' ] = 'all'

  subLogger.debug( "The switches used are:" )
  # Python 2 idiom: map() eagerly logs each ( switch, value ) pair.
  map( subLogger.debug, switches.iteritems() )

  return args, switches
#...............................................................................
# UTILS: to check and unpack
def getToken( key ):
  '''
  Extracts token information from the caller's proxy.

  :param str key: 'owner' returns the proxy user name; 'expiration' returns
                  the proxy expiration time stamp formatted as
                  'YYYY-MM-DD HH:MM:SS'.  Any other key returns None.
  '''
  proxyInfo = getProxyInfo()
  if not proxyInfo[ 'OK' ]:
    error( str( proxyInfo ) )

  if key.lower() == 'owner':
    # Fixed: the previous S_OK round-trip could never fail; return directly.
    return proxyInfo[ 'Value' ][ 'username' ]

  elif key.lower() == 'expiration':
    secondsLeft = proxyInfo[ 'Value' ][ 'secondsLeft' ]
    expirationDate = Time.dateTime() + datetime.timedelta( seconds = secondsLeft )
    # Drop the fractional-seconds part of the ISO time stamp.
    return Time.toString( expirationDate ).split( '.' )[ 0 ]
def checkStatusTypes( statusTypes ):
  '''
  Validates the requested StatusType values against the CS configuration.

  Unknown values abort the script via error().  If a requested value is
  'all', the full list of acceptable status types is returned instead of
  the user's selection.
  '''
  opsH = Operations().getValue( 'ResourceStatus/Config/StatusTypes/StorageElement' )
  acceptableStatusTypes = opsH.replace( ',', '' ).split()

  for statusType in statusTypes:
    if statusType not in acceptableStatusTypes and statusType != 'all':
      # 'all' is appended only so the error message lists it as acceptable.
      acceptableStatusTypes.append( 'all' )
      error( "'%s' is a wrong value for switch 'statusType'.\n\tThe acceptable values are:\n\t%s"
             % ( statusType, str( acceptableStatusTypes ) ) )

    if statusType == 'all':
      # Fixed: the previous substring test ( 'all' in statusType ) would also
      # match any status type merely containing the letters 'all'.
      return acceptableStatusTypes

  return statusTypes
def unpack( switchDict ):
  '''
  Splits comma-separated values of the 'name' and 'statusType' switches and
  returns one switch dict per ( name, statusType ) combination.

  :param dict switchDict: parsed switches; 'name' and 'statusType' may hold
                          comma-separated lists or None
  :return: list of switch dict copies, one per combination
  '''
  switchDictSet = []
  names = []
  statusTypes = []

  # Fixed: the previous code kept the raw filter() object, which under
  # Python 3 is always truthy (even when empty) and would select the wrong
  # branch below.  List comprehensions give a real list in both versions.
  if switchDict[ 'name' ] is not None:
    names = [ n for n in switchDict[ 'name' ].split( ',' ) if n ]
  if switchDict[ 'statusType' ] is not None:
    statusTypes = [ s for s in switchDict[ 'statusType' ].split( ',' ) if s ]
    statusTypes = checkStatusTypes( statusTypes )

  if names and statusTypes:
    # Full cross product of names and status types.
    for name in names:
      for statusType in statusTypes:
        switchDictClone = switchDict.copy()
        switchDictClone[ 'name' ] = name
        switchDictClone[ 'statusType' ] = statusType
        switchDictSet.append( switchDictClone )
  elif names:
    for name in names:
      switchDictClone = switchDict.copy()
      switchDictClone[ 'name' ] = name
      switchDictSet.append( switchDictClone )
  elif statusTypes:
    for statusType in statusTypes:
      switchDictClone = switchDict.copy()
      switchDictClone[ 'statusType' ] = statusType
      switchDictSet.append( switchDictClone )
  else:
    # Neither switch given: a single query with both left unset.
    switchDictClone = switchDict.copy()
    switchDictClone[ 'name' ] = None
    switchDictClone[ 'statusType' ] = None
    switchDictSet.append( switchDictClone )

  return switchDictSet
#...............................................................................
# UTILS: for filtering 'select' output
def filterReason( selectOutput, reason ):
  '''
  Selects all the elements that match 'reason'

  A None reason disables filtering; otherwise an element is kept when the
  given reason is a substring of its 'reason' field.
  '''
  if reason is None:
    return selectOutput
  return [ element for element in selectOutput if reason in element[ 'reason' ] ]
#...............................................................................
# Utils: for formatting query output and notifications
def error( msg ):
  '''
  Format error messages

  Logs the message, prints the usage help and terminates the script with
  exit code 1.  This function never returns.
  '''
  subLogger.error( "\nERROR:" )
  subLogger.error( "\t" + msg )
  subLogger.error( "\tPlease, check documentation below" )

  Script.showHelp()
  DIRACExit( 1 )
def confirm( query, matches ):
  '''
  Format confirmation messages

  Prints a notice stating that <query> succeeded and how many rows matched.
  '''
  subLogger.notice( "\nNOTICE: '%s' request successfully executed ( matches' number: %s )! \n" % ( query, matches ) )
def tabularPrint( table ):
  '''
  Pretty-prints a non-empty list of row dicts (all sharing the same keys)
  through printTable, formatting datetimes and blanking None values.
  '''
  def _cell( value ):
    # Render one field: datetimes as DIRAC time strings, None as empty.
    if isinstance( value, datetime.datetime ):
      return Time.toString( value )
    if value is None:
      return ''
    return value

  columns_names = table[ 0 ].keys()
  records = [ [ _cell( value ) for value in row.values() ] for row in table ]

  output = printTable( columns_names, records, numbering = False,
                       columnSeparator = " | ", printOut = False )
  subLogger.notice( output )
#...............................................................................
def select( args, switchDict ):
  '''
  Given the switches, request a query 'select' on the ResourceStatusDB
  that gets from <element><tableType> all rows that match the parameters given.

  Returns a dict with keys 'output' (row dicts, reason-filtered),
  'successful', 'message' and 'match' (row count).
  '''
  rssClient = ResourceStatusClient.ResourceStatusClient()

  meta = { 'columns' : [ 'name', 'statusType', 'status', 'elementType', 'reason',
                         'dateEffective', 'lastCheckTime', 'tokenOwner', 'tokenExpiration' ] }

  result = { 'output': None, 'successful': None, 'message': None, 'match': None }
  output = rssClient.selectStatusElement( element = args[1].title(),
                                          tableType = args[2].title(),
                                          name = switchDict[ 'name' ],
                                          statusType = switchDict[ 'statusType' ],
                                          status = switchDict[ 'status' ],
                                          elementType = switchDict[ 'elementType' ],
                                          lastCheckTime = switchDict[ 'lastCheckTime' ],
                                          tokenOwner = switchDict[ 'tokenOwner' ],
                                          meta = meta )
  result[ 'successful' ] = output[ 'OK' ]
  result[ 'message' ] = output[ 'Message' ] if 'Message' in output else None
  if output[ 'OK' ]:
    # Fixed: only read 'Columns'/'Value' on success; a failed call carries
    # neither key and previously raised KeyError before the error could be
    # reported by run().
    result[ 'output' ] = [ dict( zip( output[ 'Columns' ], e ) ) for e in output[ 'Value' ] ]
    # The DB cannot filter on a reason substring, so it is done client-side.
    result[ 'output' ] = filterReason( result[ 'output' ], switchDict[ 'reason' ] )
    result[ 'match' ] = len( result[ 'output' ] )

  return result
def add( args, switchDict ):
  '''
  Given the switches, request a query 'addOrModify' on the ResourceStatusDB
  that inserts or updates-if-duplicated from <element><tableType> and also adds
  a log if flag is active.
  '''
  rssClient = ResourceStatusClient.ResourceStatusClient()

  result = { 'output': None, 'successful': None, 'message': None, 'match': None }
  # Token ownership always comes from the caller's proxy for add queries.
  output = rssClient.addOrModifyStatusElement( element = args[1].title(),
                                               tableType = args[2].title(),
                                               name = switchDict[ 'name' ],
                                               statusType = switchDict[ 'statusType' ],
                                               status = switchDict[ 'status' ],
                                               elementType = switchDict[ 'elementType' ],
                                               reason = switchDict[ 'reason' ],
                                               tokenOwner = getToken( 'owner' ),
                                               tokenExpiration = getToken( 'expiration' ) )
  if output.get( 'Value' ):
    result[ 'match' ] = int( output[ 'Value' ] )
  result[ 'successful' ] = output[ 'OK' ]
  result[ 'message' ] = output.get( 'Message' )

  return result
def modify( args, switchDict ):
  '''
  Given the switches, request a query 'modify' on the ResourceStatusDB
  that updates from <element><tableType> and also adds a log if flag is active.

  Returns a dict with 'successful', 'message' and 'match' (affected rows).
  '''
  rssClient = ResourceStatusClient.ResourceStatusClient()

  result = { 'output': None, 'successful': None, 'message': None, 'match': None }
  # Token ownership always comes from the caller's proxy for modify queries.
  output = rssClient.modifyStatusElement( element = args[1].title(),
                                          tableType = args[2].title(),
                                          name = switchDict[ 'name' ],
                                          statusType = switchDict[ 'statusType' ],
                                          status = switchDict[ 'status' ],
                                          elementType = switchDict[ 'elementType' ],
                                          reason = switchDict[ 'reason' ],
                                          #dateEffective = switchDict[ 'dateEffective' ],
                                          #lastCheckTime = switchDict[ 'lastCheckTime' ],
                                          tokenOwner = getToken( 'owner' ),
                                          tokenExpiration = getToken( 'expiration' )
                                        )
  if output.get('Value'):
    result['match'] = int( output['Value'] if output['Value'] else 0 )
  result['successful'] = output['OK']
  result['message'] = output['Message'] if 'Message' in output else None

  return result
def delete( args, switchDict ):
  '''
  Given the switches, request a query 'delete' on the ResourceStatusDB
  that deletes from <element><tableType> all rows that match the parameters given.

  Unlike add/modify, the token owner comes from the --tokenOwner switch,
  not from the caller's proxy.
  '''
  rssClient = ResourceStatusClient.ResourceStatusClient()

  result = { 'output': None, 'successful': None, 'message': None, 'match': None }
  output = rssClient.deleteStatusElement( element = args[1].title(),
                                          tableType = args[2].title(),
                                          name = switchDict[ 'name' ],
                                          statusType = switchDict[ 'statusType' ],
                                          status = switchDict[ 'status' ],
                                          elementType = switchDict[ 'elementType' ],
                                          reason = switchDict[ 'reason' ],
                                          #dateEffective = switchDict[ 'dateEffective' ],
                                          #lastCheckTime = switchDict[ 'lastCheckTime' ],
                                          tokenOwner = switchDict[ 'tokenOwner' ],
                                          #tokenExpiration = switchDict[ 'tokenExpiration' ]
                                        )
  if 'Value' in output:
    result['match'] = int( output['Value'] if output['Value'] else 0 )
  result['successful'] = output['OK']
  result['message'] = output['Message'] if 'Message' in output else None

  return result
#...............................................................................
def run( args, switchDictSet ):
  '''
  Main function of the script.

  Executes the requested query once per unpacked switch dict, accumulating
  matches, and prints the result table for 'select' queries.
  '''
  query = args[ 0 ]
  # Fixed: dispatch through a dict instead of eval() on a constructed string.
  # parseSwitches already restricts query to these four values.
  queryFunctions = { 'select': select, 'add': add, 'modify': modify, 'delete': delete }

  matches = 0
  table = []

  for switchDict in switchDictSet:
    result = queryFunctions[ query ]( args, switchDict )

    if result[ 'successful' ]:
      if query == 'select' and result[ 'match' ] > 0:
        table.extend( result[ 'output' ] )
      # Fixed: the previous conditional-expression precedence reset the
      # accumulated count to 0 whenever one query matched nothing.
      if result[ 'match' ]:
        matches += result[ 'match' ]
    else:
      error( result[ 'message' ] )

  if query == 'select' and matches > 0:
    tabularPrint( table )
  confirm( query, matches )
#...............................................................................
if __name__ == "__main__":

  subLogger = gLogger.getSubLogger( __file__ )

  # Script initialization: switches, usage text, and command-line parsing.
  registerSwitches()
  registerUsageMessage()
  args, switchDict = parseSwitches()

  # Unpack switchDict if 'name' or 'statusType' have multiple values
  switchDictSet = unpack( switchDict )

  # Run script
  run( args, switchDictSet )

  # Bye
  DIRACExit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| Andrew-McNab-UK/DIRAC | ResourceStatusSystem/scripts/dirac-rss-query-db.py | Python | gpl-3.0 | 17,572 | [
"DIRAC"
] | 8bcd41794fc81cab38d563786f3a176cd2e06d2cbc80439e4fc5aa17c23987f5 |
from twisted.enterprise import adbapi
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.words.protocols import irc
from txircd.modbase import Command
from txircd.server import RegisterUser, RemoveUser, ModuleMessage, SetIdent, SetHost, SetName
from txircd.utils import chunk_message, crypt, irc_lower, now, CaseInsensitiveDictionary
from base64 import b64decode, b64encode
from Crypto.Random.random import getrandbits
from Crypto.Cipher import AES
from Crypto.Cipher import Blowfish
from datetime import datetime
from random import choice
import math, os, random, uuid, yaml
class Service(object):
    class ServiceSocket(object):
        # Stand-in for a real client socket so code that treats a Service like
        # an IRCUser can touch .transport/.secure without special-casing.
        class ServiceTransport(object):
            def loseConnection(self):
                # Nothing to close for a pseudo-client.
                pass
        def __init__(self):
            self.transport = self.ServiceTransport()
            # Services are always reported as connected over a secure link.
            self.secure = True
    def __init__(self, ircd, nick, ident, host, gecos, helpTexts, module):
        """Create a services pseudo-client with the given identity, help text tree and owning module."""
        # We're going to try to keep Service fairly consistent with IRCUser, even if most of these variables will never be used
        # in order to prevent exceptions all over the place
        self.ircd = ircd
        self.socket = self.ServiceSocket()
        self.uuid = str(uuid.uuid1())
        self.password = None
        self.nickname = nick
        self.username = ident
        self.hostname = host
        self.realhost = host
        self.realname = gecos
        self.ip = "127.0.0.1"
        self.server = self.ircd.name
        self.signon = datetime.utcfromtimestamp(1) # Give these pseudoclients a really old time so that they won't be disconnected by remote servers
        self.lastactivity = now()
        self.lastpong = now()
        self.nicktime = datetime.utcfromtimestamp(1)
        self.mode = {}
        # Pre-resolved Deferred: anything waiting on disconnection fires immediately.
        self.disconnected = Deferred()
        self.disconnected.callback(None)
        self.registered = 0
        # Metadata namespaces mirror IRCUser's layout.
        self.metadata = {
            "server": {},
            "user": {},
            "client": {},
            "ext": {},
            "private": {}
        }
        self.cache = {} # Not only do various other modules potentially play with the cache, but we can do what we want with it to store auction data, etc.
        # help is a (general text, {command: (summary, details, admin_only)}) pair.
        self.help = helpTexts
        self.module = module
def addToServers(self):
for server in self.ircd.servers.itervalues():
if server.nearHop == self.ircd.name:
server.callRemote(RegisterUser, uuid=self.uuid, nick=self.nickname, ident=self.username, host=self.hostname, realhost=self.realhost, gecos=self.realname, ip=self.ip, password="", server=self.server, secure=self.socket.secure, signon=1, nickts=1)
def removeFromServers(self):
for server in self.ircd.servers.itervalues():
if server.nearHop == self.ircd.name:
server.callRemote(RemoveUser, user=self.uuid, reason="Unloading module")
def register(self):
pass
def send_isupport(self):
pass
def disconnect(self, reason, sourceServer = None):
if sourceServer is None:
return
self.ircd.servers[sourceServer].callRemote(RegisterUser, uuid=self.uuid, nick=self.nickname, ident=self.username, host=self.hostname, realhost=self.realhost, gecos=self.realname, ip=self.ip, password="", server=self.server, secure=self.socket.secure, signon=1, nickts=1)
def sendMessage(self, command, *parameter_list, **kw):
if command == "PRIVMSG" and "prefix" in kw:
nick = kw["prefix"][0:kw["prefix"].find("!")]
user = self.ircd.users[nick]
params = parameter_list[0].split(" ")
serviceCommand = params.pop(0).upper().lstrip(":") # Messages sent this way start with a colon
if serviceCommand == "HELP":
if not params:
helpOut = chunk_message(self.help[0], 80)
for line in helpOut:
user.sendMessage("NOTICE", ":{}".format(line), prefix=self.prefix())
user.sendMessage("NOTICE", ": ", prefix=self.prefix())
commands = sorted(self.help[1].keys())
for cmd in commands:
info = self.help[1][cmd]
if info[2] and not self.module.isServiceAdmin(user, self):
continue
user.sendMessage("NOTICE", ":\x02{}\x02\t{}".format(cmd.upper(), info[0]), prefix=self.prefix())
user.sendMessage("NOTICE", ": ", prefix=self.prefix())
user.sendMessage("NOTICE", ":*** End of help", prefix=self.prefix())
else:
helpCmd = params[0]
if helpCmd not in self.help[1]:
user.sendMessage("NOTICE", ":No help available for \x02{}\x02.".format(helpCmd), prefix=self.prefix())
else:
info = self.help[1][helpCmd]
if info[2] and not self.module.isServiceAdmin(user, self):
user.sendMessage("NOTICE", ":No help available for \x02{}\x02.".format(helpCmd), prefix=self.prefix())
else:
helpOut = chunk_message(info[1], 80)
for line in helpOut:
user.sendMessage("NOTICE", ":{}".format(line), prefix=self.prefix())
user.sendMessage("NOTICE", ":*** End of \x02{}\x02 help".format(helpCmd), prefix=self.prefix())
elif serviceCommand in self.help[1] and (not self.help[1][serviceCommand][2] or self.module.isServiceAdmin(user, self)):
self.ircd.users[nick].handleCommand(serviceCommand, None, params)
else:
self.ircd.users[nick].sendMessage("NOTICE", ":Unknown command \x02{}\x02. Use \x1F/msg {} HELP\x1F for help.".format(serviceCommand, self.nickname), prefix=self.prefix())
def setMetadata(self, namespace, key, value):
oldValue = self.metadata[namespace][key] if key in self.metadata[namespace] else ""
self.metadata[namespace][key] = value
for modfunc in self.ircd.actions["metadataupdate"]:
modfunc(self, namespace, key, oldValue, value)
def delMetadata(self, namespace, key):
oldValue = self.metadata[namespace][key]
del self.metadata[namespace][key]
for modfunc in self.ircd.actions["metadataupdate"]:
modfunc(self, namespace, key, oldValue, "")
def prefix(self):
return "{}!{}@{}".format(self.nickname, self.username, self.hostname)
def hasAccess(self, channel, level):
return True # access to change anything in all channels
def setUsername(self, newUsername, sourceServer = None):
if sourceServer:
self.ircd.servers[sourceServer].callRemote(SetIdent, user=self.uuid, ident=self.username)
def setHostname(self, newHostname, sourceServer = None):
if sourceServer:
self.ircd.servers[sourceServer].callRemote(SetHost, user=self.uuid, host=self.hostname)
def setRealname(self, newRealname, sourceServer = None):
if sourceServer:
self.ircd.servers[sourceServer].callRemote(SetName, user=self.uuid, gecos=self.realname)
def setMode(self, user, modes, params, displayPrefix = None):
return ""
def modeString(self, user):
return "+" # user modes are for chumps
def send_motd(self):
pass
def send_lusers(self):
pass
def report_names(self, channel):
pass
def listname(self, channel, listingUser, representation):
for modfunc in self.ircd.actions["nameslistentry"]:
representation = modfunc(self, channel, listingUser, representation)
if not representation:
return representation
return representation
def join(self, channel):
pass
def leave(self, channel):
pass
def nick(self, newNick):
pass
class NickServAlias(Command):
    """Alias command that forwards its arguments to NickServ as a PRIVMSG."""
    def onUse(self, user, data):
        # Re-dispatch as a private message to the configured NickServ nick.
        target = self.ircd.servconfig["services_nickserv_nick"]
        message = " ".join(data["params"])
        user.handleCommand("PRIVMSG", None, [target, message])
class ChanServAlias(Command):
    """Alias command that forwards its arguments to ChanServ as a PRIVMSG."""
    def onUse(self, user, data):
        # Re-dispatch as a private message to the configured ChanServ nick.
        target = self.ircd.servconfig["services_chanserv_nick"]
        message = " ".join(data["params"])
        user.handleCommand("PRIVMSG", None, [target, message])
class BidServAlias(Command):
    """Alias command that forwards its arguments to BidServ as a PRIVMSG."""
    def onUse(self, user, data):
        # Re-dispatch as a private message to the configured BidServ nick.
        target = self.ircd.servconfig["services_bidserv_nick"]
        message = " ".join(data["params"])
        user.handleCommand("PRIVMSG", None, [target, message])
class OperServAlias(Command):
    """Alias command that forwards its arguments to OperServ as a PRIVMSG."""
    def onUse(self, user, data):
        # Re-dispatch as a private message to the configured OperServ nick.
        target = self.ircd.servconfig["services_operserv_nick"]
        message = " ".join(data["params"])
        user.handleCommand("PRIVMSG", None, [target, message])
class NSIdentifyCommand(Command):
    """NickServ IDENTIFY: authenticate a user against a donor account.

    Accepts either "IDENTIFY email:password" (single parameter) or
    "IDENTIFY email password" (two parameters).
    """
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        # Delegate the credential check to the module's shared auth routine.
        self.module.auth(user, data["email"], data["password"])
    def processParams(self, user, params):
        if user.registered > 0:
            # BUG FIX: this was user.sendMesssage (typo), which raised
            # AttributeError whenever an unregistered connection used IDENTIFY.
            user.sendMessage(irc.ERR_NOTREGISTERED, "IDENTIFY", ":You have not registered")
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02IDENTIFY \x1Femail:password", prefix=self.nickserv.prefix())
            return {}
        # Two-parameter form: "IDENTIFY email password"
        if len(params) >= 2 and ":" not in params[0]:
            return {
                "user": user,
                "email": params[0],
                "password": params[1]
            }
        # Single-parameter form: "IDENTIFY email:password" (exactly one colon;
        # extra colons make split() raise ValueError and fall into the usage notice)
        try:
            email, password = params[0].split(":")
        except ValueError:
            user.sendMessage("NOTICE", ":Usage: \x02IDENTIFY \x1Femail:password", prefix=self.nickserv.prefix())
            return {}
        return {
            "user": user,
            "email": email,
            "password": password
        }
class NSGhostCommand(Command):
    """NickServ GHOST: disconnect another connection that is using one of
    the caller's nicknames.

    The target is killed immediately if it is logged into the same account;
    otherwise ownership of the target's nick is checked against the database
    before killing.
    """
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        targetUser = data["targetuser"]
        if "accountid" in targetUser.cache and targetUser.cache["accountid"] == user.cache["accountid"]:
            # Same account on both connections: no DB check needed.
            targetUser.disconnect("Killed (GHOST command issued by {})".format(user.nickname))
            user.sendMessage("NOTICE", ":{} has been disconnected.".format(targetUser.nickname), prefix=self.nickserv.prefix())
        else:
            # Verify the target nick belongs to the caller's account.
            # NOTE(review): {0} appears to be the DB driver's parameter marker,
            # substituted by module.query with both args bound positionally — confirm.
            d = self.module.query("SELECT nick FROM ircnicks WHERE donor_id = {0} AND nick = {0}", user.cache["accountid"], irc_lower(targetUser.nickname))
            d.addCallback(self.ghostSuccess, user, targetUser)
            d.addErrback(self.module.exclaimServerError, user, self.nickserv)
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "GHOST", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to do that.", prefix=self.nickserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02GHOST \x1Fnickname", prefix=self.nickserv.prefix())
            return {}
        if params[0] not in self.ircd.users:
            user.sendMessage("NOTICE", ":No user is connected with that nickname.", prefix=self.nickserv.prefix())
            return {}
        targetUser = self.ircd.users[params[0]]
        if user == targetUser:
            user.sendMessage("NOTICE", ":That's you! You can't ghost yourself.", prefix=self.nickserv.prefix())
            return {}
        return {
            "user": user,
            "targetuser": targetUser
        }
    def ghostSuccess(self, result, user, targetUser):
        # A non-empty result set means the nick is registered to the caller.
        if result:
            targetUser.disconnect("Killed (GHOST command used by {})".format(user.nickname))
            user.sendMessage("NOTICE", ":{} has been disconnected.".format(targetUser.nickname), prefix=self.nickserv.prefix())
        else:
            user.sendMessage("NOTICE", ":That nick does not belong to you.", prefix=self.nickserv.prefix())
class NSLoginCommand(Command):
    """NickServ LOGIN: authenticate with an email and password as two parameters."""
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        # Hand the credentials off to the shared authentication routine.
        self.module.auth(user, data["email"], data["password"])
    def processParams(self, user, params):
        # Reject connections that have not completed registration.
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "LOGIN", ":You have not registered")
            return {}
        # Both an email and a password are required.
        if not params or len(params) < 2:
            user.sendMessage("NOTICE", ":Usage: \x02LOGIN \x1Femail\x1F \x1Fpassword", prefix=self.nickserv.prefix())
            return {}
        return {"user": user, "email": params[0], "password": params[1]}
class NSLogoutCommand(Command):
    """NickServ LOGOUT: drop the caller's services login session."""
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        self.module.logoutUser(user)
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "LOGOUT", ":You have not registered")
            return {}
        # Nothing to do when no account is attached to this connection.
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You're already logged out.", prefix=self.nickserv.prefix())
            return {}
        return {"user": user}
class NSDropCommand(Command):
    """NickServ DROP: detach a registered nickname from the caller's account."""
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        nick = data["nick"]
        deferred = self.module.db.runInteraction(self.dropNicknameTransaction, user.cache["accountid"], nick, self.ircd.servconfig["servdb_marker"])
        deferred.addCallback(self.confirmDropped, user, nick)
        deferred.addErrback(self.module.exclaimServerError, user, self.nickserv)
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "DROP", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to use the DROP command.", prefix=self.nickserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02DROP \x1Fnickname", prefix=self.nickserv.prefix())
            return {}
        return {"user": user, "nick": params[0]}
    def dropNicknameTransaction(self, transaction, id, nick, db_marker):
        # Substitute the driver's parameter marker into the statement, then
        # execute with both values bound positionally.
        query = "DELETE FROM ircnicks WHERE donor_id = {0} AND nick = {0}".format(db_marker)
        transaction.execute(query, (id, nick))
        return transaction.rowcount
    def confirmDropped(self, result, user, nick):
        # A rowcount of zero means the nick wasn't attached to this account.
        if not result:
            user.sendMessage("NOTICE", ":Could not drop nickname {} from your account. Ensure that it belongs to you.".format(nick), prefix=self.nickserv.prefix())
        else:
            user.sendMessage("NOTICE", ":The nickname {} has been dropped from your account.".format(nick), prefix=self.nickserv.prefix())
class NSNicklistCommand(Command):
    """NickServ NICKLIST: list every nickname registered to the caller's account."""
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        deferred = self.module.query("SELECT nick FROM ircnicks WHERE donor_id = {0}", user.cache["accountid"])
        deferred.addCallback(self.showNicks, user)
        deferred.addErrback(self.module.exclaimServerError, user, self.nickserv)
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "NICKLIST", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to see your nicknames.", prefix=self.nickserv.prefix())
            return {}
        return {"user": user}
    def showNicks(self, results, user):
        # Each result row is a one-element tuple containing the nick.
        nickList = ", ".join([row[0] for row in results])
        user.sendMessage("NOTICE", ":Registered Nicknames: {}".format(nickList), prefix=self.nickserv.prefix())
class NSCertCommand(Command):
    """NickServ CERT: manage SSL certificate fingerprints tied to an account.

    Subcommands:
        LIST - show the fingerprints stored for the caller's account
        ADD  - attach a fingerprint to the caller's account
        DEL  - detach a fingerprint from the caller's account
    """
    def __init__(self, module, service):
        self.module = module
        self.nickserv = service
    def onUse(self, user, data):
        accountid = user.cache["accountid"]
        if data["subcmd"] == "LIST":
            user.sendMessage("NOTICE", ":Certificate list:", prefix=self.nickserv.prefix())
            if accountid in self.nickserv.cache["certfp"]:
                for cert in self.nickserv.cache["certfp"][accountid]:
                    user.sendMessage("NOTICE", ":{}".format(cert), prefix=self.nickserv.prefix())
            user.sendMessage("NOTICE", ":*** End of certificate list", prefix=self.nickserv.prefix())
        elif data["subcmd"] == "ADD":
            if self.module.addCert(user, data["certfp"]):
                user.sendMessage("NOTICE", ":Certificate fingerprint {} added to your account.".format(data["certfp"]), prefix=self.nickserv.prefix())
            else:
                user.sendMessage("NOTICE", ":Certificate fingerprint {} could not be added to your account.".format(data["certfp"]), prefix=self.nickserv.prefix())
        else:
            certfp = data["certfp"]
            # BUG FIX: the DEL branch previously indexed cache["certfp"][accountid]
            # directly, raising KeyError for accounts with no stored fingerprints
            # (LIST guards this case; DEL did not). Fall back to an empty list so
            # the "was not associated" notice is sent instead of crashing.
            accountCerts = self.nickserv.cache["certfp"].get(accountid, [])
            if certfp in accountCerts:
                accountCerts.remove(certfp)
                user.sendMessage("NOTICE", ":Certificate fingerprint {} has been removed from your account.".format(certfp), prefix=self.nickserv.prefix())
            else:
                user.sendMessage("NOTICE", ":Certificate fingerprint {} was not associated with your account.".format(certfp), prefix=self.nickserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "CERT", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to use that command.", prefix=self.nickserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02CERT \x1F{LIST|ADD|DEL}\x1F \x1F[certificate fingerprint]", prefix=self.nickserv.prefix())
            return {}
        subcmd = params[0].upper()
        if subcmd not in ["LIST", "ADD", "DEL"]:
            user.sendMessage("NOTICE", ":Usage: \x02CERT \x1F{LIST|ADD|DEL}\x1F \x1F[certificate fingerprint]", prefix=self.nickserv.prefix())
            return {}
        if subcmd == "LIST":
            return {
                "user": user,
                "subcmd": "LIST"
            }
        # ADD and DEL both require a fingerprint argument.
        if len(params) < 2:
            user.sendMessage("NOTICE", ":Usage: \x02CERT \x1F{}\x1F \x1Fcertificate fingerprint\x1F".format(subcmd), prefix=self.nickserv.prefix())
            return {}
        return {
            "user": user,
            "subcmd": subcmd,
            "certfp": params[1].lower()
        }
class CSRegisterCommand(Command):
    """ChanServ REGISTER: record an existing channel as owned by the caller's account."""
    def __init__(self, module, service):
        self.module = module
        self.chanserv = service
    def onUse(self, user, data):
        channel = data["targetchan"]
        # Founder, empty access list, and registration timestamp form the record.
        registry = self.chanserv.cache["registered"]
        registry[channel.name] = {"founder": user.cache["accountid"], "access": {}, "registertime": now()}
        user.sendMessage("NOTICE", ":The channel {} has been registered under your account.".format(channel.name), prefix=self.chanserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "REGISTER", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to register a channel.", prefix=self.chanserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02REGISTER \x1Fchannel", prefix=self.chanserv.prefix())
            return {}
        if params[0] not in self.ircd.channels:
            user.sendMessage("NOTICE", ":You cannot register a channel that does not exist.", prefix=self.chanserv.prefix())
            return {}
        if params[0] in self.chanserv.cache["registered"]:
            user.sendMessage("NOTICE", ":That channel is already registered.", prefix=self.chanserv.prefix())
            return {}
        cdata = self.ircd.channels[params[0]]
        # Only channel ops and service admins may register a channel.
        if not user.hasAccess(cdata, "o") and not self.module.isServiceAdmin(user, self.chanserv):
            user.sendMessage("NOTICE", ":You must be a channel operator to register that channel.", prefix=self.chanserv.prefix())
            return {}
        return {"user": user, "targetchan": cdata}
class CSAccessCommand(Command):
    """ChanServ ACCESS: list or modify a registered channel's access flags.

    With only a channel argument, lists the access entries. With a target
    (group ~o/~r, connected nick, or numeric account ID) and a flag string
    (+/- prefix-mode letters or 'A'), modifies that target's flags.
    """
    def __init__(self, module, service):
        self.module = module
        self.chanserv = service
    def onUse(self, user, data):
        # Dispatch on which target form processParams resolved.
        if "targetgroup" in data:
            accessID = data["targetgroup"]
        elif "targetaccount" in data:
            accessID = data["targetaccount"]
        elif "targetnick" in data:
            # Nick targets need a DB lookup to find the owning account first.
            # NOTE(review): {0} appears to be the DB driver's parameter marker — confirm.
            d = self.module.query("SELECT donor_id FROM ircnicks WHERE nick = {0} LIMIT 1", data["targetnick"])
            d.addCallback(self.changeAccess, data["targetnick"], user, data["targetchan"], data["flags"])
            d.addErrback(self.module.exclaimServerError, user, self.chanserv)
            return
        else:
            # No target given: list the access entries for the channel.
            accessList = self.chanserv.cache["registered"][data["targetchan"]]["access"]
            if not accessList:
                user.sendMessage("NOTICE", ":The access list is empty.", prefix=self.chanserv.prefix())
            else:
                # Numeric keys are account IDs that must be resolved to nicks.
                convertEntries = [u for u in accessList.iterkeys() if u.isdigit()]
                # For this list, we assume the lowest ID number in the ircnicks table is for the main nick of the account
                d = self.module.query("SELECT n1.donor_id, n1.nick FROM ircnicks n1 JOIN (SELECT MIN(id) minID, donor_id FROM ircnicks GROUP BY donor_id) n2 ON n1.id = n2.minID WHERE {}".format(" OR ".join(["n1.donor_id = {0}" for i in convertEntries])), *convertEntries)
                d.addCallback(self.listAccess, user, accessList)
                d.addErrback(self.module.exclaimServerError, user, self.chanserv)
            return
        # Group/account targets resolve synchronously; fake a one-row result.
        self.changeAccess([[accessID]], accessID, user, data["targetchan"], data["flags"])
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "ACCESS", ":You have not registered")
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02ACCESS \x1Fchannel\x1F [\x1Faccount|nick|group\x1F \x1Fflags\x1F]", prefix=self.chanserv.prefix())
            return {}
        if params[0][0] != "#":
            user.sendMessage("NOTICE", ":{} is not a channel.".format(params[0]), prefix=self.chanserv.prefix())
            return {}
        if params[0] not in self.chanserv.cache["registered"]:
            user.sendMessage("NOTICE", ":{} is not registered.".format(params[0]), prefix=self.chanserv.prefix())
            return {}
        # Fewer than 3 params means list-only mode; no permission check needed.
        if len(params) < 3:
            return {
                "user": user,
                "targetchan": params[0]
            }
        # Modification requires admin, founder, or an 'A' flag on the channel.
        can_modify = False
        if self.module.isServiceAdmin(user, self.chanserv):
            can_modify = True
        elif "accountid" in user.cache:
            if user.cache["accountid"] == self.chanserv.cache["registered"][params[0]]["founder"]:
                can_modify = True
            else:
                # The ~r group entry grants 'A' to all registered users.
                for acct, flags in self.chanserv.cache["registered"][params[0]]["access"].iteritems():
                    if (acct == "~r" or acct == user.cache["accountid"]) and "A" in flags:
                        can_modify = True
        if not can_modify:
            user.sendMessage("NOTICE", ":You do not have access to change the permissions of that channel.", prefix=self.chanserv.prefix())
            return {}
        # Target form 1: special groups (~o = opers, ~r = registered users — TODO confirm).
        if params[1] in ["~o", "~r"]:
            return {
                "user": user,
                "targetchan": params[0],
                "targetgroup": params[1],
                "flags": params[2]
            }
        # Target form 2: a connected user; use their logged-in account ID.
        if params[1] in self.ircd.users:
            udata = self.ircd.users[params[1]]
            if "accountid" not in udata.cache:
                user.sendMessage("NOTICE", ":The target user is not identified to any account.", prefix=self.chanserv.prefix())
                return {}
            return {
                "user": user,
                "targetchan": params[0],
                "targetaccount": udata.cache["accountid"],
                "flags": params[2]
            }
        # Target form 3: a raw numeric account ID.
        if params[1].isdigit():
            return {
                "user": user,
                "targetchan": params[0],
                "targetaccount": params[1],
                "flags": params[2]
            }
        # Target form 4: an offline nickname, resolved via the DB in onUse.
        return {
            "user": user,
            "targetchan": params[0],
            "targetnick": params[1],
            "flags": params[2]
        }
    def listAccess(self, results, user, access):
        accessList = access.copy() # Ensure the original access list is not modified as we delete out of this one
        # Show resolved account IDs with their main nick, consuming them as we go.
        for result in results:
            id = str(result[0])
            if id in accessList:
                user.sendMessage("NOTICE", ": {}: +{}".format(result[1], accessList[id]), prefix=self.chanserv.prefix())
                del accessList[id]
        for id, flags in accessList.iteritems(): # Everything not shown from the results of the SQL query
            user.sendMessage("NOTICE", ": {}: +{}".format(id, flags), prefix=self.chanserv.prefix())
        user.sendMessage("NOTICE", ":End of ACCESS list", prefix=self.chanserv.prefix())
    def changeAccess(self, result, display, user, channel, flags):
        """Apply a +/- flag string to the resolved access entry and report the outcome."""
        if not result:
            user.sendMessage("NOTICE", ":The given nickname is not registered.", prefix=self.chanserv.prefix())
            return
        accessID = str(result[0][0])
        try:
            flagSet = list(self.chanserv.cache["registered"][channel]["access"][accessID])
        except KeyError:
            flagSet = []
        # Walk the flag string, toggling add/remove mode on '+'/'-'.
        adding = True
        for flag in flags:
            if flag == "+":
                adding = True
            elif flag == "-":
                adding = False
            elif flag in self.ircd.prefix_order or flag == "A":
                if adding and flag not in flagSet:
                    flagSet.append(flag)
                elif not adding and flag in flagSet:
                    flagSet.remove(flag)
        if flagSet:
            self.chanserv.cache["registered"][channel]["access"][accessID] = "".join(flagSet)
        else:
            # An empty flag set removes the entry entirely.
            try:
                del self.chanserv.cache["registered"][channel]["access"][accessID]
            except KeyError:
                pass # If it was already not specified somehow, go ahead and remove it
        user.sendMessage("NOTICE", ":The flags for {} have been changed to +{}".format(display, "".join(flagSet)), prefix=self.chanserv.prefix())
class CSCdropCommand(Command):
    """ChanServ CDROP: delete a channel registration (founder or admin only)."""
    def __init__(self, module, service):
        self.module = module
        self.chanserv = service
    def onUse(self, user, data):
        channelName = data["channel"]
        del self.chanserv.cache["registered"][channelName]
        user.sendMessage("NOTICE", ":The channel \x02{}\x02 has been dropped.".format(channelName), prefix=self.chanserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "CDROP", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to drop a channel.", prefix=self.chanserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02CDROP \x1Fchannel", prefix=self.chanserv.prefix())
            return {}
        if params[0] not in self.chanserv.cache["registered"]:
            user.sendMessage("NOTICE", ":The channel \x02{}\x02 isn't registered.".format(params[0]), prefix=self.chanserv.prefix())
            return {}
        # Only the founder or a service admin may drop the registration.
        isFounder = user.cache["accountid"] == self.chanserv.cache["registered"][params[0]]["founder"]
        if not isFounder and not self.module.isServiceAdmin(user, self.chanserv):
            user.sendMessage("NOTICE", ":You must be the channel founder in order to drop it.", prefix=self.chanserv.prefix())
            return {}
        return {"user": user, "channel": params[0]}
class BSStartCommand(Command):
    """BidServ START: begin an auction for a prize looked up by database ID."""
    def __init__(self, module, service):
        self.module = module
        self.bidserv = service
    def onUse(self, user, data):
        # Fetch the prize row; auctionStart validates it and kicks things off.
        # NOTE(review): {0} appears to be the DB driver's parameter marker — confirm.
        d = self.module.query("SELECT id, name, sold, starting_bid FROM prizes WHERE id = {0}", data["auction"])
        d.addCallback(self.auctionStart, user, data["auction"])
        d.addErrback(self.module.exclaimServerError, user, self.bidserv)
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "START", ":You have not registered")
            return {}
        # Only service admins may start auctions.
        if not self.module.isServiceAdmin(user, self.bidserv):
            user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
            return {}
        # Only one auction may run at a time.
        if "auction" in self.bidserv.cache:
            user.sendMessage("NOTICE", ":You cannot start an auction when one is currently in progress.", prefix=self.bidserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02START \x1FauctionID", prefix=self.bidserv.prefix())
            return {}
        try:
            auctionID = int(params[0])
        except ValueError:
            user.sendMessage("NOTICE", ":The auction ID given is not valid.", prefix=self.bidserv.prefix())
            return {}
        return {
            "user": user,
            "auction": auctionID
        }
    def auctionStart(self, results, user, auctionID):
        # Result rows are (id, name, sold, starting_bid).
        if not results:
            user.sendMessage("NOTICE", ":Could not find the item ID {}".format(auctionID), prefix=self.bidserv.prefix())
            return
        # results[0][2] is the 'sold' flag.
        if results[0][2]:
            user.sendMessage("NOTICE", ":The item {} ({}) has already been sold.".format(results[0][0], results[0][1]), prefix=self.bidserv.prefix())
            return
        # Seed the shared auction state; "called" tracks once/twice/sold progress.
        self.bidserv.cache["auction"] = {
            "item": int(results[0][0]),
            "name": results[0][1],
            "highbid": float(results[0][3]),
            "highbidder": "Nobody",
            "highbidderid": None,
            "startbid": float(results[0][3]),
            "bids": [],
            "called": 0
        }
        lines = [] # The lines array here serves as a cache for the lines so that the format isn't applied repeatedly on every iteration
        lines.append(":\x02\x034Starting Auction for Lot #{}: \"{}\"\x02 - Called by {}".format(results[0][0], results[0][1], user.nickname))
        lines.append(":\x02\x034Item info at http://desertbus.org/live-auction/{}".format(results[0][0]))
        lines.append(":\x02\x034Make bids with \x1F/bid ###.## [smack talk]")
        if "services_bidserv_increment" in self.ircd.servconfig:
            lines.append(":\x02\x034The minimum increment between bids is ${:,.2f}".format(self.ircd.servconfig["services_bidserv_increment"]))
        lines.append(":\x02\x034Only registered donors can bid - https://donor.desertbus.org/")
        lines.append(":\x02\x034Please do not make any fake bids")
        lines.append(":\x02\x034Beginning bidding at ${:,.2f}".format(float(results[0][3])))
        # Announce the auction in every channel on the network.
        for channel in self.ircd.channels.itervalues():
            for line in lines:
                channel.sendChannelMessage("PRIVMSG", line, prefix=self.bidserv.prefix())
        user.sendMessage("NOTICE", ":The auction has been started.", prefix=self.bidserv.prefix())
class BSStopCommand(Command):
    """BidServ STOP: cancel the running auction, logging its state to disk first."""
    def __init__(self, module, service):
        self.module = module
        self.bidserv = service
    def onUse(self, user, data):
        auction = self.bidserv.cache["auction"]
        # Persist the canceled auction state so nothing is lost.
        try:
            with open(self.failedLogName(auction["item"]), "w") as logFile:
                yaml.dump(auction, logFile, default_flow_style=False)
        except IOError:
            user.sendMessage("NOTICE", ":The auction logs could not be written.", prefix=self.bidserv.prefix())
        cancelMsg = ":\x02\x034Auction for {} canceled.\x02 - Called by {}".format(auction["name"], user.nickname)
        # Announce the cancellation everywhere, then clear the auction state.
        for channel in self.ircd.channels.itervalues():
            channel.sendChannelMessage("PRIVMSG", cancelMsg, prefix=self.bidserv.prefix())
        del self.bidserv.cache["auction"]
        user.sendMessage("NOTICE", ":The auction has been canceled.", prefix=self.bidserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "STOP", ":You have not registered")
            return {}
        if not self.module.isServiceAdmin(user, self.bidserv):
            user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
            return {}
        if "auction" not in self.bidserv.cache:
            user.sendMessage("NOTICE", ":There is not an auction going on now.", prefix=self.bidserv.prefix())
            return {}
        return {"user": user}
    def failedLogName(self, id):
        # Append an incrementing counter until the path doesn't collide.
        logDir = self.ircd.servconfig["app_log_dir"]
        log = "{}/auction_stopped-{!s}.log".format(logDir, id)
        count = 1
        while os.path.exists(log):
            log = "{}/auction_stopped-{!s}-{!s}.log".format(logDir, id, count)
            count += 1
        return log
class BSBidCommand(Command):
    """BidServ BID: place a bid in the running auction, with optional smack talk."""
    def __init__(self, module, service):
        self.module = module
        self.bidserv = service
    def onUse(self, user, data):
        bid = data["bid"]
        # Build the "madness" exclamation string for thresholds this bid crosses.
        madness = ""
        if "services_bidserv_madness_levels" in self.ircd.servconfig:
            # Sort threshold -> name pairs by threshold amount.
            levels = sorted(self.ircd.servconfig["services_bidserv_madness_levels"].items(), key=lambda t: t[0])
            for amount, name in levels:
                # Skip levels already passed by the previous high bid or not yet reached.
                if amount <= self.bidserv.cache["auction"]["highbid"] or bid < amount:
                    continue
                if "services_bidserv_madness_show_all" in self.ircd.servconfig and self.ircd.servconfig["services_bidserv_madness_show_all"]:
                    # Accumulate every crossed level...
                    madness += "{}! ".format(name)
                else:
                    # ...or keep only the highest crossed level.
                    madness = "{}! ".format(name)
        # Special call-out when a bidder outbids themselves.
        if self.bidserv.cache["auction"]["highbidderid"] == user.cache["accountid"] and "services_bidserv_space_bid" in self.ircd.servconfig:
            madness += "{}! ".format(self.ircd.servconfig["services_bidserv_space_bid"])
        bidMsg = ":\x02\x034{}{} has the high bid of ${:,.2f}! \x0312{}".format(madness, user.nickname, bid, data["smacktalk"])
        # Reset the once/twice call state and record the new high bid.
        self.bidserv.cache["auction"]["called"] = 0
        self.bidserv.cache["auction"]["bids"].append({
            "bid": bid,
            "bidder": user.cache["accountid"],
            "nick": user.nickname
        })
        self.bidserv.cache["auction"]["highbid"] = bid
        self.bidserv.cache["auction"]["highbidder"] = user.nickname
        self.bidserv.cache["auction"]["highbidderid"] = user.cache["accountid"]
        for channel in self.ircd.channels.itervalues():
            channel.sendChannelMessage("PRIVMSG", bidMsg, prefix=self.bidserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "BID", ":You have not registered")
            return {}
        if "accountid" not in user.cache:
            user.sendMessage("NOTICE", ":You must be logged in to bid.", prefix=self.bidserv.prefix())
            return {}
        if "auction" not in self.bidserv.cache:
            user.sendMessage("NOTICE", ":There is not an auction going on right now.", prefix=self.bidserv.prefix())
            return {}
        if not params:
            user.sendMessage("NOTICE", ":Usage: \x02BID \x1Famount\x1F \x1F[smack talk]", prefix=self.bidserv.prefix())
            return {}
        # Accept an optional leading '$'; round to whole cents.
        try:
            bid = float(params[0].lstrip("$"))
            bid = round(bid, 2)
        except ValueError:
            user.sendMessage("NOTICE", ":Bid amount must be a valid decimal.", prefix=self.bidserv.prefix())
            return {}
        # Reject NaN/inf, which parse as valid floats but break comparisons.
        if math.isnan(bid) or math.isinf(bid):
            user.sendMessage("NOTICE", ":Bid amount must be a valid decimal.", prefix=self.bidserv.prefix())
            return {}
        if "services_bidserv_limit" in self.ircd.servconfig and bid > self.ircd.servconfig["services_bidserv_limit"]:
            user.sendMessage("NOTICE", ":Let's be honest, here. You don't really have ${:,.2f}, do you? I mean, do you \x02really\x02 have that much money on you?".format(bid), prefix=self.bidserv.prefix())
            return {}
        if bid <= self.bidserv.cache["auction"]["highbid"]:
            user.sendMessage("NOTICE", ":The high bid is already ${:,.2f}.".format(self.bidserv.cache["auction"]["highbid"]), prefix=self.bidserv.prefix())
            return {}
        # The increment rule only applies once a first bid exists.
        if self.bidserv.cache["auction"]["bids"] and "services_bidserv_increment" in self.ircd.servconfig and bid < self.bidserv.cache["auction"]["highbid"] + self.ircd.servconfig["services_bidserv_increment"]:
            user.sendMessage("NOTICE", ":The minimum bid increment is ${:,.2f}.".format(self.ircd.servconfig["services_bidserv_increment"]), prefix=self.bidserv.prefix())
            return {}
        return {
            "user": user,
            "bid": bid,
            "smacktalk": " ".join(params[1:]).strip()[:250]
        }
class BSRevertCommand(Command):
    """BidServ REVERT: discard the current high bid and restore the previous one."""
    def __init__(self, module, service):
        self.module = module
        self.bidserv = service
    def onUse(self, user, data):
        auction = self.bidserv.cache["auction"]
        badBid = auction["bids"].pop()
        if auction["bids"]:
            # Promote the previous bid back to the top.
            previousBid = auction["bids"][-1]
            newHighBid = previousBid["bid"]
            newHighBidder = previousBid["nick"]
            newHighBidderID = previousBid["bidder"]
        else:
            # No bids remain; fall back to the opening state.
            newHighBid = auction["startbid"]
            newHighBidder = "Nobody"
            newHighBidderID = None
        revertMsg = ":\x02\x034Bid for ${:,.2f} by {} removed. The new highest bid is for ${:,.2f} by {}!\x02 - Called by {}".format(badBid["bid"], badBid["nick"], newHighBid, newHighBidder, user.nickname)
        auction["highbid"] = newHighBid
        auction["highbidder"] = newHighBidder
        auction["highbidderid"] = newHighBidderID
        auction["called"] = 0
        for channel in self.ircd.channels.itervalues():
            channel.sendChannelMessage("PRIVMSG", revertMsg, prefix=self.bidserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "REVERT", ":You have not registered")
            return {}
        if not self.module.isServiceAdmin(user, self.bidserv):
            user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
            return {}
        if "auction" not in self.bidserv.cache:
            user.sendMessage("NOTICE", ":There is not an auction going on right now.", prefix=self.bidserv.prefix())
            return {}
        if not self.bidserv.cache["auction"]["bids"]:
            user.sendMessage("NOTICE", ":No bids have been made yet!", prefix=self.bidserv.prefix())
            return {}
        return {"user": user}
class BSOnceCommand(Command):
    """BidServ ONCE: call "going once" on the current auction (state 0 -> 1)."""
    def __init__(self, module, service):
        self.module = module
        self.bidserv = service
    def onUse(self, user, data):
        auction = self.bidserv.cache["auction"]
        auction["called"] = 1
        onceMsg = ":\x02\x034Going Once! To {} for ${:,.2f}!\x02 - Called by {}".format(auction["highbidder"], auction["highbid"], user.nickname)
        for channel in self.ircd.channels.itervalues():
            channel.sendChannelMessage("PRIVMSG", onceMsg, prefix=self.bidserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "ONCE", ":You have not registered")
            return {}
        if not self.module.isServiceAdmin(user, self.bidserv):
            user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
            return {}
        if "auction" not in self.bidserv.cache:
            user.sendMessage("NOTICE", ":There is not an auction going on right now.", prefix=self.bidserv.prefix())
            return {}
        # "Going once" is only valid before any call has been made.
        if self.bidserv.cache["auction"]["called"] != 0:
            user.sendMessage("NOTICE", ":Now is not the time to call going once. (Current state: {})".format(self.bidserv.cache["auction"]["called"]), prefix=self.bidserv.prefix())
            return {}
        return {"user": user}
class BSTwiceCommand(Command):
    """BidServ TWICE: call "going twice" on the current auction (state 1 -> 2)."""
    def __init__(self, module, service):
        self.module = module
        self.bidserv = service
    def onUse(self, user, data):
        auction = self.bidserv.cache["auction"]
        auction["called"] = 2
        twiceMsg = ":\x02\x034Going Twice! To {} for ${:,.2f}!\x02 - Called by {}".format(auction["highbidder"], auction["highbid"], user.nickname)
        for channel in self.ircd.channels.itervalues():
            channel.sendChannelMessage("PRIVMSG", twiceMsg, prefix=self.bidserv.prefix())
    def processParams(self, user, params):
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "TWICE", ":You have not registered")
            return {}
        if not self.module.isServiceAdmin(user, self.bidserv):
            user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
            return {}
        if "auction" not in self.bidserv.cache:
            user.sendMessage("NOTICE", ":There is not an auction going on right now.", prefix=self.bidserv.prefix())
            return {}
        # "Going twice" is only valid after "going once" has been called.
        if self.bidserv.cache["auction"]["called"] != 1:
            user.sendMessage("NOTICE", ":Now is not the time to call going twice. (Current state: {})".format(self.bidserv.cache["auction"]["called"]), prefix=self.bidserv.prefix())
            return {}
        return {"user": user}
class BSSoldCommand(Command):
	"""
	BidServ SOLD command: closes the current auction and awards the item to the
	high bidder.  In order: dumps the auction state to a log file, announces the
	sale to all channels, notifies the winner (if still connected and identified
	as the winning account), records the sale in the database, and clears the
	auction from the cache.
	"""
	def __init__(self, module, service):
		self.module = module
		self.bidserv = service
	def onUse(self, user, data):
		# Dump the full auction state (including the bid history) to a log file
		# first, so a record exists even if the database update below fails.
		try:
			with open(self.logname(self.bidserv.cache["auction"]["item"]), "w") as logFile:
				yaml.dump(self.bidserv.cache["auction"], logFile, default_flow_style=False)
		except IOError:
			# Best-effort logging: report the problem but still complete the sale.
			user.sendMessage("NOTICE", ":The log file for this auction could not be written.", prefix=self.bidserv.prefix())
		soldMsg = ":\x02\x034Sold! {} to {} for ${:,.2f}!\x02 - Called by {}".format(self.bidserv.cache["auction"]["name"], self.bidserv.cache["auction"]["highbidder"], self.bidserv.cache["auction"]["highbid"], user.nickname)
		for channel in self.ircd.channels.itervalues():
			channel.sendChannelMessage("PRIVMSG", soldMsg, prefix=self.bidserv.prefix())
		# Tell the winner how to pay, but only if the user currently holding that
		# nick is still identified to the same donor account that placed the bid.
		if self.bidserv.cache["auction"]["highbidder"] in self.ircd.users:
			udata = self.ircd.users[self.bidserv.cache["auction"]["highbidder"]]
			if "accountid" in udata.cache and udata.cache["accountid"] == self.bidserv.cache["auction"]["highbidderid"]:
				udata.sendMessage("NOTICE", ":Congratulations! You won \"{}\"! Please log into your donor account and visit https://desertbus.org/donate?type=auction&prize={!s} to pay for your prize.".format(self.bidserv.cache["auction"]["name"], self.bidserv.cache["auction"]["item"]), prefix=self.bidserv.prefix())
		# The {0} placeholders are replaced with the configured SQL parameter
		# marker by self.module.query(); the three values bind positionally.
		d = self.module.query("UPDATE prizes SET donor_id = {0}, sold_amount = {0}, sold = 1 WHERE id = {0}", self.bidserv.cache["auction"]["highbidderid"], self.bidserv.cache["auction"]["highbid"], self.bidserv.cache["auction"]["item"])
		d.addErrback(self.reportError, user, self.bidserv.cache["auction"])
		del self.bidserv.cache["auction"]
	def processParams(self, user, params):
		# Validate: registered connection, services admin, an auction in progress,
		# and TWICE already called (called == 2).
		if user.registered > 0:
			user.sendMessage(irc.ERR_NOTREGISTERED, "SOLD", ":You have not registered")
			return {}
		if not self.module.isServiceAdmin(user, self.bidserv):
			user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
			return {}
		if "auction" not in self.bidserv.cache:
			user.sendMessage("NOTICE", ":There is not an auction going on right now.", prefix=self.bidserv.prefix())
			return {}
		if self.bidserv.cache["auction"]["called"] != 2:
			user.sendMessage("NOTICE", ":Now is not the time to call sold. (Current state: {})".format(self.bidserv.cache["auction"]["called"]), prefix=self.bidserv.prefix())
			return {}
		return {
			"user": user
		}
	def logname(self, id):
		# Pick a log file name under app_log_dir that doesn't exist yet,
		# appending -1, -2, ... until a free name is found.
		log = "{}/auction_{!s}.log".format(self.ircd.servconfig["app_log_dir"], id)
		count = 1
		while os.path.exists(log):
			log = "{}/auction_{!s}-{!s}.log".format(self.ircd.servconfig["app_log_dir"], id, count)
			count += 1
		return log
	def reportError(self, results, user, auctionData):
		# Errback for the prizes UPDATE above.  The auction data is passed in
		# explicitly because the cache entry is deleted right after the query
		# is issued, before this errback can fire.
		user.sendMessage("NOTICE", ":An error occurred updating the database with the winner ({} with ID {} for amount ${:,.2f}).".format(auctionData["highbidder"], auctionData["highbidderid"], auctionData["highbid"]), prefix=self.bidserv.prefix())
class BSHighbidderCommand(Command):
	"""BidServ HIGHBIDDER command: reports the current high bid and who placed it."""
	def __init__(self, module, service):
		self.module = module
		self.bidserv = service
	def onUse(self, user, data):
		auction = self.bidserv.cache["auction"]
		user.sendMessage("NOTICE", ":The current high bid is ${:,.2f} by {}.".format(auction["highbid"], auction["highbidder"]), prefix=self.bidserv.prefix())
	def processParams(self, user, params):
		# Anyone may query the high bidder, but only while an auction is running.
		if user.registered > 0:
			user.sendMessage(irc.ERR_NOTREGISTERED, "HIGHBIDDER", ":You have not registered")
			return {}
		if "auction" not in self.bidserv.cache:
			user.sendMessage("NOTICE", ":There is not an auction going on right now.", prefix=self.bidserv.prefix())
			return {}
		return { "user": user }
class BSCurrentAuctionCommand(Command):
	"""BidServ CURRENTAUCTION command: reports which item is currently up for auction."""
	def __init__(self, module, service):
		self.module = module
		self.bidserv = service
	def onUse(self, user, data):
		auction = self.bidserv.cache["auction"]
		user.sendMessage("NOTICE", ":The item currently up for auction is lot #{} ({}). http://desertbus.org/live-auction/{}".format(auction["item"], auction["name"], auction["item"]), prefix=self.bidserv.prefix())
	def processParams(self, user, params):
		# Anyone may ask, but only while an auction is running.
		if user.registered > 0:
			user.sendMessage(irc.ERR_NOTREGISTERED, "CURRENTAUCTION", ":You have not registered")
			return {}
		if "auction" not in self.bidserv.cache:
			user.sendMessage("NOTICE", ":There is not an auction running at this time.", prefix=self.bidserv.prefix())
			return {}
		return { "user": user }
class OSServAdminCommand(Command):
	"""
	OperServ SERVADMIN command: lets a services admin view (LIST) or modify
	(ADD/DEL) the admin account list of any running service.  The target user
	may be given as a connected nick, a registered nick (resolved through the
	database), or a raw account id.
	"""
	def __init__(self, module, service):
		self.module = module
		self.operserv = service
	def onUse(self, user, data):
		# A nick was given that isn't a connected user: resolve it to a donor
		# account id in the database, then apply the change in the callback.
		if "nick" in data:
			d = self.module.query("SELECT donor_id FROM ircnicks WHERE nick = {0} LIMIT 1", data["nick"])
			d.addCallback(self.modifyList, data)
			d.addErrback(self.module.exclaimServerError, user, self.operserv)
			return
		if data["action"] == "list":
			adminList = self.module.admins[data["service"]]
			if not adminList:
				user.sendMessage("NOTICE", ":The admin list for that service is empty.", prefix=self.operserv.prefix())
				return
			# Fetch one representative nick per admin account (the one with the
			# lowest ircnicks id).  The .format() here only fills the WHERE
			# clause; the {0} placeholders inside it are left for the parameter
			# marker substitution performed later by query().
			d = self.module.query("SELECT n1.donor_id, n1.nick FROM ircnicks n1 JOIN (SELECT MIN(id) minID, donor_id FROM ircnicks GROUP BY donor_id) n2 ON n1.id = n2.minID WHERE {}".format(" OR ".join(["n1.donor_id = {0}" for i in adminList])), *adminList)
			d.addCallback(self.listAdmins, user, data["service"])
			d.addErrback(self.module.exclaimServerError, user, self.operserv)
			return
		# The account id is already known; call modifyList directly with an
		# argument shaped like a database result row set.
		self.modifyList([[data["account"]]], data)
	def processParams(self, user, params):
		# Validate the caller and the subcommand, then normalize the target into
		# either an "account" (known id) or a "nick" (to be resolved in onUse).
		if user.registered > 0:
			user.sendMessage(irc.ERR_NOTREGISTERED, "SERVADMIN", ":You have not registered")
			return {}
		if not self.module.isServiceAdmin(user, self.operserv):
			user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
			return {}
		if not params:
			user.sendMessage("NOTICE", ":Usage: SERVADMIN {ADD|DEL|LIST} \x1Fservice\x1F \x1F[user]\x1F", prefix=self.operserv.prefix())
			return {}
		subcmd = params[0].lower()
		if len(params) < 2 or subcmd not in ["add", "del", "list"]:
			user.sendMessage("NOTICE", ":Usage: SERVADMIN {ADD|DEL|LIST} \x1Fservice\x1F \x1F[user]\x1F", prefix=self.operserv.prefix())
			return {}
		service = params[1].lower()
		if service not in self.module.admins.keys():
			user.sendMessage("NOTICE", ":Service {} is not a valid service name.".format(service), prefix=self.operserv.prefix())
			return {}
		if subcmd == "list":
			return {
				"user": user,
				"service": service,
				"action": "list"
			}
		if len(params) < 3:
			user.sendMessage("NOTICE", ":The ADD and DEL subcommands require a user to add or remove.", prefix=self.operserv.prefix())
			return {}
		# Target is a connected user: use their logged-in account id directly.
		if params[2] in self.ircd.users:
			udata = self.ircd.users[params[2]]
			if "accountid" not in udata.cache:
				user.sendMessage("NOTICE", ":The given user is not signed in.", prefix=self.operserv.prefix())
				return {}
			if subcmd == "add":
				if udata.cache["accountid"] in self.module.admins[service]:
					user.sendMessage("NOTICE", ":The user you're adding is already an admin.", prefix=self.operserv.prefix())
					return {}
			else:
				if udata.cache["accountid"] not in self.module.admins[service]:
					user.sendMessage("NOTICE", ":The user you're removing is already not an admin.", prefix=self.operserv.prefix())
					return {}
			return {
				"user": user,
				"service": service,
				"action": subcmd,
				"account": udata.cache["accountid"]
			}
		# Target given as a numeric account id.
		if params[2].isdigit():
			return {
				"user": user,
				"service": service,
				"action": subcmd,
				"account": params[2]
			}
		# Otherwise treat the target as a registered nick; onUse resolves it.
		return {
			"user": user,
			"service": service,
			"action": subcmd,
			"nick": params[2]
		}
	def modifyList(self, target, data):
		# Applies an ADD or DEL to the admin list.  *target* is shaped like a DB
		# result ([[donor_id]]); an empty result means the nick isn't registered.
		user = data["user"]
		if not target:
			user.sendMessage("NOTICE", ":The given nickname is not registered.", prefix=self.operserv.prefix())
			return
		targetID = str(target[0][0])
		adminList = self.module.admins[data["service"]]
		if data["action"] == "add":
			if targetID in adminList:
				user.sendMessage("NOTICE", ":Account {} is already on the admin list.".format(targetID), prefix=self.operserv.prefix())
				return
			adminList.append(targetID)
			user.sendMessage("NOTICE", ":Account {} was added to the admin list.".format(targetID), prefix=self.operserv.prefix())
		else:
			if targetID not in adminList:
				user.sendMessage("NOTICE", ":Account {} is already not on the admin list.".format(targetID), prefix=self.operserv.prefix())
				return
			adminList.remove(targetID)
			user.sendMessage("NOTICE", ":Account {} was removed from the admin list.".format(targetID), prefix=self.operserv.prefix())
		# Push the updated list to all linked servers so they stay in sync.
		for server in self.ircd.servers.itervalues():
			server.callRemote(ModuleMessage, destserver=server.name, type="ServiceAdmins", args=[data["service"]] + adminList)
	def listAdmins(self, admins, user, service):
		# Callback for the LIST query: one notice per admin (nick column).
		user.sendMessage("NOTICE", ":Admins for service {}".format(service), prefix=self.operserv.prefix())
		for admin in admins:
			user.sendMessage("NOTICE", ": {}".format(admin[1]), prefix=self.operserv.prefix())
		user.sendMessage("NOTICE", ":End of admin list", prefix=self.operserv.prefix())
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.db = None
self.helpText = {
"nickserv": ["NickServ matches your IRC nickname to your Donor account, allowing for a painless auction process, as well as the peace of mind that nobody can use your nickname but you.", CaseInsensitiveDictionary()],
"chanserv": ["ChanServ allows managing channels to ease slightly the process of running this thing.", CaseInsensitiveDictionary()],
"bidserv": ["BidServ handles all of our fancy schmancy auction business and serves as the interface directly to auctions.", CaseInsensitiveDictionary()],
"operserv": ["OperServ handles things that opers may need.", CaseInsensitiveDictionary()]
}
# Help text values:
# [ short description, long description, oper only ]
# The short description should be short and fit on one line. Further details can be relegated to the long description.
# The long description should be written in a manner suitable to be passed to chunk_message (i.e. it accepts \n to signify a line break
# and will split lines automatically; any other characters allowed in a message should be fine).
# Set the oper-only parameter to True to hide the command from non-opers. The actual command output should claim it doesn't
# know that command to non-opers; this system in the help text helps facilitate that.
self.helpText["nickserv"][1]["HELP"] = ["Shows command help", "Syntax: \x02HELP \x1F[command]\x1F\x02\n\nDisplays command help. With the optional command parameter, displays help for the given command.", False]
self.helpText["nickserv"][1]["IDENTIFY"] = ["Backward-compatible version of LOGIN", "Syntax: \x02IDENTIFY \x1Femail:password\x1F\x02\n\nLogs into a donor account with the specified email and password. If it isn't already, your current nick will be associated with the account and protected from impersonation. You'll also be allowed to bid in all auctions.", False]
self.helpText["nickserv"][1]["ID"] = ["Alias of IDENTIFY", "Syntax: \x02ID \x1Femail:password\x1F\x02\n\nLogs into a donor account with the specified email and password. If it isn't already, your current nick will be associated with the account and protected from impersonation. You'll also be allowed to bid in all auctions.", False]
self.helpText["nickserv"][1]["LOGIN"] = ["Log into an existing donor account", "Syntax: \x02LOGIN \x1Femail\x1F \x1Fpassword\x1F\x02\n\nLogs into a donor account with the specified email and password. If it isn't already, your current nick will be associated with the account and protected from impersonation. You'll also be allowed to bid in all auctions.", False]
self.helpText["nickserv"][1]["LOGOUT"] = ["Log out of your donor account", "Syntax: \x02LOGOUT\x02\n\nLogs out of whatever account you are in right now. Useful to prevent your roommate from bidding on auctions in your name.", False]
self.helpText["nickserv"][1]["GHOST"] = ["Disconnects a user with the given nick", "Syntax: \x02GHOST \x1Fnickname\x1F\x02\n\nIf the given nickname is linked with your account, the user with the given nick is disconnected.", False]
self.helpText["nickserv"][1]["DROP"] = ["Unregisters a givennickname from your account", "Syntax: \x02DROP \x1Fnickname\x1F\x02\n\nUnregisters the given nickname from your account, allowing other people to use it and giving you more space to register other nicknames.", False]
self.helpText["nickserv"][1]["NICKLIST"] = ["Lists all the nicknames registered to your account", "Syntax: \x02NICKLIST\x02\n\nLists all the nicknames registered to your account.", False]
self.helpText["nickserv"][1]["CERT"] = ["Allows you to manage SSL certificate fingerprints for SASL EXTERNAL authentication", "Syntax: \x02CERT \x1F{LIST|ADD|DEL}\x1F \x1F[certificate fingerprint]\x1F\x02\n\nProvides a mechanism to manage SSL certificate fingerprints for SASL EXTERNAL authentication. SSL certificate fingerprints available on your account when you log in normally are automatically added to this list for later use. Use the \x02LIST\x02 subcommand to view all certificate fingerprints associated with your account. If you supply a certificate fingerprint for the \x02ADD\x02 or \x02DEL\x02 subcommands, you can modify the list. If you are currently connected via SSL with a certificate, you can view your current certificate fingerprint using /WHOIS.", False]
self.helpText["chanserv"][1]["HELP"] = ["Shows command help", "Syntax: \x02HELP \x1F[command]\x1F\x02\n\nDisplays command help. With the optional command parameter, displays help for the given command.", False]
self.helpText["chanserv"][1]["REGISTER"] = ["Registers a channel for your use", "Syntax: \x02REGISTER \x1Fchannel\x1F\x02\n\nRegisters a channel with you as a founder. You must be a channel op or higher in the specified channel in order to register the channel.", False]
self.helpText["chanserv"][1]["ACCESS"] = ["Allows you to change the access level of another user in a channel you own", "Syntax: \x02ACCESS \x1Fchannel\x1F [\x1Faccount|nick|group\x1F \x1Fflags\x1F]\x02\n\nLists or changes access information for a channel. If an account is not specified, the channel's access list will be displayed. If a nick is given for the account, it will first match a user with that nick; if one is not connected to the network, it then checks for an account to which that nick is registered. If an account and flags are specified, the given flag changes will be applied to the given account in the channel. Valid flags are any channel status mode level, and they are automatically applied to matching users on join or identify. "
"You can also assign the +A flag, which grants the ability to modify the channel access list to other users. The channel founder always has the ability to control the access list. The group parameter can be any of the following:\n\t~o\tAll opered users\n\t~r\tAll registered and identified users", False]
self.helpText["chanserv"][1]["CDROP"] = ["Allows you to drop channels you own", "Syntax: \x02CDROP \x1Fchannel\x1F\x02\n\nDrops the specified channel that you own.", False]
self.helpText["bidserv"][1]["HELP"] = ["Shows command help", "Syntax: \x02HELP \x1F[command]\x1F\x02\n\nDisplays command help. With the optional command parameter, displays help for the given command.", False]
self.helpText["bidserv"][1]["START"] = ["Start an auction", "Syntax: \x02START \x1FItemID\x1F\x02\n\nStarts an auction with the given item ID.", True]
self.helpText["bidserv"][1]["STOP"] = ["Cancel the current auction", "Syntax: \x02STOP\x02\n\nStops and cancels the currently running auction, and logs the bid history.", True]
self.helpText["bidserv"][1]["BID"] = ["Bid in the active auction", "Syntax: \x02BID \x1Famount\x1F \x1F[smacktalk]\x1F\x02\n\nDuring an auction, this command allows the user to place a bid. If the bid is higher than the last successful bid, BidServ will display it along with any provided smack talk.", False]
self.helpText["bidserv"][1]["REVERT"] = ["Cancel the highest bid", "Syntax: \x02REVERT\x02\n\nRemoves the highest bid from the auction, returning the auction to the state of the bid before that.", True]
self.helpText["bidserv"][1]["ONCE"] = ["Call \"Going Once!\"", "Syntax: \x02ONCE\x02\n\nCalls \"Going Once!\" to the crowd.", True]
self.helpText["bidserv"][1]["TWICE"] = ["Call \"Going Twice!\"", "Syntax: \x02TWICE\x02\n\nCalls \"Going Twice!\" to the crowd. Use with caution, as this command is known to make the crowd go crazy and arouse bid snipers.", True]
self.helpText["bidserv"][1]["SOLD"] = ["Award the auction to the highest bidder", "Syntax: \x02SOLD\x02\n\nDeclares the auction as complete, awarding the prize to the highest bidder.", True]
self.helpText["bidserv"][1]["HIGHBIDDER"] = ["Get the high bidder in the current auction", "Syntax: \x02HIGHBIDDER\x02\n\nDisplays the high bidder in the current auction along with the amount of the current high bid.", False]
self.helpText["bidserv"][1]["CURRENTAUCTION"] = ["Shows the item currently up for auction.", "Syntax: \x02CURRENTAUCTION\x02\n\nDisplays the item currently up for auction.", False]
self.helpText["operserv"][1]["SERVADMIN"] = ["Modifies the list of services admins", "Syntax: \x02SERVADMIN \x1F{ADD|DEL|LIST}\x1F \x1Fservice\x1F \x1F[nick|ID]\x1F\n\nWith the LIST subcommand, shows the admin list for the given service, or with the ADD or DEL subcommands, adds or removes users from the admin list for the given service. The service parameter must be the base name of a running service (e.g. 'chanserv', 'nickserv'). The nick|ID parameter (required with the ADD or DEL subcommands) takes either a nick (either of a connected user or a registered nick) or an account ID to add or remove.", True]
self.nickserv = None
self.chanserv = None
self.bidserv = None
self.operserv = None
self.admins = {
"nickserv": [],
"chanserv": [],
"bidserv": [],
"operserv": []
}
self.auth_timer = {}
self.saslUsers = {}
self.dh_params = {
"prime": choice([
88402972170164172303996249033648413055387009713852283396427128179123747944379,
91746108751951218532193616301150976083366027734878534201960124432778416879539,
92945140821716085806198142320947682800779092108019535774184129113149939170123,
93625107128045288835755305646068609532847375166101406081196645551422757580507,
108909761299361055449660291659325447936718493848917032839979168931777346073907,
58529074759799933482807725855391987615194322074665787950527162701215610904859,
63215203281400196112753723499340546973831665649781552489040151108086880794939,
99195390668713967129863040089109264022338575583938782520648896161781140978099,
98320696763549501689335835915885018157132325101334822216070056656015233291067,
74908680543512180865211668542005927401643158932789334079491797673369893924603,
99025823254147595040966722556875361638898692074641873723359611001113538823443,
107964489490334018274784413863315720640934243685778520051782258286366346826227,
104202362400023930381804819994551127488289562009989972491899584394317891141443,
73863143383182619527071902801928331241530571923876498504070459947520196044787,
95801365258657418181206410666013041855141021310679528411633513825801160377803,
89054622815932378492843517219374835719798439123122784761267126530397148323187,
103713955944890997176144155572473093154977522758539026968740490431737758488227,
79308228509923367000733842193939129986180038982554140219238722525621333587459,
106461735594795909591077249375502099206790800370424877313249472120829170793483,
108457637430077952262260760668351495732056364579055819040728019625047787438083,
106759564531318215142965091722492578636123746401975729785500302146499220422803,
98855733477651975750208811397393732496469393603166987989558094274863510415547,
70674938222379309574525107444002821282063783401243929580390502861261302706259,
67537014653035600875177807262642023628239365826709589889453984786327365000627,
77605853594559162243575384531288420166266958774785718529594030783621600613987
]),
"generator": 2,
"privkey": getrandbits(512)
}
self.dh_params["pubkey"] = pow(self.dh_params["generator"], self.dh_params["privkey"], self.dh_params["prime"])
# The Diffie-Hellman parameters are generated here for the DH-BLOWFISH and DH-AES mechanisms for SASL authentication.
# For the prime numbers here, I generated a series of 256-bit prime numbers using the C OpenSSL library, because all the
# ones for Python suck and won't do it for me properly. A random one of the 25 here will be chosen on each module initialization.
#
# 2 and 5 are common values for the generator. I chose two. You can change it to five if you want.
#
# The private key is just random bits. It is currently generated at 512 bits.
#
# The public key must be generated from these other three values ((generator ^ private_key) mod prime), and is stored here as well.
#
# Everything here is generated once per session.
def spawn(self):
if "servdb_library" in self.ircd.servconfig and "servdb_host" in self.ircd.servconfig and "servdb_port" in self.ircd.servconfig and "servdb_database" in self.ircd.servconfig and "servdb_username" in self.ircd.servconfig and "servdb_password" in self.ircd.servconfig and self.ircd.servconfig["servdb_library"]:
self.db = adbapi.ConnectionPool(self.ircd.servconfig["servdb_library"], host=self.ircd.servconfig["servdb_host"], port=self.ircd.servconfig["servdb_port"], db=self.ircd.servconfig["servdb_database"], user=self.ircd.servconfig["servdb_username"], passwd=self.ircd.servconfig["servdb_password"], cp_reconnect=True)
if "servdb_marker" not in self.ircd.servconfig:
self.ircd.servconfig["servdb_marker"] = "%s"
if "services_nickserv_guest_prefix" not in self.ircd.servconfig:
self.ircd.servconfig["services_nickserv_guest_prefix"] = "Guest"
if "services_nickserv_nick" not in self.ircd.servconfig:
self.ircd.servconfig["services_nickserv_nick"] = "NickServ"
if "services_nickserv_ident" not in self.ircd.servconfig:
self.ircd.servconfig["services_nickserv_ident"] = "NickServ"
if "services_nickserv_host" not in self.ircd.servconfig:
self.ircd.servconfig["services_nickserv_host"] = "services.desertbus.org"
if "services_nickserv_gecos" not in self.ircd.servconfig:
self.ircd.servconfig["services_nickserv_gecos"] = "Nickname Service"
if "services_chanserv_nick" not in self.ircd.servconfig:
self.ircd.servconfig["services_chanserv_nick"] = "ChanServ"
if "services_chanserv_ident" not in self.ircd.servconfig:
self.ircd.servconfig["services_chanserv_ident"] = "ChanServ"
if "services_chanserv_host" not in self.ircd.servconfig:
self.ircd.servconfig["services_chanserv_host"] = "services.desertbus.org"
if "services_chanserv_gecos" not in self.ircd.servconfig:
self.ircd.servconfig["services_chanserv_gecos"] = "Channel Service"
if "services_bidserv_nick" not in self.ircd.servconfig:
self.ircd.servconfig["services_bidserv_nick"] = "BidServ"
if "services_bidserv_ident" not in self.ircd.servconfig:
self.ircd.servconfig["services_bidserv_ident"] = "BidServ"
if "services_bidserv_host" not in self.ircd.servconfig:
self.ircd.servconfig["services_bidserv_host"] = "services.desertbus.org"
if "services_bidserv_gecos" not in self.ircd.servconfig:
self.ircd.servconfig["services_bidserv_gecos"] = "Bidding Service"
if "services_operserv_nick" not in self.ircd.servconfig:
self.ircd.servconfig["services_operserv_nick"] = "OperServ"
if "services_operserv_ident" not in self.ircd.servconfig:
self.ircd.servconfig["services_operserv_ident"] = "OperServ"
if "services_operserv_host" not in self.ircd.servconfig:
self.ircd.servconfig["services_operserv_host"] = "services.desertbus.org"
if "services_operserv_gecos" not in self.ircd.servconfig:
self.ircd.servconfig["services_operserv_gecos"] = "Operator Service"
self.nickserv = Service(self.ircd, self.ircd.servconfig["services_nickserv_nick"], self.ircd.servconfig["services_nickserv_ident"], self.ircd.servconfig["services_nickserv_host"], self.ircd.servconfig["services_nickserv_gecos"], self.helpText["nickserv"], self)
self.chanserv = Service(self.ircd, self.ircd.servconfig["services_chanserv_nick"], self.ircd.servconfig["services_chanserv_ident"], self.ircd.servconfig["services_chanserv_host"], self.ircd.servconfig["services_chanserv_gecos"], self.helpText["chanserv"], self)
self.bidserv = Service(self.ircd, self.ircd.servconfig["services_bidserv_nick"], self.ircd.servconfig["services_bidserv_ident"], self.ircd.servconfig["services_bidserv_host"], self.ircd.servconfig["services_bidserv_gecos"], self.helpText["bidserv"], self)
self.operserv = Service(self.ircd, self.ircd.servconfig["services_operserv_nick"], self.ircd.servconfig["services_operserv_ident"], self.ircd.servconfig["services_operserv_host"], self.ircd.servconfig["services_operserv_gecos"], self.helpText["operserv"], self)
self.chanserv.cache["registered"] = CaseInsensitiveDictionary()
self.nickserv.cache["certfp"] = {}
self.ircd.users[self.ircd.servconfig["services_nickserv_nick"]] = self.nickserv
self.ircd.users[self.ircd.servconfig["services_chanserv_nick"]] = self.chanserv
self.ircd.users[self.ircd.servconfig["services_bidserv_nick"]] = self.bidserv
self.ircd.users[self.ircd.servconfig["services_operserv_nick"]] = self.operserv
self.ircd.userid[self.nickserv.uuid] = self.nickserv
self.ircd.userid[self.chanserv.uuid] = self.chanserv
self.ircd.userid[self.bidserv.uuid] = self.bidserv
self.ircd.userid[self.operserv.uuid] = self.operserv
self.nickserv.addToServers()
self.chanserv.addToServers()
self.bidserv.addToServers()
self.operserv.addToServers()
self.ircd.module_data_cache["sasl_agent"] = self
for server in self.ircd.servers.itervalues(): # Propagate information to other servers
server.callRemote(ModuleMessage, destserver=server.name, type="ServiceServer", args=[self.ircd.name])
for adminType, adminList in self.admins.iteritems():
server.callRemote(ModuleMessage, destserver=server.name, type="ServiceAdmins", args=[adminType] + adminList)
return {
"commands": {
"NICKSERV": NickServAlias(),
"NS": NickServAlias(),
"CHANSERV": ChanServAlias(),
"CS": ChanServAlias(),
"BIDSERV": BidServAlias(),
"BS": BidServAlias(),
"OPERSERV": OperServAlias(),
"OS": OperServAlias(),
"IDENTIFY": NSIdentifyCommand(self, self.nickserv),
"ID": NSIdentifyCommand(self, self.nickserv),
"GHOST": NSGhostCommand(self, self.nickserv),
"LOGIN": NSLoginCommand(self, self.nickserv),
"LOGOUT": NSLogoutCommand(self, self.nickserv),
"DROP": NSDropCommand(self, self.nickserv),
"NICKLIST": NSNicklistCommand(self, self.nickserv),
"CERT": NSCertCommand(self, self.nickserv),
"REGISTER": CSRegisterCommand(self, self.chanserv),
"ACCESS": CSAccessCommand(self, self.chanserv),
"CDROP": CSCdropCommand(self, self.chanserv),
"START": BSStartCommand(self, self.bidserv),
"STOP": BSStopCommand(self, self.bidserv),
"BID": BSBidCommand(self, self.bidserv),
"REVERT": BSRevertCommand(self, self.bidserv),
"ONCE": BSOnceCommand(self, self.bidserv),
"TWICE": BSTwiceCommand(self, self.bidserv),
"SOLD": BSSoldCommand(self, self.bidserv),
"HIGHBIDDER": BSHighbidderCommand(self, self.bidserv),
"CURRENTAUCTION": BSCurrentAuctionCommand(self, self.bidserv),
"SERVADMIN": OSServAdminCommand(self, self.operserv)
},
"actions": {
"register": self.onRegister,
"join": self.promote,
"quit": self.onQuit,
"nick": self.onNickChange,
"topic": self.onTopicChange,
"chancreate": self.onChanCreate,
"netmerge": self.onNetmerge,
"commandpermission": self.commandPermission
}
}
def cleanup(self):
if self.db:
self.db.close()
self.nickserv.removeFromServers()
self.chanserv.removeFromServers()
self.bidserv.removeFromServers()
self.operserv.removeFromServers()
del self.ircd.users[self.nickserv.nickname]
del self.ircd.users[self.chanserv.nickname]
del self.ircd.users[self.bidserv.nickname]
del self.ircd.users[self.operserv.nickname]
del self.ircd.userid[self.nickserv.uuid]
del self.ircd.userid[self.chanserv.uuid]
del self.ircd.userid[self.bidserv.uuid]
del self.ircd.userid[self.operserv.uuid]
def data_serialize(self):
outputDict = {}
outputDict["registeredchannels"] = self.chanserv.cache["registered"]._data
if "auction" in self.bidserv.cache:
outputDict["currentauction"] = self.bidserv.cache["auction"]
outputDict["certfp"] = self.nickserv.cache["certfp"]
outputDict["admins"] = self.admins
return [outputDict, {"auth_timers": self.auth_timer, "saslusers": self.saslUsers}]
def data_unserialize(self, data):
if "currentauction" in data:
self.bidserv.cache["auction"] = data["currentauction"]
if "certfp" in data:
self.nickserv.cache["certfp"] = data["certfp"]
if "registeredchannels" in data:
for key, value in data["registeredchannels"].iteritems():
self.chanserv.cache["registered"][key] = value
if "admins" in data:
self.admins = data["admins"]
if "auth_timers" in data:
self.auth_timer = data["auth_timers"]
if "saslusers" in data:
self.saslUsers = data["saslusers"]
# Services Functions
def query(self, query, *args):
query = query.format(self.ircd.servconfig["servdb_marker"])
return self.db.runQuery(query, args)
def exclaimServerError(self, result, user, service):
if user in self.saslUsers:
self.saslUsers[user]["failure"](user)
del self.saslUsers[user]
else:
user.sendMessage("NOTICE", ":A server error has occurred.", prefix=service.prefix())
def genGuestNick(self):
nick = "{}{:>06d}".format(self.ircd.servconfig["services_nickserv_guest_prefix"] if "services_nickserv_guest_prefix" in self.ircd.servconfig and self.ircd.servconfig["services_nickserv_guest_prefix"] else "Guest", random.randrange(1000000))
if nick in self.ircd.users:
return self.genGuestNick()
return nick
def auth(self, user, username, password):
d = self.query("SELECT id, display_name, password FROM donors WHERE email = {0}", username)
d.addCallback(self.verifyPassword, user, password)
d.addErrback(self.exclaimServerError, user, self.nickserv)
return d
def authByCert(self, user, cert, username):
d = self.query("SELECT id, display_name FROM donors WHERE email = {0}", username)
d.addCallback(self.verifyCert, user, cert)
d.addErrback(self.exclaimServerError, user, self.nickserv)
return d
def token(self, user, password):
d = self.query("SELECT donor_id FROM irctokens WHERE token = {0}", password)
d.addCallback(self.loadDonorInfo, user)
return d
def checkNick(self, user):
if user in self.auth_timer:
self.removeAuthTimer(user)
if irc_lower(user.nickname).startswith(irc_lower(self.ircd.servconfig["services_nickserv_guest_prefix"])):
return # Don't check guest nicks
d = self.query("SELECT donor_id, nick FROM ircnicks WHERE nick = {0} LIMIT 1", irc_lower(user.nickname))
d.addCallback(self.beginVerify, user)
return d
def verifyPassword(self, result, user, password):
if not result:
if user in self.saslUsers:
self.saslUsers[user]["failure"](user)
del self.saslUsers[user]
else:
self.checkNick(user)
user.sendMessage("NOTICE", ":The login credentials you provided were incorrect.", prefix=self.nickserv.prefix())
return
hash = result[0][2]
check = crypt(password, hash)
if check == hash:
self.loginUser(result, user)
else:
if user in self.saslUsers:
self.saslUsers[user]["failure"](user)
del self.saslUsers[user]
else:
self.checkNick(user)
user.sendMessage("NOTICE", ":The login credentials you provided were incorrect.", prefix=self.nickserv.prefix())
def verifyCert(self, result, user, cert):
def failValidation():
if user in self.saslUsers:
self.saslUsers[user]["failure"](user)
del self.saslUsers[user]
else:
self.checkNick(user)
user.sendMessage("NOTICE", ":The login credentials you provided were incorrect.", prefix=self.nickserv.prefix())
if not result:
failValidation()
return
accid = result[0][0]
if accid not in self.nickserv.cache["certfp"]:
failValidation()
return
if cert in self.nickserv.cache["certfp"][accid]:
self.loginUser(result, user)
else:
failValidation()
def loginUser(self, result, user):
    """Mark the user as identified: record the account id and display name,
    finish any SASL exchange, re-check nick protection, and broadcast the
    login to all linked servers."""
    user.cache["accountid"] = str(result[0][0])
    if result[0][1]:
        # Account names may not contain spaces in IRC metadata.
        user.setMetadata("ext", "accountname", result[0][1].replace(" ", "_"))
    else:
        user.setMetadata("ext", "accountname", "Anonymous") # The account name can't be blank, so fill in a default one
    if user in self.auth_timer:
        self.removeAuthTimer(user)
    if user in self.saslUsers:
        self.saslUsers[user]["success"](user)
        del self.saslUsers[user]
    else:
        user.sendMessage("NOTICE", ":You are now identified. Welcome, {}.".format(user.metadata["ext"]["accountname"]), prefix=self.nickserv.prefix())
    self.checkNick(user)
    # Propagate the login to every other server on the network.
    for server in self.ircd.servers.itervalues():
        server.callRemote(ModuleMessage, destserver=server.name, type="ServiceLogin", args=[user.uuid, user.cache["accountid"]])
    self.registered(user)
def logoutUser(self, user):
    """Log the user out: drop account state, demote channel privileges,
    broadcast the logout to all linked servers, and notify the user."""
    del user.cache["accountid"]
    user.delMetadata("ext", "accountname")
    self.checkNick(user)
    self.unregistered(user)
    for server in self.ircd.servers.itervalues():
        server.callRemote(ModuleMessage, destserver=server.name, type="ServiceLogout", args=[user.uuid])
    user.sendMessage("NOTICE", ":You are now logged out.", prefix=self.nickserv.prefix())
def loadDonorInfo(self, result, user):
    """Deferred callback for token auth: resolve the donor id from the token
    row into full account info, then continue to setDonorInfo."""
    if not result:
        self.checkNick(user)
        user.sendMessage("NOTICE", ":An invalid authentication token was provided.", prefix=self.nickserv.prefix())
        return
    d = self.query("SELECT id, display_name FROM donors WHERE id = {0}", result[0][0])
    d.addCallback(self.setDonorInfo, user)
    d.addErrback(self.exclaimServerError, user, self.nickserv)
    return d
def beginVerify(self, result, user):
    """Deferred callback for checkNick: if the nick is registered to someone
    else, demote the user and start the identify timeout; if the user owns an
    account and the nick is free, try to register the nick to that account."""
    if result:
        id = str(result[0][0])
        if irc_lower(user.nickname) != irc_lower(result[0][1]):
            # The user changed nicks before the SQL result returned, so we'll just pretend none of this ever happened
            return
        if "accountid" in user.cache and user.cache["accountid"] == id:
            if user in self.auth_timer: # Clear the timer
                self.removeAuthTimer(user)
            return # Already identified
        user.sendMessage("NOTICE", ":This is a registered nick. Please use \x02/msg {} login EMAIL PASSWORD\x0F to verify your identity.".format(self.nickserv.nickname), prefix=self.nickserv.prefix())
        self.unregistered(user)
        if user in self.auth_timer:
            self.removeAuthTimer(user)
        self.setAuthTimer(user)
    elif "accountid" in user.cache:
        # Try to register the nick
        d = self.query("SELECT nick FROM ircnicks WHERE donor_id = {0}", user.cache["accountid"])
        d.addCallback(self.registerNick, user, user.nickname)
        d.addErrback(self.failedRegisterNick, user, user.nickname)
def setAuthTimer(self, user, accountid=None):
    """Start the identify timeout for a user squatting on a registered nick.

    When the timer fires, changeNick forcibly renames the user to a guest
    nick unless they have identified (or changed nicks) in the meantime.
    Remote users are additionally blocked on their home server.

    ``accountid`` is the account the nick belongs to, used by changeNick to
    detect a login that somehow didn't cancel the timer; it may be None.
    """
    # BUG FIX: the original passed the bare name ``id`` here, which is not
    # defined in this scope and therefore resolved to the builtin id()
    # function — changeNick's equality check against it could never succeed.
    # Pass an explicit (optional) account id instead.
    timeout = self.ircd.servconfig["services_nickserv_timeout"] if "services_nickserv_timeout" in self.ircd.servconfig else 60
    self.auth_timer[user] = reactor.callLater(timeout, self.changeNick, user, accountid, user.nickname)
    if user.server != self.ircd.name:
        self.ircd.servers[user.server].callRemote(ModuleMessage, destserver=user.server, type="ServiceBlockUser", args=[user.uuid])
def removeAuthTimer(self, user):
    """Cancel and discard the user's identify timeout, unblocking them on
    their home server if they are remote."""
    self.auth_timer[user].cancel()
    del self.auth_timer[user]
    if user.server != self.ircd.name:
        self.ircd.servers[user.server].callRemote(ModuleMessage, destserver=user.server, type="ServiceUnblockUser", args=[user.uuid])
def setDonorInfo(self, result, user):
    """Deferred callback: complete a token login with the fetched account row.
    An empty result here means the token referenced a missing donor row."""
    if not result:
        self.checkNick(user)
        self.exclaimServerError(user, self.nickserv)
        return
    self.loginUser(result, user)
def changeNick(self, user, id, nickname):
    """Identify-timeout callback: force the user onto a guest nick unless
    they identified as account ``id`` or already moved off ``nickname``."""
    if user in self.auth_timer:
        # The timer has fired, so just drop the bookkeeping entry.
        del self.auth_timer[user]
    if user.server != self.ircd.name and user.server in self.ircd.servers: # Make sure the target server is still on the network
        d = self.ircd.servers[user.server].callRemote(ModuleMessage, destserver=user.server, type="ServiceUnblockUser", args=[user.uuid])
        d.addErrback(lambda err: log.msg("Couldn't unblock remote user {}: server no longer connected to network".format(user.nickname)))
    if "accountid" in user.cache and user.cache["accountid"] == id:
        return # Somehow we auth'd and didn't clear the timer?
    if irc_lower(user.nickname) != irc_lower(nickname):
        return # Changed nick before the timeout. Whatever
    user.nick(self.genGuestNick())
def registerNick(self, result, user, nickname):
    """Deferred callback: register ``nickname`` to the user's account unless
    the account is already at its configured nick limit.  ``result`` holds the
    account's existing nick rows."""
    if "services_nickserv_nick_limit" in self.ircd.servconfig and self.ircd.servconfig["services_nickserv_nick_limit"] and len(result) >= self.ircd.servconfig["services_nickserv_nick_limit"]:
        # Already registered all the nicks we can
        nicklist = ", ".join([l[0] for l in result[:-1]])+", or "+result[-1][0] if len(result) > 1 else result[0][0]
        message = ":Warning: You already have {!s} registered nicks, so {} will not be protected. Please switch to {} to prevent impersonation!".format(self.ircd.servconfig["services_nickserv_nick_limit"], nickname, nicklist)
        user.sendMessage("NOTICE", message, prefix=self.nickserv.prefix())
    else:
        # NOTE(review): both placeholders are "{0}" with two arguments supplied;
        # this looks like it should be "{0},{1}" unless self.query() substitutes
        # placeholders positionally regardless of index — verify query()'s
        # formatting semantics.
        d = self.query("INSERT INTO ircnicks(donor_id, nick) VALUES({0},{0})", user.cache["accountid"], irc_lower(nickname))
        d.addCallback(self.successRegisterNick, user, nickname)
        d.addErrback(self.failedRegisterNick, user, nickname)
def failedRegisterNick(self, result, user, nickname):
    """Errback: tell the user their nick registration failed."""
    user.sendMessage("NOTICE", ":Failed to register nick {} to account {}. Other users may still use it.".format(nickname, user.metadata["ext"]["accountname"]), prefix=self.nickserv.prefix())
def successRegisterNick(self, result, user, nickname):
    """Callback: confirm to the user that their nick is now registered."""
    user.sendMessage("NOTICE", ":Nickname {} is now registered to account {} and can not be used by any other user.".format(nickname, user.metadata["ext"]["accountname"]), prefix=self.nickserv.prefix())
def binaryString(self, num):
    """Encode an integer as a big-endian byte string (Python 2: str).
    Pads the hex representation to an even number of digits before decoding."""
    strnum = "{0:x}".format(num)
    if len(strnum) % 2:
        strnum = "0" + strnum
    # Python 2 only: str.decode("hex") converts hex digits to raw bytes.
    return strnum.decode("hex")
def saslStart(self, user, mechanism):
    """Begin a SASL exchange: dispatch to saslSetup_<MECHANISM> (dashes become
    underscores).  Returns "fail" for unknown mechanisms, otherwise whatever
    the setup function returns."""
    try:
        setupfunc = getattr(self, "saslSetup_{}".format(mechanism.replace("-", "_")))
    except AttributeError:
        return "fail"
    self.saslUsers[user] = { "mechanism": mechanism }
    return setupfunc(user)
def saslSetup_PLAIN(self, user):
    """PLAIN setup: prompt the client to send its base64 credentials."""
    user.sendMessage("AUTHENTICATE", "+", to=None, prefix=None)
def saslSetup_DH_BLOWFISH(self, user):
    """DH-BLOWFISH setup: send the server's Diffie-Hellman parameters
    (prime, generator, public key) to the client, each as a 2-byte length
    prefix followed by the big-endian value, base64-encoded and split into
    400-character AUTHENTICATE lines."""
    encodedP = self.binaryString(self.dh_params["prime"])
    lengthP = self.binaryString(len(encodedP))
    if len(lengthP) == 1:
        lengthP = "\x00" + lengthP
    encodedG = self.binaryString(self.dh_params["generator"])
    lengthG = self.binaryString(len(encodedG))
    if len(lengthG) == 1:
        lengthG = "\x00" + lengthG
    encodedY = self.binaryString(self.dh_params["pubkey"])
    lengthY = self.binaryString(len(encodedY))
    if len(lengthY) == 1:
        lengthY = "\x00" + lengthY
    outStr = "{}{}{}{}{}{}".format(lengthP, encodedP, lengthG, encodedG, lengthY, encodedY)
    output = b64encode(outStr)
    splitOut = [output[i:i+400] for i in range(0, len(output), 400)]
    for line in splitOut:
        user.sendMessage("AUTHENTICATE", line, to=None, prefix=None)
    # A full-length final chunk requires an explicit "+" terminator per the
    # SASL-over-IRC framing rules.
    if len(splitOut[-1]) == 400:
        user.sendMessage("AUTHENTICATE", "+", to=None, prefix=None)
def saslSetup_DH_AES(self, user):
    """DH-AES setup: identical DH parameter exchange to DH-BLOWFISH."""
    return self.saslSetup_DH_BLOWFISH(user)
def saslSetup_EXTERNAL(self, user):
    """EXTERNAL setup: only usable when certificate fingerprints are loaded;
    otherwise report failure."""
    if "certfp" not in self.nickserv.cache:
        return "fail"
    user.sendMessage("AUTHENTICATE", "+", to=None, prefix=None)
def saslNext(self, user, data):
    """Feed a client SASL response to saslProcess_<MECHANISM> for the
    mechanism negotiated in saslStart.  Returns "done" if no processor
    exists for the mechanism."""
    try:
        processfunc = getattr(self, "saslProcess_{}".format(self.saslUsers[user]["mechanism"].replace("-", "_")))
    except AttributeError:
        return "done"
    return processfunc(user, data)
def saslProcess_PLAIN(self, user, data):
    """PLAIN response: base64 "authzid\\0authcid\\0password"; start the
    asynchronous credential check.  Returns "wait" while auth is pending,
    "done" on malformed input."""
    try:
        authorizationID, authenticationID, password = b64decode(data).split("\0")
    except (TypeError, ValueError):
        return "done"
    self.auth(user, authenticationID, password)
    return "wait"
def saslProcess_DH_BLOWFISH(self, user, data):
    """DH-BLOWFISH response: read the client's DH public key, derive the
    shared secret, and use it as a Blowfish key to decrypt the password.
    Returns "wait" while auth is pending, "done" on any framing error."""
    try:
        encryptedData = b64decode(data)
    except TypeError:
        return "done"
    if len(encryptedData) < 2:
        return "done"
    # First two bytes are the big-endian length of the client's public key.
    pubkeyLen = int(encryptedData[:2].encode("hex"), 16)
    encryptedData = encryptedData[2:]
    if pubkeyLen > len(encryptedData):
        return "done"
    pubkey = int(encryptedData[:pubkeyLen].encode("hex"), 16)
    encryptedData = encryptedData[pubkeyLen:]
    try:
        username, encryptedData = encryptedData.split("\0", 1)
    except ValueError:
        return "done"
    if not encryptedData: # Ensure there is remaining data
        return "done"
    # Shared secret = clientPubkey ^ serverPrivkey mod prime (classic DH).
    sharedSecret = self.binaryString(pow(pubkey, self.dh_params["privkey"], self.dh_params["prime"]))
    blowfishKey = Blowfish.new(sharedSecret)
    try:
        password = blowfishKey.decrypt(encryptedData)
    except ValueError: # decrypt raises ValueError if the message is not of the correct length
        return "done"
    self.auth(user, username, password)
    return "wait"
def saslProcess_DH_AES(self, user, data):
    """DH-AES response: like DH-BLOWFISH, but the payload is an AES-CBC
    blob (IV followed by ciphertext) containing "username\\0password\\0pad".
    Returns "wait" while auth is pending, "done" on any framing error."""
    try:
        encryptedData = b64decode(data)
    except TypeError:
        return "done"
    if len(encryptedData) < 2:
        return "done"
    pubkeyLen = int(encryptedData[:2].encode("hex"), 16)
    encryptedData = encryptedData[2:]
    if pubkeyLen > len(encryptedData):
        return "done"
    pubkey = int(encryptedData[:pubkeyLen].encode("hex"), 16)
    encryptedData = encryptedData[pubkeyLen:]
    if len(encryptedData) < AES.block_size * 2:
        return "done" # The remaining data is too short to be valid for AES
    iv = encryptedData[:AES.block_size]
    encryptedData = encryptedData[AES.block_size:]
    sharedSecret = self.binaryString(pow(pubkey, self.dh_params["privkey"], self.dh_params["prime"]))
    aesCipher = AES.new(sharedSecret, mode=AES.MODE_CBC, IV=iv)
    try:
        decryptedData = aesCipher.decrypt(encryptedData)
    except ValueError:
        return "done"
    try:
        username, password, padding = decryptedData.split("\0", 2)
    except ValueError:
        return "done"
    self.auth(user, username, password)
    return "wait"
def saslProcess_EXTERNAL(self, user, data):
    """EXTERNAL response: authenticate by the TLS certificate fingerprint the
    server recorded for this connection.  Returns "wait" while pending."""
    try:
        # NOTE(review): data[0] takes the first element — presumably ``data``
        # is a list of AUTHENTICATE payload lines here; confirm at the caller.
        username = b64decode(data[0])
    except TypeError:
        return "done"
    if "certfp" not in user.metadata["server"]:
        return "done"
    self.authByCert(user, user.metadata["server"]["certfp"], username)
    return "wait"
def saslDone(self, user, success):
    """End-of-SASL hook: drop the exchange state.  ``success`` is unused here;
    success/failure handling happens via the bound callbacks."""
    del self.saslUsers[user]
def bindSaslResult(self, user, successFunction, failureFunction):
    """Register the callbacks invoked when this user's SASL auth resolves."""
    self.saslUsers[user]["success"] = successFunction
    self.saslUsers[user]["failure"] = failureFunction
def isServiceAdmin(self, user, service):
    """Return True if the user may administer ``service``: opers always can;
    otherwise the user's account id must be in that service's admin list."""
    if "o" in user.mode:
        return True
    if "accountid" not in user.cache:
        return False
    id = user.cache["accountid"]
    # Map service objects to the keys used in self.admins.
    convertServices = {
        self.nickserv: "nickserv",
        self.chanserv: "chanserv",
        self.bidserv: "bidserv",
        self.operserv: "operserv"
    }
    if service not in convertServices:
        return False
    return id in self.admins[convertServices[service]]
def registered(self, user):
    """Post-login hook: apply channel access in every channel the user is in,
    and remember the connection's certificate fingerprint for future logins."""
    for c in self.ircd.channels.itervalues():
        if user in c.users:
            self.promote(user, c, True)
    if "certfp" in user.metadata["server"]:
        self.addCert(user, user.metadata["server"]["certfp"])
def unregistered(self, user):
    """Post-logout hook: strip all of the user's channel status modes in
    every channel they occupy."""
    for channel in self.ircd.channels.itervalues():
        if user in channel.users:
            status = channel.users[user]
            if status:
                # One mode removal per status character, all targeting this nick.
                channel.setMode(None, "-{}".format(status), [user.nickname for i in range(len(status))], self.chanserv.prefix())
def promote(self, user, channel, keepOldStatus=False):
    """Apply the channel access flags a user is entitled to in a registered
    channel: "~o" entries apply to opers, "~r" to any identified user, and
    account-id entries to that specific account.  With keepOldStatus, modes
    the user already has are left alone; otherwise current status is reset
    first.  Users stuck on an identify timer are never promoted."""
    if user in self.auth_timer:
        return
    if channel.name in self.chanserv.cache["registered"]:
        flags = set()
        if "o" in user.mode and "~o" in self.chanserv.cache["registered"][channel.name]["access"]:
            for flag in self.chanserv.cache["registered"][channel.name]["access"]["~o"]:
                flags.add(flag)
        if "accountid" in user.cache:
            if "~r" in self.chanserv.cache["registered"][channel.name]["access"]:
                for flag in self.chanserv.cache["registered"][channel.name]["access"]["~r"]:
                    flags.add(flag)
            if user.cache["accountid"] in self.chanserv.cache["registered"][channel.name]["access"]:
                for flag in self.chanserv.cache["registered"][channel.name]["access"][user.cache["accountid"]]:
                    flags.add(flag)
        if keepOldStatus:
            # Don't re-grant modes the user already holds.
            for flag in channel.users[user]:
                flags.discard(flag)
        else:
            userStatus = channel.users[user]
            if userStatus:
                channel.setMode(None, "-{}".format(userStatus), [user.nickname for i in range(len(userStatus))], self.chanserv.prefix())
        # Drop any flags that aren't valid status modes on this network.
        flagList = set(flags)
        for flag in flagList:
            if flag not in self.ircd.prefix_order:
                flags.discard(flag)
        if flags:
            channel.setMode(None, "+{}".format("".join(flags)), [user.nickname for i in range(len(flags))], self.chanserv.prefix())
def addCert(self, user, certfp):
    """Record a certificate fingerprint for the user's account.
    Returns True if the fingerprint was newly added, False if it was
    already present."""
    accountid = user.cache["accountid"]
    if accountid not in self.nickserv.cache["certfp"]:
        self.nickserv.cache["certfp"][accountid] = []
    if certfp not in self.nickserv.cache["certfp"][accountid]:
        self.nickserv.cache["certfp"][accountid].append(certfp)
        return True
    return False
def onRegister(self, user):
    """Connection-registration hook: interpret a server password as login
    credentials ("email:password" or "email password") or as an auth token,
    then verify the chosen nickname.  Always allows registration."""
    if user.password:
        if ":" in user.password:
            email, password = user.password.split(":", 1)
            self.auth(user, email, password)
        elif " " in user.password:
            email, password = user.password.split(" ", 1)
            self.auth(user, email, password)
        else:
            # No separator: treat the whole password as an IRC token.
            self.token(user, user.password)
    self.checkNick(user)
    return True
def onQuit(self, user, reason):
    """Quit hook: cancel any pending identify timeout for the user."""
    if user in self.auth_timer:
        self.removeAuthTimer(user)
def onNickChange(self, user, oldNick):
    """Nick-change hook: re-check registration unless the change was only a
    case/equivalence change of the same nick."""
    if irc_lower(user.nickname) != irc_lower(oldNick):
        self.checkNick(user)
def onTopicChange(self, channel, newTopic, newSetter):
    """Topic hook: persist topic, setter, and timestamp for registered channels."""
    if channel.name in self.chanserv.cache["registered"]:
        self.chanserv.cache["registered"][channel.name]["topic"] = [newTopic, newSetter, now()]
def onChanCreate(self, channel):
    """Channel-creation hook: restore the saved topic and original creation
    time for registered channels."""
    if channel.name in self.chanserv.cache["registered"] and "topic" in self.chanserv.cache["registered"][channel.name]:
        topicData = self.chanserv.cache["registered"][channel.name]["topic"]
        channel.setTopic(topicData[0], topicData[1])
        channel.topicTime = topicData[2]
    channel.created = self.chanserv.cache["registered"][channel.name]["registertime"]
def onNetmerge(self, name):
    """Server-link hook: synchronize services state to the newly linked
    server — announce this services server, push admin lists, and replay
    logins for every currently identified user."""
    server = self.ircd.servers[name]
    loggedInUserList = []
    for u in self.ircd.users.itervalues():
        if "accountid" in u.cache:
            loggedInUserList.append(u)
    server.callRemote(ModuleMessage, destserver=name, type="ServiceServer", args=[self.ircd.name])
    for adminType, adminList in self.admins.iteritems():
        server.callRemote(ModuleMessage, destserver=name, type="ServiceAdmins", args=[adminType] + adminList)
    for u in loggedInUserList:
        server.callRemote(ModuleMessage, destserver=name, type="ServiceLogin", args=[u.uuid, u.cache["accountid"]])
def commandPermission(self, user, cmd, data):
    """Restrict users with a pending identify timeout to NickServ commands.

    While the auth timer is active, PRIVMSGs are filtered so they can only
    reach NickServ, and only a small whitelist of other commands is allowed.
    Returns the (possibly filtered) data dict to permit the command, or {}
    to deny it.
    """
    # FIX: stripped dataset-extraction residue that had been fused onto the
    # final "return {}" line; logic is otherwise unchanged.
    if user not in self.auth_timer:
        return data
    if cmd == "PRIVMSG":
        to_nickserv = False
        for u in data["targetuser"]:
            if irc_lower(u.nickname) == irc_lower(self.nickserv.nickname):
                to_nickserv = True
                break
        if to_nickserv:
            # Drop all other targets so the message only reaches NickServ.
            data["targetuser"] = [self.nickserv]
            data["targetchan"] = []
            data["chanmod"] = []
            return data
        user.sendMessage("NOTICE", ":You cannot message anyone other than NickServ until you identify or change nicks.", prefix=self.nickserv.prefix())
        return {}
    if cmd in [ "PING", "PONG", "NICK", "QUIT", "NS", "NICKSERV", "LOGIN", "ID", "IDENTIFY" ]:
        return data
    user.sendMessage("NOTICE", ":You cannot use the command \x02{}\x02 until you identify or change nicks.".format(cmd), prefix=self.nickserv.prefix())
    return {}
"VisIt"
] | df634c0561950e35d3b59873f5f28ac2bf02305b1096de3a68c48bcca88c10ac |
"""
Utility functions for manipulating neuprint-python output.
"""
import sys
import inspect
import functools
import warnings
from textwrap import dedent
from collections.abc import Iterable, Iterator, Collection
import numpy as np
import pandas as pd
class NotNull:
    """Filter for existing properties.

    Translates to::

        WHERE neuron.{property} IS NOT NULL
    """
    # Sentinel class: used as a filter *value* in criteria, never instantiated.
    pass
class IsNull:
    """Filter for missing properties.

    Translates to::

        WHERE neuron.{property} IS NULL
    """
    # Sentinel class: used as a filter *value* in criteria, never instantiated.
    pass
#
# Import the notebook-aware version of tqdm if
# we appear to be running within a notebook context.
#
# Detect a Jupyter/IPython kernel by the type of sys.stdout; if found, use
# the notebook-flavored tqdm, otherwise (or if ipykernel isn't installed)
# fall back to the plain terminal tqdm.
try:
    import ipykernel.iostream
    if isinstance(sys.stdout, ipykernel.iostream.OutStream):
        from tqdm.notebook import tqdm

        # Notebook progress bars render via ipywidgets; warn (don't fail)
        # when it's missing so users know why the bar looks broken.
        try:
            import ipywidgets
            ipywidgets  # reference it so linters don't flag an unused import
        except ImportError:
            msg = dedent("""\
                Progress bar will not work well in the notebook without ipywidgets.
                Run the following commands (for notebook and jupyterlab users):

                    conda install -c conda-forge ipywidgets
                    jupyter nbextension enable --py widgetsnbextension
                    jupyter labextension install @jupyter-widgets/jupyterlab-manager

                ...and then reload your jupyter session, and restart your kernel.
            """)
            warnings.warn(msg)
    else:
        from tqdm import tqdm
except ImportError:
    from tqdm import tqdm
class tqdm(tqdm):
    """
    Same as tqdm, but auto-disable the progress bar if there's only one item.
    """
    def __init__(self, iterable=None, *args, disable=None, **kwargs):
        # Only auto-disable when the caller didn't choose explicitly and the
        # iterable's length is known to be 0 or 1 (no bar worth showing).
        if disable is None:
            disable = (iterable is not None
                       and hasattr(iterable, '__len__')
                       and len(iterable) <= 1)

        super().__init__(iterable, *args, disable=disable, **kwargs)
def trange(*args, **kwargs):
    """Shorthand for ``tqdm(range(*args), **kwargs)`` using this module's tqdm."""
    return tqdm(range(*args), **kwargs)
def UMAP(*args, **kwargs):
    """
    UMAP is an optional dependency, so this wrapper emits
    a nicer error message if it's not available.

    Arguments are forwarded unchanged to ``umap.UMAP``.
    """
    try:
        # Imported lazily so the package is only required when actually used.
        from umap import UMAP
    except ImportError as ex:
        msg = (
            "The 'umap' dimensionality reduction package is required for some "
            "plotting functionality, but it isn't currently installed.\n\n"
            "Please install it:\n\n"
            "  conda install -c conda-forge umap-learn\n\n"
        )
        raise RuntimeError(msg) from ex

    return UMAP(*args, **kwargs)
def make_iterable(x):
    """
    Normalize ``x`` into something iterable.

    ``None`` becomes an empty list, an ndarray is passed through unchanged,
    a Series is reduced to its values, any other non-string collection is
    returned as-is, and anything else (including strings) is wrapped in a
    single-element list.
    """
    if x is None:
        return []

    if isinstance(x, np.ndarray):
        return x

    if isinstance(x, pd.Series):
        return x.values

    # Strings are iterable but should be treated as scalars here.
    if isinstance(x, str) or not isinstance(x, Collection):
        return [x]

    return x
def make_args_iterable(argnames):
    """
    Decorator factory.

    For each argument named in ``argnames``, the returned decorator coerces
    the wrapped function's argument into an iterable via ``make_iterable()``
    before the call.  The wrapped function keeps its original signature.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            bound = inspect.getcallargs(func, *args, **kwargs)
            for argname in argnames:
                bound[argname] = make_iterable(bound[argname])
            return func(**bound)

        # Preserve the original signature for introspection/doc tools.
        wrapped.__signature__ = inspect.signature(func)
        return wrapped

    return decorator
@make_args_iterable(['properties'])
def merge_neuron_properties(neuron_df, conn_df, properties=['type', 'instance']):
    """
    Merge neuron properties onto a connection table.

    For each requested property, appends ``<prop>_pre`` and ``<prop>_post``
    columns to ``conn_df`` by joining ``neuron_df`` on ``bodyId_pre`` and
    ``bodyId_post`` respectively.  Pre-existing columns with those names are
    replaced.

    Args:
        neuron_df:
            DataFrame with a ``bodyId`` column plus the property columns.
        conn_df:
            DataFrame with ``bodyId_pre`` and ``bodyId_post`` columns.
        properties:
            Names of columns from ``neuron_df`` to merge onto ``conn_df``.

    Returns:
        A copy of ``conn_df`` with the new ``_pre``/``_post`` columns.
    """
    props = neuron_df[['bodyId', *properties]]

    # Drop stale copies of the columns we're about to (re)create.
    stale = [f'{prop}_pre' for prop in properties]
    stale += [f'{prop}_post' for prop in properties]
    merged = conn_df.drop(columns=stale, errors='ignore')

    # Join once for the presynaptic side, once for the postsynaptic side.
    merged = merged.merge(props, 'left', left_on='bodyId_pre', right_on='bodyId')
    merged = merged.drop(columns=['bodyId'])

    merged = merged.merge(props, 'left', left_on='bodyId_post', right_on='bodyId',
                          suffixes=['_pre', '_post'])
    merged = merged.drop(columns=['bodyId'])

    return merged
def connection_table_to_matrix(conn_df, group_cols='bodyId', weight_col='weight', sort_by=None):
    """
    Given a weighted connection table, produce a weighted adjacency matrix.

    Args:
        conn_df:
            A DataFrame with columns for pre- and post- identifiers
            (e.g. bodyId, type or instance), and a column for the
            weight of the connection.

        group_cols:
            Which two columns to use as the row index and column index
            of the returned matrix, respectively.
            Or give a single string (e.g. ``"bodyId"``), in which case the
            two column names are chosen by appending the suffixes
            ``_pre`` and ``_post`` to your string.

            If a pair of pre/post values occurs more than once in the
            connection table, all of its weights will be summed in the
            output matrix.

        weight_col:
            Which column holds the connection weight, to be aggregated
            for each unique pre/post pair.

        sort_by:
            How to sort the rows and columns of the result.
            Can be two strings, e.g. ``("type_pre", "type_post")``,
            or a single string, e.g. ``"type"`` in which case the
            suffixes are assumed.

    Returns:
        DataFrame, shape NxM, where N is the number of unique values in
        the 'pre' group column, and M is the number of unique values in
        the 'post' group column.
    """
    if isinstance(group_cols, str):
        group_cols = (f"{group_cols}_pre", f"{group_cols}_post")

    assert len(group_cols) == 2, \
        "Please provide two group_cols (e.g. 'bodyId_pre', 'bodyId_post')"

    assert group_cols[0] in conn_df, \
        f"Column missing: {group_cols[0]}"

    assert group_cols[1] in conn_df, \
        f"Column missing: {group_cols[1]}"

    assert weight_col in conn_df, \
        f"Column missing: {weight_col}"

    col_pre, col_post = group_cols
    dtype = conn_df[weight_col].dtype

    # Sum duplicate pre/post pairs before pivoting.
    grouped = conn_df.groupby([col_pre, col_post], as_index=False, sort=False)
    agg_weights_df = grouped[weight_col].sum()

    # FIX: pass index/columns/values as keywords — positional arguments to
    # DataFrame.pivot() were deprecated in pandas 1.x and removed in 2.0.
    matrix = agg_weights_df.pivot(index=col_pre, columns=col_post, values=weight_col)
    matrix = matrix.fillna(0).astype(dtype)

    if sort_by:
        if isinstance(sort_by, str):
            sort_by = (f"{sort_by}_pre", f"{sort_by}_post")

        assert len(sort_by) == 2, \
            "Please provide two sort_by column names (e.g. 'type_pre', 'type_post')"

        pre_order = conn_df.sort_values(sort_by[0])[col_pre].unique()
        post_order = conn_df.sort_values(sort_by[1])[col_post].unique()
        matrix = matrix.reindex(index=pre_order, columns=post_order)
    else:
        # No sort: Keep the order as close to the input order as possible.
        pre_order = conn_df[col_pre].unique()
        post_order = conn_df[col_post].unique()
        matrix = matrix.reindex(index=pre_order, columns=post_order)

    return matrix
def iter_batches(it, batch_size):
    """
    Iterator.

    Consume the given iterator/iterable in batches and
    yield each batch as a list of items.

    The last batch might be smaller than the others,
    if there aren't enough items to fill it.

    If the given iterator supports the __len__ method,
    the returned batch iterator will, too.
    """
    # Choose the wrapper class so that len(result) works whenever len(it) does.
    if hasattr(it, '__len__'):
        return _iter_batches_with_len(it, batch_size)
    else:
        return _iter_batches(it, batch_size)
class _iter_batches:
    """Re-iterable batch wrapper used by ``iter_batches()``.

    DataFrame/Series inputs are sliced with .iloc, list/ndarray inputs with
    plain slicing; anything else is consumed item-by-item into lists.
    """
    def __init__(self, it, batch_size):
        self.base_iterator = it
        self.batch_size = batch_size

    def __iter__(self):
        return self._iter_batches(self.base_iterator, self.batch_size)

    def _iter_batches(self, it, batch_size):
        if isinstance(it, (pd.DataFrame, pd.Series)):
            for batch_start in range(0, len(it), batch_size):
                yield it.iloc[batch_start:batch_start+batch_size]
            return
        elif isinstance(it, (list, np.ndarray)):
            for batch_start in range(0, len(it), batch_size):
                yield it[batch_start:batch_start+batch_size]
            return
        else:
            if not isinstance(it, Iterator):
                assert isinstance(it, Iterable)
                it = iter(it)

            while True:
                batch = []
                try:
                    for _ in range(batch_size):
                        batch.append(next(it))
                except StopIteration:
                    return
                finally:
                    # The finally clause also yields the final partial batch
                    # on StopIteration, before the generator's return takes
                    # effect.
                    if batch:
                        yield batch
class _iter_batches_with_len(_iter_batches):
    """Batch wrapper for sized inputs: adds len() = ceil(len(base) / batch_size)."""
    def __len__(self):
        return int(np.ceil(len(self.base_iterator) / self.batch_size))
def compile_columns(client, core_columns=()):
    """
    Compile list of columns from available :Neuron keys (excluding ROIs).

    Args:
        client:
            neu.Client to collect columns for.  Must provide
            ``fetch_neuron_keys()`` and ``all_rois``.

        core_columns:
            Optional sequence of preferred columns.  Columns that exist in
            the dataset are kept (in the given order) at the front of the
            result; names that don't exist are dropped.

    Returns:
        columns:
            List of key names: surviving ``core_columns`` first, then all
            remaining keys in sorted order.
    """
    # FIX: the default was a mutable list literal (``core_columns=[]``) —
    # a classic Python pitfall.  An empty tuple behaves identically here.

    # Fetch existing keys. This call is cached.
    keys = client.fetch_neuron_keys()

    # Drop ROIs (they are exposed as separate boolean columns).
    keys = [k for k in keys if k not in client.all_rois]

    # Drop missing columns from core_columns
    columns = [k for k in core_columns if k in keys]

    # Add new keys (sort to make deterministic)
    columns += [k for k in sorted(keys) if k not in columns]

    return columns
| connectome-neuprint/neuprint-python | neuprint/utils.py | Python | bsd-3-clause | 13,938 | [
"NEURON"
] | 8ad043eabccfb1a3b06b97e00743e9cebc1a4e0c72c01e3c14d9be17879363c7 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <martine@danga.com>
"""A git-command for integrating reviews on Rietveld."""
import json
import logging
import optparse
import os
import re
import stat
import sys
import textwrap
import urlparse
import urllib2
try:
import readline # pylint: disable=F0401,W0611
except ImportError:
pass
from third_party import upload
import breakpad # pylint: disable=W0611
import fix_encoding
import gclient_utils
import presubmit_support
import rietveld
import scm
import subprocess2
import watchlists
DEFAULT_SERVER = 'https://codereview.appspot.com'
POSTUPSTREAM_HOOK_PATTERN = '.git/hooks/post-cl-%s'
DESCRIPTION_BACKUP_FILE = '~/.git_cl_description_backup'
GIT_INSTRUCTIONS_URL = 'http://code.google.com/p/chromium/wiki/UsingNewGit'
CHANGE_ID = 'Change-Id:'
# Initialized in main()
settings = None
def DieWithError(message):
    """Print an error message to stderr and exit with status 1 (Python 2)."""
    print >> sys.stderr, message
    sys.exit(1)
def RunCommand(args, error_ok=False, error_message=None, **kwargs):
    """Run a command and return its stdout.  On failure, die with a message
    unless error_ok is set, in which case the failed process's stdout is
    returned instead (Python 2 except syntax)."""
    try:
        return subprocess2.check_output(args, shell=False, **kwargs)
    except subprocess2.CalledProcessError, e:
        if not error_ok:
            DieWithError(
                'Command "%s" failed.\n%s' % (
                    ' '.join(args), error_message or e.stdout or ''))
        return e.stdout
def RunGit(args, **kwargs):
    """Returns stdout of 'git <args>' via RunCommand."""
    return RunCommand(['git'] + args, **kwargs)
def RunGitWithCode(args):
    """Returns return code and stdout of 'git <args>'.

    Never raises: a failed launch is reported as (1, '')."""
    try:
        out, code = subprocess2.communicate(['git'] + args, stdout=subprocess2.PIPE)
        return code, out[0]
    except ValueError:
        # When the subprocess fails, it returns None. That triggers a ValueError
        # when trying to unpack the return value into (out, code).
        return 1, ''
def usage(more):
    """Decorator factory: attach extra usage text to a command function
    as its ``usage_more`` attribute."""
    def hook(fn):
        fn.usage_more = more
        return fn
    return hook
def ask_for_data(prompt):
    """Prompt the user for a line of input; exit quietly on Ctrl-C."""
    try:
        return raw_input(prompt)
    except KeyboardInterrupt:
        # Hide the exception.
        sys.exit(1)
def git_set_branch_value(key, value):
    """Store an integer value under branch.<current>.<key> in git config.
    No-op when not on a branch."""
    branch = Changelist().GetBranch()
    if branch:
        git_key = 'branch.%s.%s' % (branch, key)
        RunGit(['config', '--int', git_key, "%d" % value])
def git_get_branch_default(key, default):
    """Read an integer from branch.<current>.<key> in git config, returning
    ``default`` when not on a branch, unset, or unparsable."""
    branch = Changelist().GetBranch()
    if branch:
        git_key = 'branch.%s.%s' % (branch, key)
        (_, stdout) = RunGitWithCode(['config', '--int', '--get', git_key])
        try:
            return int(stdout.strip())
        except ValueError:
            pass
    return default
def add_git_similarity(parser):
    """Add --similarity/--find-copies options to ``parser`` and wrap its
    parse_args so the values are persisted to (and defaulted from) per-branch
    git config."""
    parser.add_option(
        '--similarity', metavar='SIM', type='int', action='store',
        help='Sets the percentage that a pair of files need to match in order to'
             ' be considered copies (default 50)')
    parser.add_option(
        '--find-copies', action='store_true',
        help='Allows git to look for copies.')
    parser.add_option(
        '--no-find-copies', action='store_false', dest='find_copies',
        help='Disallows git from looking for copies.')

    old_parser_args = parser.parse_args
    def Parse(args):
        options, args = old_parser_args(args)

        if options.similarity is None:
            # Not given on the command line: fall back to the saved value.
            options.similarity = git_get_branch_default('git-cl-similarity', 50)
        else:
            print('Note: Saving similarity of %d%% in git config.'
                  % options.similarity)
            git_set_branch_value('git-cl-similarity', options.similarity)

        # Clamp to a valid percentage.
        options.similarity = max(0, min(options.similarity, 100))

        if options.find_copies is None:
            options.find_copies = bool(
                git_get_branch_default('git-find-copies', True))
        else:
            git_set_branch_value('git-find-copies', int(options.find_copies))

        print('Using %d%% similarity for rename/copy detection. '
              'Override with --similarity.' % options.similarity)

        return options, args
    parser.parse_args = Parse
def MatchSvnGlob(url, base_url, glob_spec, allow_wildcards):
    """Return the corresponding git ref if |base_url| together with |glob_spec|
    matches the full |url|; otherwise return None.

    If |allow_wildcards| is true, |glob_spec| may contain '*' or '{a,b,c}'
    wildcards in its fetch portion, and a trailing '*' in the ref portion is
    substituted with the matched branch name.
    """
    fetch_suburl, as_ref = glob_spec.split(':')

    if allow_wildcards:
        glob_match = re.match('(.+/)?(\*|{[^/]*})(/.+)?', fetch_suburl)
        if glob_match:
            # Wildcard specs such as "branches/*/src:refs/remotes/svn/*" or
            # "branches/{472,597,648}/src:refs/remotes/svn/*".
            prefix, wildcard, suffix = glob_match.groups()
            branch_re = re.escape(base_url)
            if prefix:
                branch_re += '/' + re.escape(prefix)
            if wildcard == '*':
                branch_re += '([^/]*)'
            else:
                # Braces become a parenthesized group; commas become
                # alternation bars.
                wildcard = re.escape(wildcard)
                wildcard = re.sub('^\\\\{', '(', wildcard)
                wildcard = re.sub('\\\\,', '|', wildcard)
                wildcard = re.sub('\\\\}$', ')', wildcard)
                branch_re += wildcard
            if suffix:
                branch_re += re.escape(suffix)
            match = re.match(branch_re, url)
            if match:
                return re.sub('\*$', match.group(1), as_ref)

    # Literal specs such as "trunk/src:refs/remotes/origin/trunk".
    full_url = base_url + '/' + fetch_suburl if fetch_suburl else base_url
    return as_ref if full_url == url else None
def print_stats(similarity, find_copies, args):
    """Prints statistics about the change to the user.

    Runs 'git diff --stat' with the given rename/copy-detection settings and
    returns the subprocess's exit code."""
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env:
        del env['GIT_EXTERNAL_DIFF']

    if find_copies:
        similarity_options = ['--find-copies-harder', '-l100000',
                              '-C%s' % similarity]
    else:
        similarity_options = ['-M%s' % similarity]

    return subprocess2.call(
        ['git', 'diff', '--no-ext-diff', '--stat'] + similarity_options + args,
        env=env)
class Settings(object):
def __init__(self):
    """Initialize all lazily-populated settings fields to None/False;
    real values are loaded on demand by LazyUpdateIfNeeded and getters."""
    self.default_server = None
    self.cc = None
    self.root = None
    self.is_git_svn = None
    self.svn_branch = None
    self.tree_status_url = None
    self.viewvc_url = None
    self.updated = False
    self.is_gerrit = None
def LazyUpdateIfNeeded(self):
    """Updates the settings from a codereview.settings file, if available."""
    if not self.updated:
        cr_settings_file = FindCodereviewSettingsFile()
        if cr_settings_file:
            LoadCodereviewSettingsFromFile(cr_settings_file)
            # NOTE(review): self.updated is set both here and below; the early
            # assignment presumably guards against re-entry from DownloadHooks.
            self.updated = True
            DownloadHooks(False)
        self.updated = True
def GetDefaultServerUrl(self, error_ok=False):
    """Return the configured rietveld server URL (upgraded to https),
    dying with a setup hint when unset — unless error_ok is True."""
    if not self.default_server:
        self.LazyUpdateIfNeeded()
        self.default_server = gclient_utils.UpgradeToHttps(
            self._GetConfig('rietveld.server', error_ok=True))
        if error_ok:
            return self.default_server
        if not self.default_server:
            error_message = ('Could not find settings file. You must configure '
                             'your review setup by running "git cl config".')
            self.default_server = gclient_utils.UpgradeToHttps(
                self._GetConfig('rietveld.server', error_message=error_message))
    return self.default_server
def GetRoot(self):
if not self.root:
self.root = os.path.abspath(RunGit(['rev-parse', '--show-cdup']).strip())
return self.root
def GetIsGitSvn(self):
"""Return true if this repo looks like it's using git-svn."""
if self.is_git_svn is None:
# If you have any "svn-remote.*" config keys, we think you're using svn.
self.is_git_svn = RunGitWithCode(
['config', '--get-regexp', r'^svn-remote\.'])[0] == 0
return self.is_git_svn
def GetSVNBranch(self):
if self.svn_branch is None:
if not self.GetIsGitSvn():
DieWithError('Repo doesn\'t appear to be a git-svn repo.')
# Try to figure out which remote branch we're based on.
# Strategy:
# 1) iterate through our branch history and find the svn URL.
# 2) find the svn-remote that fetches from the URL.
# regexp matching the git-svn line that contains the URL.
git_svn_re = re.compile(r'^\s*git-svn-id: (\S+)@', re.MULTILINE)
# We don't want to go through all of history, so read a line from the
# pipe at a time.
# The -100 is an arbitrary limit so we don't search forever.
cmd = ['git', 'log', '-100', '--pretty=medium']
proc = subprocess2.Popen(cmd, stdout=subprocess2.PIPE)
url = None
for line in proc.stdout:
match = git_svn_re.match(line)
if match:
url = match.group(1)
proc.stdout.close() # Cut pipe.
break
if url:
svn_remote_re = re.compile(r'^svn-remote\.([^.]+)\.url (.*)$')
remotes = RunGit(['config', '--get-regexp',
r'^svn-remote\..*\.url']).splitlines()
for remote in remotes:
match = svn_remote_re.match(remote)
if match:
remote = match.group(1)
base_url = match.group(2)
fetch_spec = RunGit(
['config', 'svn-remote.%s.fetch' % remote],
error_ok=True).strip()
if fetch_spec:
self.svn_branch = MatchSvnGlob(url, base_url, fetch_spec, False)
if self.svn_branch:
break
branch_spec = RunGit(
['config', 'svn-remote.%s.branches' % remote],
error_ok=True).strip()
if branch_spec:
self.svn_branch = MatchSvnGlob(url, base_url, branch_spec, True)
if self.svn_branch:
break
tag_spec = RunGit(
['config', 'svn-remote.%s.tags' % remote],
error_ok=True).strip()
if tag_spec:
self.svn_branch = MatchSvnGlob(url, base_url, tag_spec, True)
if self.svn_branch:
break
if not self.svn_branch:
DieWithError('Can\'t guess svn branch -- try specifying it on the '
'command line')
return self.svn_branch
def GetTreeStatusUrl(self, error_ok=False):
if not self.tree_status_url:
error_message = ('You must configure your tree status URL by running '
'"git cl config".')
self.tree_status_url = self._GetConfig('rietveld.tree-status-url',
error_ok=error_ok,
error_message=error_message)
return self.tree_status_url
def GetViewVCUrl(self):
if not self.viewvc_url:
self.viewvc_url = gclient_utils.UpgradeToHttps(
self._GetConfig('rietveld.viewvc-url', error_ok=True))
return self.viewvc_url
def GetDefaultCCList(self):
return self._GetConfig('rietveld.cc', error_ok=True)
def GetIsGerrit(self):
"""Return true if this repo is assosiated with gerrit code review system."""
if self.is_gerrit is None:
self.is_gerrit = self._GetConfig('gerrit.host', error_ok=True)
return self.is_gerrit
def _GetConfig(self, param, **kwargs):
self.LazyUpdateIfNeeded()
return RunGit(['config', param], **kwargs).strip()
def ShortBranchName(branch):
  """Strip the 'refs/heads/' prefix: 'refs/heads/foo' becomes 'foo'."""
  prefix = 'refs/heads/'
  return branch.replace(prefix, '')
class Changelist(object):
def __init__(self, branchref=None):
# Poke settings so we get the "configure your server" message if necessary.
global settings
if not settings:
# Happens when git_cl.py is used as a utility library.
settings = Settings()
settings.GetDefaultServerUrl()
self.branchref = branchref
if self.branchref:
self.branch = ShortBranchName(self.branchref)
else:
self.branch = None
self.rietveld_server = None
self.upstream_branch = None
self.has_issue = False
self.issue = None
self.has_description = False
self.description = None
self.has_patchset = False
self.patchset = None
self._rpc_server = None
self.cc = None
self.watchers = ()
self._remote = None
def GetCCList(self):
"""Return the users cc'd on this CL.
Return is a string suitable for passing to gcl with the --cc flag.
"""
if self.cc is None:
base_cc = settings .GetDefaultCCList()
more_cc = ','.join(self.watchers)
self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
return self.cc
def SetWatchers(self, watchers):
"""Set the list of email addresses that should be cc'd based on the changed
files in this CL.
"""
self.watchers = watchers
def GetBranch(self):
"""Returns the short branch name, e.g. 'master'."""
if not self.branch:
self.branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
self.branch = ShortBranchName(self.branchref)
return self.branch
def GetBranchRef(self):
"""Returns the full branch name, e.g. 'refs/heads/master'."""
self.GetBranch() # Poke the lazy loader.
return self.branchref
def FetchUpstreamTuple(self):
"""Returns a tuple containg remote and remote ref,
e.g. 'origin', 'refs/heads/master'
"""
remote = '.'
branch = self.GetBranch()
upstream_branch = RunGit(['config', 'branch.%s.merge' % branch],
error_ok=True).strip()
if upstream_branch:
remote = RunGit(['config', 'branch.%s.remote' % branch]).strip()
else:
upstream_branch = RunGit(['config', 'rietveld.upstream-branch'],
error_ok=True).strip()
if upstream_branch:
remote = RunGit(['config', 'rietveld.upstream-remote']).strip()
else:
# Fall back on trying a git-svn upstream branch.
if settings.GetIsGitSvn():
upstream_branch = settings.GetSVNBranch()
else:
# Else, try to guess the origin remote.
remote_branches = RunGit(['branch', '-r']).split()
if 'origin/master' in remote_branches:
# Fall back on origin/master if it exits.
remote = 'origin'
upstream_branch = 'refs/heads/master'
elif 'origin/trunk' in remote_branches:
# Fall back on origin/trunk if it exists. Generally a shared
# git-svn clone
remote = 'origin'
upstream_branch = 'refs/heads/trunk'
else:
DieWithError("""Unable to determine default branch to diff against.
Either pass complete "git diff"-style arguments, like
git cl upload origin/master
or verify this branch is set up to track another (via the --track argument to
"git checkout -b ...").""")
return remote, upstream_branch
def GetUpstreamBranch(self):
if self.upstream_branch is None:
remote, upstream_branch = self.FetchUpstreamTuple()
if remote is not '.':
upstream_branch = upstream_branch.replace('heads', 'remotes/' + remote)
self.upstream_branch = upstream_branch
return self.upstream_branch
def GetRemote(self):
if not self._remote:
self._remote = self.FetchUpstreamTuple()[0]
if self._remote == '.':
remotes = RunGit(['remote'], error_ok=True).split()
if len(remotes) == 1:
self._remote, = remotes
elif 'origin' in remotes:
self._remote = 'origin'
logging.warning('Could not determine which remote this change is '
'associated with, so defaulting to "%s". This may '
'not be what you want. You may prevent this message '
'by running "git svn info" as documented here: %s',
self._remote,
GIT_INSTRUCTIONS_URL)
else:
logging.warn('Could not determine which remote this change is '
'associated with. You may prevent this message by '
'running "git svn info" as documented here: %s',
GIT_INSTRUCTIONS_URL)
return self._remote
def GetGitBaseUrlFromConfig(self):
"""Return the configured base URL from branch.<branchname>.baseurl.
Returns None if it is not set.
"""
return RunGit(['config', 'branch.%s.base-url' % self.GetBranch()],
error_ok=True).strip()
def GetRemoteUrl(self):
"""Return the configured remote URL, e.g. 'git://example.org/foo.git/'.
Returns None if there is no remote.
"""
remote = self.GetRemote()
return RunGit(['config', 'remote.%s.url' % remote], error_ok=True).strip()
def GetIssue(self):
"""Returns the issue number as a int or None if not set."""
if not self.has_issue:
issue = RunGit(['config', self._IssueSetting()], error_ok=True).strip()
if issue:
self.issue = int(issue)
else:
self.issue = None
self.has_issue = True
return self.issue
def GetRietveldServer(self):
if not self.rietveld_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
self.rietveld_server = gclient_utils.UpgradeToHttps(RunGit(
['config', self._RietveldServer()], error_ok=True).strip())
if not self.rietveld_server:
self.rietveld_server = settings.GetDefaultServerUrl()
return self.rietveld_server
def GetIssueURL(self):
"""Get the URL for a particular issue."""
return '%s/%s' % (self.GetRietveldServer(), self.GetIssue())
def GetDescription(self, pretty=False):
if not self.has_description:
if self.GetIssue():
issue = self.GetIssue()
try:
self.description = self.RpcServer().get_description(issue).strip()
except urllib2.HTTPError, e:
if e.code == 404:
DieWithError(
('\nWhile fetching the description for issue %d, received a '
'404 (not found)\n'
'error. It is likely that you deleted this '
'issue on the server. If this is the\n'
'case, please run\n\n'
' git cl issue 0\n\n'
'to clear the association with the deleted issue. Then run '
'this command again.') % issue)
else:
DieWithError(
'\nFailed to fetch issue description. HTTP error ' + e.code)
self.has_description = True
if pretty:
wrapper = textwrap.TextWrapper()
wrapper.initial_indent = wrapper.subsequent_indent = ' '
return wrapper.fill(self.description)
return self.description
def GetPatchset(self):
"""Returns the patchset number as a int or None if not set."""
if not self.has_patchset:
patchset = RunGit(['config', self._PatchsetSetting()],
error_ok=True).strip()
if patchset:
self.patchset = int(patchset)
else:
self.patchset = None
self.has_patchset = True
return self.patchset
def SetPatchset(self, patchset):
"""Set this branch's patchset. If patchset=0, clears the patchset."""
if patchset:
RunGit(['config', self._PatchsetSetting(), str(patchset)])
else:
RunGit(['config', '--unset', self._PatchsetSetting()],
stderr=subprocess2.PIPE, error_ok=True)
self.has_patchset = False
def GetMostRecentPatchset(self, issue):
return self.RpcServer().get_issue_properties(
int(issue), False)['patchsets'][-1]
def GetPatchSetDiff(self, issue, patchset):
return self.RpcServer().get(
'/download/issue%s_%s.diff' % (issue, patchset))
def SetIssue(self, issue):
"""Set this branch's issue. If issue=0, clears the issue."""
if issue:
RunGit(['config', self._IssueSetting(), str(issue)])
if self.rietveld_server:
RunGit(['config', self._RietveldServer(), self.rietveld_server])
else:
RunGit(['config', '--unset', self._IssueSetting()])
self.SetPatchset(0)
self.has_issue = False
def GetChange(self, upstream_branch, author):
root = RunCommand(['git', 'rev-parse', '--show-cdup']).strip() or '.'
absroot = os.path.abspath(root)
# We use the sha1 of HEAD as a name of this change.
name = RunCommand(['git', 'rev-parse', 'HEAD']).strip()
# Need to pass a relative path for msysgit.
try:
files = scm.GIT.CaptureStatus([root], '.', upstream_branch)
except subprocess2.CalledProcessError:
DieWithError(
('\nFailed to diff against upstream branch %s!\n\n'
'This branch probably doesn\'t exist anymore. To reset the\n'
'tracking branch, please run\n'
' git branch --set-upstream %s trunk\n'
'replacing trunk with origin/master or the relevant branch') %
(upstream_branch, self.GetBranch()))
issue = self.GetIssue()
patchset = self.GetPatchset()
if issue:
description = self.GetDescription()
else:
# If the change was never uploaded, use the log messages of all commits
# up to the branch point, as git cl upload will prefill the description
# with these log messages.
description = RunCommand(['git', 'log', '--pretty=format:%s%n%n%b',
'%s...' % (upstream_branch)]).strip()
if not author:
author = RunGit(['config', 'user.email']).strip() or None
return presubmit_support.GitChange(
name,
description,
absroot,
files,
issue,
patchset,
author)
def RunHook(self, committing, upstream_branch, may_prompt, verbose, author):
"""Calls sys.exit() if the hook fails; returns a HookResults otherwise."""
change = self.GetChange(upstream_branch, author)
# Apply watchlists on upload.
if not committing:
watchlist = watchlists.Watchlists(change.RepositoryRoot())
files = [f.LocalPath() for f in change.AffectedFiles()]
self.SetWatchers(watchlist.GetWatchersForPaths(files))
try:
return presubmit_support.DoPresubmitChecks(change, committing,
verbose=verbose, output_stream=sys.stdout, input_stream=sys.stdin,
default_presubmit=None, may_prompt=may_prompt,
rietveld_obj=self.RpcServer())
except presubmit_support.PresubmitFailure, e:
DieWithError(
('%s\nMaybe your depot_tools is out of date?\n'
'If all fails, contact maruel@') % e)
def CloseIssue(self):
"""Updates the description and closes the issue."""
issue = self.GetIssue()
self.RpcServer().update_description(issue, self.description)
return self.RpcServer().close_issue(issue)
def SetFlag(self, flag, value):
"""Patchset must match."""
if not self.GetPatchset():
DieWithError('The patchset needs to match. Send another patchset.')
try:
return self.RpcServer().set_flag(
self.GetIssue(), self.GetPatchset(), flag, value)
except urllib2.HTTPError, e:
if e.code == 404:
DieWithError('The issue %s doesn\'t exist.' % self.GetIssue())
if e.code == 403:
DieWithError(
('Access denied to issue %s. Maybe the patchset %s doesn\'t '
'match?') % (self.GetIssue(), self.GetPatchset()))
raise
def RpcServer(self):
"""Returns an upload.RpcServer() to access this review's rietveld instance.
"""
if not self._rpc_server:
self._rpc_server = rietveld.Rietveld(self.GetRietveldServer(),
None, None)
return self._rpc_server
def _IssueSetting(self):
"""Return the git setting that stores this change's issue."""
return 'branch.%s.rietveldissue' % self.GetBranch()
def _PatchsetSetting(self):
"""Return the git setting that stores this change's most recent patchset."""
return 'branch.%s.rietveldpatchset' % self.GetBranch()
def _RietveldServer(self):
"""Returns the git setting that stores this change's rietveld server."""
return 'branch.%s.rietveldserver' % self.GetBranch()
def GetCodereviewSettingsInteractively():
  """Prompt the user for settings."""
  # TODO(ukai): ask code review system is rietveld or gerrit?
  server = settings.GetDefaultServerUrl(error_ok=True)
  prompt = 'Rietveld server (host[:port])' + ' [%s]' % (server or DEFAULT_SERVER)
  new_server = ask_for_data(prompt + ':')
  if not server and not new_server:
    new_server = DEFAULT_SERVER
  if new_server:
    new_server = gclient_utils.UpgradeToHttps(new_server)
    if new_server != server:
      RunGit(['config', 'rietveld.server', new_server])

  def SetProperty(initial, caption, name, is_url):
    # Prompt for one rietveld.<name> value; 'x' clears it, empty keeps it.
    question = caption
    if initial:
      question += ' ("x" to clear) [%s]' % initial
    answer = ask_for_data(question + ':')
    if answer == 'x':
      RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
      return
    if not answer:
      return
    if is_url:
      answer = gclient_utils.UpgradeToHttps(answer)
    if answer != initial:
      RunGit(['config', 'rietveld.' + name, answer])

  SetProperty(settings.GetDefaultCCList(), 'CC list', 'cc', False)
  SetProperty(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
              'tree-status-url', False)
  SetProperty(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url', True)

# TODO: configure a default branch to diff against, rather than this
# svn-based hackery.
class ChangeDescription(object):
  """Contains a parsed form of the change description."""

  def __init__(self, log_desc, reviewers):
    self.log_desc = log_desc
    self.reviewers = reviewers
    self.description = self.log_desc

  def Prompt(self):
    """Lets the user edit the description in their editor; dies on failure."""
    content = """# Enter a description of the change.
# This will displayed on the codereview site.
# The first line will also be used as the subject of the review.
"""
    content += self.description
    has_reviewer_line = ('\nR=' in self.description or
                         '\nTBR=' in self.description)
    if self.reviewers and not has_reviewer_line:
      content += '\nR=' + self.reviewers
    if '\nBUG=' not in self.description:
      content += '\nBUG='
    content = content.rstrip('\n') + '\n'
    content = gclient_utils.RunEditor(content, True)
    if not content:
      DieWithError('Running editor failed')
    # Strip the instruction comments back out of the edited text.
    content = re.compile(r'^#.*$', re.MULTILINE).sub('', content).strip()
    if not content.strip():
      DieWithError('No CL description, aborting')
    self.description = content

  def ParseDescription(self):
    """Updates the list of reviewers and subject from the description."""
    self.description = self.description.strip('\n') + '\n'
    # Collect every R=/TBR= line into one comma-separated reviewer string.
    regexp = re.compile(r'^\s*(TBR|R)=(.+)$', re.MULTILINE)
    found = ','.join(
        m.group(2).strip() for m in regexp.finditer(self.description))
    if found:
      self.reviewers = found

  def IsEmpty(self):
    return not self.description
def FindCodereviewSettingsFile(filename='codereview.settings'):
  """Walks upward from the cwd looking for *filename*; returns it opened.

  Only looks up to the top of the repository unless an
  'inherit-review-settings-ok' file exists in the root of the repository,
  in which case the search continues to the filesystem root. Returns None
  when nothing is found.
  """
  top = os.path.abspath(RunGit(['rev-parse', '--show-cdup']).strip())
  if os.path.isfile(os.path.join(top, 'inherit-review-settings-ok')):
    top = '/'
  directory = os.getcwd()
  while True:
    candidate = os.path.join(directory, filename)
    if filename in os.listdir(directory) and os.path.isfile(candidate):
      return open(candidate)
    if directory == top:
      return None
    directory = os.path.dirname(directory)
def LoadCodereviewSettingsFromFile(fileobj):
  """Parse a codereview.settings file and updates hooks."""
  keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())

  # (git config suffix, settings-file key, ok to be absent).
  # Only the server setting is required. For the optional ones we ignore
  # errors raised while attempting to delete an already-missing option.
  for name, setting, unset_error_ok in (
      ('server', 'CODE_REVIEW_SERVER', False),
      ('cc', 'CC_LIST', True),
      ('tree-status-url', 'STATUS', True),
      ('viewvc-url', 'VIEW_VC', True)):
    fullname = 'rietveld.' + name
    if setting in keyvals:
      RunGit(['config', fullname, keyvals[setting]])
    else:
      RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)

  if 'GERRIT_HOST' in keyvals and 'GERRIT_PORT' in keyvals:
    RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])
    RunGit(['config', 'gerrit.port', keyvals['GERRIT_PORT']])

  if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
    #should be of the form
    #PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
    #ORIGIN_URL_CONFIG: http://src.chromium.org/git
    RunGit(['config', keyvals['PUSH_URL_CONFIG'],
            keyvals['ORIGIN_URL_CONFIG']])
def urlretrieve(source, destination):
  """Fetches *source* over HTTP(S) and writes the body to *destination*.

  urllib is broken for SSL connections via a proxy, therefore we
  can't use urllib.urlretrieve().
  """
  # Open the destination first (matching the original's error behavior:
  # a failed fetch still leaves the file created).
  with open(destination, 'w') as out:
    out.write(urllib2.urlopen(source).read())
def DownloadHooks(force):
  """Installs the Gerrit commit-msg hook into .git/hooks if needed.

  Args:
    force: True to update hooks. False to install hooks if not present.
  """
  if not settings.GetIsGerrit():
    return
  src = '%s/tools/hooks/commit-msg' % settings.GetDefaultServerUrl()
  dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
  if os.access(dst, os.X_OK):
    # An executable hook is already installed; leave it alone.
    return
  if os.path.exists(dst):
    # A non-executable file is in the way; only replace it when forced.
    if not force:
      return
    os.remove(dst)
  try:
    urlretrieve(src, dst)
    os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
  except Exception:
    # Don't leave a half-written hook behind.
    if os.path.exists(dst):
      os.remove(dst)
    DieWithError('\nFailed to download hooks from %s' % src)
@usage('[repo root containing codereview.settings]')
def CMDconfig(parser, args):
  """edit configuration for this tree"""
  _, args = parser.parse_args(args)
  if not args:
    # No URL supplied: configure interactively.
    GetCodereviewSettingsInteractively()
  else:
    url = args[0]
    if not url.endswith('codereview.settings'):
      url = os.path.join(url, 'codereview.settings')
    # Load code review settings and download hooks (if available).
    LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
  DownloadHooks(True)
  return 0
def CMDbaseurl(parser, args):
  """get or set base-url for this branch"""
  branch = ShortBranchName(RunGit(['symbolic-ref', 'HEAD']).strip())
  _, args = parser.parse_args(args)
  key = 'branch.%s.base-url' % branch
  if args:
    print("Setting base-url to %s" % args[0])
    return RunGit(['config', key, args[0]], error_ok=False).strip()
  print("Current base-url:")
  return RunGit(['config', key], error_ok=False).strip()
def CMDstatus(parser, args):
  """show status of changelists"""
  parser.add_option('--field',
                    help='print only specific field (desc|id|patch|url)')
  (options, args) = parser.parse_args(args)

  # TODO: maybe make show_branches a flag if necessary.
  show_branches = not options.field

  if show_branches:
    # One Changelist per local head; the issue lookup hits git config.
    branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
    if branches:
      print 'Branches associated with reviews:'
      changes = (Changelist(branchref=b) for b in branches.splitlines())
      branches = dict((cl.GetBranch(), cl.GetIssue()) for cl in changes)
      # Right-align branch names; 5 is a minimum column width.
      alignment = max(5, max(len(b) for b in branches))
      for branch in sorted(branches):
        print "  %*s: %s" % (alignment, branch, branches[branch])

  cl = Changelist()
  if options.field:
    # Machine-readable single-field output; prints nothing when unset.
    if options.field.startswith('desc'):
      print cl.GetDescription()
    elif options.field == 'id':
      issueid = cl.GetIssue()
      if issueid:
        print issueid
    elif options.field == 'patch':
      patchset = cl.GetPatchset()
      if patchset:
        print patchset
    elif options.field == 'url':
      url = cl.GetIssueURL()
      if url:
        print url
  else:
    print
    # Trailing comma: keep the cursor on this line for the next print.
    print 'Current branch:',
    if not cl.GetIssue():
      print 'no issue assigned.'
      return 0
    print cl.GetBranch()
    print 'Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL())
    print 'Issue description:'
    print cl.GetDescription(pretty=True)
  return 0
@usage('[issue_number]')
def CMDissue(parser, args):
"""Set or display the current code review issue number.
Pass issue number 0 to clear the current issue.
"""
_, args = parser.parse_args(args)
cl = Changelist()
if len(args) > 0:
try:
issue = int(args[0])
except ValueError:
DieWithError('Pass a number to set the issue or none to list it.\n'
'Maybe you want to run git cl status?')
cl.SetIssue(issue)
print 'Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL())
return 0
def CMDcomments(parser, args):
"""show review comments of the current changelist"""
(_, args) = parser.parse_args(args)
if args:
parser.error('Unsupported argument: %s' % args)
cl = Changelist()
if cl.GetIssue():
data = cl.RpcServer().get_issue_properties(cl.GetIssue(), True)
for message in sorted(data['messages'], key=lambda x: x['date']):
print '\n%s %s' % (message['date'].split('.', 1)[0], message['sender'])
if message['text'].strip():
print '\n'.join(' ' + l for l in message['text'].splitlines())
return 0
def CreateDescriptionFromLog(args):
  """Pulls out the commit log to use as a base for the CL description."""
  if len(args) == 1:
    ref = args[0]
    if not ref.endswith('.'):
      # A bare ref: log everything since it.
      log_args = [ref + '..']
    elif ref.endswith('...'):
      # Symmetric-difference spec: convert to two-dot form.
      log_args = [ref[:-1]]
    else:
      log_args = args[:]  # Hope for the best!
  elif len(args) == 2:
    log_args = [args[0] + '..' + args[1]]
  else:
    log_args = args[:]  # Hope for the best!
  return RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
def CMDpresubmit(parser, args):
  """run presubmit tests on the current changelist"""
  parser.add_option('--upload', action='store_true',
                    help='Run upload hook instead of the push/dcommit hook')
  parser.add_option('--force', action='store_true',
                    help='Run checks even if tree is dirty')
  (options, args) = parser.parse_args(args)

  # Make sure index is up-to-date before running diff-index.
  RunGit(['update-index', '--refresh', '-q'], error_ok=True)
  if not options.force and RunGit(['diff-index', 'HEAD']):
    # TODO(maruel): Is this really necessary?
    print ('Cannot presubmit with a dirty tree.\n'
           'You must commit locally first (or use --force).')
    return 1

  cl = Changelist()
  # Default to diffing against the "upstream" branch when no arg is given.
  base_branch = args[0] if args else cl.GetUpstreamBranch()

  cl.RunHook(committing=not options.upload, upstream_branch=base_branch,
             may_prompt=False, verbose=options.verbose,
             author=None)
  return 0
def AddChangeIdToCommitMessage(options, args):
"""Re-commits using the current message, assumes the commit hook is in
place.
"""
log_desc = options.message or CreateDescriptionFromLog(args)
git_command = ['commit', '--amend', '-m', log_desc]
RunGit(git_command)
new_log_desc = CreateDescriptionFromLog(args)
if CHANGE_ID in new_log_desc:
print 'git-cl: Added Change-Id to commit message.'
else:
print >> sys.stderr, 'ERROR: Gerrit commit-msg hook not available.'
def GerritUpload(options, args, cl):
"""upload the current branch to gerrit."""
# We assume the remote called "origin" is the one we want.
# It is probably not worthwhile to support different workflows.
remote = 'origin'
branch = 'master'
if options.target_branch:
branch = options.target_branch
log_desc = options.message or CreateDescriptionFromLog(args)
if CHANGE_ID not in log_desc:
AddChangeIdToCommitMessage(options, args)
if options.reviewers:
log_desc += '\nR=' + options.reviewers
change_desc = ChangeDescription(log_desc, options.reviewers)
change_desc.ParseDescription()
if change_desc.IsEmpty():
print "Description is empty; aborting."
return 1
receive_options = []
cc = cl.GetCCList().split(',')
if options.cc:
cc += options.cc.split(',')
cc = filter(None, cc)
if cc:
receive_options += ['--cc=' + email for email in cc]
if change_desc.reviewers:
reviewers = filter(None, change_desc.reviewers.split(','))
if reviewers:
receive_options += ['--reviewer=' + email for email in reviewers]
git_command = ['push']
if receive_options:
git_command.append('--receive-pack=git receive-pack %s' %
' '.join(receive_options))
git_command += [remote, 'HEAD:refs/for/' + branch]
RunGit(git_command)
# TODO(ukai): parse Change-Id: and set issue number?
return 0
def RietveldUpload(options, args, cl):
  """upload the patch to rietveld."""
  upload_args = ['--assume_yes']  # Don't ask about untracked files.
  upload_args.extend(['--server', cl.GetRietveldServer()])
  if options.emulate_svn_auto_props:
    upload_args.append('--emulate_svn_auto_props')

  change_desc = None

  if cl.GetIssue():
    # Branch already has an issue: add a new patchset to it.
    if options.title:
      upload_args.extend(['--title', options.title])
    elif options.message:
      # TODO(rogerta): for now, the -m option will also set the --title option
      # for upload.py. Soon this will be changed to set the --message option.
      # Will wait until people are used to typing -t instead of -m.
      upload_args.extend(['--title', options.message])
    upload_args.extend(['--issue', str(cl.GetIssue())])
    print ("This branch is associated with issue %s. "
           "Adding patch to that issue." % cl.GetIssue())
  else:
    # New issue: build the description (interactively unless --force).
    if options.title:
      upload_args.extend(['--title', options.title])
    message = options.message or CreateDescriptionFromLog(args)
    change_desc = ChangeDescription(message, options.reviewers)
    if not options.force:
      change_desc.Prompt()
    change_desc.ParseDescription()

    if change_desc.IsEmpty():
      print "Description is empty; aborting."
      return 1

    upload_args.extend(['--message', change_desc.description])
    if change_desc.reviewers:
      upload_args.extend(['--reviewers', change_desc.reviewers])
    if options.send_mail:
      if not change_desc.reviewers:
        DieWithError("Must specify reviewers to send email.")
      upload_args.append('--send_mail')
    cc = ','.join(filter(None, (cl.GetCCList(), options.cc)))
    if cc:
      upload_args.extend(['--cc', cc])

  upload_args.extend(['--git_similarity', str(options.similarity)])
  if not options.find_copies:
    upload_args.extend(['--git_no_find_copies'])

  # Include the upstream repo's URL in the change -- this is useful for
  # projects that have their source spread across multiple repos.
  remote_url = cl.GetGitBaseUrlFromConfig()
  if not remote_url:
    if settings.GetIsGitSvn():
      # URL is dependent on the current directory.
      data = RunGit(['svn', 'info'], cwd=settings.GetRoot())
      if data:
        keys = dict(line.split(': ', 1) for line in data.splitlines()
                    if ': ' in line)
        remote_url = keys.get('URL', None)
    else:
      if cl.GetRemoteUrl() and '/' in cl.GetUpstreamBranch():
        remote_url = (cl.GetRemoteUrl() + '@'
                      + cl.GetUpstreamBranch().split('/')[-1])
  if remote_url:
    upload_args.extend(['--base_url', remote_url])

  try:
    issue, patchset = upload.RealMain(['upload'] + upload_args + args)
  except KeyboardInterrupt:
    sys.exit(1)
  except:
    # If we got an exception after the user typed a description for their
    # change, back up the description before re-raising.
    if change_desc:
      backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
      print '\nGot exception while uploading -- saving description to %s\n' \
          % backup_path
      backup_file = open(backup_path, 'w')
      backup_file.write(change_desc.description)
      backup_file.close()
    raise

  # Remember the new issue/patchset in git config for next time.
  if not cl.GetIssue():
    cl.SetIssue(issue)
  cl.SetPatchset(patchset)

  if options.use_commit_queue:
    cl.SetFlag('commit', '1')
  return 0
@usage('[args to "git diff"]')
def CMDupload(parser, args):
  """upload the current changelist to codereview"""
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-m', dest='message', help='message for patchset')
  parser.add_option('-t', dest='title', help='title for patchset')
  parser.add_option('-r', '--reviewers',
                    help='reviewer email addresses')
  parser.add_option('--cc',
                    help='cc email addresses')
  parser.add_option('--send-mail', action='store_true',
                    help='send email to reviewer immediately')
  parser.add_option("--emulate_svn_auto_props", action="store_true",
                    dest="emulate_svn_auto_props",
                    help="Emulate Subversion's auto properties feature.")
  parser.add_option('-c', '--use-commit-queue', action='store_true',
                    help='tell the commit queue to commit this patchset')
  # Gerrit-only option; added conditionally so it doesn't show elsewhere.
  if settings.GetIsGerrit():
    parser.add_option('--target_branch', dest='target_branch', default='master',
                      help='target branch to upload')
  add_git_similarity(parser)
  (options, args) = parser.parse_args(args)

  # Print warning if the user used the -m/--message argument.  This will soon
  # change to -t/--title.
  if options.message:
    print >> sys.stderr, (
        '\nWARNING: Use -t or --title to set the title of the patchset.\n'
        'In the near future, -m or --message will send a message instead.\n'
        'See http://goo.gl/JGg0Z for details.\n')

  # Make sure index is up-to-date before running diff-index.
  RunGit(['update-index', '--refresh', '-q'], error_ok=True)
  if RunGit(['diff-index', 'HEAD']):
    print 'Cannot upload with a dirty tree.  You must commit locally first.'
    return 1

  cl = Changelist()
  if args:
    # TODO(ukai): is it ok for gerrit case?
    base_branch = args[0]
  else:
    # Default to diffing against the "upstream" branch.
    base_branch = cl.GetUpstreamBranch()
    args = [base_branch + "..."]

  if not options.bypass_hooks:
    # Presubmit may veto the upload or contribute reviewers.
    hook_results = cl.RunHook(committing=False, upstream_branch=base_branch,
                              may_prompt=not options.force,
                              verbose=options.verbose,
                              author=None)
    if not hook_results.should_continue():
      return 1
    if not options.reviewers and hook_results.reviewers:
      options.reviewers = hook_results.reviewers

  print_stats(options.similarity, options.find_copies, args)
  if settings.GetIsGerrit():
    return GerritUpload(options, args, cl)
  return RietveldUpload(options, args, cl)
def IsSubmoduleMergeCommit(ref):
  """Returns True when *ref* tip is a merge commit carrying the submodule
  signature comment."""
  # When submodules are added to the repo, we expect there to be a single
  # non-git-svn merge commit at remote HEAD with a signature comment.
  signature = '^SVN changes up to revision [0-9]*$'
  output = RunGit(
      ['rev-list', '--merges', '--grep=%s' % signature, '%s^!' % ref])
  return output != ''
def SendUpstream(parser, args, cmd):
"""Common code for CmdPush and CmdDCommit
Squashed commit into a single.
Updates changelog with metadata (e.g. pointer to review).
Pushes/dcommits the code upstream.
Updates review and closes.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('-m', dest='message',
help="override review description")
parser.add_option('-f', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('-c', dest='contributor',
help="external contributor for patch (appended to " +
"description and used as author for git). Should be " +
"formatted as 'First Last <email@example.com>'")
add_git_similarity(parser)
(options, args) = parser.parse_args(args)
cl = Changelist()
if not args or cmd == 'push':
# Default to merging against our best guess of the upstream branch.
args = [cl.GetUpstreamBranch()]
if options.contributor:
if not re.match('^.*\s<\S+@\S+>$', options.contributor):
print "Please provide contibutor as 'First Last <email@example.com>'"
return 1
base_branch = args[0]
base_has_submodules = IsSubmoduleMergeCommit(base_branch)
# Make sure index is up-to-date before running diff-index.
RunGit(['update-index', '--refresh', '-q'], error_ok=True)
if RunGit(['diff-index', 'HEAD']):
print 'Cannot %s with a dirty tree. You must commit locally first.' % cmd
return 1
# This rev-list syntax means "show all commits not in my branch that
# are in base_branch".
upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
base_branch]).splitlines()
if upstream_commits:
print ('Base branch "%s" has %d commits '
'not in this branch.' % (base_branch, len(upstream_commits)))
print 'Run "git merge %s" before attempting to %s.' % (base_branch, cmd)
return 1
# This is the revision `svn dcommit` will commit on top of.
svn_head = RunGit(['log', '--grep=^git-svn-id:', '-1',
'--pretty=format:%H'])
if cmd == 'dcommit':
# If the base_head is a submodule merge commit, the first parent of the
# base_head should be a git-svn commit, which is what we're interested in.
base_svn_head = base_branch
if base_has_submodules:
base_svn_head += '^1'
extra_commits = RunGit(['rev-list', '^' + svn_head, base_svn_head])
if extra_commits:
print ('This branch has %d additional commits not upstreamed yet.'
% len(extra_commits.splitlines()))
print ('Upstream "%s" or rebase this branch on top of the upstream trunk '
'before attempting to %s.' % (base_branch, cmd))
return 1
if not options.bypass_hooks:
author = None
if options.contributor:
author = re.search(r'\<(.*)\>', options.contributor).group(1)
hook_results = cl.RunHook(
committing=True,
upstream_branch=base_branch,
may_prompt=not options.force,
verbose=options.verbose,
author=author)
if not hook_results.should_continue():
return 1
if cmd == 'dcommit':
# Check the tree status if the tree status URL is set.
status = GetTreeStatus()
if 'closed' == status:
print('The tree is closed. Please wait for it to reopen. Use '
'"git cl dcommit --bypass-hooks" to commit on a closed tree.')
return 1
elif 'unknown' == status:
print('Unable to determine tree status. Please verify manually and '
'use "git cl dcommit --bypass-hooks" to commit on a closed tree.')
else:
breakpad.SendStack(
'GitClHooksBypassedCommit',
'Issue %s/%s bypassed hook when committing' %
(cl.GetRietveldServer(), cl.GetIssue()),
verbose=False)
description = options.message
if not description and cl.GetIssue():
description = cl.GetDescription()
if not description:
if not cl.GetIssue() and options.bypass_hooks:
description = CreateDescriptionFromLog([base_branch])
else:
print 'No description set.'
print 'Visit %s/edit to set it.' % (cl.GetIssueURL())
return 1
if cl.GetIssue():
description += "\n\nReview URL: %s" % cl.GetIssueURL()
if options.contributor:
description += "\nPatch from %s." % options.contributor
print 'Description:', repr(description)
branches = [base_branch, cl.GetBranchRef()]
if not options.force:
print_stats(options.similarity, options.find_copies, branches)
ask_for_data('About to commit; enter to confirm.')
# We want to squash all this branch's commits into one commit with the proper
# description. We do this by doing a "reset --soft" to the base branch (which
# keeps the working copy the same), then dcommitting that. If origin/master
# has a submodule merge commit, we'll also need to cherry-pick the squashed
# commit onto a branch based on the git-svn head.
MERGE_BRANCH = 'git-cl-commit'
CHERRY_PICK_BRANCH = 'git-cl-cherry-pick'
# Delete the branches if they exist.
for branch in [MERGE_BRANCH, CHERRY_PICK_BRANCH]:
showref_cmd = ['show-ref', '--quiet', '--verify', 'refs/heads/%s' % branch]
result = RunGitWithCode(showref_cmd)
if result[0] == 0:
RunGit(['branch', '-D', branch])
# We might be in a directory that's present in this branch but not in the
# trunk. Move up to the top of the tree so that git commands that expect a
# valid CWD won't fail after we check out the merge branch.
rel_base_path = RunGit(['rev-parse', '--show-cdup']).strip()
if rel_base_path:
os.chdir(rel_base_path)
# Stuff our change into the merge branch.
# We wrap in a try...finally block so if anything goes wrong,
# we clean up the branches.
retcode = -1
try:
RunGit(['checkout', '-q', '-b', MERGE_BRANCH])
RunGit(['reset', '--soft', base_branch])
if options.contributor:
RunGit(['commit', '--author', options.contributor, '-m', description])
else:
RunGit(['commit', '-m', description])
if base_has_submodules:
cherry_pick_commit = RunGit(['rev-list', 'HEAD^!']).rstrip()
RunGit(['branch', CHERRY_PICK_BRANCH, svn_head])
RunGit(['checkout', CHERRY_PICK_BRANCH])
RunGit(['cherry-pick', cherry_pick_commit])
if cmd == 'push':
# push the merge branch.
remote, branch = cl.FetchUpstreamTuple()
retcode, output = RunGitWithCode(
['push', '--porcelain', remote, 'HEAD:%s' % branch])
logging.debug(output)
else:
# dcommit the merge branch.
retcode, output = RunGitWithCode(['svn', 'dcommit',
'-C%s' % options.similarity,
'--no-rebase', '--rmdir'])
finally:
# And then swap back to the original branch and clean up.
RunGit(['checkout', '-q', cl.GetBranch()])
RunGit(['branch', '-D', MERGE_BRANCH])
if base_has_submodules:
RunGit(['branch', '-D', CHERRY_PICK_BRANCH])
if cl.GetIssue():
if cmd == 'dcommit' and 'Committed r' in output:
revision = re.match('.*?\nCommitted r(\\d+)', output, re.DOTALL).group(1)
elif cmd == 'push' and retcode == 0:
match = (re.match(r'.*?([a-f0-9]{7})\.\.([a-f0-9]{7})$', l)
for l in output.splitlines(False))
match = filter(None, match)
if len(match) != 1:
DieWithError("Couldn't parse ouput to extract the committed hash:\n%s" %
output)
revision = match[0].group(2)
else:
return 1
viewvc_url = settings.GetViewVCUrl()
if viewvc_url and revision:
cl.description += ('\n\nCommitted: ' + viewvc_url + revision)
elif revision:
cl.description += ('\n\nCommitted: ' + revision)
print ('Closing issue '
'(you may be prompted for your codereview password)...')
cl.CloseIssue()
cl.SetIssue(0)
if retcode == 0:
hook = POSTUPSTREAM_HOOK_PATTERN % cmd
if os.path.isfile(hook):
RunCommand([hook, base_branch], error_ok=True)
return 0
@usage('[upstream branch to apply against]')
def CMDdcommit(parser, args):
  """commit the current changelist via git-svn"""
  if not settings.GetIsGitSvn():
    # Guard against running dcommit in a pure-git checkout: the commit would
    # appear to succeed but never land in SVN (see message below).
    message = """This doesn't appear to be an SVN repository.
If your project has a git mirror with an upstream SVN master, you probably need
to run 'git svn init', see your project's git mirror documentation.
If your project has a true writeable upstream repository, you probably want
to run 'git cl push' instead.
Choose wisely, if you get this wrong, your commit might appear to succeed but
will instead be silently ignored."""
    print(message)
    ask_for_data('[Press enter to dcommit or ctrl-C to quit]')
  # Defer the real work (option parsing, squash, dcommit) to SendUpstream.
  return SendUpstream(parser, args, 'dcommit')
@usage('[upstream branch to apply against]')
def CMDpush(parser, args):
  """commit the current changelist via git"""
  if settings.GetIsGitSvn():
    # Warn when the checkout is git-svn backed: pushing directly there is
    # almost certainly a mistake; the user probably wants dcommit.
    print('This appears to be an SVN repository.')
    print('Are you sure you didn\'t mean \'git cl dcommit\'?')
    ask_for_data('[Press enter to push or ctrl-C to quit]')
  # Defer the real work (option parsing, squash, push) to SendUpstream.
  return SendUpstream(parser, args, 'push')
@usage('<patch url or issue id>')
def CMDpatch(parser, args):
  """patch in a code review

  Accepts either a numeric issue id (the patch is fetched through the
  Changelist RPC helpers) or a full URL to a raw .diff, and applies it to
  the working tree with "git apply".
  """
  parser.add_option('-b', dest='newbranch',
                    help='create a new branch off trunk for the patch')
  parser.add_option('-f', action='store_true', dest='force',
                    help='with -b, clobber any existing branch')
  parser.add_option('--reject', action='store_true', dest='reject',
                    help='allow failed patches and spew .rej files')
  parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
                    help="don't commit after patch applies")
  (options, args) = parser.parse_args(args)
  if len(args) != 1:
    parser.print_help()
    return 1
  issue_arg = args[0]

  # TODO(maruel): Use apply_issue.py
  # TODO(ukai): use gerrit-cherry-pick for gerrit repository?

  if issue_arg.isdigit():
    # Input is an issue id. Figure out the URL.
    cl = Changelist()
    issue = int(issue_arg)
    patchset = cl.GetMostRecentPatchset(issue)
    patch_data = cl.GetPatchSetDiff(issue, patchset)
  else:
    # Assume it's a URL to the patch. Default to https.
    issue_url = gclient_utils.UpgradeToHttps(issue_arg)
    match = re.match(r'.*?/issue(\d+)_(\d+).diff', issue_url)
    if not match:
      DieWithError('Must pass an issue ID or full URL for '
                   '\'Download raw patch set\'')
    issue = int(match.group(1))
    patchset = int(match.group(2))
    patch_data = urllib2.urlopen(issue_arg).read()

  if options.newbranch:
    if options.force:
      RunGit(['branch', '-D', options.newbranch],
             stderr=subprocess2.PIPE, error_ok=True)
    RunGit(['checkout', '-b', options.newbranch,
            Changelist().GetUpstreamBranch()])

  # Switch up to the top-level directory, if necessary, in preparation for
  # applying the patch.
  top = RunGit(['rev-parse', '--show-cdup']).strip()
  if top:
    os.chdir(top)

  # Git patches have a/ at the beginning of source paths. We strip that out
  # with a sed script rather than the -p flag to patch so we can feed either
  # Git or svn-style patches into the same apply command.
  # re.sub() should be used but flags=re.MULTILINE is only in python 2.7.
  try:
    patch_data = subprocess2.check_output(
        ['sed', '-e', 's|^--- a/|--- |; s|^+++ b/|+++ |'], stdin=patch_data)
  except subprocess2.CalledProcessError:
    DieWithError('Git patch mungling failed.')
  logging.info(patch_data)

  # We use "git apply" to apply the patch instead of "patch" so that we can
  # pick up file adds.
  # The --index flag means: also insert into the index (so we catch adds).
  cmd = ['git', 'apply', '--index', '-p0']
  if options.reject:
    cmd.append('--reject')
  try:
    subprocess2.check_call(cmd, stdin=patch_data, stdout=subprocess2.VOID)
  except subprocess2.CalledProcessError:
    DieWithError('Failed to apply the patch')

  # If we had an issue, commit the current state and register the issue.
  if not options.nocommit:
    RunGit(['commit', '-m', 'patch from issue %s' % issue])
    cl = Changelist()
    cl.SetIssue(issue)
    cl.SetPatchset(patchset)
    print "Committed patch."
  else:
    print "Patch applied to index."
  return 0
def CMDrebase(parser, args):
  """rebase current branch on top of svn repo"""
  # Thin wrapper around "git svn rebase" to help users avoid running an
  # accidental "git svn dcommit" by hand.  This is the only command that
  # never touches the parser: every argument is handed straight to git-svn.
  rebase_cmd = ['git', 'svn', 'rebase'] + args
  return subprocess2.call(rebase_cmd)
def GetTreeStatus():
  """Fetches the tree status and returns either 'open', 'closed',
  'unknown' or 'unset'."""
  url = settings.GetTreeStatusUrl(error_ok=True)
  if not url:
    # No tree status URL configured for this checkout.
    return 'unset'
  status = urllib2.urlopen(url).read().lower()
  if 'closed' in status or status == '0':
    return 'closed'
  if 'open' in status or status == '1':
    return 'open'
  return 'unknown'
def GetTreeStatusReason():
  """Fetches the tree status from a json url and returns the message
  with the reason for the tree to be opened or closed."""
  # The status app serves machine-readable data at /current?format=json.
  json_url = urlparse.urljoin(settings.GetTreeStatusUrl(),
                              '/current?format=json')
  connection = urllib2.urlopen(json_url)
  payload = connection.read()
  connection.close()
  return json.loads(payload)['message']
def CMDtree(parser, args):
  """show the status of the tree"""
  # Exit codes: 0 = tree open, 1 = tree not open, 2 = status URL not set.
  _, args = parser.parse_args(args)
  status = GetTreeStatus()
  if 'unset' == status:
    print 'You must configure your tree status URL by running "git cl config".'
    return 2

  print "The tree is %s" % status
  print
  print GetTreeStatusReason()
  if status != 'open':
    return 1
  return 0
def CMDtry(parser, args):
  """Triggers a try job through Rietveld."""
  group = optparse.OptionGroup(parser, "Try job options")
  group.add_option(
      "-b", "--bot", action="append",
      help=("IMPORTANT: specify ONE builder per --bot flag. Use it multiple "
            "times to specify multiple builders. ex: "
            "'-bwin_rel:ui_tests,webkit_unit_tests -bwin_layout'. See "
            "the try server waterfall for the builders name and the tests "
            "available. Can also be used to specify gtest_filter, e.g. "
            "-bwin_rel:base_unittests:ValuesTest.*Value"))
  group.add_option(
      "-r", "--revision",
      help="Revision to use for the try job; default: the "
           "revision will be determined by the try server; see "
           "its waterfall for more info")
  group.add_option(
      "-c", "--clobber", action="store_true", default=False,
      help="Force a clobber before building; e.g. don't do an "
           "incremental build")
  group.add_option(
      "--project",
      help="Override which project to use. Projects are defined "
           "server-side to define what default bot set to use")
  group.add_option(
      "-t", "--testfilter", action="append", default=[],
      help=("Apply a testfilter to all the selected builders. Unless the "
            "builders configurations are similar, use multiple "
            "--bot <builder>:<test> arguments."))
  group.add_option(
      "-n", "--name", help="Try job name; default to current branch name")
  parser.add_option_group(group)
  options, args = parser.parse_args(args)

  if args:
    parser.error('Unknown arguments: %s' % args)

  cl = Changelist()
  if not cl.GetIssue():
    parser.error('Need to upload first')

  if not options.name:
    options.name = cl.GetBranch()

  # Process --bot and --testfilter.
  if not options.bot:
    # Get try slaves from PRESUBMIT.py files if not specified.
    change = cl.GetChange(cl.GetUpstreamBranch(), None)
    options.bot = presubmit_support.DoGetTrySlaves(
        change,
        change.LocalPaths(),
        settings.GetRoot(),
        None,
        None,
        options.verbose,
        sys.stdout)
  if not options.bot:
    parser.error('No default try builder to try, use --bot')

  # Map builder name -> list of tests to run on it.
  builders_and_tests = {}
  for bot in options.bot:
    if ':' in bot:
      builder, tests = bot.split(':', 1)
      builders_and_tests.setdefault(builder, []).extend(tests.split(','))
    elif ',' in bot:
      parser.error('Specify one bot per --bot flag')
    else:
      builders_and_tests.setdefault(bot, []).append('defaulttests')

  if options.testfilter:
    # --testfilter overrides the per-builder test lists, except for pure
    # compile-only builders which are dropped.
    forced_tests = sum((t.split(',') for t in options.testfilter), [])
    builders_and_tests = dict(
        (b, forced_tests) for b, t in builders_and_tests.iteritems()
        if t != ['compile'])

  if any('triggered' in b for b in builders_and_tests):
    print >> sys.stderr, (
        'ERROR You are trying to send a job to a triggered bot. This type of'
        ' bot requires an\ninitial job from a parent (usually a builder). '
        'Instead send your job to the parent.\n'
        'Bot list: %s' % builders_and_tests)
    return 1

  patchset = cl.GetPatchset()
  if not cl.GetPatchset():
    patchset = cl.GetMostRecentPatchset(cl.GetIssue())

  cl.RpcServer().trigger_try_jobs(
      cl.GetIssue(), patchset, options.name, options.clobber, options.revision,
      builders_and_tests)
  print('Tried jobs on:')
  # Align builder names in the summary output.
  length = max(len(builder) for builder in builders_and_tests)
  for builder in sorted(builders_and_tests):
    print ' %*s: %s' % (length, builder, ','.join(builders_and_tests[builder]))
  return 0
@usage('[new upstream branch]')
def CMDupstream(parser, args):
  """prints or sets the name of the upstream branch, if any"""
  _, args = parser.parse_args(args)
  if len(args) > 1:
    # parser.error() exits the process, so the return below never runs.
    parser.error('Unrecognized args: %s' % ' '.join(args))
    return 0

  cl = Changelist()
  if args:
    # One arg means set upstream branch.
    RunGit(['branch', '--set-upstream', cl.GetBranch(), args[0]])
    cl = Changelist()  # re-read branch config after changing it
    print "Upstream branch set to " + cl.GetUpstreamBranch()
  else:
    print cl.GetUpstreamBranch()
  return 0
def CMDset_commit(parser, args):
  """set the commit bit"""
  # Flip the commit-queue bit on the current changelist's review.
  _, args = parser.parse_args(args)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  Changelist().SetFlag('commit', '1')
  return 0
def Command(name):
  """Looks up the CMD<name> handler in this module; None when unknown."""
  this_module = sys.modules[__name__]
  return getattr(this_module, 'CMD' + name, None)
def CMDhelp(parser, args):
  """print list of commands or help for a specific command"""
  _, args = parser.parse_args(args)
  if len(args) != 1:
    parser.print_help()
    return 0
  # "git cl help <command>" is rewritten as "git cl <command> --help".
  return main(args + ['--help'])
def GenUsage(parser, command):
  """Fills in an OptionParser's usage/description from a CMD* function."""
  cmd_fn = Command(command)
  extra_usage = getattr(cmd_fn, 'usage_more', '')
  if command == 'help':
    command = '<command>'
  else:
    # OptionParser.description prefers a single non-formatted string, so
    # collapse runs of whitespace/newlines in the function's docstring.
    parser.description = re.sub('[\r\n ]{2,}', ' ', cmd_fn.__doc__)
  parser.set_usage('usage: %%prog %s [options] %s' % (command, extra_usage))
def main(argv):
  """Doesn't parse the arguments here, just find the right subcommand to
  execute."""
  # Refuse to run on pre-2.6 interpreters.
  if sys.hexversion < 0x02060000:
    print >> sys.stderr, (
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0])
    return 2

  # Reload settings.
  global settings
  settings = Settings()

  # Do it late so all commands are listed.
  CMDhelp.usage_more = ('\n\nCommands are:\n' + '\n'.join([
      ' %-10s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
      for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))

  # Create the option parse and add --verbose support.
  parser = optparse.OptionParser()
  parser.add_option(
      '-v', '--verbose', action='count', default=0,
      help='Use 2 times for more debugging info')
  old_parser_args = parser.parse_args
  # Wrap parse_args so --verbose configures logging before any command runs.
  def Parse(args):
    options, args = old_parser_args(args)
    if options.verbose >= 2:
      logging.basicConfig(level=logging.DEBUG)
    elif options.verbose:
      logging.basicConfig(level=logging.INFO)
    else:
      logging.basicConfig(level=logging.WARNING)
    return options, args
  parser.parse_args = Parse

  if argv:
    command = Command(argv[0])
    if command:
      # "fix" the usage and the description now that we know the subcommand.
      GenUsage(parser, argv[0])
      try:
        return command(parser, argv[1:])
      except urllib2.HTTPError, e:
        if e.code != 500:
          raise
        DieWithError(
            ('AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
             'and retry or visit go/isgaeup.\n%s') % (e.code, str(e)))

  # Not a known command. Default to help.
  GenUsage(parser, 'help')
  return CMDhelp(parser, argv)
if __name__ == '__main__':
  # fix_encoding is a depot_tools helper -- presumably it normalizes
  # stdout/stderr encoding before any output is written; confirm.
  fix_encoding.fix_encoding()
  sys.exit(main(sys.argv[1:]))
| DarrelHsu/cvsClient | git_cl.py | Python | bsd-3-clause | 64,418 | [
"VisIt"
] | 6a74b8fb420e9fffd9f38128b389eee2b8f5f9eb9b2dcd5d945bc1339c17c7bf |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to convert database structured txt-files into netCDF format.
For usage from command line see
python convert_db2nc.py -h
Requirements:
- numpy
- netCDF4
- argparse
Converter class: DB2NC
Check out current version at
https://github.com/Chilipp/Convert-Database-to-NetCDF
"""
import os
import pickle
from collections import OrderedDict
from argparse import ArgumentParser
from netCDF4 import Dataset
from numpy import transpose, loadtxt, unique, shape, zeros, array, roll, \
dstack, ones, set_printoptions, get_printoptions
from numpy import all as npall
from numpy import any as npany
from numpy.ma import masked_where, filled
from datetime import datetime, timedelta
from itertools import izip, chain, product, imap
import sys
__author__ = "Philipp Sommer (philipp.sommer@studium.uni-hamburg.de)"
__version__ = "1.0"
missval = -9999
# dictionary containing documentation for keyword arguments
# (used for the argument parser and in the methods of DB2NC class)
kwargdocs = {'init': OrderedDict(( # init specific options
('ifile', """
Path to the text file containing the information in columns which
shall be converted into NetCDF."""),
('gridcols', """
List of integers representing the columns with the
grid info of what shall be considered in inputfile. The sorting and
number of columns must correspond to the variables for the
mask-file."""),
('defaultflags', """
List of integers representing the columns with the
flags of what shall be considered in inputfile and do not belong to
concatenation columns (see cat option)."""),
('mask', """
Netcdf file containing grid definition masks stored
in the specified variable names, separated by comma. The sorting and
number of variables must correspond to the gridcols."""),
('alias', """
Use aliases for the items in col (for gridcols only) as specified in
file. The given file must contain two columns: the first one consists
of the unique items from ifile, the second one of their aliases."""),
('noheader', """
If set, first rows of all text files is not omitted"""),
('cat', """
Create variables in the NetCDF file concatenated from the given
column pairs. For each argument col1,col2 the variables in the
NetCDF-file will be like flag1_flag2 where flag1 is out of the items
of the col1-th column in inputfile and flag2 out of the col2-th column
in inputfile"""),
('sort', """
Sort items in column col as specified in file. File must contain two
columns: the first columns consists the unique flag names of the
inputfile, the second column consists of the final names for which to
sort (see also option -redistribute)."""),
('redistribute', """
Sort item flag in column col as specified in file by its fraction in
each gridcell. File must contain 2-dimensional fields stored in the
given varnames representing the fraction of the flag in the given
itemname for each gridcell."""),
('valcol', """
Column containing the value in inputfile which will be added up.
Default: last, i.e. -1"""),
('time', """
Columns with time information and format, separated by comma.""")
))}
kwargdocs['convert'] = OrderedDict(( # convert specific options
('verbose', """
If set, use verbose"""),
('maxrows', """
Number of rows to loop through in ifiledata""")
))
kwargdocs['output'] = OrderedDict((
('output', """
Output filename."""),
('header', """
Dictionary containing header information (e.g. {'title': 'test'})"""),
('metadata', """
Dictionary containing metadata information. Keys are flagname, values are
dictionaries with items as (attribute, value). One example for variable
'tair': {'tair': {'long_name': 'Air Temperature',
'units': 'K'}}"""),
('clobber', """
Enable clobber (will significantly reduce file size). Input must be 'auto'
or a list of the chunking parameters (the first one corresponds to time,
the others to the dimension as stored in the netCDF file (usually the
second corresponds to lat, the third to lon).
If 'auto' chunking parameters are deterimined such that 1D and 2D access
are balanced. The calculation function is taken from
http://www.unidata.ucar.edu/staff/russ/public/chunk_shape_3D.py"""),
('compression', """
Dictionary with compression parameters for netCDF4 variable (determined by
netCDF4 package. Possible keywords are zlib, complevel, shuffle and
least_significant_digit. For documentation see
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4.Variable-class.html
If compression is not a dictionary, the value will be used for the zlib
keyword in netCDF4 variables.""")
))
def loadpickle(string):
    """Load and return the object pickled in the file at path ``string``.

    The file is opened in binary mode: pickle data is a byte stream, and
    reading it through a text-mode handle corrupts protocol >= 1 pickles
    (and always fails on Python 3).
    """
    with open(string, 'rb') as f:
        val = pickle.load(f)
    return val
def determine_chunk(string):
    """Parse a chunking specification given on the command line.

    Returns the literal string 'auto' unchanged; any other value is taken
    to be a comma-separated list of integers and returned as a list.
    A list comprehension is used instead of ``map`` so a real list (not a
    lazy iterator) is returned under Python 3 as well.
    """
    if string == 'auto':
        return string
    return [int(size) for size in string.split(',')]
class MyParser(ArgumentParser):
    """ArgumentParser subclass whose @-files may hold several arguments per
    line; everything after a '#' on a line is treated as a comment."""

    def convert_arg_line_to_args(self, arg_line):
        # Whitespace-split the line; stop at the first comment marker.
        for token in arg_line.split():
            if not token.strip():
                continue
            if token.startswith('#'):
                break
            yield token
    convert_arg_line_to_args.__doc__ = \
        ArgumentParser.convert_arg_line_to_args.__doc__
parser = MyParser(
description="%s\nAuthor: %s\nVersion: %s" % (__doc__, __author__,
__version__),
usage='%(prog)s [options]',
fromfile_prefix_chars="@")
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument(
'ifile', help=kwargdocs['init']['ifile'],
metavar='<<<inputfile>>>')
parser.add_argument(
'mask',
help=kwargdocs['init']['mask'],
metavar='<<<mask-file>>>,<<<var1>>>,<<<var2>>>,...')
parser.add_argument('-v', '--verbose', help=kwargdocs['convert']['verbose'],
action='store_true')
parser.add_argument(
'-info',
help="""
Same as verbose option (-v) but only makes the initialization, print
information on the final output and exit.""",
action='store_true')
parser.add_argument(
'-noheader',
help=kwargdocs['init']['noheader'],
action='store_true')
# spatial reference columns
_colgrp = parser.add_argument_group(
'Column numbers',
"""Set up how the column shall be used.""")
_colgrp.add_argument(
'-gridcols',
help=kwargdocs['init']['gridcols'],
metavar='<<<gridcol>>>', required=True, type=int, nargs='+')
# flag columns
_colgrp.add_argument(
'-flagcols',
help=kwargdocs['init']['defaultflags'],
metavar='<<<flagcol>>>', type=int, default=[], nargs='+',
dest='defaultflags')
_colgrp.add_argument(
'-cat',
help=kwargdocs['init']['cat'],
nargs='+', dest='cat', metavar='<<<col1>>>,<<<col2>>>', default=[])
# value column
_colgrp.add_argument(
'-valcol',
help='Default: %(default)s. ' + kwargdocs['init']['valcol'],
default=-1, type=int)
# time column
_colgrp.add_argument(
'-t', '--time',
help=kwargdocs['init']['time'],
metavar="<<<col>>>,<<<format>>>", nargs='+', default=None)
_colhandlegrp = parser.add_argument_group(
'Special treatment of columns',
"""Set up if columns shall be treated in a special manner.""")
_colhandlegrp.add_argument(
'-alias',
help=kwargdocs['init']['alias'],
metavar='<<<col>>>,<<<file>>>', nargs='+', default=[])
_colhandlegrp.add_argument(
'-sort',
help=kwargdocs['init']['sort'],
metavar='<<<col>>>,<<<file>>>', nargs='+', default=[])
_colhandlegrp.add_argument(
'-rd', '--redistribute',
help=kwargdocs['init']['redistribute'],
metavar=(
'<<<col>>>,<<<flag>>>,<<<file>>>,<<<item1>>>,<<<var1>>>'
'[,<<<item2>>>,<< <var2>>>,...]'),
nargs='+', default=[])
# misc
_miscgrp = parser.add_argument_group('Miscellaneous Output Options')
_miscgrp.add_argument(
'-o', '--output',
help=' Default: %(default)s. ' + kwargdocs['output']['output'],
default='landuse.nc', metavar='<<<file name>>>')
_miscgrp.add_argument(
'-c', '--clobber',
help="Default: %(default)s, without arguments: %(const)s. " +
kwargdocs['output']['clobber'],
nargs='?', default=False, const='auto',
type=determine_chunk,
metavar='chunk_time,chunk_lon,chunk_lat')
_miscgrp.add_argument(
'-z', '--compression', nargs='?', default=False, const=True,
type=lambda x: {'zlib': True, 'complevel': int(x)},
help="""
Enable compression. Without arguments, simply set zlib=True in netCDF
variables (i.e. compression level is 4).
Arguments may be integer between 1 and 9 determining the compression
level.""",
metavar='<<<complevel>>>')
_miscgrp.add_argument(
'-meta', '--metadata', default={},
help="""
Pickle file (i.e. dictionary stored as pickle file) containing metadata
information. Keys are flagname, values are dictionaries with items as
(attribute, value). One example for variable 'tair':
{'tair': {'long_name': 'Air Temperature',
'units': 'K'}}""",
type=loadpickle,
metavar='<<<pickle file>>>')
_miscgrp.add_argument(
'-header', default={},
help="""
Pickle file (i.e. dictionary stored as pickle file) containing header
information for NetCDF file. For example: {'title': 'test'}""",
type=loadpickle,
metavar='<<<pickle file>>>')
_miscgrp.add_argument(
'-rows', '--maxrows', help=kwargdocs['convert']['maxrows'], type=int)
def main(*args):
    """Run the full conversion: parse options, convert, write the output.

    Any positional *args are forwarded to the argument parser; with no
    arguments the options are taken from sys.argv.
    Returns the DB2NC converter instance (also when only -info was given).
    """
    t = datetime.now()
    # set_options() parses the command line itself, so the previous
    # redundant parser.parse_args() call here has been removed.
    opt, initkwargs, loopkwargs, outputkwargs = set_options(*args)
    converter = DB2NC(**initkwargs)
    if opt.info:  # info option: print summary and stop before converting
        converter.info()
        return converter
    elif opt.verbose:
        converter.info()
    converter.convert(**loopkwargs)
    converter.output_nc(**outputkwargs)
    print(datetime.now() - t)
    return converter
def set_options(*args):
    """Function to set up the keyword arguments for the methods of DB2NC class
    from the Argument Parser.

    Arguments will be passed to the parser.
    Return:
      - opt: the parsed Namespace object
      - initkwargs: Dictionary with keyword arguments for __init__ method of
        DB2NC instance
      - loopkwargs: Dictionary with keyword arguments for convert method of
        DB2NC instance
      - outputkwargs: Dictionary with keyword arguments for output_nc method
        of DB2NC instance
    """
    opt = parser.parse_args(args) if args else parser.parse_args()
    optvars = vars(opt)

    def _pick(section):
        # Keep only the options documented for the given DB2NC method.
        return {key: val for key, val in optvars.items()
                if key in kwargdocs[section]}

    return opt, _pick('init'), _pick('convert'), _pick('output')
class Redistributer(object):
    """class which stores in data attribute how to redistribute the data"""
    # __slots__ keeps the many instances lightweight (no per-instance dict).
    __slots__ = ('col', 'flag', 'data', 'ncfile', 'variables')

    def __init__(self, sortitem):
        """sortitem: raw command-line string of the form
        '<col>,<flag>,<ncfile>,<item1>,<var1>[,<item2>,<var2>,...]'
        as produced by the -rd/--redistribute option."""
        splitteditem = sortitem.split(',')
        # col attribute: Column in ifile which includes the flag to
        # redistribute
        self.col = int(splitteditem[0])
        # flag attribute: Name of the flag which shall be redistributed
        self.flag = splitteditem[1]
        self.ncfile = splitteditem[2]  # NetCDF file name
        self.variables = {  # variables and their counterpart in ncfile
            aliasflag: var for aliasflag, var in
            izip(splitteditem[3::2], splitteditem[4::2])}
        # --- read redistribution data
        nco = Dataset(splitteditem[2])
        # data attribute: Dictionary with target flag as keys and
        # fractions for the redistribution as values
        # NOTE(review): only the first time step ([0, :]) of each variable is
        # read -- presumably the fractions are time-invariant; confirm.
        self.data = {
            aliasflag: nco.variables[var][0, :] for aliasflag, var in
            izip(splitteditem[3::2], splitteditem[4::2])}
        nco.close()

    def info(self):
        """Print a human readable summary of this redistribution rule."""
        print('Redistribute %s in column %i with variables from %s' % (
            self.flag, self.col, self.ncfile))
        print(' Variables in NetCDF file correspond to %s' % (
            ', '.join('%s (%s)' % item for item in self.variables.items())))
class Adder(object):
    """Strategy object that accumulates values into a target array.

    ``addfunc(itime, fields, val)`` adds ``val`` into row ``itime`` of
    ``target`` at the positions selected by ``fields`` -- either directly,
    or scaled by the per-cell factors ``mulc`` when redistribution weights
    were supplied at construction time.
    """

    def __init__(self, target, mulc=None):
        """
        Input:
          - target: Array where the data of addfunc will be added
          - mulc: multiplication array (if given, addfunc is the redist
            method used for redistribution data, else addnormal)"""
        self.target = target
        if mulc is None:
            self.addfunc = self.addnormal
        else:
            self.mulc = mulc
            self.addfunc = self.redist

    def redist(self, itime, fields, val):
        """Add ``val`` scaled by the redistribution fractions."""
        row = self.target[itime, :]
        row[fields] += val * self.mulc[fields]

    def addnormal(self, itime, fields, val):
        """Add ``val`` unscaled."""
        row = self.target[itime, :]
        row[fields] += val
class LoopInfo(object):
    """Class giving information about the loop"""
    __slots__ = ('total', 'counter', 't0', 'info')

    def __init__(self, total, t0, verbose):
        """
        Input:
          - total: Integer. Total length of loop
          - t0: datetime object of initial time
          - verbose: If True, will print update to stdout, else do nothing
        """
        self.counter = 0
        self.total = total
        self.t0 = t0
        # Bind the reporting strategy once so the hot loop just calls
        # self.info() without re-checking the verbose flag each iteration.
        if verbose:
            self.info = self._verboseprint
        else:
            self.info = self._donothing

    def _donothing(self):
        # NOTE(review): counter is not advanced here, so it only counts in
        # verbose mode; harmless since it is only used in the progress
        # message, but confirm before relying on counter elsewhere.
        pass

    def _verboseprint(self):
        """Function which will be called if verbose is set at initialization"""
        # Report only every 100th iteration to limit stdout overhead.
        if not self.counter % 100:
            if self.counter == 0:
                sys.stdout.write(
                    "\rProcessed %i of %i." % (self.counter, self.total))
                sys.stdout.flush()
            else:
                sys.stdout.write(
                    ("\rProcessed %i of %i. "
                     "Remaining time: %4.1f minutes.") % (
                        self.counter, self.total,
                        # expected time
                        (datetime.now() - self.t0).total_seconds() *
                        self.total / (60. * self.counter) -
                        # time already passed
                        (datetime.now() - self.t0).total_seconds()/60.
                        ))
                sys.stdout.flush()
        self.counter += 1
class Data(object):
    """Plain container for the large data arrays; attributes are assigned
    later during processing (DB2NC creates one as its ``data`` member)."""
    # container for large data arrays
    # __slots__ restricts the attribute set and avoids a per-instance dict:
    # ifiledata/finaldata/maskdata hold the input, output and mask arrays,
    # lat/lon the grid coordinates.
    __slots__ = ['ifiledata', 'finaldata', 'maskdata', 'lat', 'lon']

    def __init__(self):
        pass
class DB2NC(object):
    """Class to convert database structured data to netCDF file.

    The class reads a tab-separated text file, maps each record onto a
    spatial grid defined by a mask netCDF file, optionally resorts or
    redistributes flag values, and writes the accumulated fields with
    :meth:`output_nc`.  It relies on module-level imports defined
    elsewhere in this file (numpy helpers such as ``loadtxt``/``unique``,
    ``netCDF4.Dataset``, ``itertools`` and the Python-2 ``imap``/``izip``/
    ``xrange`` names, plus ``missval``, ``Redistributer`` and
    ``kwargdocs``).
    """
    def __init__(self, ifile, mask, gridcols, defaultflags=[], alias=[],
                 noheader=False, cat=[], sort=[], redistribute=[],
                 weights=None, valcol=-1, time=None):
        """Initialization function for DB2NC class"""
        # docstring is extended below
        # NOTE(review): the mutable default arguments ([]) are shared
        # between calls; they are never mutated here, but worth confirming.
        # NOTE(review): the ``weights`` parameter is accepted but never
        # used anywhere in this class -- possibly dead.
        self.data = Data()  # data container
        if not noheader:
            kwargs = {'skiprows': 1}
        else:
            kwargs = {}
        # set up column data
        self._set_cols(gridcols=gridcols, time=time,
                       defaultflags=defaultflags, cat=cat,
                       valcol=valcol)
        # read data from file
        self._read_data(ifile=ifile, **kwargs)
        # read grid data
        self._read_grid(mask=mask)
        # read data for columns with alias
        self.read_alias(alias=alias)
        # set up time data
        self._set_up_timedata()
        # --- handling flags ---
        self._read_sorting(sort, **kwargs)
        self._read_redist(redistribute)
        # set up variable names and initialize data
        self._set_names()
        # set up how data shall be added to data array
        self._set_add()
    def info(self):
        """Print information about the DB2NC instance"""
        # store current numpy print options
        printops = get_printoptions()
        # limit size of printed numpy arrays
        set_printoptions(threshold=5, edgeitems=3)
        # input file name
        print('Input file: ' + self.ifile)
        # mask file name
        print('Mask file with grid informations: ' + self.maskfile)
        # value column
        print('Column containing value: %i' % self.valcol)
        # grid columns
        print('Columns containing spatial information: %s' % (
            ', '.join(map(str, self.gridcols))))
        # flag columns
        print('Columns with flag definitions: %s' % (
            ', '.join(map(str, self.flagcols))))
        # concatenation columns
        print('Columns that shall be concatenated: %s' % (
            ', '.join(map(lambda x: ' and '.join(imap(str, x)),
                          self.catcolpairs))))
        # time column
        print('Columns with time information: ' + (
            ', '.join(map(str, self.timecols))))
        # number of time steps
        print('Number of timesteps found: %i' % self.ntimes)
        # time information
        print('Time data:\n%s' % (array(
            map(datetime.isoformat, self.timedata))))
        # original flags
        for col, value in self.origuniqueflags.items():
            print('Original flags in column %i:\n%s' % (col, ', '.join(value)))
        # resorting option
        for col, value in self.sortdict.items():
            print('Sort options in column %i:\n%s' % (
                col, ', '.join(
                    '%s --> %s' % item for item in value.items())))
        # redistribution option
        for rd in self.redistdata:
            rd.info()
        # final names
        print('---------------------------------------------')
        print('Final names in NetCDF file:\n' +
              ', '.join(self.finalnames))
        # restore numpy print options
        set_printoptions(**printops)
    def _set_cols(self, gridcols, time, defaultflags, cat, valcol):
        """function to set up column arrays.
        This function is called at initialization
        """
        self.defaultflagcols = defaultflags
        self.gridcols = gridcols
        self.valcol = valcol
        # set up columns which shall be concatenated
        self.catcolpairs = list(map(int, catcol.split(',')) for catcol in cat)
        self.catcols = list(chain(*self.catcolpairs))
        # columns which contain the flags
        self.flagcols = defaultflags+self.catcols
        # handling time
        if time is not None:
            # convert to dictionary
            self.time = {int(t): fmt for t, fmt in imap(
                lambda x: x.split(','), time)}
            self.timecols = sorted(self.time.keys())
            self.timefunc = self.gettimeindex
        else:
            self.time = {}
            self.timecols = []
            self.timefunc = self.dummytimeindex
        # all columns which shall be read from ifile
        self.usecols = sorted(gridcols + self.timecols + self.flagcols +
                              [valcol])
    def _read_data(self, ifile, **kwargs):
        """function to read data from text input file during initialization"""
        self.ifile = ifile
        # data is read as strings and converted later on demand
        self.data.ifiledata = loadtxt(ifile, dtype=str, delimiter='\t',
                                      usecols=self.usecols, unpack=True,
                                      **kwargs)
    def _read_grid(self, mask):
        """function to read in grid data from netCDF files during
        initialization"""
        # read mask data from mask file
        self.maskfile = mask.split(',')[0]
        with Dataset(mask.split(',')[0]) as nco:
            data = [
                nco.variables[varname][0, :] for varname in
                mask.split(',')[1:]]
            # convert masked arrays to normal arrays (much faster in loop)
            for idata in xrange(len(data)):
                if hasattr(data[idata], 'mask'):
                    data[idata] = data[idata].filled(missval)
            self.data.maskdata = data
            self.data.lon = nco.variables['lon'][:]
            self.data.lat = nco.variables['lat'][:]
    def read_alias(self, alias):
        """Read alias grid data files given as '<col>,<file>' strings."""
        # read alias grid data file. aliasdata is a list of numpy.ndarrays with
        # shape (2,n) where n is the number of the flags in the aliasfile.
        # Aliasdict is a dictionary with colums as keys and the converted
        # aliasdata
        self.aliasdict = {
            int(aliasitem.split(',')[0]): {
                key: val.astype(self.data.maskdata[self.gridcols.index(int(
                    aliasitem.split(',')[0]))].dtype)
                for key, val in roll(
                    loadtxt(aliasitem.split(',')[1], dtype=str, delimiter='\t',
                            usecols=[0, 1]), 1, axis=1)
                }
            for aliasitem in alias}
    def _set_up_timedata(self):
        """Build the sorted unique time axis from the time columns."""
        # handling time
        if self.time != {}:
            self.timedata = unique(map(
                self.converttime,
                izip(*(self.data.ifiledata[self.usecols.index(tcol)]
                       for tcol in sorted(self.time.keys())))
                ))
            self.ntimes = len(self.timedata)
        else:
            # no time columns: a single dummy time step is used
            self.timedata = array([datetime.now()])
            self.ntimes = 1
    def _read_sorting(self, sort, **kwargs):
        """function called at initialization to read sorting data from txt
        file"""
        # read 1d sorting data. sortdict is a dictionary with the column as key
        # for dictionaries with, again the flag as key and the alias as value
        self.sortdict = {
            int(sortitem.split(',')[0]): {
                flag: aliasflag for flag, aliasflag in
                loadtxt(sortitem.split(',')[1], dtype=str, delimiter='\t',
                        **kwargs)
                }
            for sortitem in sort}
    def _read_redist(self, redistribute):
        """Read redistribution definitions into Redistributer objects."""
        # read redistribution data. redistict is a dictionary which contains
        # the column as keys for dictionaries with, againg the keys 'flag'
        # for the name of the flag and a key 'data' for a dictionary which
        # contains the aliasflag as key and the 2-dimensional fraction data as
        # value
        self.redistdata = [Redistributer(sortitem) for sortitem in
                           redistribute]
        self.redistcols = unique(
            [sortitem.col for sortitem in self.redistdata]).tolist()
        self.redistflags = [sortitem.flag for sortitem in self.redistdata]
    def _set_names(self):
        """Method to set up final names for the variables and initializes
        final data arrays"""
        # get original flags
        self.origuniqueflags = {
            col: [flag for flag in
                  unique(self.data.ifiledata[self.usecols.index(col)])
                  if flag not in self.redistflags]
            for col in self.flagcols}
        # set up unique flags including sorted flags
        self.uniqueflags = {
            col: [flag for flag in
                  unique(self.data.ifiledata[self.usecols.index(col)])
                  if flag not in self.redistflags]
            for col in self.flagcols}
        self.uniqueflags.update({
            col: unique([self.sortdict[col][flag]
                         for flag in self.sortdict[col]]).tolist()
            for col in self.sortdict})
        # set up final names
        namesfromdefault = list(chain(*(chain(
            data for col, data in self.uniqueflags.items() if col not in
            self.catcols))))
        namesfromcatcols = list(chain(*(chain(
            flag for flag in map(
                lambda x: '_'.join(k for k in x),
                product(self.uniqueflags[col1], self.uniqueflags[col2])))
            for col1, col2 in self.catcolpairs)))
        self.finalnames = namesfromdefault + namesfromcatcols
        self.data.finaldata = {var: zeros([self.ntimes] +
                                          list(shape(self.data.maskdata[0])))
                               for var in self.finalnames}
        # we don't use masked arrays because much slower in the loop
        mask = self.data.maskdata[0] == missval
        for value in self.data.finaldata.values():
            for i in xrange(self.ntimes):
                value[i, :][mask] = missval
    def _set_add(self):
        """Method called during initialization to set up how data shall be
        added"""
        # dictionary containing the Adder instances for defaultcols which
        # determine where to add the value in finaldata for the given flag
        self.defaultadddict = {
            col: {flag: [
                Adder(
                    target=self.data.finaldata[self.sortdict.get(
                        col, {flag: flag}).get(flag, flag)])
                ] for flag in self.sortdict.get(col, self.uniqueflags[col])}
            for col in self.defaultflagcols}
        # dictionary containing the Adder instances for catcols which
        # determine where to add the value in finaldata for the given flag
        catadddict = {
            (col1, col2): {
                flagpair: [
                    Adder(self.data.finaldata['_'.join(self.sortdict.get(
                        [col1, col2][flagpair.index(flag)], {flag: flag})[flag]
                        for flag in flagpair)])]
                for flagpair in product(self.origuniqueflags[col1],
                                        self.origuniqueflags[col2])}
            for col1, col2 in self.catcolpairs}
        # now handle redistributed data
        for sortitem in self.redistdata:
            if sortitem.col in self.defaultflagcols:
                self.defaultadddict[sortitem.col][sortitem.flag] = [
                    Adder(target=self.data.finaldata[aliasflag],
                          mulc=sortitem.data[aliasflag])
                    for aliasflag in sortitem.data]
            elif sortitem.col in self.catcols:
                # NOTE(review): ``sortitem.col in self.catcolpairs`` is a
                # boolean used as an index (0 or 1); since catcolpairs holds
                # pairs, not single columns, this always picks pair 0 --
                # confirm this is intended.
                catcol = tuple(self.catcolpairs[sortitem.col in
                                                self.catcolpairs])
                # NOTE(review): ``sortitems`` is computed but never used
                # below -- possibly dead code.
                sortitems = [
                    sortitem2 for sortitem2 in self.redistdata if sortitem2.col
                    in catcol and sortitem2 != sortitem]
                flags = []
                flags.insert(catcol.index(sortitem.col), [sortitem.flag])
                for col in catcol:
                    if col != sortitem.col and col not in self.sortdict.keys():
                        flags.insert(catcol.index(col), self.uniqueflags[col])
                    if col != sortitem.col and col in self.sortdict:
                        flags.insert(
                            catcol.index(col), unique(self.data.ifiledata[
                                self.usecols.index(col)]).tolist())
                try:
                    flags.remove([])
                except ValueError:
                    pass
                for flagpair in product(*flags):
                    catadddict[catcol][flagpair] = [0]*len(
                        sortitem.data.keys())
                    for i, replaceflag in izip(
                            xrange(len(sortitem.data.keys())),
                            sorted(sortitem.data.keys())):
                        newflagpair = list(flagpair)
                        for flag in newflagpair:
                            if flag == sortitem.flag:
                                newflagpair[newflagpair.index(flag)] = \
                                    replaceflag
                            elif catcol[flagpair.index(flag)] in self.sortdict:
                                newflagpair[newflagpair.index(flag)] = \
                                    self.sortdict[
                                        catcol[flagpair.index(flag)]][flag]
                        catadddict[catcol][flagpair][i] = Adder(
                            target=self.data.finaldata['_'.join(
                                flag for flag in newflagpair)],
                            mulc=sortitem.data[replaceflag])
        self.catadddict = catadddict
    def convert(self, verbose=False, maxrows=None):
        """Method to loop through the data and convert it"""
        # docstring is extended below
        dataslice = slice(0, maxrows)
        info = LoopInfo(total=len(self.data.ifiledata[0, dataslice]),
                        t0=datetime.now(), verbose=verbose)
        wrongvalues = 0
        wrongarea = 0
        for datatuple in izip(*self.data.ifiledata[:, dataslice]):
            info.info()
            # boolean grid mask of the cells this record belongs to
            fields = npall(
                # normal gridcols
                [self.data.maskdata[self.gridcols.index(col)] == datatuple[
                    self.usecols.index(col)].astype(
                        self.data.maskdata[self.gridcols.index(col)].dtype)
                 for col in self.gridcols if col not in self.aliasdict] +
                # alias cols
                [self.data.maskdata[self.gridcols.index(col)] ==
                 self.aliasdict[col][datatuple[
                     self.usecols.index(col)]] for col in self.aliasdict],
                axis=0)
            itime = self.timefunc([datatuple[self.usecols.index(col)]
                                   for col in sorted(self.time.keys())])
            for catcol in self.catadddict:
                for adderinstance in self.catadddict[catcol][tuple(datatuple[
                        self.usecols.index(col)] for col in catcol)]:
                    adderinstance.addfunc(
                        itime, fields, datatuple[
                            self.usecols.index(self.valcol)].astype(float))
            for col in self.defaultadddict:
                for adderinstance in self.defaultadddict[col][datatuple[
                        self.usecols.index(col)]]:
                    adderinstance.addfunc(
                        itime, fields,
                        datatuple[
                            self.usecols.index(self.valcol)].astype(float))
            # record values that could not be mapped to any grid cell
            if not npany(fields):
                wrongvalues += 1
                wrongarea += float(datatuple[self.usecols.index(self.valcol)])
        if verbose:
            print('\nNumber of wrong values: %i' % wrongvalues)
            print('Missed Area [ha]: %6.4f' % wrongarea)
            print('Missed Area: %1.3e %%' % (
                wrongarea/sum(self.data.ifiledata[
                    self.usecols.index(self.valcol)].astype(float))*100.))
    def output_nc(self, output, clobber=False, header=None, metadata={},
                  compression={}):
        """Method to create netCDF file out of final data
        """
        # docstring is extended below
        # set chunking parameter
        if os.path.exists(output):
            os.remove(output)
        if clobber is not False:
            if clobber == 'auto':
                # compute a balanced chunk shape from the data dimensions
                clobber = chunk_shape_3D(
                    [self.ntimes] + list(self.data.maskdata[0].shape))
            nco = Dataset(output, 'w', format='NETCDF4_CLASSIC',
                          clobber=True)
        else:
            nco = Dataset(output, 'w', format='NETCDF4_CLASSIC')
        if not isinstance(compression, dict):
            compression = {'zlib': compression}
        if header is not None:
            nco.setncatts(header)
        nco.createDimension('time', None)
        nco.createDimension('lon', len(self.data.lon))
        nco.createDimension('lat', len(self.data.lat))
        timeo = nco.createVariable('time', 'f8', ('time'))
        timeo.standard_name = 'time'
        secondsperday = float(60*60*24)
        # encode datetimes as fractional days in %Y%m%d.%f style
        mystrftime = lambda x: (
            float(x.strftime('%Y%m%d')) +
            timedelta(hours=x.hour, minutes=x.minute, seconds=x.second,
                      microseconds=x.microsecond).seconds/secondsperday)
        timeo[:] = map(mystrftime, self.timedata)
        timeo.units = 'day as %Y%m%d.%f'
        lono = nco.createVariable("lon", "f4", ("lon"))
        lono.units = "degrees_east"
        lono.standard_name = "longitude"
        lono[:] = self.data.lon
        lato = nco.createVariable("lat", "f4", ("lat"))
        lato.units = "degrees_north"
        lato.standard_name = "latitude"
        lato[:] = self.data.lat
        for var, value in self.data.finaldata.items():
            if clobber is not False:
                varno = nco.createVariable(
                    var, "f4", ("time", "lat", "lon"),
                    chunksizes=clobber, fill_value=missval, **compression
                    )
            else:
                varno = nco.createVariable(
                    var, "f4", ("time", "lat", "lon"),
                    fill_value=missval, **compression
                    )
            for attr, val in metadata.get(var, {}).items():
                setattr(varno, attr, val)
            varno.standard_name = var
            varno[:] = value
        nco.close()
    def gettimeindex(self, data):
        # index of the record's time in the sorted unique time axis
        return self.timedata.searchsorted(self.converttime(data))
    def dummytimeindex(self, data):
        # function which just returns the index 0
        return 0
    def converttime(self, data):
        # function to convert a data tuple into datetime instance
        return datetime.strptime(
            ','.join(data),
            ','.join([val for key, val in sorted(self.time.items())]))
    # ---- modify docstrings here ----
    __init__.__doc__ += '\nKeyword Arguments:\n - ' + '\n - '.join(
        ['%s: %s' % (key, val) for key, val in kwargdocs['init'].items()])
    convert.__doc__ += '\nKeyword Arguments:\n - ' + '\n - '.join(
        ['%s: %s' % (key, val) for key, val in kwargdocs['convert'].items()])
    output_nc.__doc__ += '\nKeyword Arguments:\n - ' + '\n - '.join(
        ['%s: %s' % (key, val) for key, val in kwargdocs['output'].items()])
# ---- automatic determination of chunking parameters -----
# functions taken from
# http://www.unidata.ucar.edu/staff/russ/public/chunk_shape_3D.py
# see also
"""
http://www.unidata.ucar.edu/blogs/developer/entry/chunking_data_choosing_shapes
"""
import math
import operator
def binlist(n, width=0):
    """Return list of bits that represent a non-negative integer.

    n -- non-negative integer
    width -- number of bits in returned zero-filled list (default 0)
    """
    # A list comprehension returns a real list under Python 3 as well,
    # where ``map`` would yield a one-shot iterator and break len-based
    # consumers downstream (e.g. ``numVals`` via ``perturbShape``).
    return [int(bit) for bit in bin(n)[2:].zfill(width)]
def numVals(shape):
    """Return number of values in chunk of specified shape, given by a list of
    dimension lengths.

    shape -- list of variable dimension sizes
    """
    # An explicit product avoids the Python-2-only bare ``reduce`` builtin
    # (moved to functools in Python 3) and naturally yields 1 for an empty
    # shape, so no special case is needed.
    total = 1
    for dim in shape:
        total *= dim
    return total
def perturbShape(shape, onbits):
    """Return shape perturbed by adding 1 to elements corresponding to 1 bits
    in onbits

    shape -- list of variable dimension sizes
    onbits -- non-negative integer less than 2**len(shape)
    """
    # Return a real list: under Python 3 ``map`` would return a one-shot
    # iterator, which breaks the ``len``-based ``numVals`` call on the
    # candidate shapes in ``chunk_shape_3D``.
    return [dim + bit
            for dim, bit in zip(shape, binlist(onbits, len(shape)))]
def chunk_shape_3D(varShape, valSize=4, chunkSize=4096):
    """
    Return a 'good shape' for a 3D variable, assuming balanced 1D, 2D access

    varShape  -- length 3 list of variable dimension sizes
    chunkSize -- maximum chunksize desired, in bytes (default 4096)
    valSize   -- size of each data value, in bytes (default 4)

    Returns integer chunk lengths of a chunk shape that provides
    balanced access of 1D subsets and 2D subsets of a netCDF or HDF5
    variable var with shape (T, X, Y), where the 1D subsets are of the
    form var[:,x,y] and the 2D slices are of the form var[t,:,:],
    typically 1D time series and 2D spatial slices.  'Good shape' for
    chunks means that the number of chunks accessed to read either
    kind of 1D or 2D subset is approximately equal, and the size of
    each chunk (uncompressed) is no more than chunkSize, which is
    often a disk block size.
    """
    rank = 3  # this is a special case of n-dimensional function chunk_shape
    # ideal number of values in a chunk
    chunkVals = chunkSize / float(valSize)
    # ideal number of chunks
    numChunks = varShape[0]*varShape[1]*varShape[2] / chunkVals
    axisChunks = numChunks ** 0.25  # ideal number of chunks along each 2D axis
    cFloor = []  # will be first estimate of good chunk shape
    # cFloor  = [varShape[0] // axisChunks**2, varShape[1] // axisChunks,
    #            varShape[2] // axisChunks]
    # except that each chunk shape dimension must be at least 1
    # chunkDim = max(1.0, varShape[0] // axisChunks**2)
    if varShape[0] / axisChunks**2 < 1.0:
        chunkDim = 1.0
        # compensate along the other axes for clamping this one to 1
        axisChunks = axisChunks / math.sqrt(varShape[0]/axisChunks**2)
    else:
        chunkDim = varShape[0] // axisChunks**2
    cFloor.append(chunkDim)
    # factor to increase other dims if some must be increased to 1.0
    prod = 1.0
    for i in range(1, rank):
        if varShape[i] / axisChunks < 1.0:
            prod *= axisChunks / varShape[i]
    for i in range(1, rank):
        if varShape[i] / axisChunks < 1.0:
            chunkDim = 1.0
        else:
            chunkDim = (prod*varShape[i]) // axisChunks
        cFloor.append(chunkDim)
    # cFloor is typically too small, (numVals(cFloor) < chunkSize)
    # Adding 1 to each shape dim results in chunks that are too large,
    # (numVals(cCeil) > chunkSize).  Want to just add 1 to some of the
    # axes to get as close as possible to chunkSize without exceeding
    # it.  Here we use brute force, compute numVals(cCand) for all
    # 2**rank candidates and return the one closest to chunkSize
    # without exceeding it.
    bestChunkSize = 0
    cBest = cFloor
    for i in range(8):
        # cCand = map(sum,zip(cFloor, binlist(i, rank)))
        cCand = perturbShape(cFloor, i)
        thisChunkSize = valSize * numVals(cCand)
        if bestChunkSize < thisChunkSize <= chunkSize:
            bestChunkSize = thisChunkSize
            cBest = list(cCand)  # make a copy of best candidate so far
    # NOTE(review): under Python 3 ``map`` returns an iterator here;
    # confirm callers (netCDF4 ``chunksizes``) accept an iterable or
    # wrap this in list().
    return map(int, cBest)
if __name__ == '__main__':
    # Script entry point; ``main`` is expected to be defined elsewhere in
    # this module (not visible in this section).
    main()
| Chilipp/Convert-Database-to-NetCDF | convert_db2nc.py | Python | gpl-2.0 | 38,805 | [
"NetCDF"
] | 73caad4c30fa636538d866fec1dbf3751796dea5e352def28f9e0ca9d0b73358 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_native
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.template.safe_eval import safe_eval
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Matches "<var> is [not] defined/undefined" style tests (including
# hostvars[...] subscripts) so failed templating can later be resolved
# against explicit defined/undefined checks.
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
# Matches a "lookup(" call inside a conditional string.
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
# A bare Python-style identifier that may be resolved directly from vars.
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:

    '''
    This is a mix-in class, to be used with Base to allow the object
    to be run conditionally when a condition is met or skipped.
    '''

    _when = FieldAttribute(isa='list', default=[])

    def __init__(self, loader=None):
        # when used directly, this class needs a loader, but we want to
        # make sure we don't trample on the existing one if this class
        # is used as a mix-in with a playbook base class
        if not hasattr(self, '_loader'):
            if loader is None:
                raise AnsibleError("a loader must be specified when using Conditional() directly")
            else:
                self._loader = loader
        super(Conditional, self).__init__()

    def _validate_when(self, attr, name, value):
        # coerce a scalar 'when' into a one-element list so later code can
        # always iterate over it
        if not isinstance(value, list):
            setattr(self, name, [ value ])

    def _get_attr_when(self):
        '''
        Override for the 'when' getattr fetcher, used from Base.
        '''
        when = self._attributes['when']
        if when is None:
            when = []
        if hasattr(self, '_get_parent_attribute'):
            when = self._get_parent_attribute('when', extend=True, prepend=True)
        return when

    def extract_defined_undefined(self, conditional):
        # Collect every "<var> is [not] defined/undefined" test contained
        # in the conditional string, in order of appearance.
        results = []

        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            cond = cond[m.end():]
            m = DEFINED_REGEX.search(cond)

        return results

    def evaluate_conditional(self, templar, all_vars):
        '''
        Loops through the conditionals set on this object, returning
        False if any of them evaluate as such.
        '''

        # since this is a mix-in, it may not have an underlying datastructure
        # associated with it, so we pull it out now in case we need it for
        # error reporting below
        ds = None
        if hasattr(self, '_ds'):
            ds = getattr(self, '_ds')

        try:
            # this allows for direct boolean assignments to conditionals "when: False"
            if isinstance(self.when, bool):
                return self.when

            for conditional in self.when:
                if not self._check_conditional(conditional, templar, all_vars):
                    return False
        except Exception as e:
            raise AnsibleError(
                "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
            )

        return True

    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        # pull the "bare" var out, which allows for nested conditionals
        # and things like:
        # - assert:
        #     that:
        #     - item
        #   with_items:
        #   - 1 == 1
        if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
            conditional = all_vars[conditional]

        if templar._clean_data(conditional) != conditional:
            display.warning('when statements should not include jinja2 '
                            'templating delimiters such as {{ }} or {%% %%}. '
                            'Found: %s' % conditional)

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                # walks the generated code's AST and rejects dunder access
                # (and forbidden syntax inside yields) when lookups are
                # disabled for an unsafe conditional
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                e = templar.environment.overlay()
                e.filters.update(templar._get_filters())
                e.tests.update(templar._get_tests())

                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')

                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % to_native(e))

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable(
                    "error while evaluating conditional (%s): %s" % (original, e)
                )
| Slezhuk/ansible | lib/ansible/playbook/conditional.py | Python | gpl-3.0 | 10,551 | [
"VisIt"
] | b6f5c5cd921ad35eaa4f6f4a7f33e8865345a6fe4dead82003cf5719cac97752 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2017-10-31
# @Filename: base_quantity.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-12 13:51:26
from __future__ import absolute_import, division, print_function
import warnings
import astropy.units as units
import numpy
import marvin.core.exceptions
from marvin.tools.spaxel import Spaxel
from marvin.utils.general import maskbit
from marvin.utils.general.general import _sort_dir
class BinInfo(object):
    """Provides information about the bin associated with this quantity."""

    def __init__(self, spaxel=None, parent=None, datamodel=None):
        self._spaxel = spaxel
        self._parent = parent
        self._datamodel = datamodel

    def __repr__(self):
        n_spaxels = numpy.sum(self.binid_mask)
        return '<BinInfo (binid={0.binid}, n_spaxels={1})>'.format(self, n_spaxels)

    @property
    def binid_map(self):
        """The binid Map associated with this quantity."""
        return self._parent.get_binid(self._datamodel)

    @property
    def binid(self):
        """The binid at the spaxel position of this quantity."""
        coords = (self._spaxel.y, self._spaxel.x)
        return int(self.binid_map[coords].value)

    @property
    def binid_mask(self):
        """A boolean mask selecting the spaxels that share this binid."""
        return self.binid_map.value == self.binid

    @property
    def is_binned(self):
        """Whether the parent object is binned."""
        return self._parent.is_binned()

    def get_bin_spaxels(self, lazy=True):
        """Returns a list of the spaxels associated with this bin.

        Parameters
        ----------
        lazy : bool
            If ``True``, the spaxels returned will be lazy loaded. Spaxels
            can be fully loaded by calling their `~.Spaxel.load` method.

        Returns
        -------
        spaxels : list
            A list of all the `.Spaxel` instances associated with this
            quantity binid.

        """

        if self.binid < 0:
            raise marvin.core.exceptions.MarvinError(
                'coordinates ({}, {}) do not correspond to a valid binid.'.format(self._spaxel.x,
                                                                                  self._spaxel.y))

        ref = self._spaxel
        return [Spaxel(x=jj, y=ii, plateifu=ref.plateifu,
                       release=ref.release, cube=ref._cube,
                       maps=ref._maps, modelcube=ref._modelcube,
                       bintype=ref.bintype, template=ref.template,
                       lazy=lazy)
                for ii, jj in zip(*numpy.where(self.binid_map.value == self.binid))]
class QuantityMixIn(object):
    """A MixIn that provides common functionalities to Quantity classes."""

    def __dir__(self):
        # Expose the sorted project attributes plus the underlying value.
        return_list = _sort_dir(self, self.__class__)
        return_list += ['value']
        return return_list

    def _init_bin(self, spaxel=None, datamodel=None, parent=None):
        # Attach a BinInfo helper describing the bin this quantity lives in.
        self.bin = BinInfo(spaxel=spaxel, datamodel=datamodel, parent=parent)

    @property
    def pixmask(self):
        """Maskbit instance for the pixmask flag.

        See :ref:`marvin-utils-maskbit` for documentation and
        `~marvin.utils.general.maskbit.Maskbit` for API reference.

        """

        assert self.pixmask_flag, 'pixmask flag not set'

        pixmask = maskbit.Maskbit(self.pixmask_flag)
        pixmask.mask = self.mask

        return pixmask

    @property
    def masked(self):
        """Return a masked array.

        If the `~QuantityMixIn.pixmask` is set, and the maskbit contains the
        ``DONOTUSE`` and ``NOCOV`` labels, the returned array will be masked
        for the values containing those bits. Otherwise, all values where the
        mask is greater than zero will be masked.

        """

        assert self.mask is not None, 'mask is not set'

        try:
            pixmask = self.pixmask
        except AssertionError:
            # no pixmask flag available: fall back to masking everything
            # with a non-zero mask value
            warnings.warn('pixmask not set. Applying full mask.',
                          marvin.core.exceptions.MarvinUserWarning)
            return numpy.ma.array(self.value, mask=(self.mask > 0))

        labels = pixmask.schema.label.tolist()
        if 'DONOTUSE' in labels and 'NOCOV' in labels:
            return numpy.ma.array(self.value,
                                  mask=self.pixmask.get_mask(['DONOTUSE', 'NOCOV']) > 0)
        elif 'DONOTUSE' in labels:
            return numpy.ma.array(self.value, mask=self.pixmask.get_mask('DONOTUSE') > 0)
        else:
            return numpy.ma.array(self.value, mask=(self.mask > 0))

    @property
    def error(self):
        """Compute the standard deviation of the measurement."""

        # a precomputed standard deviation takes precedence
        if hasattr(self, '_std') and self._std is not None:
            return self._std

        if self.ivar is None:
            return None

        # Use a scoped errstate so that ignoring divide-by-zero (ivar == 0
        # maps to an infinite error) does not leak into the global numpy
        # error configuration, as the previous ``numpy.seterr`` call did.
        with numpy.errstate(divide='ignore'):
            return numpy.sqrt(1. / self.ivar) * self.unit

    @property
    def snr(self):
        """Return the signal-to-noise ratio for each spaxel in the map."""

        return numpy.abs(self.value * numpy.sqrt(self.ivar))

    def descale(self):
        """Returns a copy of the object in which the scale is unity.

        Example:

            >>> dc.unit
            Unit("1e-17 erg / (Angstrom cm2 s spaxel)")
            >> dc[100, 15, 15]
            <DataCube 0.270078063011169 1e-17 erg / (Angstrom cm2 s spaxel)>
            >>> dc_descaled = dc.descale()
            >>> d_descaled.unit
            Unit("Angstrom cm2 s spaxel")
            >>> dc[100, 15, 15]
            <DataCube 2.70078063011169e-18 erg / (Angstrom cm2 s spaxel)>

        """

        if self.unit.scale == 1:
            return self

        value_descaled = self.value * self.unit.scale
        value_unit = units.CompositeUnit(1, self.unit.bases, self.unit.powers)
        if self.ivar is not None:
            # the inverse variance scales with the inverse square of the value
            ivar_descaled = self.ivar / (self.unit.scale ** 2)
        else:
            ivar_descaled = None

        return self.__class__(value_descaled, self.wavelength, unit=value_unit,
                              ivar=ivar_descaled, mask=self.mask)
| albireox/marvin | python/marvin/tools/quantities/base_quantity.py | Python | bsd-3-clause | 6,492 | [
"Brian"
] | 193a0dd6c84c3f3779358727686821a72f0424511957658773150197e29287f7 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``neuroelectro`` app.

    Adds the ``IonChannelSyn``, ``IonChannel`` and ``SuperProtein`` models,
    together with the M2M join tables backing ``IonChannel.synonyms``,
    ``IonChannel.articles`` and ``SuperProtein.synonyms``.
    """

    def forwards(self, orm):
        """Apply the migration: create the new tables and their join tables."""
        # Adding model 'IonChannelSyn'
        db.create_table('neuroelectro_ionchannelsyn', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('term', self.gf('django.db.models.fields.CharField')(max_length=500)),
        ))
        db.send_create_signal('neuroelectro', ['IonChannelSyn'])

        # Adding model 'IonChannel'
        db.create_table('neuroelectro_ionchannel', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nlexID', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('gene', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
        ))
        db.send_create_signal('neuroelectro', ['IonChannel'])

        # Adding M2M table for field synonyms on 'IonChannel'
        db.create_table('neuroelectro_ionchannel_synonyms', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('ionchannel', models.ForeignKey(orm['neuroelectro.ionchannel'], null=False)),
            ('ionchannelsyn', models.ForeignKey(orm['neuroelectro.ionchannelsyn'], null=False))
        ))
        db.create_unique('neuroelectro_ionchannel_synonyms', ['ionchannel_id', 'ionchannelsyn_id'])

        # Adding M2M table for field articles on 'IonChannel'
        db.create_table('neuroelectro_ionchannel_articles', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('ionchannel', models.ForeignKey(orm['neuroelectro.ionchannel'], null=False)),
            ('article', models.ForeignKey(orm['neuroelectro.article'], null=False))
        ))
        db.create_unique('neuroelectro_ionchannel_articles', ['ionchannel_id', 'article_id'])

        # Adding model 'SuperProtein'
        db.create_table('neuroelectro_superprotein', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=400)),
            ('is_channel', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('neuroelectro', ['SuperProtein'])

        # Adding M2M table for field synonyms on 'SuperProtein'
        # NOTE: the join table reuses ProteinSyn (not a dedicated syn model).
        db.create_table('neuroelectro_superprotein_synonyms', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('superprotein', models.ForeignKey(orm['neuroelectro.superprotein'], null=False)),
            ('proteinsyn', models.ForeignKey(orm['neuroelectro.proteinsyn'], null=False))
        ))
        db.create_unique('neuroelectro_superprotein_synonyms', ['superprotein_id', 'proteinsyn_id'])

    def backwards(self, orm):
        """Revert the migration: drop every table created in :meth:`forwards`."""
        # Deleting model 'IonChannelSyn'
        db.delete_table('neuroelectro_ionchannelsyn')

        # Deleting model 'IonChannel'
        db.delete_table('neuroelectro_ionchannel')

        # Removing M2M table for field synonyms on 'IonChannel'
        db.delete_table('neuroelectro_ionchannel_synonyms')

        # Removing M2M table for field articles on 'IonChannel'
        db.delete_table('neuroelectro_ionchannel_articles')

        # Deleting model 'SuperProtein'
        db.delete_table('neuroelectro_superprotein')

        # Removing M2M table for field synonyms on 'SuperProtein'
        db.delete_table('neuroelectro_superprotein_synonyms')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history. Auto-generated -- do not edit by hand.
    models = {
        'neuroelectro.article': {
            'Meta': {'object_name': 'Article'},
            'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
            'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
            'pmid': ('django.db.models.fields.IntegerField', [], {}),
            'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
            'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'neuroelectro.articlefulltext': {
            'Meta': {'object_name': 'ArticleFullText'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
            'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'neuroelectro.brainregion': {
            'Meta': {'object_name': 'BrainRegion'},
            'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'neuroelectro.datatable': {
            'Meta': {'object_name': 'DataTable'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'table': ('django.db.models.fields.CharField', [], {'max_length': '10000'}),
            'table_headers': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
        },
        'neuroelectro.insituexpt': {
            'Meta': {'object_name': 'InSituExpt'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
            'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
            'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'neuroelectro.ionchannel': {
            'Meta': {'object_name': 'IonChannel'},
            'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Article']", 'symmetrical': 'False'}),
            'gene': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'nlexID': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.IonChannelSyn']", 'null': 'True', 'symmetrical': 'False'})
        },
        'neuroelectro.ionchannelsyn': {
            'Meta': {'object_name': 'IonChannelSyn'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'neuroelectro.journal': {
            'Meta': {'object_name': 'Journal'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        'neuroelectro.meshterm': {
            'Meta': {'object_name': 'MeshTerm'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        'neuroelectro.neuron': {
            'Meta': {'object_name': 'Neuron'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
            'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
        },
        'neuroelectro.neuronsyn': {
            'Meta': {'object_name': 'NeuronSyn'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'neuroelectro.protein': {
            'Meta': {'object_name': 'Protein'},
            'allenid': ('django.db.models.fields.IntegerField', [], {}),
            'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
            'entrezid': ('django.db.models.fields.IntegerField', [], {}),
            'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
            'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
        },
        'neuroelectro.proteinsyn': {
            'Meta': {'object_name': 'ProteinSyn'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'neuroelectro.regionexpr': {
            'Meta': {'object_name': 'RegionExpr'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
            'val': ('django.db.models.fields.FloatField', [], {})
        },
        'neuroelectro.species': {
            'Meta': {'object_name': 'Species'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'neuroelectro.substance': {
            'Meta': {'object_name': 'Substance'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        'neuroelectro.superprotein': {
            'Meta': {'object_name': 'SuperProtein'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
        }
    }

    complete_apps = ['neuroelectro']
| neuroelectro/neuroelectro_org | neuroelectro/south_migrations/0008_auto__add_ionchannelsyn__add_ionchannel__add_superprotein.py | Python | gpl-2.0 | 12,704 | [
"NEURON"
] | 7adc9a616fc230b712b1cd9873a13997d4171f01adab0ba839825cfa0658536e |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various utilities to be used by other modules."""
import json
import logging
import operator
import os
import random
import socket
import subprocess
from collections import namedtuple
from functools import reduce
from pkg_resources import get_distribution
from time import sleep, time
import docker
from .config import defaults
# Module-level logger for this utilities module.
logger = logging.getLogger(__name__)

# Docker client configured from the environment (DOCKER_HOST, etc.), built at
# import time. NOTE(review): this presumably requires a usable Docker
# environment just to import the module -- verify for callers that only need
# the pure helpers below.
client = docker.from_env()
def nested_get(dict_, keys):
    """Utility function that returns the value of a sequence of nested keys.

    Example:
        >>> details = {'name': {'first': {'english': 'Dima'}}}
        >>> nested_get(details, ['name', 'first', 'english'])
        'Dima'

    Args:
        dict_ (:obj:`dict`): Dictionary to access.
        keys (:obj:`list`): A list of keys to access in a nested manner.

    Returns:
        The value.
    """
    # Walk down one level per key; an empty key list yields dict_ itself.
    current = dict_
    for key in keys:
        current = current[key]
    return current
# The `#:` constructs at the end of assignments are part of Sphinx's autodoc functionality.
DEFAULT_TIME_BETWEEN_CHECKS = 1  #: Default seconds to sleep between condition checks.
DEFAULT_TIMEOUT = 60  #: Default seconds to wait before timing out.


def wait_for_condition(condition, condition_args=None, condition_kwargs=None,
                       time_between_checks=DEFAULT_TIME_BETWEEN_CHECKS, timeout=DEFAULT_TIMEOUT,
                       time_to_success=0, success=None, failure=None):
    """Wait until a condition is satisfied (or timeout).

    Args:
        condition: Callable to evaluate.
        condition_args (optional): A list of args to pass to the
            ``condition``. Default: ``None``
        condition_kwargs (optional): A dictionary of kwargs to pass to the
            ``condition``. Default: ``None``
        time_between_checks (:obj:`int`, optional): Seconds between condition checks.
            Default: :py:const:`DEFAULT_TIME_BETWEEN_CHECKS`
        timeout (:obj:`int`, optional): Seconds to wait before timing out.
            Default: :py:const:`DEFAULT_TIMEOUT`
        time_to_success (:obj:`int`, optional): Seconds for the condition to hold true
            before it is considered satisfied. Default: ``0``
        success (optional): Callable to invoke when ``condition`` succeeds. A ``time``
            variable will be passed as an argument, so can be used. Default: ``None``
        failure (optional): Callable to invoke when timeout occurs. ``timeout`` will
            be passed as an argument. Default: ``None``

    Raises:
        :py:obj:`TimeoutError`: When the timeout elapses and no ``failure``
            callable was provided. (Previously a ``None`` ``failure`` was
            invoked unconditionally, crashing with ``TypeError`` instead of
            raising the documented ``TimeoutError``.)
    """
    start_time = time()
    stop_time = start_time + timeout
    success_start_time = None
    while time() < stop_time:
        outcome = condition(*condition_args or [], **condition_kwargs or {})
        if outcome:
            # Track how long the condition has been *continuously* true.
            success_start_time = success_start_time or time()
            if time() >= success_start_time + time_to_success:
                if success is not None:
                    success(time='{:.3f}'.format(time() - start_time))
                return
        else:
            # Condition flapped; restart the time_to_success clock.
            success_start_time = None
        sleep(time_between_checks)

    # Timed out: delegate to the failure callback when given, otherwise honor
    # the documented contract and raise.
    if failure is not None:
        failure(timeout=timeout)
    else:
        raise TimeoutError('Timed out after {} seconds waiting '
                           'for condition.'.format(timeout))
def join_url_parts(*parts):
    """
    Join a URL from a list of parts. See http://stackoverflow.com/questions/24814657 for
    examples of why urllib.parse.urljoin is insufficient for what we want to do.
    """
    # Trim slashes from both ends of every part, then rejoin with single '/'.
    trimmed_parts = [part.strip('/') for part in parts]
    return '/'.join(trimmed_parts)
def version_tuple(version):
    """
    Convert a version string or tuple to a tuple.
    Will return (major, minor, release) kind of format.
    """
    # Tuples pass through untouched; strings are split on dots. Any other
    # type falls off the end and returns None, matching existing behavior.
    if isinstance(version, tuple):
        return version
    if isinstance(version, str):
        return tuple(int(piece) for piece in version.split('.'))
def version_str(version):
    """
    Convert a version tuple or string to a string.
    Will return major.minor.release kind of format.
    """
    # Strings pass through untouched; tuples are dot-joined. Any other type
    # falls off the end and returns None, matching existing behavior.
    if isinstance(version, tuple):
        return '.'.join(str(int(part)) for part in version)
    if isinstance(version, str):
        return version
def get_clusterdock_label(cluster_name=None):
    """
    Generate a clusterdock meta data label in json format. Meta data such as: clusterdock
    package name, version, location of clusterdock install, etc.

    Args:
        cluster_name (:obj:`str`, optional): Cluster name to attach to meta data label.
            Default: ``None``

    Returns:
        (:obj:`str`): JSON-encoded clusterdock meta data label, or an empty
        string when the metadata could not be determined.
    """
    try:
        # Import locally so a missing/broken setuptools only disables the
        # label instead of breaking module import.
        from pkg_resources import get_distribution
        package = get_distribution('clusterdock')
        label_info = {'name': package.project_name, 'version': package.version,
                      'location': package.location}
        if cluster_name:
            label_info['cluster_name'] = cluster_name
        return json.dumps(label_info)
    except Exception:
        # Best-effort only. A narrow ``except Exception`` replaces the
        # original bare ``except``, which also swallowed SystemExit and
        # KeyboardInterrupt.
        return ''
# Adjectives used to build human-friendly random cluster names.
# Fixed a data typo present in the original list: 'homogeneous]' -> 'homogeneous'.
ADJECTIVES = ['accurate', 'actual', 'angular', 'associative', 'astronomical', 'asymmetrical',
              'available', 'beautiful', 'biggest', 'bimodal', 'biochemical', 'biological',
              'bright', 'celestial', 'closest', 'colorful', 'comparable', 'computational',
              'consistent', 'conspicuous', 'continuous', 'conventional', 'coolest', 'cosmic',
              'cosmological', 'critical', 'crucial', 'cubic', 'deeper', 'different',
              'difficult', 'distant', 'dynamical', 'early', 'easiest', 'efficient',
              'electromagnetic', 'empirical', 'evolutionary', 'faster', 'favorable', 'fewer',
              'fissile', 'fissionable', 'functional', 'galactic', 'gaseous', 'gaussian',
              'gravitational', 'greater', 'gregarious', 'hard', 'heaviest', 'hierarchical',
              'highest', 'historical', 'homogeneous', 'hot', 'impervious', 'important',
              'intelligent', 'intense', 'intergalactic', 'internal', 'interstellar', 'intrinsic',
              'invisible', 'kinetic', 'largest', 'linear', 'magnetic', 'mechanical',
              'molecular', 'morphological', 'naive', 'nearest', 'nuclear', 'obvious',
              'oldest', 'optical', 'orbital', 'outer', 'outward', 'perceptible',
              'photographic', 'photometric', 'physical', 'planetary', 'precise', 'proper',
              'random', 'reliable', 'richest', 'robust', 'rotational', 'scientific',
              'shortest', 'significant', 'similar', 'skeletal', 'smallest', 'solar',
              'southern', 'spectral', 'spectroscopic', 'spherical', 'strong', 'subsequent',
              'successful', 'sufficient', 'systematic', 'terrestrial', 'thematic', 'tidal',
              'tighter', 'typical', 'uncertain', 'uncollected', 'unformed', 'unlikely',
              'unrelated', 'unresolved', 'unstable', 'unusual',
              'useful', 'violent', 'visible', 'visual', 'weak']

# Astro cluster names
NAMES = ['antlia', 'bullet', 'carolines_rose', 'centaurus', 'chandelier', 'coathanger',
         'coma', 'double', 'el_gordo', 'fornax', 'globular', 'hyades', 'hydra',
         'laniakea_super', 'm22', 'm35', 'mayall2', 'musket_ball', 'ngc752', 'norma',
         'omicron_velorum', 'pandora', 'phoenix', 'pleiades', 'praesepe', 'ptolemy', 'pyxis',
         'reticulum', 'beehive', 'hercules', 'wild_duck', 'virgo']


def generate_cluster_name():
    """Generate a random ``<adjective>_<astro cluster>`` cluster name."""
    adjective = random.choice(ADJECTIVES)
    cluster = random.choice(NAMES)
    return '{}_{}'.format(adjective, cluster)
def print_topology_meta(topology_name, quiet=False):
    """
    Given a topology name, relative to current directory, log its Git meta info
    (short commit hash). Failures (no git, not a repository, ...) are ignored.

    Args:
        topology_name (:obj:`str`): Topology directory name.
        quiet (:obj:`bool`, optional): Skip logging entirely. Default: ``False``
    """
    if quiet:
        return
    git_dir = os.path.join(os.path.realpath(topology_name), '.git')
    try:
        # Pass an argument list (shell=False) so topology paths containing
        # spaces or shell metacharacters cannot alter the command.
        out = subprocess.check_output(['git', '--git-dir', git_dir,
                                       'rev-parse', '--short', 'HEAD'],
                                      stderr=subprocess.STDOUT).strip().decode()
    except Exception:
        # Best effort only, matching the original behavior; narrowed from a
        # bare ``except`` so SystemExit/KeyboardInterrupt still propagate.
        return
    logger.info('%s has Git hash %s', topology_name, out)
def get_containers(clusterdock=False):
    """
    Get Docker containers.

    Args:
        clusterdock (:obj:`bool`, optional): Only return containers carrying a
            clusterdock label. Default: ``False``

    Returns:
        (:obj:`list`): List of ``Container`` namedtuples
        (``cluster_name``, ``container``).
    """
    Container = namedtuple('Container', ['cluster_name', 'container'])
    label_key = defaults['DEFAULT_DOCKER_LABEL_KEY']
    cluster_containers = []
    # Iterate over *all* containers (including stopped ones) directly. The
    # previous guard on ``client.containers.list()`` -- which lists only
    # *running* containers -- incorrectly returned an empty list whenever
    # every container happened to be stopped, despite ``all=True`` here.
    for container in client.containers.list(all=True):
        if not clusterdock:
            cluster_containers.append(Container(None, container))
        else:
            labels = nested_get(container.attrs, ['Config', 'Labels'])
            if label_key in labels:
                label = json.loads(labels[label_key])
                cluster_containers.append(Container(label['cluster_name'], container))
    return cluster_containers
def max_len_list_dict_item(list_dict, attr):
    """
    Returns max length of a given attribute from a list of dict items.
    Returns 0 for an empty list.
    """
    return max((len(item[attr]) for item in list_dict), default=0)
def get_container(hostname):
    """
    Get the first *running* Docker container whose configured hostname matches
    ``hostname``, or ``None`` when no such container exists.
    """
    matches = (container for container in client.containers.list()
               if nested_get(container.attrs, ['Config', 'Hostname']) == hostname)
    return next(matches, None)
| clusterdock/framework | clusterdock/utils.py | Python | apache-2.0 | 9,738 | [
"Gaussian"
] | ed89850b673f85805ae2adcd8dd5ac2157dc5f5ba4cc51963a88061e1685c460 |
''' -- imports from installed packages -- '''
import json
import datetime
''' -- imports from django -- '''
from django.shortcuts import render_to_response, render
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from django.template import RequestContext
from django.template import Context
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.template.loader import get_template
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
''' -- imports from django_mongokit -- '''
''' -- imports from gstudio -- '''
from gnowsys_ndf.ndf.models import GSystemType, GSystem,Node
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.methods import get_forum_repl_type, forum_notification_status
from gnowsys_ndf.ndf.views.methods import set_all_urls,check_delete,get_execution_time
from gnowsys_ndf.ndf.views.methods import get_group_name_id, create_grelation
from gnowsys_ndf.ndf.views.notify import set_notif_val,get_userobject
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.templatetags.ndf_tags import get_forum_twists,get_all_replies
from gnowsys_ndf.settings import GAPPS
from gnowsys_ndf.ndf.org2any import org2html
import StringIO
import sys
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
# ##########################################################################
# Module-level lookups of the node types these views rely on.
# NOTE(review): evaluated at import time, so the GSystemType/AttributeType
# documents (and at least one Site row) must already exist when this module
# is first imported -- verify against deployment/bootstrap order.
forum_gst = node_collection.one({ '_type': 'GSystemType', 'name': u"Forum" })
reply_gst = node_collection.one({ '_type':'GSystemType' , 'name': u'Reply' })
twist_gst = node_collection.one({ '_type':'GSystemType', 'name': u'Twist' })
start_time = node_collection.one({'$and':[{'_type':'AttributeType'},{'name':'start_time'}]})
end_time = node_collection.one({'$and':[{'_type':'AttributeType'},{'name':'end_time'}]})
sitename = Site.objects.all()[0].name.__str__()  # display name of the first configured site
app = forum_gst  # presumably this module's GAPP node -- confirm template usage
@get_execution_time
def forum(request, group_id, node_id=None):
    '''
    Method to list all the available forums and to return forum-search-query result.

    NOTE(review): the incoming ``node_id`` argument is unconditionally
    overwritten with the Forum GSystemType's id below, so callers cannot
    select a different node through it -- confirm whether that is intended.
    '''
    # print "\n\n\n inside forum"
    # getting group id and group name
    group_name, group_id = get_group_name_id(group_id)
    # getting Forum GSystem's ObjectId
    node_id = str(forum_gst._id)
    if request.method == "POST":
        # Forum search view: match the query string (case-insensitively)
        # against forum names and tags within this group, hiding HIDDEN nodes.
        title = forum_gst.name
        search_field = request.POST['search_field']
        existing_forums = node_collection.find({'member_of': {'$all': [ObjectId(forum_gst._id)]},
                                                '$or': [{'name': {'$regex': search_field, '$options': 'i'}},
                                                        {'tags': {'$regex':search_field, '$options': 'i'}}],
                                                'group_set': {'$all': [ObjectId(group_id)]},
                                                'status':{'$nin':['HIDDEN']}
                                                }).sort('last_update', -1)
        return render_to_response("ndf/forum.html",
                                  {'title': title,
                                   'searching': True, 'query': search_field,
                                   'existing_forums': existing_forums, 'groupid':group_id, 'group_id':group_id
                                  },
                                  context_instance=RequestContext(request)
                                  )
    elif forum_gst._id == ObjectId(node_id):
        # Forum list view. This branch is always taken for GET requests,
        # since node_id was just set to the Forum GSystemType id above.
        existing_forums = node_collection.find({
            'member_of': {'$all': [ObjectId(node_id)]},
            'group_set': {'$all': [ObjectId(group_id)]},
            'status':{'$nin':['HIDDEN']}
        }).sort('last_update', -1)
        forum_detail_list = []
        # The triple-quoted string below is dead legacy code kept verbatim.
        '''
        for each in existing_forums:
            temp_forum = {}
            temp_forum['name'] = each.name
            temp_forum['created_at'] = each.created_at
            temp_forum['created_by'] = each.created_by
            temp_forum['tags'] = each.tags
            temp_forum['member_of_names_list'] = each.member_of_names_list
            temp_forum['user_details_dict'] = each.user_details_dict
            temp_forum['html_content'] = each.html_content
            temp_forum['contributors'] = each.contributors
            temp_forum['id'] = each._id
            temp_forum['threads'] = node_collection.find({
                                    '$and':[
                                        {'_type': 'GSystem'},
                                        {'prior_node': ObjectId(each._id)}
                                    ],
                                    'status': {'$nin': ['HIDDEN']}
                                }).count()
            forum_detail_list.append(temp_forum)
        print "\n\n\n forum detail list",forum_detail_list'''
        variables = RequestContext(request, {'existing_forums':existing_forums ,'groupid': group_id, 'group_id': group_id})
        return render_to_response("ndf/forum.html",variables)
@login_required
@get_execution_time
def create_forum(request, group_id):
    '''
    Method to create forum and Retrieve all the forums.

    POST: creates a new Forum GSystem in the given group, notifies the
    group's authors (honoring their notification preference) and redirects
    to the new forum's detail page.
    GET: renders the creation form together with existing forum names for
    autocomplete.
    '''
    # getting group id and group name
    group_name, group_id = get_group_name_id(group_id)
    # getting all the values from submitted form
    if request.method == "POST":
        colg = node_collection.one({'_id':ObjectId(group_id)})  # getting group ObjectId
        colf = node_collection.collection.GSystem()  # creating new/empty GSystem object
        name = unicode(request.POST.get('forum_name',"")).strip()  # forum name
        colf.name = name
        content_org = request.POST.get('content_org',"")  # forum content
        if content_org:
            colf.content_org = unicode(content_org)
        usrname = request.user.username
        # NOTE(review): ``filename`` is computed but never used below.
        filename = slugify(name) + "-" + usrname + "-"
        colf.content = content_org
        usrid = int(request.user.id)
        usrname = unicode(request.user.username)
        colf.created_by=usrid
        colf.modified_by = usrid
        if usrid not in colf.contributors:
            colf.contributors.append(usrid)
        colf.group_set.append(colg._id)
        # appending user group's ObjectId in group_set
        user_group_obj = node_collection.one({'$and':[{'_type':u'Group'},{'name':usrname}]})
        if user_group_obj:
            if user_group_obj._id not in colf.group_set:
                colf.group_set.append(user_group_obj._id)
        colf.member_of.append(forum_gst._id)
        ################# ADDED 14th July.Its done!
        colf.access_policy = u"PUBLIC"
        colf.url = set_all_urls(colf.member_of)
        ### currently timed forum feature is not in use ###
        # sdate=request.POST.get('sdate',"")
        # shrs= request.POST.get('shrs',"")
        # smts= request.POST.get('smts',"")
        # edate= request.POST.get('edate',"")
        # ehrs= request.POST.get('ehrs',"")
        # emts=request.POST.get('emts',"")
        # start_dt={}
        # end_dt={}
        # if not shrs:
        #     shrs=0
        # if not smts:
        #     smts=0
        # if sdate:
        #     sdate1=sdate.split("/")
        #     st_date = datetime.datetime(int(sdate1[2]),int(sdate1[0]),int(sdate1[1]),int(shrs),int(smts))
        #     start_dt[start_time.name]=st_date
        # if not ehrs:
        #     ehrs=0
        # if not emts:
        #     emts=0
        # if edate:
        #     edate1=edate.split("/")
        #     en_date= datetime.datetime(int(edate1[2]),int(edate1[0]),int(edate1[1]),int(ehrs),int(emts))
        #     end_dt[end_time.name]=en_date
        # colf.attribute_set.append(start_dt)
        # colf.attribute_set.append(end_dt)
        colf.save(groupid=group_id)
        '''Code to send notification to all members of the group except those whose notification preference is turned OFF'''
        link="http://"+sitename+"/"+str(colg._id)+"/forum/"+str(colf._id)
        for each in colg.author_set:
            # Skip author ids with no matching Django user.
            bx=User.objects.filter(id=each)
            if bx:
                bx=User.objects.get(id=each)
            else:
                continue
            activity="Added forum"
            msg=usrname+" has added a forum in the group -'"+colg.name+"'\n"+"Please visit "+link+" to see the forum."
            if bx:
                auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
                # Honor the per-user forum notification preference when both
                # the group and the Author node are resolvable.
                if colg._id and auth:
                    no_check=forum_notification_status(colg._id,auth._id)
                else:
                    no_check=True
                if no_check:
                    ret = set_notif_val(request,colg._id,msg,activity,bx)
        # returning response to ndf/forumdetails.html
        return HttpResponseRedirect(reverse('show', kwargs={'group_id':group_id,'forum_id': colf._id }))
        # variables=RequestContext(request,{'forum':colf})
        # return render_to_response("ndf/forumdetails.html",variables)
    # getting all the GSystem of forum to provide autocomplete/intellisence of forum names
    available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(forum_gst._id),'group_set': ObjectId(group_id) })
    nodes_list = []
    for each in available_nodes:
        nodes_list.append(str((each.name).strip().lower()))
    return render_to_response("ndf/create_forum.html",{'group_id':group_id,'groupid':group_id, 'nodes_list': nodes_list},RequestContext(request))
@login_required
@get_execution_time
def edit_forum(request,group_id,forum_id):
    '''
    Method to edit an existing forum.

    POST: updates the forum GSystem's name/content, records the editor as a
    contributor, notifies the group's authors (honoring their notification
    preference) and redirects to the forum's detail page.
    GET: renders the edit form, with existing forum names for autocomplete.
    '''
    forum = node_collection.one({ '_id': ObjectId(forum_id) })
    # # method to convert group_id to ObjectId if it is groupname
    # ins_objectid = ObjectId()
    # if ins_objectid.is_valid(group_id) is False :
    #     group_ins = node_collection.find_one({'_type': "Group","name": group_id})
    #     auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
    #     if group_ins:
    #         group_id = str(group_ins._id)
    #     else :
    #         auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
    #         if auth :
    #             group_id = str(auth._id)
    #     else :
    #         pass
    group_name, group_id = get_group_name_id(group_id)
    # getting all the values from submitted form
    if request.method == "POST":
        colg = node_collection.one({'_id':ObjectId(group_id)})  # getting group ObjectId
        colf = node_collection.one({'_id':ObjectId(forum_id)})  # the forum being edited
        name = unicode(request.POST.get('forum_name',"")).strip()  # forum name
        colf.name = name
        content_org = request.POST.get('content_org',"")  # forum content
        if content_org:
            colf.content_org = unicode(content_org)
        usrname = request.user.username
        # NOTE(review): ``filename`` is computed but never used below.
        filename = slugify(name) + "-" + usrname + "-"
        colf.content = content_org
        usrid = int(request.user.id)
        usrname = unicode(request.user.username)
        colf.modified_by = usrid
        if usrid not in colf.contributors:
            colf.contributors.append(usrid)
        ################# ADDED 14th July.Its done!
        colf.access_policy = u"PUBLIC"
        colf.url = set_all_urls(colf.member_of)
        ### currently timed forum feature is not in use ###
        # sdate=request.POST.get('sdate',"")
        # shrs= request.POST.get('shrs',"")
        # smts= request.POST.get('smts',"")
        # edate= request.POST.get('edate',"")
        # ehrs= request.POST.get('ehrs',"")
        # emts=request.POST.get('emts',"")
        # start_dt={}
        # end_dt={}
        # if not shrs:
        #     shrs=0
        # if not smts:
        #     smts=0
        # if sdate:
        #     sdate1=sdate.split("/")
        #     st_date = datetime.datetime(int(sdate1[2]),int(sdate1[0]),int(sdate1[1]),int(shrs),int(smts))
        #     start_dt[start_time.name]=st_date
        # if not ehrs:
        #     ehrs=0
        # if not emts:
        #     emts=0
        # if edate:
        #     edate1=edate.split("/")
        #     en_date= datetime.datetime(int(edate1[2]),int(edate1[0]),int(edate1[1]),int(ehrs),int(emts))
        #     end_dt[end_time.name]=en_date
        # colf.attribute_set.append(start_dt)
        # colf.attribute_set.append(end_dt)
        colf.save(groupid=group_id)
        '''Code to send notification to all members of the group except those whose notification preference is turned OFF'''
        link="http://"+sitename+"/"+str(colg._id)+"/forum/"+str(colf._id)
        for each in colg.author_set:
            # NOTE(review): unlike create_forum, a missing Django user here
            # raises rather than being skipped -- confirm author_set is clean.
            bx=User.objects.get(id=each)
            activity="Edited forum"
            msg=usrname+" has edited forum -" +colf.name+" in the group -'"+colg.name+"'\n"+"Please visit "+link+" to see the forum."
            if bx:
                auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
                # Honor the per-user forum notification preference when both
                # the group and the Author node are resolvable.
                if colg._id and auth:
                    no_check=forum_notification_status(colg._id,auth._id)
                else:
                    no_check=True
                if no_check:
                    ret = set_notif_val(request,colg._id,msg,activity,bx)
        # returning response to ndf/forumdetails.html
        return HttpResponseRedirect(reverse('show', kwargs={'group_id':group_id,'forum_id': colf._id }))
        # variables=RequestContext(request,{'forum':colf})
        # return render_to_response("ndf/forumdetails.html",variables)
    # getting all the GSystem of forum to provide autocomplete/intellisence of forum names
    available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(forum_gst._id),'group_set': ObjectId(group_id) })
    nodes_list = []
    for each in available_nodes:
        nodes_list.append(str((each.name).strip().lower()))
    return render_to_response("ndf/edit_forum.html",{'group_id':group_id,'groupid':group_id, 'nodes_list': nodes_list,'forum':forum},RequestContext(request))
@get_execution_time
def display_forum(request,group_id,forum_id):
    """Render the details page for a single forum.

    Falls back to the forum listing view when ``forum_id`` refers to the
    Forum GSystemType instead of a forum instance.
    """
    hide_create_thread_btn = True
    other_forums_list = None
    # Sibling forums of this group, offered as a jump list in the template.
    other_forums = node_collection.find({'member_of': forum_gst._id,'group_set': ObjectId(group_id),
                                         '_id':{'$nin':[ObjectId(forum_id)]}})
    if other_forums.count():
        other_forums_list = [[str(d._id), d.name] for d in other_forums]

    # BUG FIX: the node fetched here was previously bound to a local named
    # ``forum``, shadowing the module-level ``forum`` view function; the
    # fallback ``return forum(request, ...)`` below would then have called a
    # node object and crashed with a TypeError. The local is now
    # ``forum_node``; the template context key 'forum' is unchanged.
    forum_node = node_collection.one({'_id': ObjectId(forum_id)})
    usrname = User.objects.get(id=forum_node.created_by).username

    # Accept either an ObjectId string or a group name for group_id.
    try:
        group_id = ObjectId(group_id)
    except Exception:
        group_name, group_id = get_group_name_id(group_id)

    forum_object = node_collection.one({'_id': ObjectId(forum_id)})
    if forum_object._type == "GSystemType":
        # A GSystemType id was passed: show the forum listing instead.
        return forum(request, group_id, forum_id)

    th_all = get_forum_twists(forum_node)
    if th_all:
        th_count = len(list(th_all))
    else:
        th_count = 0

    variables = RequestContext(request,{
        'forum': forum_node,
        'hide_create_thread_btn': hide_create_thread_btn,
        'groupid': group_id, 'group_id': group_id,
        'forum_created_by': usrname,
        'other_forums': other_forums_list,
        'thread_count': th_count,
    })
    return render_to_response("ndf/forumdetails.html",variables)
@get_execution_time
def display_thread(request, group_id, thread_id, forum_id=None):
    """Render "ndf/thread_details.html" for one thread and its replies.

    ``group_id`` may be an ObjectId string or a group name.  ``forum_id``
    is accepted for URL compatibility but is unused here: the owning
    forum is re-derived from ``thread.prior_node`` below.
    """
    # (legacy group-name resolution removed; superseded by the
    # try/except + get_group_name_id() block below)
    try:
        group_id = ObjectId(group_id)
    except:
        # Not a valid ObjectId -- treat it as a group *name*.
        group_name, group_id = get_group_name_id(group_id)
    try:
        other_threads_list = None
        thread = node_collection.one({'_id': ObjectId(thread_id)})
        # Sibling threads hanging off the same parent node(s), used for
        # the "other threads" navigation list.
        other_threads = node_collection.find({'member_of': twist_gst._id, 'prior_node': thread.prior_node,
                                              '_id': {'$nin': [ObjectId(thread._id)]}})
        if other_threads.count():
            other_threads_list = [[str(d._id), d.name] for d in other_threads]
        rep_lst = get_all_replies(thread)
        lst_rep = list(rep_lst)
        if lst_rep:
            reply_count = len(lst_rep)
        else:
            reply_count = 0
        # Walk the prior nodes looking for the owning forum; if several
        # match, the last one found wins.
        forum = ""
        for each in thread.prior_node:
            forum = node_collection.one({'$and': [{'member_of': {'$all': [forum_gst._id]}}, {'_id': ObjectId(each)}]})
        if forum:
            usrname = User.objects.get(id=forum.created_by).username
            variables = RequestContext(request,
                                       {'forum': forum,
                                        'thread': thread,
                                        'groupid': group_id,
                                        'other_threads_list': other_threads_list,
                                        'group_id': group_id,
                                        'eachrep': thread,
                                        'user': request.user,
                                        'reply_count': reply_count,
                                        'forum_created_by': usrname
                                        })
            return render_to_response("ndf/thread_details.html", variables)
        # No parent forum found: render the thread standalone with the
        # thread itself standing in for the 'forum' context entry.
        usrname = User.objects.get(id=thread.created_by).username
        variables = RequestContext(request,
                                   {'forum': thread,
                                    'thread': None,
                                    'groupid': group_id,
                                    'group_id': group_id,
                                    'other_threads_list': other_threads_list,
                                    'eachrep': thread,
                                    'user': request.user,
                                    'reply_count': reply_count,
                                    'forum_created_by': usrname
                                    })
        return render_to_response("ndf/thread_details.html", variables)
    except Exception as e:
        # NOTE(review): this broad catch prints and then falls off the end
        # of the function, returning None (an invalid HTTP response) --
        # consider logging and re-raising instead.
        print "Exception in thread_details " + str(e)
        pass
@login_required
@get_execution_time
def create_thread(request, group_id, forum_id):
    """Create a new thread under *forum_id* (POST) or show the form (GET).

    On POST a "twist" GSystem is created, linked to the forum via the
    ``has_thread`` relation, all group members (subject to their
    notification preference) are notified, and the user is redirected to
    the new thread's page.
    """
    forum = node_collection.one({'_id': ObjectId(forum_id)})
    # (dead debug block removed: forum_data dict + print)
    forum_threads = []
    # Names of existing non-hidden threads, sent to the client for
    # duplicate-name detection.
    exstng_reply = node_collection.find({'$and': [{'_type': 'GSystem'}, {'prior_node': ObjectId(forum._id)}], 'status': {'$nin': ['HIDDEN']}})
    exstng_reply.sort('created_at')
    for each in exstng_reply:
        forum_threads.append((each.name).strip().lower())
    if request.method == "POST":
        colg = node_collection.one({'_id': ObjectId(group_id)})
        name = unicode(request.POST.get('thread_name', ""))
        content_org = request.POST.get('content_org', "")
        # Build the new thread (a "twist" GSystem).
        colrep = node_collection.collection.GSystem()
        colrep.member_of.append(twist_gst._id)
        # ADDED ON 14th July
        colrep.access_policy = u"PUBLIC"
        colrep.url = set_all_urls(colrep.member_of)
        colrep.prior_node.append(forum._id)
        colrep.name = name
        if content_org:
            colrep.content_org = unicode(content_org)
            # Required to link temporary files with the current user who
            # is modifying this document
            usrname = request.user.username
            filename = slugify(name) + "-" + usrname + "-"
            colrep.content = content_org
        usrid = int(request.user.id)
        colrep.created_by = usrid
        colrep.modified_by = usrid
        if usrid not in colrep.contributors:
            colrep.contributors.append(usrid)
        colrep.group_set.append(colg._id)
        colrep.save(groupid=group_id)
        # Link forum -> thread through the has_thread relation.
        has_thread_rt = node_collection.one({"_type": "RelationType", "name": u"has_thread"})
        gr = create_grelation(forum._id, has_thread_rt, colrep._id)
        # Notify all group members except those whose notification
        # preference is turned OFF.
        link = "http://" + sitename + "/" + str(colg._id) + "/forum/thread/" + str(colrep._id)
        for each in colg.author_set:
            bx = User.objects.filter(id=each)
            if bx:
                bx = User.objects.get(id=each)
            else:
                continue
            activity = "Added thread"
            msg = request.user.username + " has added a thread in the forum " + forum.name + " in the group -'" + colg.name + "'\n" + "Please visit " + link + " to see the thread."
            if bx:
                auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username)})
                if colg._id and auth:
                    no_check = forum_notification_status(colg._id, auth._id)
                else:
                    no_check = True
                if no_check:
                    ret = set_notif_val(request, colg._id, msg, activity, bx)
        url_name = "/" + group_id + "/forum/thread/" + str(colrep._id)
        return HttpResponseRedirect(url_name)
        # (dead code removed: direct render of thread_details.html that
        # followed the unconditional redirect above)
    else:
        return render_to_response("ndf/create_thread.html",
                                  {'group_id': group_id,
                                   'groupid': group_id,
                                   'forum': forum,
                                   'forum_threads': json.dumps(forum_threads),
                                   'forum_created_by': User.objects.get(id=forum.created_by).username
                                   },
                                  RequestContext(request))
@login_required
@get_execution_time
def add_node(request, group_id):
    """AJAX endpoint: add a "Twist" (thread) or "Reply" node under a forum.

    POST parameters: ``node`` ("Twist"/"Reply"), ``reply`` (content),
    ``thread`` (thread _id), ``forumid`` (forum _id), ``supnode`` (parent
    node _id), ``twistname`` and ``upload_cnt`` plus uploaded files.
    Returns a rendered HTML fragment on success, "failure" when the
    parent node does not exist, or the exception text on error
    (legacy behaviour preserved).
    """
    group_name, group_id = get_group_name_id(group_id)
    try:
        auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
        content_org = request.POST.get("reply", "")
        node = request.POST.get("node", "")
        thread = request.POST.get("thread", "")     # thread _id
        forumid = request.POST.get("forumid", "")   # forum _id
        sup_id = request.POST.get("supnode", "")    # _id of the parent node
        tw_name = request.POST.get("twistname", "")
        upload_files_count = int(request.POST.get("upload_cnt", 0))
        lstobj_collection = []
        usrid = int(request.user.id)
        if upload_files_count > 0:
            # Inherit the thread's access policy for uploaded files when
            # the thread can be resolved; default to PUBLIC otherwise.
            try:
                thread_obj = node_collection.one({'_id': ObjectId(thread)})
                access_policy = thread_obj.access_policy
            except:
                access_policy = u'PUBLIC'
            for key, value in request.FILES.items():
                fname = unicode(value.__dict__['_name'])
                fileobj, fs = save_file(value, fname, usrid, group_id, "", "", username=unicode(request.user.username), access_policy=access_policy, count=0, first_object="")
                # save_file may return either a single id or a list whose
                # second element is the id.
                if type(fileobj) == list:
                    obid = str(list(fileobj)[1])
                else:
                    obid = str(fileobj)
                file_obj = node_collection.find_one({'_id': ObjectId(obid)})
                lstobj_collection.append(file_obj._id)
        forumobj = ""
        colg = node_collection.one({'_id': ObjectId(group_id)})
        if forumid:
            forumobj = node_collection.one({"_id": ObjectId(forumid)})
        sup = node_collection.one({"_id": ObjectId(sup_id)})
        if not sup:
            return HttpResponse("failure")
        colrep = node_collection.collection.GSystem()
        if node == "Twist":
            name = tw_name
            colrep.member_of.append(twist_gst._id)
        elif node == "Reply":
            name = unicode("Reply of:" + str(sup._id))
            colrep.member_of.append(reply_gst._id)
            # Attach uploaded files to the reply's collection set.
            if upload_files_count > 0:
                colrep.collection_set = lstobj_collection
        # BUG FIX: the following assignment block appeared twice verbatim
        # in the original (copy/paste), which appended ``sup._id`` to
        # ``prior_node`` twice; it now runs exactly once.
        colrep.prior_node.append(sup._id)
        colrep.name = name
        if content_org:
            colrep.content_org = unicode(content_org)
            # Required to link temporary files with the current user who
            # is modifying this document
            usrname = request.user.username
            filename = slugify(name) + "-" + usrname + "-"
            colrep.content = content_org
        colrep.created_by = usrid
        colrep.modified_by = usrid
        if usrid not in colrep.contributors:
            colrep.contributors.append(usrid)
        colrep.group_set.append(colg._id)
        colrep.save(groupid=group_id)
        # --- notifications -------------------------------------------------
        groupname = colg.name
        if node == "Twist":
            url = "http://" + sitename + "/" + str(group_id) + "/forum/thread/" + str(colrep._id)
            activity = request.user.username + " -added a thread '"
            prefix = "' on the forum '" + forumobj.name + "'"
            nodename = name
        if node == "Reply":
            threadobj = node_collection.one({"_id": ObjectId(thread)})
            url = "http://" + sitename + "/" + str(group_id) + "/forum/thread/" + str(threadobj._id)
            activity = request.user.username + " -added a reply "
            prefix = " on the thread '" + threadobj.name + "' on the forum '" + forumobj.name + "'"
            nodename = ""
        link = url
        # Every group member except the creator...
        for each in colg.author_set:
            if each != colg.created_by:
                bx = User.objects.get(id=each)
                msg = activity + "-" + nodename + prefix + " in the group '" + groupname + "'\n" + "Please visit " + link + " to see the updated page"
                if bx:
                    no_check = forum_notification_status(group_id, auth._id)
                    if no_check:
                        ret = set_notif_val(request, group_id, msg, activity, bx)
        # ...and then the group creator with a personalised message.
        bx = User.objects.get(id=colg.created_by)
        msg = activity + "-" + nodename + prefix + " in the group '" + groupname + "' created by you" + "\n" + "Please visit " + link + " to see the updated page"
        if bx:
            no_check = forum_notification_status(group_id, auth._id)
            if no_check:
                ret = set_notif_val(request, group_id, msg, activity, bx)
        if node == "Reply":
            threadobj = node_collection.one({"_id": ObjectId(thread)})
            variables = RequestContext(request, {'thread': threadobj, 'user': request.user, 'forum': forumobj, 'groupid': group_id, 'group_id': group_id})
            return render_to_response("ndf/refreshtwist.html", variables)
        else:
            templ = get_template('ndf/refreshthread.html')
            html = templ.render(Context({'forum': forumobj, 'user': request.user, 'groupid': group_id, 'group_id': group_id}))
            return HttpResponse(html)
    except Exception as e:
        # Legacy behaviour preserved: report the exception text to the
        # client rather than raising a 500.
        return HttpResponse("" + str(e))
    return HttpResponse("success")
@get_execution_time
def get_profile_pic(username):
    """Return the most recent profile-picture File node linked to
    *username* via the ``has_profile_pic`` relation, or "" if the user
    has no profile picture."""
    author_node = node_collection.one({'_type': 'Author', 'name': unicode(username)})
    pic_relation_type = node_collection.one({'_type': u'RelationType', 'name': u'has_profile_pic'})
    triples = db[Triple.collection_name]
    # Cursor over every has_profile_pic GRelation owned by this author.
    relations = triples.Triple.find({
        '_type': 'GRelation',
        'subject': ObjectId(author_node._id),
        'relation_type': pic_relation_type.get_dbref(),
    })
    total = relations.count()
    if not total:
        return ""
    # The last relation in the cursor is the current picture.
    newest_id = relations[total - 1].right_subject
    return node_collection.one({'_type': 'File', '_id': ObjectId(newest_id)})
@login_required
@check_delete
@get_execution_time
def delete_forum(request, group_id, node_id, relns=None):
    """Soft-delete a forum by setting its status to HIDDEN, notify the
    group members and creator, then redirect to the forum listing.
    """
    # (legacy group-name resolution removed; superseded by the
    # try/except + get_group_name_id() block below)
    try:
        group_id = ObjectId(group_id)
    except:
        group_name, group_id = get_group_name_id(group_id)
    # Soft delete: the node is kept but hidden from listings.
    op = node_collection.collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
    node = node_collection.one({'_id': ObjectId(node_id)})
    # send notifications to all group members except the creator
    colg = node_collection.one({'_id': ObjectId(group_id)})
    for each in colg.author_set:
        if each != colg.created_by:
            bx = get_userobject(each)
            if bx:
                activity = request.user.username + " -deleted forum "
                msg = activity + "-" + node.name + "- in the group '" + colg.name
                # notification-preference check currently disabled:
                # no_check=forum_notification_status(group_id,auth._id)
                # if no_check:
                ret = set_notif_val(request, group_id, msg, activity, bx)
    # ...and a personalised notification for the group creator
    activity = request.user.username + " -deleted forum "
    bx = get_userobject(colg.created_by)
    if bx:
        msg = activity + "-" + node.name + "- in the group '" + colg.name + "' created by you"
        # no_check=forum_notification_status(group_id,auth._id)
        # if no_check:
        ret = set_notif_val(request, group_id, msg, activity, bx)
    return HttpResponseRedirect(reverse('forum', kwargs={'group_id': group_id}))
@login_required
@get_execution_time
def delete_thread(request, group_id, forum_id, node_id):
    """Soft-delete a thread (status -> HIDDEN), notify group members and
    the group creator, then render the parent forum's details page.
    """
    ins_objectid = ObjectId()
    if ins_objectid.is_valid(node_id):
        thread = node_collection.one({'_id': ObjectId(node_id)})
    else:
        # Invalid node id: bail out (returns None -- the caller gets no
        # HTTP response; NOTE(review): consider a 404 instead).
        return
    forum = node_collection.one({'_id': ObjectId(forum_id)})
    # Resolve a group *name* (or fall back to the author's id) when
    # group_id is not a valid ObjectId.
    if ins_objectid.is_valid(group_id) is False:
        group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
        auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
        if group_ins:
            group_id = str(group_ins._id)
        else:
            auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
            if auth:
                group_id = str(auth._id)
            else:
                pass
    # Soft delete: hide the thread rather than removing it.
    op = node_collection.collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
    node = node_collection.one({'_id': ObjectId(node_id)})
    # Remaining (non-hidden) thread names under this forum.
    forum_threads = []
    exstng_reply = node_collection.find({'$and': [{'_type': 'GSystem'}, {'prior_node': ObjectId(forum._id)}], 'status': {'$nin': ['HIDDEN']}})
    exstng_reply.sort('created_at')
    forum_node = node_collection.one({'_id': ObjectId(forum_id)})
    for each in exstng_reply:
        forum_threads.append(each.name)
    # send notifications to all group members
    colg = node_collection.one({'_id': ObjectId(group_id)})
    for each in colg.author_set:
        if each != colg.created_by:
            bx = get_userobject(each)
            if bx:
                activity = request.user.username + " -deleted thread "
                prefix = " in the forum " + forum_node.name
                link = "http://" + sitename + "/" + str(colg._id) + "/forum/" + str(forum_node._id)
                # NOTE(review): this message says "created by you" even for
                # ordinary members -- looks copy/pasted from the creator
                # branch below; confirm intent before changing the text.
                msg = activity + "-" + node.name + prefix + "- in the group '" + colg.name + "' created by you." + "'\n" + "Please visit " + link + " to see the forum."
                # no_check=forum_notification_status(group_id,auth._id)
                # if no_check:
                ret = set_notif_val(request, group_id, msg, activity, bx)
    # ...and the group creator
    activity = request.user.username + " -deleted thread "
    prefix = " in the forum " + forum_node.name
    bx = get_userobject(colg.created_by)
    if bx:
        link = "http://" + sitename + "/" + str(colg._id) + "/forum/" + str(forum_node._id)
        msg = activity + "-" + node.name + prefix + "- in the group '" + colg.name + "' created by you." + "'\n" + "Please visit " + link + " to see the forum."
        # no_check=forum_notification_status(group_id,auth._id)
        # if no_check:
        ret = set_notif_val(request, group_id, msg, activity, bx)
    # send notification code ends here
    variables = RequestContext(request, {
        'forum': forum,
        'groupid': group_id, 'group_id': group_id,
        'forum_created_by': User.objects.get(id=forum.created_by).username
    })
    return render_to_response("ndf/forumdetails.html", variables)
@login_required
@get_execution_time
def edit_thread(request, group_id, forum_id, thread_id):
    """Edit a thread's name/content (POST) or render the edit form (GET).

    After a successful edit every group member and the group creator are
    notified, then the user is redirected to the thread page.
    """
    # (legacy group-name resolution removed; superseded by the
    # try/except + get_group_name_id() block below)
    try:
        group_id = ObjectId(group_id)
    except:
        group_name, group_id = get_group_name_id(group_id)
    forum = node_collection.one({'_id': ObjectId(forum_id)})
    thread = node_collection.one({'_id': ObjectId(thread_id)})
    # All existing node names under this forum, cached in the session for
    # client-side duplicate checking.
    exstng_reply = node_collection.find({'$and': [{'_type': 'GSystem'}, {'prior_node': ObjectId(forum._id)}]})
    nodes = []
    exstng_reply.sort('created_at')
    for each in exstng_reply:
        nodes.append(each.name)
    request.session['nodes'] = json.dumps(nodes)
    colg = node_collection.one({'_id': ObjectId(group_id)})
    if request.method == 'POST':
        name = unicode(request.POST.get('thread_name', ""))  # thread name
        thread.name = name
        content_org = request.POST.get('content_org', "")  # thread content
        if content_org:
            thread.content_org = unicode(content_org)
            # Required to link temporary files with the current user who
            # is modifying this document
            usrname = request.user.username
            filename = slugify(name) + "-" + usrname + "-"
            thread.content = content_org
        thread.save(groupid=group_id)
        link = "http://" + sitename + "/" + str(colg._id) + "/forum/thread/" + str(thread._id)
        # Notify every group member except the creator...
        for each in colg.author_set:
            if each != colg.created_by:
                bx = get_userobject(each)
                if bx:
                    msg = request.user.username + " has edited thread- " + thread.name + "- in the forum " + forum.name + " in the group -'" + colg.name + "'\n" + "Please visit " + link + " to see the thread."
                    activity = "Edited thread"
                    # notification-preference check currently disabled:
                    # auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
                    # if colg._id and auth:
                    #     no_check=forum_notification_status(colg._id,auth._id)
                    # else:
                    #     no_check=True
                    # if no_check:
                    ret = set_notif_val(request, colg._id, msg, activity, bx)
        # ...and the group creator with a personalised message.
        activity = request.user.username + " edited thread -"
        bx = get_userobject(colg.created_by)
        prefix = "-in the forum -" + forum.name
        if bx:
            msg = activity + "-" + thread.name + prefix + " in the group '" + colg.name + "' created by you" + "\n" + "Please visit " + link + " to see the thread"
            # no_check=forum_notification_status(group_id,auth._id)
            # if no_check:
            ret = set_notif_val(request, group_id, msg, activity, bx)
        variables = RequestContext(request, {'group_id': group_id, 'thread_id': thread._id, 'nodes': json.dumps(nodes)})
        return HttpResponseRedirect(reverse('thread', kwargs={'group_id': group_id, 'thread_id': thread._id}))
    else:
        return render_to_response("ndf/edit_thread.html",
                                  {'group_id': group_id,
                                   'groupid': group_id,
                                   'forum': forum,
                                   'thread': thread,
                                   'forum_created_by': User.objects.get(id=forum.created_by).username
                                   },
                                  RequestContext(request))
@login_required
@get_execution_time
def delete_reply(request, group_id, forum_id, thread_id, node_id):
    """Soft-delete a reply (status -> HIDDEN), notify group members and
    the group creator, then redirect back to the thread page.
    """
    # (legacy group-name resolution removed; superseded by the
    # try/except + get_group_name_id() block below)
    try:
        group_id = ObjectId(group_id)
    except:
        group_name, group_id = get_group_name_id(group_id)
    activity = ""
    # Soft delete: hide the reply rather than removing it.
    op = node_collection.collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
    replyobj = node_collection.one({'_id': ObjectId(node_id)})
    forumobj = node_collection.one({"_id": ObjectId(forum_id)})
    threadobj = node_collection.one({"_id": ObjectId(thread_id)})
    # notifications to all group members except the creator
    colg = node_collection.one({'_id': ObjectId(group_id)})
    link = "http://" + sitename + "/" + str(colg._id) + "/forum/thread/" + str(threadobj._id)
    for each in colg.author_set:
        if each != colg.created_by:
            bx = get_userobject(each)
            if bx:
                msg = request.user.username + " has deleted reply- " + replyobj.content_org + "- in the thread " + threadobj.name + " in the group -'" + colg.name + "'\n" + "Please visit " + link + " to see the thread."
                activity = "Deleted reply"
                # notification-preference check currently disabled:
                # auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
                # if colg._id and auth:
                #     no_check=forum_notification_status(colg._id,auth._id)
                # else:
                #     no_check=True
                # if no_check:
                ret = set_notif_val(request, colg._id, msg, activity, bx)
    # ...and the group creator with a personalised message.
    prefix = "-in the forum -" + forumobj.name
    msg = request.user.username + " has deleted reply- " + replyobj.content_org + "- in the thread " + threadobj.name + prefix + " in the group -'" + colg.name + "' created by you" + "\n Please visit " + link + " to see the thread."
    bx = get_userobject(colg.created_by)
    if bx:
        # no_check=forum_notification_status(group_id,auth._id)
        # if no_check:
        ret = set_notif_val(request, group_id, msg, activity, bx)
    variables = RequestContext(request, {'thread': threadobj, 'user': request.user, 'forum': forumobj, 'groupid': group_id, 'group_id': group_id})
    return HttpResponseRedirect(reverse('thread', kwargs={'group_id': group_id, 'thread_id': threadobj._id}))
    # return render_to_response("ndf/replytwistrep.html",variables)
| AvadootNachankar/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/forum.py | Python | agpl-3.0 | 43,873 | [
"VisIt"
] | c495c904788d5c7c98b6ad46b25ccd8602e0ed0a53d51e87946ceeaa9f2598a1 |
# This is the instrument-specific file for the PS3000a series of instruments.
#
# pico-python is Copyright (c) 2013-2014 By:
# Colin O'Flynn <coflynn@newae.com>
# Mark Harfouche <mark.harfouche@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This is the low level driver file for a specific Picoscope.
By this, I mean if parameters want to get passed as strings, they should be
handled by PSBase
All functions here should take things as close to integers as possible, the
only exception here is for array parameters. Array parameters should be passed
in a pythonic way through numpy since the PSBase class should not be aware of
the specifics behind how the clib is called.
The functions should not have any default values as these should be handled
by PSBase.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import math
# to load the proper dll
import platform
# Do not import or use ill-defined data types
# such as short int or long
# use the values specified in the h file
# float is always defined as 32 bits
# double is defined as 64 bits
from ctypes import byref, POINTER, create_string_buffer, c_float, \
c_int16, c_int32, c_uint16, c_uint32, c_void_p
from ctypes import c_int32 as c_enum
from picoscope.picobase import _PicoscopeBase
class PS3000a(_PicoscopeBase):
    """Low-level ctypes wrapper for the PicoScope PS3000a driver library.

    Every ``_lowLevel*`` method maps 1:1 onto a ``ps3000a*`` entry point
    of the vendor shared library; conversion from user-friendly values
    (strings, volts, seconds) is performed by :class:`_PicoscopeBase`.
    """

    LIBNAME = "ps3000a"

    NUM_CHANNELS = 4
    CHANNELS = {"A": 0, "B": 1, "C": 2, "D": 3,
                "External": 4, "MaxChannels": 4, "TriggerAux": 5}

    ADC_RESOLUTIONS = {"8": 0, "12": 1, "14": 2, "15": 3, "16": 4}

    CHANNEL_RANGE = [{"rangeV": 10E-3, "apivalue": 0, "rangeStr": "10 mV"},
                     {"rangeV": 20E-3, "apivalue": 1, "rangeStr": "20 mV"},
                     {"rangeV": 50E-3, "apivalue": 2, "rangeStr": "50 mV"},
                     {"rangeV": 100E-3, "apivalue": 3, "rangeStr": "100 mV"},
                     {"rangeV": 200E-3, "apivalue": 4, "rangeStr": "200 mV"},
                     {"rangeV": 500E-3, "apivalue": 5, "rangeStr": "500 mV"},
                     {"rangeV": 1.0, "apivalue": 6, "rangeStr": "1 V"},
                     {"rangeV": 2.0, "apivalue": 7, "rangeStr": "2 V"},
                     {"rangeV": 5.0, "apivalue": 8, "rangeStr": "5 V"},
                     {"rangeV": 10.0, "apivalue": 9, "rangeStr": "10 V"},
                     {"rangeV": 20.0, "apivalue": 10, "rangeStr": "20 V"},
                     {"rangeV": 50.0, "apivalue": 11, "rangeStr": "50 V"},
                     ]

    CHANNEL_COUPLINGS = {"DC": 1, "AC": 0}

    # has_sig_gen = True
    WAVE_TYPES = {"Sine": 0, "Square": 1, "Triangle": 2,
                  "RampUp": 3, "RampDown": 4,
                  "Sinc": 5, "Gaussian": 6, "HalfSine": 7, "DCVoltage": 8,
                  "WhiteNoise": 9}

    SIGGEN_TRIGGER_TYPES = {"Rising": 0, "Falling": 1,
                            "GateHigh": 2, "GateLow": 3}
    SIGGEN_TRIGGER_SOURCES = {"None": 0, "ScopeTrig": 1, "AuxIn": 2,
                              "ExtIn": 3, "SoftTrig": 4, "TriggerRaw": 5}

    # NOTE(review): the AWG parameters differ between the A/B and C/D
    # models; the variant could be detected via GetUnitInfo's
    # VARIANT_INFO / hardware-version fields (e.g. a PS6403B reports
    # hardware version "1 1").
    AWGPhaseAccumulatorSize = 32
    AWGBufferAddressWidth = 14
    AWGMaxSamples = 2 ** AWGBufferAddressWidth

    AWGDACInterval = 5E-9  # in seconds
    AWGDACFrequency = 1 / AWGDACInterval

    # NOTE: these limits are NOT what the Programming Guide (version
    # 10_5_0_28) states; the discrepancy was acknowledged by Pico in
    # http://www.picotech.com/support/topic13217.html
    AWGMaxVal = 0x0FFF
    AWGMinVal = 0x0000

    AWG_INDEX_MODES = {"Single": 0, "Dual": 1, "Quad": 2}

    # ADC count limits for the 8-bit and the higher resolutions.
    MAX_VALUE_8BIT = 32512
    MIN_VALUE_8BIT = -32512
    MAX_VALUE_OTHER = 32767
    MIN_VALUE_OTHER = -32767

    EXT_RANGE_VOLTS = 5

    def __init__(self, serialNumber=None, connect=True):
        """Load the vendor shared library and optionally open the unit."""
        if platform.system() == 'Linux':
            from ctypes import cdll
            self.lib = cdll.LoadLibrary("lib" + self.LIBNAME + ".so")
        else:
            from ctypes import windll
            self.lib = windll.LoadLibrary(self.LIBNAME + ".dll")
        self.resolution = self.ADC_RESOLUTIONS["8"]
        super(PS3000a, self).__init__(serialNumber, connect)

    def _lowLevelOpenUnit(self, sn):
        c_handle = c_int16()
        if sn is not None:
            serialNullTermStr = create_string_buffer(sn)
        else:
            serialNullTermStr = None
        # BUG FIX: pass the buffer itself (ctypes converts None to NULL);
        # the old ``byref(serialNullTermStr)`` raised TypeError whenever
        # no serial number was supplied.
        m = self.lib.ps3000aOpenUnit(byref(c_handle), serialNullTermStr)
        self.checkResult(m)
        self.handle = c_handle.value

    def _lowLevelCloseUnit(self):
        m = self.lib.ps3000aCloseUnit(c_int16(self.handle))
        self.checkResult(m)

    def _lowLevelSetChannel(self, chNum, enabled, coupling, VRange, VOffset,
                            BWLimited):
        # BWLimited is accepted for interface compatibility; the ps3000a
        # API configures the bandwidth limiter through a separate call.
        m = self.lib.ps3000aSetChannel(c_int16(self.handle), c_enum(chNum),
                                       c_int16(enabled), c_enum(coupling),
                                       c_enum(VRange), c_float(VOffset))
        self.checkResult(m)

    def _lowLevelStop(self):
        # BUG FIX: the correct symbol is ps3000aStop; ps3000Stop (the old
        # 3000-series API name) does not exist in this library.
        m = self.lib.ps3000aStop(c_int16(self.handle))
        self.checkResult(m)

    def _lowLevelGetUnitInfo(self, info):
        s = create_string_buffer(256)
        requiredSize = c_int16(0)
        m = self.lib.ps3000aGetUnitInfo(c_int16(self.handle), byref(s),
                                        c_int16(len(s)), byref(requiredSize),
                                        c_enum(info))
        self.checkResult(m)
        if requiredSize.value > len(s):
            # First buffer was too small -- retry with the size the
            # driver asked for.  BUG FIX: the retry previously called the
            # non-existent ps3000_get_unit_info symbol.
            s = create_string_buffer(requiredSize.value + 1)
            m = self.lib.ps3000aGetUnitInfo(c_int16(self.handle), byref(s),
                                            c_int16(len(s)),
                                            byref(requiredSize),
                                            c_enum(info))
            self.checkResult(m)
        # The driver returns ASCII text; utf-8 is a strict superset.
        return s.value.decode('utf-8')

    def _lowLevelFlashLed(self, times):
        m = self.lib.ps3000aFlashLed(c_int16(self.handle), c_int16(times))
        self.checkResult(m)

    def _lowLevelSetSimpleTrigger(self, enabled, trigsrc, threshold_adc,
                                  direction, delay, auto):
        m = self.lib.ps3000aSetSimpleTrigger(
            c_int16(self.handle), c_int16(enabled),
            c_enum(trigsrc), c_int16(threshold_adc),
            c_enum(direction), c_uint32(delay), c_int16(auto))
        self.checkResult(m)

    def _lowLevelSetNoOfCaptures(self, numCaptures):
        # nCaptures is a uint32_t in the programmer's guide (was uint16).
        m = self.lib.ps3000aSetNoOfCaptures(c_int16(self.handle),
                                            c_uint32(numCaptures))
        self.checkResult(m)

    def _lowLevelMemorySegments(self, numSegments):
        """Split capture memory; return the max samples per segment."""
        maxSamples = c_int32()
        # nSegments is a uint32_t in the programmer's guide (was uint16).
        m = self.lib.ps3000aMemorySegments(c_int16(self.handle),
                                           c_uint32(numSegments),
                                           byref(maxSamples))
        self.checkResult(m)
        return maxSamples.value

    def _lowLevelGetMaxSegments(self):
        maxSegments = c_int16()
        m = self.lib.ps3000aGetMaxSegments(c_int16(self.handle),
                                           byref(maxSegments))
        self.checkResult(m)
        return maxSegments.value

    def _lowLevelRunBlock(self, numPreTrigSamples, numPostTrigSamples,
                          timebase, oversample, segmentIndex):
        """Start a block capture; return the driver's busy estimate (ms).

        NOTE: ``oversample`` is passed through but not used by the
        ps3000a API.
        """
        timeIndisposedMs = c_int32()
        # segmentIndex is a uint32_t in the programmer's guide.
        m = self.lib.ps3000aRunBlock(
            c_int16(self.handle), c_uint32(numPreTrigSamples),
            c_uint32(numPostTrigSamples), c_uint32(timebase),
            c_int16(oversample), byref(timeIndisposedMs),
            c_uint32(segmentIndex), c_void_p(), c_void_p())
        self.checkResult(m)
        return timeIndisposedMs.value

    def _lowLevelIsReady(self):
        ready = c_int16()
        m = self.lib.ps3000aIsReady(c_int16(self.handle), byref(ready))
        self.checkResult(m)
        return bool(ready.value)

    def _lowLevelGetTimebase(self, tb, noSamples, oversample, segmentIndex):
        """Return (timeIntervalSeconds, maxSamples) for timebase ``tb``."""
        maxSamples = c_int32()
        intervalNanoSec = c_float()
        m = self.lib.ps3000aGetTimebase2(c_int16(self.handle), c_uint32(tb),
                                         c_uint32(noSamples),
                                         byref(intervalNanoSec),
                                         c_int16(oversample),
                                         byref(maxSamples),
                                         c_uint32(segmentIndex))
        self.checkResult(m)
        # the driver reports nanoseconds; convert to seconds
        return (intervalNanoSec.value * 1e-9, maxSamples.value)

    def getTimeBaseNum(self, sampleTimeS):
        """Convert a sample interval in seconds to the API timebase code."""
        maxSampleTime = (((2 ** 32 - 1) - 2) / 125000000)
        if sampleTimeS < 8.0E-9:
            # timebases 0..2: interval = 2**tb nanoseconds
            st = math.floor(math.log(sampleTimeS * 1E9, 2))
            st = max(st, 0)
        else:
            # timebases >= 3: interval = (tb - 2) / 125 MHz
            if sampleTimeS > maxSampleTime:
                sampleTimeS = maxSampleTime
            st = math.floor((sampleTimeS * 125000000) + 2)
        return int(st)

    def getTimestepFromTimebase(self, timebase):
        """Inverse of :meth:`getTimeBaseNum`: timebase code -> seconds."""
        if timebase < 3:
            dt = 2. ** timebase / 1.0E9
        else:
            dt = (timebase - 2.0) / 125000000.
        return dt

    def _lowLevelSetAWGSimpleDeltaPhase(self, waveform, deltaPhase,
                                        offsetVoltage, pkToPk, indexMode,
                                        shots, triggerType, triggerSource):
        """Program the AWG; ``waveform`` must be an int16 numpy array.

        BUG FIX: this previously called ``ps3000_set_siggen``, which does
        not exist in the ps3000a library; the entry point matching this
        argument list is ``ps3000aSetSigGenArbitrary``.
        """
        waveformPtr = waveform.ctypes.data_as(POINTER(c_int16))
        m = self.lib.ps3000aSetSigGenArbitrary(
            c_int16(self.handle),
            c_uint32(int(offsetVoltage * 1E6)),  # offset voltage in microvolts
            c_uint32(int(pkToPk * 1E6)),         # pkToPk in microvolts
            c_uint32(int(deltaPhase)),           # startDeltaPhase
            c_uint32(int(deltaPhase)),           # stopDeltaPhase
            c_uint32(0),                         # deltaPhaseIncrement
            c_uint32(0),                         # dwellCount
            waveformPtr,                         # arbitraryWaveform
            c_int32(len(waveform)),              # arbitraryWaveformSize
            c_enum(0),                           # sweepType for deltaPhase
            c_enum(0),                           # operation (noise etc.)
            c_enum(indexMode),                   # single, dual, quad
            c_uint32(shots),
            c_uint32(0),                         # sweeps
            c_uint32(triggerType),
            c_uint32(triggerSource),
            c_int16(0))                          # extInThreshold
        self.checkResult(m)

    def _lowLevelSetDataBuffer(self, channel, data, downSampleMode,
                               segmentIndex):
        """Register ``data`` (a numpy int16 array) as the capture buffer.

        Call :meth:`_lowLevelClearDataBuffer` when done with the array,
        otherwise subsequent GetValues calls keep writing into it.
        """
        dataPtr = data.ctypes.data_as(POINTER(c_int16))
        numSamples = len(data)
        m = self.lib.ps3000aSetDataBuffer(c_int16(self.handle),
                                          c_enum(channel),
                                          dataPtr, c_int32(numSamples),
                                          c_uint32(segmentIndex),
                                          c_enum(downSampleMode))
        self.checkResult(m)

    def _lowLevelSetMultipleDataBuffers(self, channel, data, downSampleMode):
        """Register one row of ``data`` per memory segment (bulk capture)."""
        max_segments = self._lowLevelGetMaxSegments()
        if data.shape[0] < max_segments:
            raise ValueError("data array has fewer rows than current number of memory segments")
        if data.shape[1] < self.maxSamples:
            raise ValueError("data array has fewer columns than maxSamples")
        for i in range(max_segments):
            # BUG FIX: this used to call ``ps._lowLevelSetDataBuffer``
            # (NameError -- ``ps`` is undefined) and then checkResult()
            # on its None return; the helper checks the status itself.
            self._lowLevelSetDataBuffer(channel, data[i, :],
                                        downSampleMode, i)

    def _lowLevelClearDataBuffer(self, channel, segmentIndex):
        """Detach any previously registered buffer for ``channel``."""
        m = self.lib.ps3000aSetDataBuffer(c_int16(self.handle),
                                          c_enum(channel),
                                          c_void_p(), c_uint32(0),
                                          c_uint32(segmentIndex),
                                          c_enum(0))
        self.checkResult(m)

    def _lowLevelGetValues(self, numSamples, startIndex, downSampleRatio,
                           downSampleMode, segmentIndex):
        """Fetch captured data; return (numSamplesReturned, overflow)."""
        numSamplesReturned = c_uint32()
        numSamplesReturned.value = numSamples
        overflow = c_int16()
        m = self.lib.ps3000aGetValues(
            c_int16(self.handle), c_uint32(startIndex),
            byref(numSamplesReturned), c_uint32(downSampleRatio),
            c_enum(downSampleMode), c_uint32(segmentIndex),
            byref(overflow))
        self.checkResult(m)
        return (numSamplesReturned.value, overflow.value)

    def _lowLevelGetValuesBulk(self, numSamples, fromSegment, toSegment,
                               downSampleRatio, downSampleMode, overflow):
        """Fetch data from several memory segments in one call."""
        # Per the programmer's guide noOfSamples is an in/out uint32_t
        # pointer and the segment indices / ratio are uint32_t; the old
        # code passed int16s, which truncated larger values.
        noOfSamples = c_uint32(numSamples)
        m = self.lib.ps3000aGetValuesBulk(
            c_int16(self.handle),
            byref(noOfSamples),
            c_uint32(fromSegment),
            c_uint32(toSegment),
            c_uint32(downSampleRatio),
            c_enum(downSampleMode),
            overflow.ctypes.data_as(POINTER(c_int16)))
        self.checkResult(m)
        return overflow, numSamples

    # def _lowLevelSetSigGenBuiltInSimple(self, offsetVoltage, pkToPk,
    #                                     waveType, frequency, shots,
    #                                     triggerType, triggerSource):
    #     # TODO: port to ps3000aSetSigGenBuiltInV2 when implemented.
    #     m = self.lib.ps3000SetSigGenBuiltIn(
    #         c_int16(self.handle),
    #         c_int32(int(offsetVoltage * 1000000)),
    #         c_int32(int(pkToPk * 1000000)),
    #         c_int16(waveType),
    #         c_float(frequency), c_float(frequency),
    #         c_float(0), c_float(0), c_enum(0), c_enum(0),
    #         c_uint32(shots), c_uint32(0),
    #         c_enum(triggerType), c_enum(triggerSource),
    #         c_int16(0))
    #     self.checkResult(m)
| ElOceanografo/pico-python | picoscope/ps3000a.py | Python | bsd-2-clause | 16,341 | [
"Gaussian"
] | a8d0539c9cb0f639a06ac84c79279694cfcef4da82bdd31cb2784380c70a7a49 |
import numpy
from matplotlib.pyplot import figure, show, rc
from kmpfit import kmpfit
def my_model(p, x):
#-----------------------------------------------------------------------
# This describes the model and its parameters for which we want to find
# the best fit. 'p' is a sequence of parameters (array/list/tuple).
#-----------------------------------------------------------------------
A, mu, sigma, zerolev = p
return( A * numpy.exp(-(x-mu)*(x-mu)/(2.0*sigma*sigma)) + zerolev )
def my_residuals(p, data):
#-----------------------------------------------------------------------
# This function is the function called by the fit routine in kmpfit
# It returns a weighted residual. De fit routine calculates the
# square of these values.
#-----------------------------------------------------------------------
x, y, err = data
return (y-my_model(p,x)) / err
# Artificial data
N = 100
x = numpy.linspace(-5, 10, N)
truepars = [10.0, 5.0, 1.0, 0.0]
p0 = [9, 4.5, 0.8, 0]
y = my_model(truepars, x) + 0.3*numpy.random.randn(len(x))
err = 0.3*numpy.random.randn(N)
# The fit
fitobj = kmpfit.Fitter(residuals=my_residuals, data=(x, y, err))
try:
fitobj.fit(params0=p0)
except Exception, mes:
print "Something wrong with fit: ", mes
raise SystemExit
print "\n\n=============== Results of kmpfit ==================="
print "Params: ", fitobj.params
print "Errors from covariance matrix : ", fitobj.xerror
print "Uncertainties assuming reduced Chi^2=1: ", fitobj.stderr
print "Chi^2 min: ", fitobj.chi2_min
print "Reduced Chi^2: ", fitobj.rchi2_min
print "Iterations: ", fitobj.niter
print "Function ev: ", fitobj.nfev
print "Status: ", fitobj.status
print "Status Message:", fitobj.message
print "Covariance:\n", fitobj.covar
# Plot the result
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure()
frame = fig.add_subplot(1,1,1)
frame.errorbar(x, y, yerr=err, fmt='go', alpha=0.7, label="Noisy data")
frame.plot(x, my_model(truepars,x), 'r', label="True data")
frame.plot(x, my_model(fitobj.params,x), 'b', lw=2, label="Fit with kmpfit")
frame.set_xlabel("X")
frame.set_ylabel("Measurement data")
frame.set_title("Least-squares fit to noisy Gaussian data using KMPFIT",
fontsize=10)
leg = frame.legend(loc=2)
show() | aoeftiger/kmpfit | kmpfit/example2.py | Python | bsd-3-clause | 2,326 | [
"Gaussian"
] | a1555cdbecaf3bb45ad347fb49c30759838d1a2c57f405bbea35ce29dfcdd9f5 |
"""
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=C0111
# pylint: disable=W0621
from __future__ import absolute_import
from lettuce import world, step
from mock import patch
from nose.tools import assert_in # pylint: disable=E0611
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a very large course')
def make_staff_or_instructor_for_large_course(step, role):
make_large_course(step, role)
@patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0})
def make_large_course(step, role):
i_am_staff_or_instructor(step, role)
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument
## In summary: makes a test course, makes a new Staff or Instructor user
## (depending on `role`), and logs that user in to the course
# Store the role
assert_in(role, ['instructor', 'staff'])
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='999',
display_name='Test Course'
)
world.course_key = course.id
world.role = 'instructor'
# Log in as the an instructor or staff for the course
if role == 'instructor':
# Make & register an instructor for the course
world.instructor = InstructorFactory(course_key=world.course_key)
world.enroll_user(world.instructor, world.course_key)
world.log_in(
username=world.instructor.username,
password='test',
email=world.instructor.email,
name=world.instructor.profile.name
)
else:
world.role = 'staff'
# Make & register a staff member
world.staff = StaffFactory(course_key=world.course_key)
world.enroll_user(world.staff, world.course_key)
world.log_in(
username=world.staff.username,
password='test',
email=world.staff.email,
name=world.staff.profile.name
)
def go_to_section(section_name):
# section name should be one of
# course_info, membership, student_admin, data_download, analytics, send_email
world.visit(u'/courses/{}'.format(world.course_key))
world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key))
world.css_click('a[data-section="{0}"]'.format(section_name))
@step(u'I click "([^"]*)"')
def click_a_button(step, button): # pylint: disable=unused-argument
if button == "Generate Grade Report":
# Go to the data download section of the instructor dash
go_to_section("data_download")
# Click generate grade report button
world.css_click('input[name="calculate-grades-csv"]')
# Expect to see a message that grade report is being generated
expected_msg = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
world.wait_for_visible('#report-request-response')
assert_in(
expected_msg, world.css_text('#report-request-response'),
msg="Could not find grade report generation success message."
)
elif button == "Grading Configuration":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="dump-gradeconf"]')
elif button == "List enrolled students' profile information":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles"]')
elif button == "Download profile information as a CSV":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles-csv"]')
else:
raise ValueError("Unrecognized button option " + button)
@step(u'I visit the "([^"]*)" tab')
def click_a_button(step, tab_name): # pylint: disable=unused-argument
# course_info, membership, student_admin, data_download, analytics, send_email
tab_name_dict = {
'Course Info': 'course_info',
'Membership': 'membership',
'Student Admin': 'student_admin',
'Data Download': 'data_download',
'Analytics': 'analytics',
'Email': 'send_email',
}
go_to_section(tab_name_dict[tab_name])
| dsajkl/reqiop | lms/djangoapps/instructor/features/common.py | Python | agpl-3.0 | 4,537 | [
"VisIt"
] | 0533977eb322960acb3b86ae26d080b30ca9309e536a332809cc0dc3951b7fae |
import os
import sys
import numpy as np
import pupynere as netcdf
import csv
import math
import json
SAVE_TO_COUCH = False
SAVE_JSON = True
IN_FOLDER = "../netcdf/v4/"
if SAVE_TO_COUCH:
import couchdb
couch = couchdb.Server()
couch.delete('euporias')
db = couch.create('euporias')
# db = couch['euporias']
output = csv.DictWriter(open("../csv/globalstats.csv", "w"), ["cellID","lat", "lon", "rpss", "meanPrediction", "meanHistoric", "power", "lonSlice", "ocean"], delimiter="\t")
output.writeheader()
outputPredictions = csv.DictWriter(open("../csv/predictions.csv", "w"), ["cellID","memberID", "windSpeed"], delimiter="\t")
outputPredictions.writeheader()
outputHistoric = csv.DictWriter(open("../csv/historic.csv", "w"), ["cellID","year","windSpeed"], delimiter="\t")
outputHistoric.writeheader()
windfarms = csv.DictReader(open("../csv/windfarms.csv", "r"), delimiter="\t")
#
skillsFile = netcdf.netcdf_file(IN_FOLDER + 'WindMod1DJF1leadGlobalSkill.nc', 'r')
print "skills file"
print skillsFile.variables
#
oceanMaskFile = netcdf.netcdf_file(IN_FOLDER + 'land_sea_mask_512x256.nc', 'r')
print "oceanMaskFile"
print oceanMaskFile.variables
#
forecastFile = netcdf.netcdf_file(IN_FOLDER + 'WindMod1DJF1leadGlobalForecast.nc', 'r')
print "forecast file"
print forecastFile.variables.keys()
print forecastFile.variables["forecast"].shape
#
historyFile = netcdf.netcdf_file(IN_FOLDER + 'WindObsDJF1leadGlobal.nc', 'r')
print "history file"
print historyFile.variables.keys()
print historyFile.variables["observations"].shape
#
count = 0
# years = range(1981,2011)
cells = []
cellDict = {}
def round(x):
return float("%.2f" % x)
jsons = {}
skipped = 0
for (i,lat) in enumerate(skillsFile.variables["latitude"]):
print i, lat
for (j,lon) in enumerate(skillsFile.variables["longitude"]):
# print j, lon
if lon > 180:
lon -= 360
rpss = skillsFile.variables["RPSS"][i][j]
ocean = oceanMaskFile.variables["tos"][-1][i][j]
meanPrediction = 0
meanHistoric = 0
# print i,j
predictions = []
observations = []
for (k,forecast) in enumerate(forecastFile.variables["forecast"][j][i]):
outputPredictions.writerow({
"cellID": count,
"memberID": k,
"windSpeed": float(forecast)
})
predictions += [float(forecast)]
for (k,vals) in enumerate(historyFile.variables["observations"][j][i]):
for (l,x) in enumerate(vals):
outputHistoric.writerow({
"cellID": count,
"year": int(historyFile.variables["years"][l]),
"windSpeed": float(x)
})
observations += [float(x)]
id = str(count)
jsons[id] = {
"_id": id,
"observations": observations,
"predictions": predictions
}
cell = {
"cellID": count,
"lat": round(lat),
"lon": round(lon),
"rpss": round(rpss),
"meanPrediction": round(np.median(predictions)),
"meanHistoric": round(np.median(observations)),
"power": 0,
"ocean": ocean
}
cellDict[count] = cell
cells += [cell]
count += 1
print "counting farms"
try:
cellForFarm = json.load(open("../csv/cellForFarm.json", "r"))
except:
cellForFarm = {}
outputJoin = csv.DictWriter(open("../csv/windCellJoin.csv", "w"), ["cellID","ID"], delimiter="\t")
outputJoin.writeheader()
for i,w in enumerate(windfarms):
print "searching ", i
try:
lat = float(w['Latitude (WGS84)'])
lon = float(w['Longitude (WGS84)'])
closest = 0
closestDist = 100000000
for c in cells:
dist = math.pow(float(c["lat"])-lat,2)+math.pow(float(c["lon"])-lon,2)
if dist<closestDist:
closestDist = dist
closest = c
cellForFarm[w["ID"]] = closest["cellID"]
outputJoin.writerow({"cellID": closest["cellID"], "ID":w["ID"]})
except:
print "error", w
json.dump(cellForFarm, open("../csv/cellForFarm.json", "w"))
for i,w in enumerate(windfarms):
print "adding power ", i
try:
power = int(w["Total power (kW)"])
print power, cellDict[cellForFarm[w["ID"]]]
cellDict[cellForFarm[w["ID"]]]["power"] += power
except:
print "error", w["Total power (kW)"], w
print cells[0]
cells.sort(key = lambda x: (x["lon"],x["lat"]))
lonSlice = -1
currentLon = -9999
prunedCells = []
if SAVE_JSON:
print "saving json"
try:
os.mkdir("../json/")
except:
pass
for c in cells:
if c["power"] > 0 or c["rpss"]>0:
if (c["lon"] != currentLon):
currentLon = c["lon"]
lonSlice += 1
c["lonSlice"] = lonSlice
prunedCells += [c]
id = str(c["cellID"])
o = jsons[id]
if SAVE_TO_COUCH:
db.save(o)
if SAVE_JSON:
json.dump(o, open("../json/"+id+".json", "w"))
print "%s cells kept, out of %s" % (len(prunedCells), len(cells))
output.writerows(prunedCells) | MoritzStefaner/project-ukko-os | data/python/couchify.py | Python | apache-2.0 | 4,625 | [
"NetCDF"
] | f9b9a05a11fd1b915221401c01136b9efcacce3af87e2ca0380c9236155d1e1f |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#This module calculates statistics and saves it to a file
import numpy as np
from scipy import stats as scstats
from tkinter import font as tkfont
import tkinter as tk
import stat_functions as stat
import functions as fu
import model_parser
STANDARD_LENGTH=8
class output:
def __init__(self,ll,direction,main_msg):
self.ll=ll
self.main_msg=main_msg
self.direction=direction
self.panel=self.ll.panel
self.lags=self.panel.options.robustcov_lags_statistics.value[1]
self.n_variables=self.panel.args.n_args
self.incr=0
self.d={'names':np.array(self.panel.args.names_v),
'count':range(self.n_variables),
'args':self.ll.args.args_v}
self.update_after_direction(direction,0)
self.heading()
def update_after_direction(self,direction,its):
self.direction=direction
self.iterations=its
self.constraints_printout()
self.t_stats()
self.heading()
def update_after_linesearch(self,direction,ll,incr):
self.direction=direction
self.ll=ll
self.incr=incr
self.d['args']=ll.args.args_v
self.heading()
def statistics(self):
return statistics(self.ll)
def reg_table(self):
return reg_table_obj(self)
def t_stats(self):
d=self.d
direction=self.direction
panel=self.panel
T=len(d['names'])
if direction.H is None:
return
d['se_robust'],d['se_st']=sandwich(direction,self.lags)
d['se_robust_oposite'],d['se_st_oposite']=sandwich(direction,self.lags,oposite=True)
if not (d['se_st_oposite'] is None):
d['se_robust'][np.isnan(d['se_robust'])]=d['se_robust_oposite'][np.isnan(d['se_robust'])]
d['se_st'][np.isnan(d['se_st'])]=d['se_st_oposite'][np.isnan(d['se_st'])]
#d['se_robust_fullsize'],d['se_st_fullsize']=sandwich(direction,self.lags,resize=False)
no_nan=np.isnan(d['se_robust'])==False
valid=no_nan
valid[no_nan]=(d['se_robust'][no_nan]>0)
d['tstat']=np.array(T*[np.nan])
d['tsign']=np.array(T*[np.nan])
d['tstat'][valid]=d['args'][valid]/d['se_robust'][valid]
d['tsign'][valid]=(1-scstats.t.cdf(np.abs(d['tstat'][valid]),panel.df))#Two sided tests
d['sign_codes']=get_sign_codes(d['tsign'])
def heading(self):
CI=self.direction.CI
n_CI=len(self.direction.mc_problems)
s=("LL:\t"+str(self.ll.LL)+' ').ljust(23)
if not self.incr is None:
s+=("\tIncrement: "+ str(self.incr)).ljust(17)+" "
else:
s+=str(" ").ljust(19)
if not self.iterations is None:
s+=f"\tIteration: {str(self.iterations).ljust(7)}"
if hasattr(self.direction,'HG_ratio'):
s+=f"\tSingularity problems: {str(self.direction.singularity_problems).ljust(7)}"
instr=''
if not self.panel.input.Z_names is None:
instr=', '.join(self.panel.input.Z_names[1:])
instr+="\t"+self.main_msg
s+=f"\nDependent: {self.panel.input.Y_names[0]}"
n,T,k=self.panel.X.shape
s+=f"\tPanel: {self.panel.NT_before_loss} observations,{n} groups and {T} dates"
if len(instr):
s+=f"\t\tInstruments: {instr}"
s+=f"\nMax condition index: {np.round(self.direction.CI)}\t ({n_CI} caseses where high CI was associated with more than one variable)\n"
self.heading_str=s
def constraints_printout(self):
panel=self.panel
direction=self.direction
constr=direction.constr
weak_mc_dict=direction.weak_mc_dict
d=self.d
if not direction.dx_norm is None:
d['dx_norm']=direction.dx_norm
T=len(d['names'])
d['set_to'],d['assco'],d['cause'],d['multicoll']=['']*T,['']*T,['']*T,['']*T
if constr is None:
return
c=constr.fixed
for i in c:
d['set_to'][i]=c[i].value_str
d['assco'][i]=c[i].assco_name
d['cause'][i]=c[i].cause
c=constr.intervals
for i in c:
if not c[i].intervalbound is None:
d['set_to'][i]=c[i].intervalbound
d['assco'][i]='NA'
d['cause'][i]=c[i].cause
for i in weak_mc_dict:#adding associates of non-severe multicollinearity
d['multicoll'][i]='|'
d['assco'][i]=panel.args.names_v[weak_mc_dict[i][0]]
class reg_table_obj(dict):
def __init__(self,output):
dict.__init__(self)
self.d=output.d
self.Y_names=output.ll.panel.input.Y_names
self.args=output.ll.args.dict_string
self.n_variables=output.n_variables
self.heading=output.heading_str
self.footer=f"\n\nSignificance codes: '=0.1, *=0.05, **=0.01, ***=0.001, |=collinear\n\n{output.ll.err_msg}"
def table(self,n_digits,brackets,fmt,stacked, show_direction, show_constraints):
include_cols,llength=self.get_cols(stacked, show_direction, show_constraints)
if fmt=='INTERNAL':
self.X=None
return str(self.args),None
self.include_cols=include_cols
self.n_cols=len(include_cols)
for a, l,is_string,name,neg,just,sep,default_digits in pr:
self[a]=column(self.d,a,l, is_string, name, neg, just, sep, default_digits,self.n_variables)
self.X=self.output_matrix(n_digits,brackets)
s=format_table(self.X, include_cols,fmt,f'Regression on {self.Y_names[0]}',self.heading,self.footer)
return s,llength
def output_matrix(self,digits,brackets):
structured=False
for i in range(self.n_cols):
if type(self.include_cols[i])==list:
structured=True
break
if structured:
return self.output_matrix_structured(digits, brackets)
else:
return self.output_matrix_flat(digits, brackets)
def output_matrix_structured(self,digits,brackets):
X=[['']*self.n_cols for i in range(3*(self.n_variables+1)-1)]
for i in range(self.n_cols):
a=self.include_cols[i]
if type(a)==list:
h=self[a[0]].name.replace(':',' ')
if brackets=='[':
X[0][i]=f"{h}[{self[a[1]].name}]:"
elif brackets=='(':
X[0][i]=f"{h}({self[a[1]].name}):"
else:
X[0][i]=f"{h}/{self[a[1]].name}:"
v=[self[a[j]].values(digits) for j in range(3)]
for j in range(self.n_variables):
X[(j+1)*3-1][i]=v[0][j]
if brackets=='[':
X[(j+1)*3][i]=f"[{v[1][j]}]{v[2][j]}"
elif brackets=='(':
X[(j+1)*3][i]=f"({v[1][j]}){v[2][j]}"
else:
X[(j+1)*3][i]=f"{v[1][j]}{v[2][j]}"
else:
X[0][i]=self[a].name
v=self[a].values(digits)
for j in range(self.n_variables):
X[(j+1)*3-1][i]=v[j]
return X
def output_matrix_flat(self,digits,brackets):
X=[['']*self.n_cols for i in range(self.n_variables+1)]
for i in range(self.n_cols):
a=self.include_cols[i]
X[0][i]=self[a].name
v=self[a].values(digits)
for j in range(self.n_variables):
X[j+1][i]=v[j]
return X
def get_cols(self,stacked,
show_direction,
show_constraints):
"prints a single regression"
dx_col=[]
llength=9
if show_direction:
dx_col=['dx_norm']
else:
llength-=1
mcoll_col=[]
if show_constraints:
mcoll_col=[ 'multicoll','assco','set_to', 'cause']
else:
llength-=2
if stacked:
cols=['count','names', ['args','se_robust', 'sign_codes']] + dx_col + ['tstat', 'tsign'] + mcoll_col
else:
cols=['count','names', 'args','se_robust', 'sign_codes'] + dx_col + ['tstat', 'tsign'] + mcoll_col
return cols,llength
class column:
def __init__(self,d,a,l,is_string,name,neg,just,sep,default_digits,n_variables):
self.length=l
self.is_string=is_string
self.name=name
self.default_digits=default_digits
self.neg_allowed=neg
self.justification=just
self.tab_sep=sep
self.n_variables=n_variables
if a in d:
self.exists=True
self.input=d[a]
else:
self.exists=False
self.input=[' - ']*self.n_variables
def values(self,digits):
try:
if self.length is None:
if digits=='SCI':
return self.input
else:
return np.round(self.input,digits)
return np.round(self.input,self.length)
except:
if self.length is None:
return self.input
else:
return np.array([str(i).ljust(self.length)[:self.length] for i in self.input])
def get_preferences(output_gui):
try:
pref=output_gui.window.right_tabs.preferences.options
return pref
except:
return
def sandwich(direction,lags,oposite=False,resize=True):
panel=direction.panel
H,G,idx=reduce_size(direction,oposite,resize)
lags=lags+panel.lost_obs
try:
hessin=np.linalg.inv(-H)
except Exception as e:
print(e)
return None,None
se_robust,se,V=stat.robust_se(panel,lags,hessin,G)
se_robust,se,V=expand_x(se_robust, idx),expand_x(se, idx),expand_x(V, idx,True)
return se_robust,se
def reduce_size(direction,oposite,resize):
H=direction.H
G=direction.G
if (G is None) or (H is None):
return
m=len(H)
if not resize:
return H,G,np.ones(m,dtype=bool)
weak_mc_dict=direction.weak_mc_dict.keys()
constr=list(direction.constr.fixed.keys())
if oposite:
weak_mc_dict=[direction.weak_mc_dict[i][0] for i in direction.weak_mc_dict]
constr=[]
for i in direction.constr.fixed:
if not direction.constr.fixed[i].assco_ix is None:
constr.append(direction.constr.fixed[i].assco_ix)
for i in weak_mc_dict:
if not i in constr:
constr.append(i)
idx=np.ones(m,dtype=bool)
if len(constr)>0:#removing fixed constraints from the matrix
idx[constr]=False
H=H[idx][:,idx]
G=G[:,:,idx]
return H,G,idx
def expand_x(x,idx,matrix=False):
m=len(idx)
if matrix:
x_full=np.zeros((m,m))
x_full[:]=np.nan
ref=np.arange(m)[idx]
for i in range(len(x)):
try:
x_full[ref[i],idx]=x[i]
x_full[idx,ref[i]]=x[i]
except:
a=0
else:
x_full=np.zeros(m)
x_full[:]=np.nan
x_full[idx]=x
return x_full
def get_sign_codes(tsign):
sc=[]
for i in tsign:
if np.isnan(i):
sc.append(i)
elif i<0.001:
sc.append('***')
elif i<0.01:
sc.append('** ')
elif i<0.05:
sc.append('* ')
elif i<0.1:
sc.append("' ")
else:
sc.append('')
sc=np.array(sc,dtype='<U3')
return sc
def remove_illegal_signs(name):
illegals=['#', '<', '$', '+',
'%', '>', '!', '`',
'&', '*', '‘', '|',
'{', '?', '“', '=',
'}', '/', ':',
'\\', 'b']
for i in illegals:
if i in name:
name=name.replace(i,'_')
return name
class statistics:
def __init__(self,ll):
ll.standardize()
panel=ll.panel
self.df=panel.df
self.N,self.T,self.k=panel.X.shape
self.Rsq_st, self.Rsqadj_st, self.LL_ratio,self.LL_ratio_OLS=stat.goodness_of_fit(ll,True)
self.Rsq, self.Rsqadj, self.LL_ratio,self.LL_ratio_OLS=stat.goodness_of_fit(ll,False)
self.no_ac_prob,self.rhos,self.RSqAC=stat.breusch_godfrey_test(panel,ll,10)
self.DW=stat.DurbinWatson(panel,ll)
self.norm_prob=stat.JB_normality_test(ll.e_norm,panel)
self.ADF_stat,self.c1,self.c5=stat.adf_test(panel,ll,10)
self.df_str=self.gen_df_str(panel)
self.instruments=panel.input.Z_names[1:]
self.pqdkm=panel.pqdkm
def gen_df_str(self,panel):
summary=f"""
SAMPLE SIZE SUMMARY:
\tOriginal sample size\t\t:\t{orig_size}
\tSample size after filtering\t\t:\t{panel.NT_before_loss}
\tDegrees of freedom\t\t:\t{panel.df}
\tNumber of IDs\t\t:\t{self.N:,}
\tNumber of dates (maximum)\t\t:\t{self.T}\n"""
group_rmv=f"""
REMOVED GROUPS BECAUSE OF TOO FEW OBSERVATIONS:
\tObservations per group lost because of
\tA)\tARIMA/GARCH\t:\t{panel.lost_obs}
\tB)\tMin # of obs in user preferences:\t:\t{panel.options.min_group_df.value}
\tMin # observations required (A+B)\t\t:\t{panel.lost_obs+panel.options.min_group_df.value}\n
\tGroups removed
\tA)\tTotal # of groups\t:\t{len(panel.idincl)}
\tB)\t# of groups removed\t:\t{sum(panel.idincl==False)}
\t# of groups remaining (A-B)\t\t:\t{sum(panel.idincl==True)}
\t# of observations removed\t\t:\t{panel.input.X.shape[0]-panel.NT_before_loss}\n"""
s=f"""
{summary}
{group_rmv}
DEGREES OF FREEDOM:
\tA)\tSample size\t:\t{panel.NT_before_loss}
\tB)\tObservations lost to
\t\tGARCH/ARIMA\t:\t{panel.tot_lost_obs}
\tRandom/Fixed Effects in
\tC)\tMean process\t:\t{panel.number_of_RE_coef}
\tD)\tVariance process\t:\t{panel.number_of_RE_coef_in_variance}
\tE)\tNumber of coefficients in
\t\tRegression\t:\t{panel.args.n_args:,}
\tDegrees of freedom (A-B-C-D-E)\t\t:\t{panel.df}\n\n"""
return s
def gen_mod_fit(self,n_digits):
return f"""
\tLL-ratio\t\t:\t{round(self.LL_ratio,n_digits)}
\tR-squared (from observed data)\t\t:\t{round(self.Rsq*100,2)}%
\tAdjusted R-squared (from observed data)\t\t:\t{round(self.Rsqadj*100,2)}%
\tR-squared (from normalized data)\t\t:\t{round(self.Rsq_st*100,2)}%
\tAdjusted R-squared (from normalized data)\t\t:\t{round(self.Rsqadj_st*100,2)}%
\t("Normalized data" means that the data is adjusted with the estimated ARIMA-GARCH parameters and random/fixed effects.)
\tDurbin-Watson statistic:\t\t:\t{round(self.DW,2)}
\tBreusch-Godfrey test\t\t:\t{round(self.no_ac_prob*100,n_digits)}% \t(significance, probability of no auto correlation)
\tJarque–Bera test for normality\t\t:\t{round(self.norm_prob*100,n_digits)}% \t(significance, probability of normality)\n
"""
def adf_str(self,n_digits):
if not self.ADF_stat=='NA':
if self.ADF_stat<self.c1:
self.ADF_res="Unit root rejected at 1%"
elif self.ADF_stat<self.c5:
self.ADF_res="Unit root rejected at 5%"
else:
self.ADF_res="Unit root not rejected"
adf=f"""
\tAugmented Dicky-Fuller (ADF)
\t\tTest statistic\t:\t{round(self.ADF_stat,n_digits)}
\t\t1% critical value\t:\t{round(self.c1,n_digits)}
\t\t5% critical value\t:\t{round(self.c5,n_digits)}
\t\tResult\t:\t\t{self.ADF_res}
"""
else:
adf="Unable to calculate ADF"
if self.df<1:
s+="""
\tWARNING: All your degrees of freedom (df) has been consumed, so statistics cannot be computed.
\tyou can increase df by for example turning off random/fixed effects """
return adf
def get_tab_stops(X,f):
f = tkfont.Font(font=f)
m_len = f.measure("m")
counter=2*m_len
tabs=[f"{counter}",tk.NUMERIC]
r,c=np.array(X).shape
for i in range(c):
t=1
num_max=0
for j in range(r):
s=str(X[j][i])
if '.' in s:
a=s.split('.')
num_max=max((len(a[0]),num_max))
t=max((f.measure(X[j][i])+(num_max+2)*m_len,t))
counter+=t
tabs.extend([f"{counter}",tk.NUMERIC])
return tabs
l=STANDARD_LENGTH
#python variable name, length, is string, display name, neg. values, justification next tab space round digits (None=no rounding,-1=set by user)
pr=[
['count', 2, False, '', False, 'right', 2, None],
['names', None, True, 'Variable names:', False, 'right', 2, None],
['args', None, False, 'Coef:', True, 'right', 2, -1],
['se_robust', None, False, 'rob.SE', True, 'right', 3, -1],
['sign_codes', 5, True, '', False, 'left', 2, -1],
['dx_norm', None, False, 'direction:', True, 'right', 2, None],
['tstat', 2, False, 't-stat.:', True, 'right', 2, 2],
['tsign', None, False, 'p-value:', False, 'right', 2, 3],
['multicoll', 1, True, '', False, 'left', 2, None],
['assco', 20, True, 'collinear with', False, 'center', 2, None],
['set_to', 6, True, 'set to', False, 'center', 2, None],
['cause', 50, True, 'cause', False, 'right', 2, None]]
def format_table(X,cols,fmt,heading,head,tail):
if fmt=='NORMAL':
return head+format_normal(X,[1],cols)+tail
if fmt=='LATEX':
return head+format_latex(X,cols,heading)+tail
if fmt=='HTML':
return format_html(X,cols,heading,head)+tail
def format_normal(X,add_rows=[],cols=[]):
p=''
if 'multicoll' in cols:
constr_pos=cols.index('multicoll')+1
p="\t"*constr_pos+"constraints:".center(38)
p+="\n"
for i in range(len(X)):
p+='\n'*(i in add_rows)
p+='\n'
for j in range(len(X[0])):
p+=f'\t{X[i][j]}'
return p
def format_latex(X,cols,heading):
X=np.array(X,dtype='U128')
n,k=X.shape
p="""
\\begin{table}[ht]
\\caption{%s}
\\centering
\\begin{tabular}{""" %(heading,)
p+=' c'*k+' }\n\\hline'
p+='\t'+' &\t'.join(X[0])+'\\\\\n\\hline\\hline'
for i in range(1,len(X)):
p+='\t'+ ' &\t'.join(X[i])+'\\\\\n'
p+="""
\hline %inserts single line
\end{tabular}
\label{table:nonlin} % is used to refer this table in the text
\end{table}"""
return p
def format_html(X,cols,heading,head):
X=np.array(X,dtype='U128')
n,k=X.shape
head=head.replace('\n','<br>')
head=head.replace('\t',' '*4)
p=f"""
<h1>{heading}</h1>
<p>{head}</p>
<p><table>"""
p+='\t</tr><th>'+'\t</th><th>'.join(X[0])+'</th></tr>\n'
for i in range(1,len(X)):
p+='\t</tr><td>'+'\t</td><td>'.join(X[i])+'</td></tr>\n'
p+='</table></p>'
return p
alphabet='abcdefghijklmnopqrstuvwxyz'
class join_table(dict):
"""Creates a joint table of several regressions with columns of the join_table_column class.
See join_table_column for data handling."""
def __init__(self,args,varnames=[]):
dict.__init__(self)
self.names_category_list=list([list(i) for i in args.names_category_list])#making a copy
k=0
for i in varnames:
if i in self.names_category_list[0]:
k=self.names_category_list[0].index(i)+1
else:
self.names_category_list[0].insert(k,i)
k+=1
self.names_v=[itm for s in self.names_category_list for itm in s]#flattening
def update(self,ll,stats,desc):
if not desc in self:
for i in range(len(ll.args.names_category_list)):
for j in ll.args.names_category_list[i]:
if not j in self.names_category_list[i]:
self.names_category_list[i].append(j)
self.names_v=[itm for s in self.names_category_list for itm in s]#flattening
self[desc]=join_table_column(stats, ll)
def make_table(self, stacked, brackets,digits,caption):
keys=list(self.keys())
k=len(keys)
n=len(self.names_v)
if stacked:
X=[['' for j in range(2+k)] for i in range(4+3*n)]
for i in range(n):
X[3*i+1][1]=self.names_v[i]
X[3*i+1][0]=i
X[1+3*n][1]='Log likelihood'
X[2+3*n][1]='Degrees of freedom'
X[3+3*n][1]='Adjusted R-squared'
else:
X=[['' for j in range(2+2*k)] for i in range(4+n)]
for i in range(n):
X[i+1][1]=self.names_v[i]
X[i+1][0]=i
X[1+n][1]='Log likelihood'
X[2+n][1]='Degrees of freedom'
X[3+n][1]='Adjusted R-squared'
for i in range(k):
self.make_column(i,keys[i],X,stacked, brackets,digits,caption)
s=format_normal(X,[1,(1+stacked*2)*n+1,(1+stacked*2)*n+4])
s+=f"\n\nSignificance codes: '=0.1, *=0.05, **=0.01, ***=0.001, |=collinear\n"
max_mod=0
models=[]
for i in range(len(keys)):
key=self[keys[i]]
p,q,d,k,m=key.pqdkm
models.append(f"\n{alphabet[i]}: {keys[i]}")
max_mod=max(len(models[i]),max_mod)
for i in range(len(keys)):
s+=models[i].ljust(max_mod+2)
if len(key.instruments):
s+=f"\tInstruments: {key.instruments}"
s+=f"\tARIMA({p},{d},{q})-GARCH({k},{m})"
return s,X
def make_column(self,col,key,X,stacked, brackets,digits,caption):
if not 'se_robust' in self[key].stats:
return
if caption=='JOINED LONG':
X[0][(2-stacked)*col+2]+=f"{self[key].Y_name} ({alphabet[col]})"
else:
X[0][(2-stacked)*col+2]=alphabet[col]
n=len(self.names_v)
m=len(self[key].args.names_v)
ix=[self.names_v.index(i) for i in self[key].args.names_v]
se=np.round(self[key].stats['se_robust'],digits)
sgn=self[key].stats['sign_codes']
args=np.round(self[key].args.args_v,digits)
if brackets=='[':
se_sgn=[f"[{se[i]}]{sgn[i]}" for i in range(m)]
elif brackets=='(':
se_sgn=[f"({se[i]}){sgn[i]}" for i in range(m)]
else:
se_sgn=[f"{se[i]}{sgn[i]}" for i in range(m)]
if stacked:
for i in range(m):
X[3*ix[i]+1][col+2]=args[i]
X[3*ix[i]+2][col+2]=se_sgn[i]
X[1+3*n][col+2]=fu.round_sign_digits(self[key].LL,5,1)
X[2+3*n][col+2]=self[key].df
X[3+3*n][col+2]=f"{round(self[key].Rsqadj*100,1)}%"
else:
for i in range(m):
X[ix[i]+1][col*2+2]=args[i]
X[ix[i]+1][col*2+3]=se_sgn[i]
X[1+n][col*2+3]=fu.round_sign_digits(self[key].LL,5,1)
X[2+n][col*2+3]=self[key].df
X[3+n][col*2+3]=f"{round(self[key].Rsqadj*100,1)}%"
class join_table_column:
	"""Snapshot of a single estimated model's results, used as one column
	when several regressions are joined into a combined summary table.

	stats: the model's statistics container (must hold the entries the
	       table writer reads, e.g. 'se_robust' and 'sign_codes')
	ll:    the fitted log-likelihood object carrying panel, args and LL
	"""
	def __init__(self,stats,ll):
		panel=ll.panel
		self.stats=stats
		self.LL=ll.LL
		self.df=panel.df
		self.args=ll.args
		# Goodness-of-fit bundle: R-squared, adjusted R-squared and the
		# two log-likelihood ratios.
		gof=stat.goodness_of_fit(ll,True)
		self.Rsq,self.Rsqadj,self.LL_ratio,self.LL_ratio_OLS=gof
		# Drops the first entry of Z_names — presumably a constant or the
		# regressand rather than an instrument; TODO confirm with panel.input.
		self.instruments=panel.input.Z_names[1:]
		self.pqdkm=panel.pqdkm
		self.Y_name=panel.input.Y_names
| espensirnes/paneltime | build/lib.win-amd64-3.7/paneltime/output.py | Python | gpl-3.0 | 20,054 | [
"ADF"
] | 3668a77850dd8e1b13c83304a1f5bc67d4f2b2299c338eb7e137617cbddb65ae |
# encoding: utf-8
# Mapping of recruitment-site industry codes (three-digit strings) to
# their Chinese display names. Insertion order is preserved exactly as in
# the original literal, since dict iteration order is observable.
industry_list = {
    '150': u'基金/证券/期货/投资',
    '210': u'家具/家电',
    '200': u'服装服饰/纺织/皮革',
    '090': u'房地产服务(物业管理/地产经纪)',
    '310': u'石油/石化/化工',
    '190': u'食品/饮料/烟酒/日化',
    '010': u'计算机软件',
    '130': u'银行',
    '110': u'中介服务',
    '030': u'IT服务/系统集成',
    '050': u'电子技术/半导体/集成电路',
    '250': u'交通/物流/运输',
    '170': u'影视/媒体/艺术/文化/出版',
    '070': u'广告/公关/市场推广/会展',
    '480': u'航空/航天',
    '230': u'旅游/酒店/餐饮服务/生活服务',
    '320': u'采掘/冶炼/矿产',
    '450': u'检测/认证',
    '180': u'印刷/包装/造纸',
    '340': u'仪器/仪表/工业自动化/电气',
    '400': u'其他',
    '420': u'网络游戏',
    '040': u'互联网/移动互联网/电子商务',
    '500': u'信托/担保/拍卖/典当',
    '060': u'通信(设备/运营/增值)',
    '440': u'外包服务',
    '140': u'保险',
    '300': u'环保',
    '080': u'房地产开发/建筑/建材/工程',
    '460': u'奢侈品/收藏品',
    '290': u'医疗设备/器械',
    '330': u'能源(电力/水利)',
    '120': u'专业服务(咨询/财会/法律/翻译等)',
    '280': u'医疗/保健/美容/卫生服务',
    '260': u'娱乐/休闲/体育',
    '240': u'百货/批发/零售',
    '510': u'租赁服务',
    '100': u'规划/设计/装潢',
    '160': u'贸易/进出口',
    '490': u'新能源',
    '220': u'办公用品及设备',
    '380': u'教育/培训/学术/科研/院校',
    '270': u'制药/生物工程',
    '470': u'工艺品/珠宝/玩具',
    '390': u'政府/公共事业/非营利机构',
    '410': u'农/林/牧/渔',
    '360': u'机械制造/机电/重工',
    '350': u'汽车/摩托车',
    '370': u'原材料及加工',
    '430': u'会计/审计',
    '020': u'计算机硬件/网络设备'
}
job_list = {
'160190' : {
'cn' :u'物流总监',
'en' :'Logistics Director'
},
'220090' : {
'cn' :u'CNC工程师',
'en' :'CNC Engineer'
},
'310010' : {
'cn' :u'志愿者',
'en' :'Volunteer'
},
'140148' : {
'cn' :u'固定收益业务',
'en' :'Fixed Income'
},
'140120' : {
'cn' :u'银行卡/电子银行/新业务开拓',
'en' :'Bank card/Electronic Banking/New Business'
},
'140060' : {
'cn' :u'客户经理/主管',
'en' :'Account Manager/Supervisor'
},
'140144' : {
'cn' :u'投资/理财顾问',
'en' :'Investment/Financial Management Advisor'
},
'140145' : {
'cn' :u'金融产品经理',
'en' :'Financial Product Manager'
},
'140146' : {
'cn' :u'基金管理',
'en' :'Fund Management'
},
'140147' : {
'cn' :u'行业研究',
'en' :'Industry Research'
},
'140141' : {
'cn' :u'融资总监',
'en' :'Treasury Director'
},
'140142' : {
'cn' :u'融资专员/助理',
'en' :'Treasury Executive/Assistant'
},
'140143' : {
'cn' :u'证券交易员',
'en' :'Securities Trader'
},
'220005' : {
'cn' :u'机械设备经理',
'en' :'Mechanical Equipment Manager'
},
'260030' : {
'cn' :u'大学教师/教授',
'en' :'Professor'
},
'170200' : {
'cn' :u'合同管理',
'en' :'Contract Management'
},
'170201' : {
'cn' :u'建筑工程安全管理',
'en' :'Construction Security Management'
},
'170202' : {
'cn' :u'智能大厦/综合布线/安防/弱电',
'en' :'Intelligent Building/Integrated Wiring/Defence&Security/Weak Current'
},
'170203' : {
'cn' :u'资料员',
'en' :'Data Management Specialist'
},
'060040' : {
'cn' :u'公关经理/主管',
'en' :'Public Relations Manager/Supervisor'
},
'170205' : {
'cn' :u'现场/施工管理',
'en' :'Construction Management'
},
'170206' : {
'cn' :u'钢结构工程师',
'en' :'Steel Structure Engineer'
},
'170207' : {
'cn' :u'爆破工程师',
'en' :'Blast Engineer'
},
'170208' : {
'cn' :u'空调工程师',
'en' :'Air Conditioner Engineer'
},
'170209' : {
'cn' :u'安装造价工程师',
'en' :'Installation Cost Engineer'
},
'220160' : {
'cn' :u'飞机设计与制造',
'en' :'Aircraft Design & Manufacture'
},
'360328' : {
'cn' :u'IT支持',
'en' :'IT Surpport'
},
'360329' : {
'cn' :u'运维总监',
'en' :'OPS Director'
},
'360240' : {
'cn' :u'三维/3D设计/制作',
'en' :'Three-dimensional/3D Design/Production'
},
'360324' : {
'cn' :u'自动化测试',
'en' :'Automation Testing Engineer'
},
'360325' : {
'cn' :u'功能测试',
'en' :'Functional Testing Engineer'
},
'360326' : {
'cn' :u'性能测试',
'en' :'Performance Testing Engineer'
},
'360327' : {
'cn' :u'测试开发',
'en' :'Test Development Engineer'
},
'360320' : {
'cn' :u'BI工程师',
'en' :'Business Intelligence Engineer'
},
'360321' : {
'cn' :u'架构师',
'en' :'Architect'
},
'360322' : {
'cn' :u'测试经理/主管',
'en' :'Testing Manager/Supervisor'
},
'360323' : {
'cn' :u'测试工程师',
'en' :'Testing Engineer'
},
'110310' : {
'cn' :u'射频工程师',
'en' :'RF Engineer'
},
'160120' : {
'cn' :u'仓库经理/主管',
'en' :'Warehouse Manager/Supervisor'
},
'290040' : {
'cn' :u'药品生产/质量管理',
'en' :'Drug Manufacturing/Quality Management'
},
'090080' : {
'cn' :u'财务分析经理/主管',
'en' :'Financial Analysis Manager/Supervisor'
},
'110250' : {
'cn' :u'移动通信工程师',
'en' :'Mobile Communication Engineer'
},
'120040' : {
'cn' :u'输电线路工程师',
'en' :'Transmission Line Engineer'
},
'030084' : {
'cn' :u'会员/VIP管理',
'en' :'VIP Member Management'
},
'190280' : {
'cn' :u'签证专员',
'en' :'Visa Specialist'
},
'250100' : {
'cn' :u'影视策划/制作/发行',
'en' :'Film Planning/Production/Distribution'
},
'170050' : {
'cn' :u'工程监理',
'en' :'Project Management'
},
'280167' : {
'cn' :u'专科医生',
'en' :'Doctor Specialist'
},
'280166' : {
'cn' :u'外科医生',
'en' :'Doctor Surgeial'
},
'280165' : {
'cn' :u'营养师',
'en' :'Dietitian'
},
'280164' : {
'cn' :u'心理医生',
'en' :'Psychologist/Psychiatrist'
},
'280163' : {
'cn' :u'麻醉医生',
'en' :'Anesthesiologist'
},
'280162' : {
'cn' :u'护士/护理人员',
'en' :'Nurse/Medical Assistant'
},
'280161' : {
'cn' :u'内科医生',
'en' :'Doctor Internal Medicine'
},
'140090' : {
'cn' :u'信贷管理/资信评估/分析',
'en' :'Loan/Credit Officer、Assets/Credit Valuation/Analyst'
},
'280169' : {
'cn' :u'中医科医生',
'en' :'Chinese Medicine Practioners'
},
'140149' : {
'cn' :u'零售客户服务',
'en' :'Retail Banking'
},
'591' : {
'cn' :u'翻译',
'en' :'Translator'
},
'593' : {
'cn' :u'酒店/餐饮/娱乐/生活服务',
'en' :'Hospitality/Restaurant/Entertainmen/Life Service'
},
'360020' : {
'cn' :u'运营经理/主管',
'en' :'Operations Manager/Supervisor'
},
'595' : {
'cn' :u'影视/媒体',
'en' :'Film Entertainment/Media'
},
'594' : {
'cn' :u'广告/会展',
'en' :'Advertising/Exhibition'
},
'597' : {
'cn' :u'教育/培训',
'en' :'Education/Training'
},
'596' : {
'cn' :u'艺术/设计',
'en' :'Art/Design'
},
'360180' : {
'cn' :u'游戏策划师',
'en' :'Game Planner'
},
'598' : {
'cn' :u'实习生/培训生/储备干部',
'en' :'Intern/Trainee/Associate Trainee'
},
'050080' : {
'cn' :u'体系工程师/审核员',
'en' :'Systems Engineer/Auditor'
},
'010101' : {
'cn' :u'投资者关系',
'en' :'Investor Relations'
},
'210170' : {
'cn' :u'工艺/制程工程师(PE)',
'en' :'PE Engineer'
},
'410009' : {
'cn' :u'大堂经理',
'en' :'Hall Manager'
},
'410008' : {
'cn' :u'综合业务专员/助理',
'en' :'Integrated Service Executive/Assistant'
},
'140050' : {
'cn' :u'融资经理/主管',
'en' :'Treasury Manager/Supervisor'
},
'410003' : {
'cn' :u'公司业务部门经理/主管',
'en' :'Corporate Banking Manager'
},
'410002' : {
'cn' :u'客户代表',
'en' :'Account Representative'
},
'410001' : {
'cn' :u'银行经理/主任',
'en' :'Bank Manager/Supervisor'
},
'260009' : {
'cn' :u'校长',
'en' :'School Principal'
},
'410007' : {
'cn' :u'综合业务经理/主管',
'en' :'Integrated Service Manager/Supervisor'
},
'410006' : {
'cn' :u'个人业务客户经理',
'en' :'Personal Banking Account Manager'
},
'410005' : {
'cn' :u'个人业务部门经理/主管',
'en' :'Personal Banking Manager/Supervisor'
},
'410004' : {
'cn' :u'公司业务客户经理',
'en' :'Corporate Banking Account Manager'
},
'410017' : {
'cn' :u'金融同业',
'en' :'Interbank'
},
'090170' : {
'cn' :u'税务专员/助理',
'en' :'Tax Executive/Assistant'
},
'080020' : {
'cn' :u'行政经理/主管/办公室主任',
'en' :'Administration Manager/Supervisor'
},
'080021' : {
'cn' :u'行政专员/助理',
'en' :'Administration Specialist/Assistant'
},
'170110' : {
'cn' :u'道路/桥梁/隧道工程技术',
'en' :'Road/Bridge/Tunnel Technology'
},
'250190' : {
'cn' :u'电话采编',
'en' :'Telephone Reporter'
},
'292010' : {
'cn' :u'航空乘务',
'en' :'Airline Crew'
},
'220230' : {
'cn' :u'工业工程师(IE)',
'en' :'IE Engineer'
},
'250210' : {
'cn' :u'印刷排版/制版',
'en' :'Layout Designer'
},
'290096' : {
'cn' :u'化学分析测试员',
'en' :'Chemical Analyst'
},
'599' : {
'cn' :u'交通/运输',
'en' :'Traffic Service'
},
'190020' : {
'cn' :u'餐饮/娱乐管理',
'en' :'Restaurant & Food / Entertainment Services Management'
},
'290094' : {
'cn' :u'医疗器械研发',
'en' :'Medical Equipment R&D'
},
'140080' : {
'cn' :u'风险管理/控制',
'en' :'Risk Management/Control'
},
'160220' : {
'cn' :u'供应链专员/助理',
'en' :'Supply Chain Specialist/Assistant'
},
'070162' : {
'cn' :u'猎头顾问/助理',
'en' :'Headhunter/Assistant'
},
'070161' : {
'cn' :u'企业培训师/讲师',
'en' :'Staff Trainer'
},
'360050' : {
'cn' :u'产品专员/助理',
'en' :'Product Specialist/Assistant'
},
'210080' : {
'cn' :u'产品管理',
'en' :'Product Management'
},
'020020' : {
'cn' :u'销售经理/主管',
'en' :'Sales Manager/Supervisor'
},
'470014' : {
'cn' :u'广告/会展项目管理',
'en' :'Advertising/Exhibition Project Management'
},
'020025' : {
'cn' :u'区域销售经理/主管',
'en' :'Regional Sales Manager/Supervisor'
},
'290099' : {
'cn' :u'医药招商专员/助理',
'en' :'Pharmaceutical Business Development Specialist/Assistant'
},
'250070' : {
'cn' :u'校对/录入',
'en' :'Proofreading/Copy Entry'
},
'130010' : {
'cn' :u'企管顾问/专业顾问/策划师',
'en' :'Business Management/Consultant/Adviser/Professional Planner'
},
'310020' : {
'cn' :u'实习生',
'en' :'Intern'
},
'450003' : {
'cn' :u'买手',
'en' :'Buyer'
},
'180060' : {
'cn' :u'西班牙语翻译',
'en' :'Spanish Translator'
},
'070040' : {
'cn' :u'人力资源专员/助理',
'en' :'HR Specialist/Assistant'
},
'630020' : {
'cn' :u'运维经理/主管',
'en' :'OPS Manager/Supervisor'
},
'270060' : {
'cn' :u'律师助理',
'en' :'Paralegal'
},
'170180' : {
'cn' :u'施工员',
'en' :'Construction Worker'
},
'640030' : {
'cn' :u'拍卖师',
'en' :'Auctioneer'
},
'090100' : {
'cn' :u'成本经理/主管',
'en' :'Cost Accounting Manager/Supervisor'
},
'170060' : {
'cn' :u'给排水/制冷暖通',
'en' :'Drainage / refrigeration HVAC'
},
'640010' : {
'cn' :u'珠宝/收藏品鉴定',
'en' :'Jewellery /Collection Appraiser'
},
'060150' : {
'cn' :u'广告创意/设计经理/主管',
'en' :'Advertising Creative Manager/Supervisor'
},
'190230' : {
'cn' :u'宾客服务经理',
'en' :'Guest Service Manager'
},
'160095' : {
'cn' :u'供应链经理/主管',
'en' :'Supply Chain Executive/Manager/Director'
},
'010102' : {
'cn' :u'分公司/代表处负责人',
'en' :'Head of Branch Company'
},
'010103' : {
'cn' :u'企业秘书/董事会秘书',
'en' :'Corporate/Board Secretary'
},
'060010' : {
'cn' :u'市场总监',
'en' :'Marketing Director'
},
'260073' : {
'cn' :u'培训助理/助教',
'en' :'Training Assistant'
},
'190220' : {
'cn' :u'管家部经理/主管',
'en' :'Housekeeping Manager'
},
'260071' : {
'cn' :u'培训师/讲师',
'en' :'Teacher/Trainer'
},
'260070' : {
'cn' :u'体育教师/教练',
'en' :'Physical Teacher/Coach'
},
'260077' : {
'cn' :u'教育产品开发',
'en' :'Education Product Development'
},
'260075' : {
'cn' :u'培训策划',
'en' :'Training Planning'
},
'260074' : {
'cn' :u'培训/招生/课程顾问',
'en' :'Enrollment/Course Consultant'
},
'180072' : {
'cn' :u'意大利语翻译',
'en' :'Italian Translator'
},
'140020' : {
'cn' :u'行长/副行长',
'en' :'President/Vice-President/Branch Manager'
},
'160250' : {
'cn' :u'海关事务管理',
'en' :'Customs Affairs Management'
},
'220120' : {
'cn' :u'锅炉工程师/技师',
'en' :'Boiler Engineer'
},
'050060' : {
'cn' :u'环境/健康/安全(EHS)经理/主管',
'en' :'EHS Manager/Supervisor'
},
'050100' : {
'cn' :u'故障分析工程师',
'en' :'Failure Analysis Engineer'
},
'360090' : {
'cn' :u'网络推广专员',
'en' :'Online Marketing Specialist'
},
'210250' : {
'cn' :u'包装工程师',
'en' :'Packaging Engineer'
},
'010070' : {
'cn' :u'部门/事业部管理',
'en' :'Department Management'
},
'010121' : {
'cn' :u'首席人力资源官CHO/HRVP',
'en' :'Chief Human Resource Officer/Vice President'
},
'170204' : {
'cn' :u'开发报建专员/助理',
'en' :'Applying for Construction Specialist/Assistant'
},
'110170' : {
'cn' :u'电气工程师',
'en' :'Electrical Engineer'
},
'180010' : {
'cn' :u'英语翻译',
'en' :'English Translator'
},
'110350' : {
'cn' :u'自动控制工程师/技术员',
'en' :'Autocontrol Engineer/Technician'
},
'510005' : {
'cn' :u'房地产资产管理',
'en' :'Real Estate Asset Management'
},
'360200' : {
'cn' :u'Flash设计/开发',
'en' :'Flash Designer/Developer'
},
'110290' : {
'cn' :u'通信项目管理',
'en' :'Communication Project Management'
},
'280150' : {
'cn' :u'医药代表',
'en' :'Medical Representative'
},
'460001' : {
'cn' :u'公关总监',
'en' :'Public Relations Director'
},
'210190' : {
'cn' :u'制造工程师',
'en' :'Manufacturing Engineer'
},
'340020' : {
'cn' :u'安全防护/安全管理',
'en' :'Safety Protection'
},
'120090' : {
'cn' :u'核力/火力工程师',
'en' :'Nuclear Power/Fire Engineer'
},
'060070' : {
'cn' :u'会务/会展经理/主管',
'en' :'Exhibition/Event Manager/Supervisor'
},
'470010' : {
'cn' :u'会展策划/设计',
'en' :'Exhibition Planning /Design'
},
'110190' : {
'cn' :u'工程与项目实施',
'en' :'Engineering and Project Implementation'
},
'140110' : {
'cn' :u'柜员/银行会计',
'en' :'Bank Teller/Bank Accountan'
},
'500005' : {
'cn' :u'固废处理工程师',
'en' :'Solid Waste Treatment Engineer'
},
'611' : {
'cn' :u'电力/能源/矿产/地质勘查',
'en' :'Electricity/Energy/Mining/Geological Survey'
},
'110050' : {
'cn' :u'电声/音响工程师/技术员',
'en' :'Electroacoustics Engineer'
},
'280172' : {
'cn' :u'儿科医生',
'en' :'Pediatrician'
},
'490003' : {
'cn' :u'配色技术员',
'en' :'Color Matcher (Technician)'
},
'100330' : {
'cn' :u'网络工程师',
'en' :'Network Engineer'
},
'090201' : {
'cn' :u'会计助理/文员',
'en' :'Accounting Clerk'
},
'090200' : {
'cn' :u'财务顾问',
'en' :'Finance Consultant'
},
'090203' : {
'cn' :u'资产/资金管理',
'en' :'Treasury Manager/Supervisor'
},
'090202' : {
'cn' :u'出纳员',
'en' :'Cashier'
},
'290092' : {
'cn' :u'药品市场推广专员/助理',
'en' :'Pharmaceutical Promotion Specialist/Assistant'
},
'290093' : {
'cn' :u'医药销售经理/主管',
'en' :'Pharmaceutical Sales Manager'
},
'290090' : {
'cn' :u'临床数据分析员',
'en' :'Clinical Data Analyst'
},
'290091' : {
'cn' :u'药品市场推广经理/主管',
'en' :'Pharmaceutical Promotion Manager/Supervisor'
},
'220190' : {
'cn' :u'食品机械',
'en' :'Food Machinery'
},
'290097' : {
'cn' :u'医疗器械生产/质量管理',
'en' :'Medical Equipment Manufacturing/Quality Control'
},
'450009' : {
'cn' :u'业务跟单员',
'en' :'Merchandiser'
},
'290095' : {
'cn' :u'医疗器械注册',
'en' :'Medical Equipment Registration'
},
'450007' : {
'cn' :u'商务专员/助理',
'en' :'Business Specialist/Assistant'
},
'450006' : {
'cn' :u'国内贸易专员/助理',
'en' :'Domestic Trade Specialist/Assistant'
},
'290098' : {
'cn' :u'医疗器械销售代表',
'en' :'Medical Equipment Sales'
},
'100235' : {
'cn' :u'计量/标准化工程师',
'en' :'Measure/Standardization Engineer'
},
'470012' : {
'cn' :u'制作执行',
'en' :'Event executive'
},
'220050' : {
'cn' :u'机电工程师',
'en' :'Electrical and Mechanical Engineers'
},
'100230' : {
'cn' :u'网络信息安全工程师',
'en' :'Network and Information Security Engineer'
},
'470011' : {
'cn' :u'婚礼策划服务',
'en' :'Wedding Planning Service'
},
'559' : {
'cn' :u'金融产品/行业研究/风控',
'en' :'Financial Product/Industry Research/Risk Management'
},
'558' : {
'cn' :u'业务服务',
'en' :'Financial Service'
},
'555' : {
'cn' :u'物业管理',
'en' :'Property Management'
},
'554' : {
'cn' :u'土木/土建规划设计',
'en' :'Civil Planning/Design'
},
'557' : {
'cn' :u'保险',
'en' :'Insurance'
},
'556' : {
'cn' :u'银行',
'en' :'Banking'
},
'551' : {
'cn' :u'硬件开发',
'en' :'Hardware Development'
},
'550' : {
'cn' :u'电信/通信技术',
'en' :'Telecommunication/Communication Techonlogy'
},
'553' : {
'cn' :u'建筑工程',
'en' :'Construction'
},
'110220' : {
'cn' :u'有线传输工程师',
'en' :'Wired Transmission Engineer'
},
'490002' : {
'cn' :u'涂料研发工程师',
'en' :'R&D Chemist Scientist'
},
'490001' : {
'cn' :u'化工实验室研究员/技术员',
'en' :'Chemical Lab Scientist / Technician'
},
'490007' : {
'cn' :u'造纸研发',
'en' :'Paper Making Scientist'
},
'490006' : {
'cn' :u'食品/饮料研发',
'en' :'Food / Beverage Scientist'
},
'490005' : {
'cn' :u'化妆品研发',
'en' :'Cosmetics Scientist'
},
'490004' : {
'cn' :u'塑料工程师',
'en' :'Plastics Engineer'
},
'500001' : {
'cn' :u'水处理工程师',
'en' :'Water Treatment Engineer'
},
'080065' : {
'cn' :u'后勤/总务',
'en' :'Logistics/General Affairs'
},
'500003' : {
'cn' :u'环保检测',
'en' :'Environmental Inspector'
},
'500002' : {
'cn' :u'环境评价工程师',
'en' :'Environmental Assessment Engineer'
},
'080060' : {
'cn' :u'图书/资料/档案管理',
'en' :'Document Keeper'
},
'080061' : {
'cn' :u'助理/秘书/文员',
'en' :'Executive Assistant/Secretary'
},
'360270' : {
'cn' :u'用户研究总监/经理',
'en' :'User Research Director/Manager'
},
'080063' : {
'cn' :u'电脑操作/打字/录入员',
'en' :'Computer Operator/Typist'
},
'510014' : {
'cn' :u'成本总监',
'en' :'Cost Accounting Director'
},
'160070' : {
'cn' :u'采购经理/主管',
'en' :'Purchasing Executive/Manager/Director'
},
'210030' : {
'cn' :u'生产项目工程师',
'en' :'Production Project Engineer'
},
'250250' : {
'cn' :u'记者/采编',
'en' :'Reporter'
},
'180073' : {
'cn' :u'葡萄牙语翻译',
'en' :'Portuguese Translator'
},
'150130' : {
'cn' :u'契约管理',
'en' :'Policy Management'
},
'080062' : {
'cn' :u'前台/总机/接待',
'en' :'Receptionist'
},
'150132' : {
'cn' :u'行业研究',
'en' :'Industry Research'
},
'150070' : {
'cn' :u'保险精算师',
'en' :'Actuary'
},
'220270' : {
'cn' :u'轨道交通工程师/技师',
'en' :'Railway Engineer/Technician'
},
'500006' : {
'cn' :u'废气处理工程师',
'en' :'Waste Gas Treatment Engineer'
},
'120160' : {
'cn' :u'冶金工程师',
'en' :'Metallurgical Engineer'
},
'040040' : {
'cn' :u'项目专员/助理',
'en' :'Project Specialist/Assistant'
},
'370008' : {
'cn' :u'医药销售代表',
'en' :'Pharmaceutical Sales Representative'
},
'360150' : {
'cn' :u'UE交互设计师',
'en' :'UE Interaction Designer'
},
'270010' : {
'cn' :u'律师',
'en' :'Lawyer'
},
'370001' : {
'cn' :u'销售代表',
'en' :'Sales Representative'
},
'370002' : {
'cn' :u'渠道/分销专员',
'en' :'Channel/Distribution Representative'
},
'370003' : {
'cn' :u'客户代表',
'en' :'Sales Account Representative'
},
'370004' : {
'cn' :u'销售工程师',
'en' :'Sales Engineer'
},
'370005' : {
'cn' :u'电话销售',
'en' :'Telesales'
},
'370006' : {
'cn' :u'经销商',
'en' :'Distributor'
},
'370007' : {
'cn' :u'大客户销售',
'en' :'Key Account Sales'
},
'240040' : {
'cn' :u'服装/纺织设计',
'en' :'Fashion/Textiles Designer'
},
'480001' : {
'cn' :u'后期制作',
'en' :'Postproduction'
},
'360010' : {
'cn' :u'运营总监',
'en' :'Operations Director'
},
'320040' : {
'cn' :u'农艺师',
'en' :'Agronomist'
},
'240020' : {
'cn' :u'美术/图形设计',
'en' :'Art/Graphic Design'
},
'170140' : {
'cn' :u'物业管理经理/主管',
'en' :'Property Management Manager/Supervisor'
},
'130050' : {
'cn' :u'调研员',
'en' :'Researcher'
},
'170020' : {
'cn' :u'土木/土建工程师',
'en' :'Civil Engineer'
},
'510017' : {
'cn' :u'房地产项目运营',
'en' :'Real Estate Project Operation'
},
'510016' : {
'cn' :u'房地产项目管理',
'en' :'Real Estate Project Management'
},
'510015' : {
'cn' :u'成本经理/主管',
'en' :'Cost Accounting Manager/Supervisor'
},
'500004' : {
'cn' :u'水质检测员',
'en' :'Water Quality Inspector'
},
'090020' : {
'cn' :u'财务总监',
'en' :'Chief Financial Officer'
},
'510011' : {
'cn' :u'规划设计总监',
'en' :'Planning Director'
},
'510010' : {
'cn' :u'配套工程师',
'en' :'Real Estate Supporting Engineer'
},
'090140' : {
'cn' :u'财务助理',
'en' :'Finance Assistant'
},
'510019' : {
'cn' :u'规划设计经理/主管',
'en' :'Planning Manager/Supervisor'
},
'160210' : {
'cn' :u'供应链总监',
'en' :'Supply Chain Director'
},
'060110' : {
'cn' :u'促销经理/主管',
'en' :'Promotion Manager/ Supervisor'
},
'190270' : {
'cn' :u'行程管理/计调',
'en' :'Travel Management'
},
'010020' : {
'cn' :u'首席运营官COO',
'en' :'Chief Operating Officer/COO'
},
'260055' : {
'cn' :u'音乐教师',
'en' :'Music Teacher'
},
'020070' : {
'cn' :u'售前支持经理/主管',
'en' :'Pre-Sales Support Manager/Supervisor'
},
'360120' : {
'cn' :u'电子商务专员',
'en' :'E-Commerce Specialist'
},
'180050' : {
'cn' :u'俄语翻译',
'en' :'Russian Translator'
},
'210210' : {
'cn' :u'工厂经理/厂长',
'en' :'Plant/Factory Manager'
},
'030085' : {
'cn' :u'客户服务经理/主管',
'en' :'Customer Service Manager/Specialist'
},
'050020' : {
'cn' :u'质量管理/测试主管(QA/QC主管)',
'en' :'QA/QC Supervisor'
},
'030086' : {
'cn' :u'网络/在线客服',
'en' :'Online Customer Service'
},
'030081' : {
'cn' :u'客户服务专员/助理',
'en' :'Customer Service Specialist/Assistant'
},
'030083' : {
'cn' :u'投诉处理专员',
'en' :'Complaint Coordinator'
},
'030082' : {
'cn' :u'咨询热线/呼叫中心人员',
'en' :'Hotline/Call Center Staff'
},
'300021' : {
'cn' :u'公务员/事业单位人员',
'en' :'Civil Servant'
},
'300020' : {
'cn' :u'科研人员',
'en' :'Researchers'
},
'150131' : {
'cn' :u'再保险',
'en' :'Reinsurance'
},
'450004' : {
'cn' :u'供应商开发',
'en' :'Supplier Development'
},
'110010' : {
'cn' :u'电路工程师/技术员',
'en' :'Electronic Circuit Engineer'
},
'060020' : {
'cn' :u'市场经理/主管',
'en' :'Marketing Manager/Supervisor'
},
'280010' : {
'cn' :u'医院管理人员',
'en' :'Hospital Management'
},
'190190' : {
'cn' :u'票务服务',
'en' :'Ticket Service'
},
'140010' : {
'cn' :u'证券/外汇/期货经纪人',
'en' :'Securities/Foreign Exchange/Futures/Brokerage'
},
'100060' : {
'cn' :u'项目经理/主管',
'en' :'Project Manager/Supervisor'
},
'140153' : {
'cn' :u'机构客户服务',
'en' :'Institutional Investor Service'
},
'140152' : {
'cn' :u'证券投资',
'en' :'Securities Investment/Portfolio Investment'
},
'140151' : {
'cn' :u'经纪业务',
'en' :'Brokerage'
},
'140150' : {
'cn' :u'合规稽查',
'en' :'Compliance And Audit'
},
'140157' : {
'cn' :u'信托业务',
'en' :'Trust'
},
'140155' : {
'cn' :u'房地产信托/物业投资',
'en' :'Real Estate Investment Trust/REITS'
},
'140154' : {
'cn' :u'资产管理',
'en' :'Asset Management'
},
'220010' : {
'cn' :u'机械工程师',
'en' :'Mechanical Engineer'
},
'500007' : {
'cn' :u'EHS管理',
'en' :'EHS Management'
},
'100370' : {
'cn' :u'项目总监',
'en' :'Project Director'
},
'110120' : {
'cn' :u'光源与照明工程师',
'en' :'Light Source and Lighting Engineer'
},
'220150' : {
'cn' :u'船舶设计与制造',
'en' :'Watercraft Design & Manufacture'
},
'160010' : {
'cn' :u'外贸经理/主管',
'en' :'Trading Manager/Supervisor'
},
'170214' : {
'cn' :u'架线和管道工程技术',
'en' :'Pipeline Engineering Technology'
},
'170213' : {
'cn' :u'软装设计师',
'en' :'Soft outfit Designer'
},
'170212' : {
'cn' :u'水利/港口工程技术',
'en' :'Water Conservancy/Port Engineering Technology'
},
'170211' : {
'cn' :u'结构工程师',
'en' :'Structural Engineer'
},
'170210' : {
'cn' :u'土建造价工程师',
'en' :'Civil Engineering Cost Engineer '
},
'360230' : {
'cn' :u'音效设计师',
'en' :'Sound Effects Designer'
},
'360338' : {
'cn' :u'配置管理经理/主管',
'en' :'Configuration Management Manager/Supervisor'
},
'110260' : {
'cn' :u'电信网络工程师',
'en' :'Telecommunication Network Engineer'
},
'360333' : {
'cn' :u'数据库开发工程师',
'en' :'Database Developer'
},
'640020' : {
'cn' :u'担保业务',
'en' :'Guarantee Business'
},
'360331' : {
'cn' :u'系统工程师',
'en' :'System Engineer'
},
'360330' : {
'cn' :u'运维开发',
'en' :'OPS Developer'
},
'360337' : {
'cn' :u'配置管理工程师',
'en' :'Configuration Management Engineer'
},
'360336' : {
'cn' :u'数据挖掘工程师',
'en' :'Data Mining Engineer'
},
'360335' : {
'cn' :u'移动前端开发工程师',
'en' :'Mobile Front-end Developer'
},
'360334' : {
'cn' :u'算法工程师',
'en' :'Algorithm Engineer'
},
'290070' : {
'cn' :u'环保工程师',
'en' :'Environmental Engineer'
},
'110065' : {
'cn' :u'嵌入式硬件开发(主板机…)',
'en' :'Embedded Hardware Engineer(PCB…)'
},
'190100' : {
'cn' :u'导游/旅行顾问',
'en' :'Tour Guide/Travel Consultant'
},
'110060' : {
'cn' :u'家用电器/数码产品研发',
'en' :'Household Electronics/Digital Products Development'
},
'160040' : {
'cn' :u'报关员',
'en' :'Document Management/Customs Agent'
},
'110300' : {
'cn' :u'通信标准化工程师',
'en' :'Communication Standardization Engineer'
},
'300010' : {
'cn' :u'科研管理人员',
'en' :'Research Management'
},
'340050' : {
'cn' :u'地质勘查/选矿/采矿',
'en' :'Geological Exploration'
},
'250110' : {
'cn' :u'导演/编导',
'en' :'Director/Choreographer'
},
'100090' : {
'cn' :u'软件工程师',
'en' :'Software Engineer'
},
'280110' : {
'cn' :u'药库主任/药剂师',
'en' :'Drug Storehouse Director/Pharmacist'
},
'613' : {
'cn' :u'化工',
'en' :'Chemical'
},
'370013' : {
'cn' :u'业务拓展专员/助理',
'en' :'BD Specialist/Assistant'
},
'280050' : {
'cn' :u'医药学检验',
'en' :'Clinical Laboratory'
},
'610' : {
'cn' :u'医院/医疗/护理',
'en' :'Hospital/Medicine/Nursing'
},
'100300' : {
'cn' :u'网站营运管理',
'en' :'Web Operations Management'
},
'370012' : {
'cn' :u'区域销售专员/助理',
'en' :'Regional Sales Specialist/Assistant'
},
'585' : {
'cn' :u'印刷/包装',
'en' :'Packaging/Printing'
},
'370011' : {
'cn' :u'网络/在线销售',
'en' :'Online Sales'
},
'292070' : {
'cn' :u'船舶乘务',
'en' :'Shipping Service'
},
'616' : {
'cn' :u'农/林/牧/渔',
'en' :'Agriculture/Forestry/Animal Husbandry/Fishing'
},
'060205' : {
'cn' :u'市场通路经理/主管',
'en' :'Trade Marketing Manager/Supervisor'
},
'020100' : {
'cn' :u'商务经理/主管',
'en' :'Business Manager/Supervisor'
},
'589' : {
'cn' :u'咨询/调研',
'en' :'Consultant/Research'
},
'614' : {
'cn' :u'环境科学/环保',
'en' :'Environmental Science/Environmental'
},
'210060' : {
'cn' :u'生产设备管理',
'en' :'Production Equipment Management'
},
'615' : {
'cn' :u'公务员/公益事业/科研',
'en' :'Official/Public Service/Science Research'
},
'210140' : {
'cn' :u'生产经理/车间主任',
'en' :'Production Manager/Workshop Supervisor'
},
'250050' : {
'cn' :u'发行管理',
'en' :'Distribution Management'
},
'360340' : {
'cn' :u'软件测试',
'en' :'Software Testing'
},
'160140' : {
'cn' :u'货运代理',
'en' :'Freight Forwarder'
},
'250200' : {
'cn' :u'电分操作员',
'en' :'Operator-Colour Distinguishing'
},
'360332' : {
'cn' :u'数据分析师',
'en' :'Data Analyst'
},
'170100' : {
'cn' :u'工程预结算管理',
'en' :'Construction Budget/Cost Management'
},
'090180' : {
'cn' :u'投融资经理/主管',
'en' :'Investment and Finance Manager/Supervisor'
},
'220200' : {
'cn' :u'纺织机械',
'en' :'Textile Machinery'
},
'440006' : {
'cn' :u'服装/纺织/皮革工艺师',
'en' :'Apparels/Textiles/Leather Goods Technologist'
},
'150040' : {
'cn' :u'保险代理人/经纪人/客户经理',
'en' :'Insurance Agent/Broker/Account Manager'
},
'120130' : {
'cn' :u'制冷/暖通',
'en' :'HVAC/Refrigeration'
},
'090060' : {
'cn' :u'会计/会计师',
'en' :'Accountant'
},
'280168' : {
'cn' :u'牙科医生',
'en' :'Dentist'
},
'250160' : {
'cn' :u'主持人/播音员',
'en' :'Host/Broadcaster'
},
'190030' : {
'cn' :u'大堂经理/领班',
'en' :'Lobby Manager/Supervisor'
},
'170080' : {
'cn' :u'房地产评估',
'en' :'Real Estate Appraisal'
},
'510012' : {
'cn' :u'规划设计师',
'en' :'Planning Designer'
},
'290020' : {
'cn' :u'临床研究员',
'en' :'Clinical Researcher'
},
'592' : {
'cn' :u'旅游/出入境服务',
'en' :'Tourism/Exit and Entry Service'
},
'360040' : {
'cn' :u'产品总监',
'en' :'Product Director'
},
'360160' : {
'cn' :u'运维工程师',
'en' :'Maintenance Engineer'
},
'130020' : {
'cn' :u'咨询总监',
'en' :'Advisory Director'
},
'170070' : {
'cn' :u'房地产项目策划经理/主管',
'en' :'Real Estate Planning Manager/Supervisor'
},
'070070' : {
'cn' :u'培训经理/主管',
'en' :'Training Manager/Supervisor'
},
'656' : {
'cn' :u'汽车制造',
'en' :'Automobile Manufacture'
},
'657' : {
'cn' :u'前端开发',
'en' :'Front-end Development'
},
'655' : {
'cn' :u'医药注册/推广',
'en' :'Medical Registration/Marketing'
},
'652' : {
'cn' :u'机械设计/制造',
'en' :'Mechanical Design/Production'
},
'090110' : {
'cn' :u'成本管理员',
'en' :'Capital Manager'
},
'360190' : {
'cn' :u'游戏界面设计师',
'en' :'Game UI Designer'
},
'320030' : {
'cn' :u'动物营养/饲料研发',
'en' :'Animal nutrition/Feed Development'
},
'210090' : {
'cn' :u'技术或工艺设计经理',
'en' :'Technology or Process Design Manager'
},
'170170' : {
'cn' :u'公路桥梁预算师',
'en' :'Road and Bridge Estimator'
},
'653' : {
'cn' :u'机械设备/维修',
'en' :'Mechanical Maintenance'
},
'190240' : {
'cn' :u'预定部主管',
'en' :'Reservation Supervisor'
},
'060060' : {
'cn' :u'市场调研与分析',
'en' :'Market Research and Analysis'
},
'110110' : {
'cn' :u'电子/电器维修',
'en' :'Electronics/Electronics Repair'
},
'190180' : {
'cn' :u'酒店/宾馆营销',
'en' :'Hotel Sales'
},
'520003' : {
'cn' :u'物业招商/租赁/租售',
'en' :'Property Lease/Rent'
},
'520002' : {
'cn' :u'高级物业顾问/物业顾问',
'en' :'Senior Property Advisor/Property Advisor'
},
'520001' : {
'cn' :u'物业管理专员/助理',
'en' :'Property Management'
},
'160240' : {
'cn' :u'集装箱业务',
'en' :'Container Operator'
},
'520005' : {
'cn' :u'物业机电工程师',
'en' :'Property Mechanical Engineer'
},
'520004' : {
'cn' :u'物业设施管理人员',
'en' :'Property Establishment Management'
},
'100130' : {
'cn' :u'高级硬件工程师',
'en' :'Senior Hardware Engineer'
},
'030070' : {
'cn' :u'售后支持工程师',
'en' :'Sales Support Engineer'
},
'060090' : {
'cn' :u'产品经理/主管',
'en' :'Product Manager/Supervisor'
},
'050110' : {
'cn' :u'环境/健康/安全(EHS)工程师',
'en' :'EHS Engineer'
},
'220110' : {
'cn' :u'夹具工程师/技师',
'en' :'Clamp Engineer'
},
'010060' : {
'cn' :u'合伙人',
'en' :'Partner'
},
'380009' : {
'cn' :u'业务分析专员/助理',
'en' :'Business Analysis Specialist/Assistant'
},
'250010' : {
'cn' :u'作家/编剧/撰稿人',
'en' :'Writer/Screenwriter'
},
'659' : {
'cn' :u'配置管理',
'en' :'Configuration Management'
},
'210240' : {
'cn' :u'生产总监',
'en' :'Production Director'
},
'100290' : {
'cn' :u'多媒体/游戏开发工程师',
'en' :'Multimedia/Game Development Engineer'
},
'292050' : {
'cn' :u'列车乘务',
'en' :'Train Crew'
},
'110340' : {
'cn' :u'激光/光电子技术',
'en' :'Laser/Optoelectronics Technology'
},
'290030' : {
'cn' :u'药品研发',
'en' :'Medicine R&D'
},
'140160' : {
'cn' :u'PE/VC投资',
'en' :'Private Equity/Venture Capital'
},
'350030' : {
'cn' :u'ERP技术开发',
'en' :'ERP R&D'
},
'340018' : {
'cn' :u'列车车长/司机',
'en' :'Train Driver'
},
'100080' : {
'cn' :u'系统分析师',
'en' :'System Analyst'
},
'240030' : {
'cn' :u'工业/产品设计',
'en' :'Industrial/Product Design'
},
'340015' : {
'cn' :u'飞机机长/副机长',
'en' :'Flight Captain'
},
'110020' : {
'cn' :u'电子/电器工程师',
'en' :'Electronic/Electrical Equipment Engineer'
},
'100340' : {
'cn' :u'网页设计/制作/美工',
'en' :'Web Designer/Production/Creative'
},
'260010' : {
'cn' :u'教学/教务管理人员',
'en' :'Teaching/Educational Administration'
},
'140040' : {
'cn' :u'投资银行业务',
'en' :'Investment Banking'
},
'220180' : {
'cn' :u'包装/印刷',
'en' :'Packaging/Printing'
},
'100180' : {
'cn' :u'ERP实施顾问',
'en' :'ERP Implementation'
},
'470008' : {
'cn' :u'美术指导',
'en' :'Art Director'
},
'220060' : {
'cn' :u'精密机械',
'en' :'Precision Machinery'
},
'470004' : {
'cn' :u'广告创意/设计师',
'en' :'Advertising Designer'
},
'470006' : {
'cn' :u'广告/会展业务拓展',
'en' :'Advertising/Exhibition BD'
},
'470001' : {
'cn' :u'广告客户总监',
'en' :'Advertising Account Director'
},
'470003' : {
'cn' :u'广告创意/设计总监',
'en' :'Advertising Creative Director'
},
'470002' : {
'cn' :u'广告客户专员',
'en' :'Advertising Account Executive'
},
'360300' : {
'cn' :u'WEB前端开发工程师',
'en' :'WEB Front-end Developer'
},
'510003' : {
'cn' :u'房地产项目招投标',
'en' :'Real Estate Tender /Bidding'
},
'260058' : {
'cn' :u'文科老师',
'en' :'Liberal Arts Teacher'
},
'510004' : {
'cn' :u'房地产投资分析',
'en' :'Real Estate Investment Analysis'
},
'292030' : {
'cn' :u'船长/副船长',
'en' :'Fleet Captain'
},
'120100' : {
'cn' :u'空调/热能工程师',
'en' :'Air-Conditioning/Energy Engineers'
},
'150010' : {
'cn' :u'稽核/法律/合规',
'en' :'Compliance/Audit'
},
'290100' : {
'cn' :u'政府事务管理',
'en' :'Government Affairs'
},
'290101' : {
'cn' :u'招投标管理',
'en' :'Tendering Coordinator'
},
'290102' : {
'cn' :u'医药招商经理/主管',
'en' :'Pharmaceutical Business Development Manager/Supervisor'
},
'566' : {
'cn' :u'生产管理/维修',
'en' :'Production Management/Maintenance'
},
'110330' : {
'cn' :u'版图设计工程师',
'en' :'Engineer Layout Design Engineer'
},
'110180' : {
'cn' :u'电子技术研发工程师',
'en' :'Electronics Development Engineer'
},
'210020' : {
'cn' :u'总工程师/副总工程师',
'en' :'Chief Engineer/Deputy Chief Engineer'
},
'360260' : {
'cn' :u'交互设计总监/经理',
'en' :'Interaction Design Director/Manager'
},
'291040' : {
'cn' :u'品类管理',
'en' :'Category Management'
},
'110230' : {
'cn' :u'电信交换工程师',
'en' :'Telecommunication Exchange Engineer'
},
'250120' : {
'cn' :u'摄影/摄像',
'en' :'Photographer/Videographer'
},
'340080' : {
'cn' :u'其他',
'en' :'Others'
},
'220240' : {
'cn' :u'机械结构工程师',
'en' :'Mechanical Structural Engineer'
},
'350040' : {
'cn' :u'需求分析师',
'en' :'Demand Analyst'
},
'450002' : {
'cn' :u'采购专员/助理',
'en' :'Purchasing Specialist/Assistant'
},
'250240' : {
'cn' :u'印刷机械机长',
'en' :'Printing Machine Operator'
},
'210070' : {
'cn' :u'安全/健康/环境管理',
'en' :'Safety/Health/Environmental Management'
},
'120070' : {
'cn' :u'电力维修技术员',
'en' :'Electricity Maintenance Technician'
},
'150120' : {
'cn' :u'储备经理人',
'en' :'Agency Management Associate'
},
'150080' : {
'cn' :u'业务经理/主管',
'en' :'Business Manager/Supervisor'
},
'100040' : {
'cn' :u'信息技术专员',
'en' :'Information Technology Specialist'
},
'120170' : {
'cn' :u'光伏技术工程师',
'en' :'Photovoltaic Technology Engineer'
},
'140130' : {
'cn' :u'国际结算/外汇交易',
'en' :'International Account Settlement/Foreign Exchange'
},
'669' : {
'cn' :u'其他',
'en' :'Others'
},
'668' : {
'cn' :u'写作/采编/出版',
'en' :'Writing/Newspaper/Publishing'
},
'667' : {
'cn' :u'项目管理/项目协调',
'en' :'Project Management'
},
'666' : {
'cn' :u'质量管理/安全防护',
'en' :'Quality Management/Safety Protection'
},
'665' : {
'cn' :u'房地产交易/中介',
'en' :'Real Estate Agent/Broker'
},
'664' : {
'cn' :u'房地产规划/开发',
'en' :'Real Estate Development'
},
'663' : {
'cn' :u'建筑装潢',
'en' :'Architectural Decoration'
},
'662' : {
'cn' :u'IT运维/技术支持',
'en' :'IT Operation/Technical Support'
},
'661' : {
'cn' :u'IT项目管理',
'en' :'IT Project Management'
},
'660' : {
'cn' :u'IT管理',
'en' :'IT Management'
},
'410020' : {
'cn' :u'信用卡中心',
'en' :'Credit Card Center'
},
'410022' : {
'cn' :u'客户总监',
'en' :'Account Director'
},
'130060' : {
'cn' :u'培训师',
'en' :'Trainers'
},
'210110' : {
'cn' :u'生产质量管理',
'en' :'Production Quality Management'
},
'240050' : {
'cn' :u'服装打样/制版',
'en' :'Sample Production'
},
'170130' : {
'cn' :u'室内装潢设计',
'en' :'Interior Design'
},
'640040' : {
'cn' :u'典当业务',
'en' :'Pawn Business'
},
'390001' : {
'cn' :u'硬件维护工程师',
'en' :'Hardware maintenance engineer'
},
'170030' : {
'cn' :u'建筑设计师',
'en' :'Architectural Designer'
},
'210050' : {
'cn' :u'生产物料管理(PMC)',
'en' :'Production Material Control(PMC)'
},
'542' : {
'cn' :u'后端开发',
'en' :'Back-end Development'
},
'543' : {
'cn' :u'IT质量管理',
'en' :'IT QA'
},
'540' : {
'cn' :u'行政/后勤/文秘',
'en' :'Administration'
},
'546' : {
'cn' :u'产品',
'en' :'Product'
},
'547' : {
'cn' :u'UI/UE/平面设计',
'en' :'UI/UE/Graphic Design'
},
'545' : {
'cn' :u'运营',
'en' :'Operations'
},
'090150' : {
'cn' :u'统计员',
'en' :'Statistician'
},
'549' : {
'cn' :u'电子/电器/半导体/仪器',
'en' :'Electronics/Wiring/Semiconductor/Instrument'
},
'250230' : {
'cn' :u'调墨技师',
'en' :'Ink Technician'
},
'160200' : {
'cn' :u'物流专员/助理',
'en' :'Logistics Specialist/Assistant'
},
'010010' : {
'cn' :u'首席执行官CEO/总裁/总经理',
'en' :'CEO/President/General Manager'
},
'020040' : {
'cn' :u'渠道/分销总监',
'en' :'Channel/Distribution Director'
},
'050010' : {
'cn' :u'质量管理/测试经理(QA/QC经理)',
'en' :'QA/QC Manager'
},
'360130' : {
'cn' :u'网店店长/客服管理',
'en' :'Online Shop Manager/Customer Service Management'
},
'190040' : {
'cn' :u'楼面管理',
'en' :'Floor Management'
},
'180040' : {
'cn' :u'德语翻译',
'en' :'German Translator'
},
'070100' : {
'cn' :u'薪资福利经理/主管',
'en' :'Compensation and Benefits Manager/Director'
},
'070101' : {
'cn' :u'薪资福利专员/助理',
'en' :'Compensation & Benefits Specialist/Assistant'
},
'060202' : {
'cn' :u'市场拓展专员/助理',
'en' :'BD Specialist/Assistant'
},
'060203' : {
'cn' :u'品牌经理/主管',
'en' :'Brand Manager/Supervisor'
},
'060200' : {
'cn' :u'市场专员/助理',
'en' :'Marketing Specialist/Assistant'
},
'060201' : {
'cn' :u'市场拓展经理/主管',
'en' :'BD manager/Supervisor'
},
'060206' : {
'cn' :u'市场通路专员/助理',
'en' :'Trade Marketing Specialist/Assistant'
},
'060207' : {
'cn' :u'市场企划专员/助理',
'en' :'Marketing Planning Specialist/Assistant'
},
'060204' : {
'cn' :u'产品专员/助理',
'en' :'Product Specialist/Assistant'
},
'370010' : {
'cn' :u'团购业务员',
'en' :'Team Buying Sales'
},
'170195' : {
'cn' :u'建筑机电工程师',
'en' :'Building Electrical Engineer'
},
'060208' : {
'cn' :u'选址拓展/新店开发',
'en' :'Site Development'
},
'060209' : {
'cn' :u'品牌专员/助理',
'en' :'Brand Specialist/Assistant'
},
'170040' : {
'cn' :u'建筑工程管理/项目经理',
'en' :'Construction Management'
},
'420008' : {
'cn' :u'汽车底盘/总装工程师',
'en' :'Automobile Chassis/Assembly Engineer'
},
'420001' : {
'cn' :u'汽车项目管理',
'en' :'Automotive Project Management'
},
'420002' : {
'cn' :u'汽车机构工程师',
'en' :'Automotive Structural Engineer'
},
'420003' : {
'cn' :u'汽车设计工程师',
'en' :'Automotive Design Engineer'
},
'420004' : {
'cn' :u'汽车电子工程师',
'en' :'Automotive Electronic Engineer'
},
'420005' : {
'cn' :u'汽车质量工程师',
'en' :'Automotive Quality Engineer'
},
'420006' : {
'cn' :u'汽车安全性能工程师',
'en' :'Safety Performance Engineer'
},
'420007' : {
'cn' :u'汽车装配工艺工程师',
'en' :'Assembly Engineer'
},
'430011' : {
'cn' :u'汽车定损/车险理赔',
'en' :'Automobile Insurance'
},
'190210' : {
'cn' :u'机场代表',
'en' :'Hotel Airport Representatives'
},
'670' : {
'cn' :u'采购',
'en' :'Purchasing'
},
'010080' : {
'cn' :u'总裁助理/总经理助理',
'en' :'Executive Assistant/General Manager Assistant'
},
'260059' : {
'cn' :u'外语老师',
'en' :'Foreign language teacher'
},
'100260' : {
'cn' :u'文档工程师',
'en' :'Documentation Engineer'
},
'220140' : {
'cn' :u'列车设计与制造',
'en' :'Train Design & Manufacture'
},
'260051' : {
'cn' :u'高中教师',
'en' :'High School Teacher'
},
'260053' : {
'cn' :u'初中教师',
'en' :'Junior high school teacher'
},
'260052' : {
'cn' :u'职业中专/技校教师',
'en' :'Vocational Technical Secondary School/Technical School Teacher'
},
'220020' : {
'cn' :u'模具工程师',
'en' :'Mold Engineer'
},
'260054' : {
'cn' :u'小学教师',
'en' :'Grade School Teacher'
},
'260057' : {
'cn' :u'理科老师',
'en' :'Science teacher'
},
'260056' : {
'cn' :u'美术教师',
'en' :'Art Teacher'
},
'210270' : {
'cn' :u'维修经理/主管',
'en' :'Maintenance Manager/Supervisor'
},
'100140' : {
'cn' :u'系统集成工程师',
'en' :'Systems Integration Engineer'
},
'250020' : {
'cn' :u'总编/副总编',
'en' :'General Editor/Deputy Editor'
},
'110090' : {
'cn' :u'半导体技术',
'en' :'Semiconductor Technology'
},
'360220' : {
'cn' :u'视觉设计师',
'en' :'Visual Effects Designer'
},
'110270' : {
'cn' :u'通信电源工程师',
'en' :'Communication Power Supply Engineer'
},
'150110' : {
'cn' :u'保险内勤',
'en' :'Staff'
},
'280030' : {
'cn' :u'理疗师',
'en' :'Physical Therapist'
},
'110070' : {
'cn' :u'嵌入式软件开发',
'en' :'Embedded Software Engineer'
},
'340040' : {
'cn' :u'测绘/测量',
'en' :'Mapping/Surveyor'
},
'110370' : {
'cn' :u'FAE现场应用工程师',
'en' :'Field Application Engineer(FAE)'
},
'290060' : {
'cn' :u'化工技术应用/化工工程师',
'en' :'Chemical Technical Application/Chemical Engineer'
},
'040010' : {
'cn' :u'项目总监',
'en' :'Project Director'
},
'220280' : {
'cn' :u'材料工程师',
'en' :'Material Engineer'
},
'220281' : {
'cn' :u'气动工程师',
'en' :'Pneumatic Engineer'
},
'220282' : {
'cn' :u'工艺/制程工程师(PE)',
'en' :'PE Engineer'
},
'220283' : {
'cn' :u'船舶维修/保养',
'en' :'Watercraft Repair/Maintenance'
},
'220284' : {
'cn' :u'列车维修/保养',
'en' :'Train Repair/Maintenance'
},
'220285' : {
'cn' :u'机械设备工程师',
'en' :'Mechanical Equipment Engineer'
},
'280100' : {
'cn' :u'兽医/宠物医生',
'en' :'Veterinarian/Pet Doctor'
},
'110380' : {
'cn' :u'IC验证工程师',
'en' :'IC Verification Engineer'
},
'100310' : {
'cn' :u'网站编辑',
'en' :'Website Editor'
},
'579' : {
'cn' :u'汽车销售与服务',
'en' :'Automotive Sales and Service'
},
'240010' : {
'cn' :u'平面设计经理/主管',
'en' :'Graphic Design Manager/Supervisor'
},
'150020' : {
'cn' :u'核保/理赔',
'en' :'Underwriting/Claim Management'
},
'571' : {
'cn' :u'服装/纺织/皮革',
'en' :'Apparels/Textiles/Leather Goods'
},
'280080' : {
'cn' :u'疾病控制/公共卫生',
'en' :'Disease Control/Public Health'
},
'360290' : {
'cn' :u'视觉设计总监/经理',
'en' :'Visual Design Director/Manager'
},
'210015' : {
'cn' :u'运营经理/主管',
'en' :'Operations Manager/Supervisor'
},
'210150' : {
'cn' :u'组长/拉长',
'en' :'Group Leader'
},
'658' : {
'cn' :u'BI',
'en' :'BI'
},
'090050' : {
'cn' :u'会计经理/主管',
'en' :'Accounting Manager/Supervisor'
},
'120060' : {
'cn' :u'电力工程师/技术员',
'en' :'Electric Power Engineer'
},
'100030' : {
'cn' :u'信息技术经理/主管',
'en' :'IT Manager/Supervisor'
},
'240125' : {
'cn' :u'包装设计',
'en' :'Packaging Design'
},
'150050' : {
'cn' :u'客户服务/续期管理',
'en' :'Customer Service/Account Renewals Management'
},
'240120' : {
'cn' :u'3D设计/制作',
'en' :'3D Design/Production'
},
'360110' : {
'cn' :u'电子商务经理/主管',
'en' :'E-Commerce Manager/Supervisor'
},
'240080' : {
'cn' :u'平面设计师',
'en' :'Graphic Designer'
},
'220210' : {
'cn' :u'设备修理',
'en' :'Equipment Repair'
},
'020005' : {
'cn' :u'区域销售总监',
'en' :'Regional Sales Director'
},
'170090' : {
'cn' :u'建筑设备工程师',
'en' :'Construction Equipment Engineer'
},
'360070' : {
'cn' :u'网络推广总监',
'en' :'Online Marketing Director'
},
'310040' : {
'cn' :u'培训生',
'en' :'Trainee'
},
'240060' : {
'cn' :u'工艺品/珠宝设计',
'en' :'Crafts/Jewelry Design'
},
'070140' : {
'cn' :u'员工关系/企业文化/工会',
'en' :'Employee Relations/Corporate Culture/Unions'
},
'070141' : {
'cn' :u'人力资源信息系统',
'en' :'HRIS'
},
'070142' : {
'cn' :u'人力资源伙伴(HRBP)',
'en' :'HR Business Partner'
},
'070143' : {
'cn' :u'组织发展(OD)',
'en' :'Organization Development'
},
'120080' : {
'cn' :u'水利/水电工程师',
'en' :'Water Resources/Water and Electric Engineer'
},
'250030' : {
'cn' :u'文字编辑/组稿',
'en' :'Copy Editor'
},
'320020' : {
'cn' :u'畜牧师',
'en' :'Animal Husbandry Technician'
},
'250090' : {
'cn' :u'艺术/设计总监',
'en' :'Artistic/Design Director'
},
'130030' : {
'cn' :u'咨询经理/主管',
'en' :'Consulting Manager/Supervisor'
},
'090120' : {
'cn' :u'审计经理/主管',
'en' :'Audit Manager/Supervisor'
},
'160021' : {
'cn' :u'商务经理/主管',
'en' :'Business Manager/Supervisor'
},
'160020' : {
'cn' :u'国内贸易经理/主管',
'en' :'Domestic Trade manager/Supervisor'
},
'270043' : {
'cn' :u'合规经理',
'en' :'Compliance Manager'
},
'270042' : {
'cn' :u'知识产权/专利/商标代理人',
'en' :' Intellectual Property/Patent Advisor'
},
'270041' : {
'cn' :u'法务专员/助理',
'en' :'Lega Specialist/Assistant'
},
'270040' : {
'cn' :u'法务经理/主管',
'en' :'Legal manager/Supervisor'
},
'170160' : {
'cn' :u'房地产交易/中介',
'en' :'Real Estate Agent/Broker'
},
'280020' : {
'cn' :u'医疗技术人员',
'en' :'Medical Technicians'
},
'270044' : {
'cn' :u'合规主管/专员',
'en' :'Compliance Supervisor/Specialist'
},
'060170' : {
'cn' :u'广告客户经理/主管',
'en' :'Advertising Account Manager/Supervisor'
},
'460009' : {
'cn' :u'媒介策划',
'en' :'Media Planning'
},
'460004' : {
'cn' :u'媒介专员/助理',
'en' :'Media Specialist/Assistant'
},
'460005' : {
'cn' :u'活动策划',
'en' :'Event Planner'
},
'460006' : {
'cn' :u'活动执行',
'en' :'Event Excution'
},
'460007' : {
'cn' :u'媒介销售',
'en' :'Media Sales'
},
'110100' : {
'cn' :u'电子元器件工程师',
'en' :'Electronic Component Engineer'
},
'460002' : {
'cn' :u'公关专员/助理',
'en' :'Public Relations Specialist/Assistant'
},
'460003' : {
'cn' :u'媒介经理/主管',
'en' :'Media Manager/Supervisor'
},
'160270' : {
'cn' :u'物流/仓储项目管理',
'en' :'Logistics/Warehousing Project Management'
},
'480002' : {
'cn' :u'放映管理',
'en' :'Projection Management'
},
'030060' : {
'cn' :u'售后支持经理/主管',
'en' :'After-Sales Support Manager/Supervisor'
},
'220100' : {
'cn' :u'冲压工程师/技师',
'en' :'Punch Engineer'
},
'100100' : {
'cn' :u'高级软件工程师',
'en' :'Senior Software Engineer'
},
'050040' : {
'cn' :u'供应商/采购质量管理',
'en' :'Supplier/Purchasing Quality Management'
},
'180030' : {
'cn' :u'法语翻译',
'en' :'French Translator'
},
'210230' : {
'cn' :u'生产项目经理/主管',
'en' :'Production Project Manager/Supervisor'
},
'010050' : {
'cn' :u'副总裁/副总经理',
'en' :'Vice President/Deputy General Manager'
},
'110400' : {
'cn' :u'电气线路设计',
'en' :'Electrical Circuit Design'
},
'110401' : {
'cn' :u'线路结构设计',
'en' :'Route structure design'
},
'110402' : {
'cn' :u'机电工程师',
'en' :'Electrical & Mechanical Engineer'
},
'110403' : {
'cn' :u'自动化工程师',
'en' :'Automation Engineer'
},
'110404' : {
'cn' :u'模拟电路设计/应用工程师',
'en' :'Analogical Electronic Design / Application Engineer'
},
'110405' : {
'cn' :u'空调工程/设计',
'en' :'Air Conditioning Engineering/Design'
},
'110406' : {
'cn' :u'仪器/仪表/计量',
'en' :'Instrument/Measurement Analyst'
},
'110407' : {
'cn' :u'安防系统工程师',
'en' :'Security Systems Engineer'
},
'280171' : {
'cn' :u'护理主任/护士长',
'en' :'Nursing Officer'
},
'220080' : {
'cn' :u'注塑工程师/技师',
'en' :'Injection Engineer'
},
'280173' : {
'cn' :u'验光师',
'en' :'Optometrist'
},
'280174' : {
'cn' :u'放射科医师',
'en' :'Radiologist'
},
'280175' : {
'cn' :u'综合门诊/全科医生',
'en' :'General Practitioner (GP)'
},
'100280' : {
'cn' :u'语音/视频/图形开发工程师',
'en' :'Audio/Video/Graphics Development Engineer'
},
'110030' : {
'cn' :u'电信/通讯工程师',
'en' :'Telecommunications/Communications Engineer'
},
'260020' : {
'cn' :u'幼教',
'en' :'Preschool Education'
},
'100350' : {
'cn' :u'计算机辅助设计工程师',
'en' :'Computer Aided Design Engineer'
},
'220070' : {
'cn' :u'铸造/锻造工程师/技师',
'en' :'Casting/Forging Engineer'
},
'140070' : {
'cn' :u'资产评估',
'en' :'Asset Evaluation'
},
'130071' : {
'cn' :u'情报信息分析师',
'en' :'Intelligence Analyst'
},
'100190' : {
'cn' :u'数据库管理员(DBA)',
'en' :'Database Administrator'
},
'030010' : {
'cn' :u'客户服务总监',
'en' :'Director of Customer Service'
},
'220170' : {
'cn' :u'维修工程师',
'en' :'Maintenance Engineer'
},
'360310' : {
'cn' :u'移动开发工程师',
'en' :'Mobile Development Engineer'
},
'292020' : {
'cn' :u'地勤人员',
'en' :'Ground Attendant'
},
'120110' : {
'cn' :u'石油/天然气技术人员',
'en' :'Oil/Gas Technician'
},
'240110' : {
'cn' :u'多媒体/动画设计',
'en' :'Multimedia/Animation Design'
},
'250080' : {
'cn' :u'排版设计',
'en' :'Layout Design'
},
'290055' : {
'cn' :u'医疗器械市场推广',
'en' :'Medical Equipment Marketing'
},
'110320' : {
'cn' :u'变压器与磁电工程师',
'en' :'Transformer and Magnetoelectricity'
},
'110240' : {
'cn' :u'数据通信工程师',
'en' :'Data Communication Engineer'
},
'291050' : {
'cn' :u'安防主管',
'en' :'Security Technical Service Executive'
},
'160050' : {
'cn' :u'水运/空运/陆运操作',
'en' :'Transport Operation'
},
'250130' : {
'cn' :u'录音/音效师',
'en' :'Recording/Audio Engineer'
},
'220250' : {
'cn' :u'飞机维修/保养',
'en' :'Aircraft Repair/Maintenance'
},
'100070' : {
'cn' :u'项目执行/协调人员',
'en' :'Project Specialist/Coordinator'
},
'100071' : {
'cn' :u'产品经理/主管',
'en' :'Product Manager/Supervisor'
},
'150090' : {
'cn' :u'产品开发/项目策划',
'en' :'Product Development/Planner'
},
'090090' : {
'cn' :u'财务分析员',
'en' :'Financial Analyst'
},
'160110' : {
'cn' :u'物料经理/主管',
'en' :'Materials Manager/Supervisor'
},
'340070' : {
'cn' :u'园艺师',
'en' :'Gardener/Horticulturist'
},
'280070' : {
'cn' :u'医药技术研发管理人员',
'en' :'Pharmaceutical Technology R&D Management'
},
'190200' : {
'cn' :u'宴会管理',
'en' :'Banquet Management'
},
'470009' : {
'cn' :u'会务/会展专员/助理',
'en' :' Exhibition Specialist/Assistant'
},
'360030' : {
'cn' :u'运营专员',
'en' :'Operations Specialist'
},
'090030' : {
'cn' :u'财务经理',
'en' :'Financial Manager'
},
'160090' : {
'cn' :u'物流经理/主管',
'en' :'Logistics manager/Supervisor'
},
'050090' : {
'cn' :u'可靠度工程师',
'en' :'Reliability Engineer'
},
'130073' : {
'cn' :u'咨询项目管理',
'en' :'Consulting Project Management'
},
'470005' : {
'cn' :u'文案/策划',
'en' :'Copywriter/Planner'
},
'210160' : {
'cn' :u'生产计划/调度',
'en' :'Production Planning/Scheduling'
},
'130070' : {
'cn' :u'涉外咨询师',
'en' :'Foreign Consultants'
},
'410019' : {
'cn' :u'基金托管',
'en' :'Trust Fund'
},
'410014' : {
'cn' :u'资金管理',
'en' :'Fund Management'
},
'410015' : {
'cn' :u'行业研究',
'en' :'Industry Research'
},
'410016' : {
'cn' :u'资产管理',
'en' :'Asset Management'
},
'070020' : {
'cn' :u'人力资源经理/主管',
'en' :'Human Resources Manager/Supervisor'
},
'410010' : {
'cn' :u'进出口/信用证结算',
'en' :'Trading / LC Officer'
},
'410011' : {
'cn' :u'风险控制',
'en' :'Risk Management'
},
'410012' : {
'cn' :u'信审核查',
'en' :'Credit Review'
},
'170120' : {
'cn' :u'园艺/园林/景观设计',
'en' :'Gardenning Designer'
},
'090160' : {
'cn' :u'税务经理/主管',
'en' :'Tax Manager/Supervisor'
},
'260072' : {
'cn' :u'培训督导',
'en' :'Supervision Training'
},
'080010' : {
'cn' :u'行政总监',
'en' :'Executive Director'
},
'537' : {
'cn' :u'销售行政/商务',
'en' :'Sales Administration'
},
'536' : {
'cn' :u'销售人员',
'en' :'Salespersons'
},
'535' : {
'cn' :u'销售管理',
'en' :'Sales Management'
},
'534' : {
'cn' :u'公关/媒介',
'en' :'Public Relations/Media'
},
'533' : {
'cn' :u'市场',
'en' :'Marketing Management'
},
'532' : {
'cn' :u'财务/审计/税务',
'en' :'Financial Affairs'
},
'531' : {
'cn' :u'人力资源',
'en' :'Human Resource'
},
'530' : {
'cn' :u'高级管理',
'en' :'Senior Management'
},
'250220' : {
'cn' :u'数码直印/菲林输出',
'en' :'Digital/Film Printing'
},
'539' : {
'cn' :u'法务',
'en' :'Legal'
},
'538' : {
'cn' :u'客户服务/技术支持',
'en' :'Customer Service and Technical Support'
},
'160230' : {
'cn' :u'物料专员/助理',
'en' :'Materials Specialist/Assistant'
},
'210040' : {
'cn' :u'采购管理',
'en' :'Purchasing Management'
},
'360100' : {
'cn' :u'电子商务总监',
'en' :'E-Commerce Director'
},
'020050' : {
'cn' :u'渠道/分销经理/主管',
'en' :'Channel/Distribution Manager/Supervisor'
},
'360080' : {
'cn' :u'网络推广经理/主管',
'en' :'Online Marketing Manager/Supervisor'
},
'180074' : {
'cn' :u'泰语翻译',
'en' :'Thai Translator'
},
'180075' : {
'cn' :u'中国方言翻译',
'en' :'Chinese Dialect Translator'
},
'180070' : {
'cn' :u'韩语/朝鲜语翻译',
'en' :'Korean Translator'
},
'180071' : {
'cn' :u'阿拉伯语翻译',
'en' :'Arabic Translator'
},
'070050' : {
'cn' :u'招聘经理/主管',
'en' :'Recruiting Manager/Supervisor'
},
'070051' : {
'cn' :u'招聘专员/助理',
'en' :'Recruiting Specialist/Assistant'
},
'170194' : {
'cn' :u'楼宇自动化',
'en' :'Building Automation'
},
'060210' : {
'cn' :u'SEM搜索引擎营销',
'en' :'SEM'
},
'170196' : {
'cn' :u'幕墙工程师',
'en' :'Curtain Wall Engineer'
},
'170197' : {
'cn' :u'建筑制图/模型/渲染',
'en' :'CAD Drafter/Building Model/Rendering'
},
'170191' : {
'cn' :u'高级建筑工程师/总工',
'en' :'Senior Architect'
},
'170192' : {
'cn' :u'建筑工程验收',
'en' :'Construction Project Inspector'
},
'170193' : {
'cn' :u'岩土工程',
'en' :'Geotechnical Engineer'
},
'250040' : {
'cn' :u'美术编辑',
'en' :'Art Editor'
},
'170198' : {
'cn' :u'开发报建经理/主管',
'en' :'Applying for Construction Manager/Supervisor'
},
'170199' : {
'cn' :u'市政工程师',
'en' :'Municipal Project Engineer'
},
'430008' : {
'cn' :u'二手车评估师',
'en' :'Second-Hand Car Appraisers'
},
'430001' : {
'cn' :u'汽车销售',
'en' :'Automobile Sales'
},
'430003' : {
'cn' :u'4S店管理',
'en' :'4S Shop Management'
},
'430002' : {
'cn' :u'售后服务客户服务',
'en' :'After-Sales Service/Customer Service'
},
'140030' : {
'cn' :u'证券分析/金融研究',
'en' :'Security Analysis/Financial Research'
},
'430004' : {
'cn' :u'零配件销售',
'en' :'Parts Sales'
},
'430007' : {
'cn' :u'检验检测',
'en' :'Check/Test'
},
'430006' : {
'cn' :u'汽车质量管理',
'en' :'Automotive Quality Management'
},
'220130' : {
'cn' :u'焊接工程师/技师',
'en' :'Welding Engineer'
},
'050070' : {
'cn' :u'认证工程师/审核员',
'en' :'Certification Engineer/Auditor'
},
'100150' : {
'cn' :u'硬件工程师',
'en' :'Hardware Engineer'
},
'110080' : {
'cn' :u'无线/射频通信工程师',
'en' :'RF/ Communication Engineer'
},
'220030' : {
'cn' :u'机械设计师',
'en' :'Mechanical Designer'
},
'100250' : {
'cn' :u'硬件测试',
'en' :'Hardware Testing'
},
'100170' : {
'cn' :u'工程与项目实施',
'en' :'Engineering and Project Implementation'
},
'110140' : {
'cn' :u'技术文档工程师',
'en' :'Technical Documentation Engineer'
},
'060130' : {
'cn' :u'市场企划经理/主管',
'en' :'Marketing Planning Manager/Supervisor'
},
'210260' : {
'cn' :u'生产文员',
'en' :'Production Clerk'
},
'350020' : {
'cn' :u'仿真应用工程师',
'en' :'Simulation Application Engineer'
},
'291010' : {
'cn' :u'店长/卖场管理',
'en' :'Store Manager/Floor Manager'
},
'450008' : {
'cn' :u'业务跟单经理/主管',
'en' :'Merchandising Manager/Supervisor'
},
'150100' : {
'cn' :u'保险顾问/财务规划师',
'en' :' Insurance Consultant'
},
'360210' : {
'cn' :u'特效设计师',
'en' :'Special Effects Designer'
},
'360339' : {
'cn' :u'技术/研发经理',
'en' :'Technology Manager'
},
'110280' : {
'cn' :u'增值产品开发工程师',
'en' :'Value-Added Product Development Engineer'
},
'110040' : {
'cn' :u'工艺/制程工程师(PE)',
'en' :'PE Engineer'
},
'160130' : {
'cn' :u'运输经理/主管',
'en' :'Transport Management/Executive'
},
'110360' : {
'cn' :u'电池/电源开发',
'en' :'Battery/Power Engineer'
},
'290010' : {
'cn' :u'生物工程/生物制药',
'en' :'Biopharmaceutical/Biotechnology'
},
'440009' : {
'cn' :u'质量管理/验货员(QA/QC)',
'en' :'Quality Management QA/QC'
},
'140100' : {
'cn' :u'清算人员',
'en' :'Settlement Officer'
},
'350010' : {
'cn' :u'UI设计师',
'en' :'UI Designer'
},
'440004' : {
'cn' :u'面料辅料采购',
'en' :'Fabric/Accessories Purchasing'
},
'440005' : {
'cn' :u'服装/纺织/皮革跟单',
'en' :'Apparels/Textiles/Leather Goods Merchandiser'
},
'040020' : {
'cn' :u'项目经理/主管',
'en' :'Project Manager/Supervisor'
},
'440003' : {
'cn' :u'面料辅料开发',
'en' :'Fabric/Accessories Development'
},
'440001' : {
'cn' :u'服装/纺织设计总监',
'en' :'Fashion/Textiles Design Director'
},
'380007' : {
'cn' :u'销售培训讲师',
'en' :'Sales trainer'
},
'240130' : {
'cn' :u'展示/陈列设计',
'en' :'Exhibition/Display Design'
},
'380005' : {
'cn' :u'销售运营专员/助理',
'en' :'Sales Operations Executive/Assistant'
},
'380004' : {
'cn' :u'销售运营经理/主管',
'en' :'Sales Operations Manager/Supervisor'
},
'380003' : {
'cn' :u'商务专员/助理',
'en' :'Business Executive/Assistant'
},
'380002' : {
'cn' :u'销售行政专员/助理',
'en' :'Sales Admin. Executive/Assistant'
},
'380001' : {
'cn' :u'销售行政经理/主管',
'en' :'Sales Admin. Manager/Supervisor'
},
'420010' : {
'cn' :u'汽车动力系统工程师',
'en' :'Automobile Power System Engineers'
},
'100320' : {
'cn' :u'网站策划',
'en' :'Site Planning'
},
'280130' : {
'cn' :u'药品注册',
'en' :'Drug Registration'
},
'220040' : {
'cn' :u'机械制图员',
'en' :'Mechanical Draftsman'
},
'380008' : {
'cn' :u'业务分析经理/主管',
'en' :'Business Analysis Manager/Supervisor'
},
'510008' : {
'cn' :u'房地产招商',
'en' :'Real Estate Investment'
},
'569' : {
'cn' :u'百货/连锁/零售服务',
'en' :'Department Store/Chain Shops/Retail'
},
'150030' : {
'cn' :u'保险培训师',
'en' :'Insurance Trainer'
},
'510001' : {
'cn' :u'房地产项目策划专员/助理',
'en' :'Real Estate Planning Specialist/Assistant'
},
'510002' : {
'cn' :u'配套经理/主管',
'en' :'Real Estate Supporting Manager/Supervisor'
},
'563' : {
'cn' :u'信托/担保/拍卖/典当',
'en' :'Other'
},
'564' : {
'cn' :u'生产工艺',
'en' :'Production Technology'
},
'565' : {
'cn' :u'采购/物料/设备管理',
'en' :'Purchasing/Material/Equipment Management'
},
'510006' : {
'cn' :u'房地产销售经理/主管',
'en' :'Real Estate Sales Manager/Supervisor'
},
'510007' : {
'cn' :u'房地产销售人员',
'en' :'Real Estate Sales'
},
'360280' : {
'cn' :u'用户研究员',
'en' :'User Researcher'
},
'110210' : {
'cn' :u'通信技术工程师',
'en' :'Communication Engineer'
},
'450005' : {
'cn' :u'外贸专员/助理',
'en' :'Trading Specialist/Assistant'
},
'280090' : {
'cn' :u'美容/整形师',
'en' :'Beautician/Plastic Surgeon'
},
'160160' : {
'cn' :u'物流/仓储调度',
'en' :'Logistics/Warehousing Dispatcher'
},
'090040' : {
'cn' :u'财务主管/总帐主管',
'en' :'Financial Director/General Accounts Director'
},
'240152' : {
'cn' :u'绘画',
'en' :'Graphic Illustrator'
},
'240153' : {
'cn' :u'平面设计总监/经理',
'en' :'Graphic Design Director/Manager'
},
'240151' : {
'cn' :u'创意指导/总监',
'en' :'Creative Director/Director'
},
'240156' : {
'cn' :u'CAD设计/制图',
'en' :'CAD design/drafting'
},
'240157' : {
'cn' :u'原画师',
'en' :'Original Artist'
},
'240155' : {
'cn' :u'玩具设计',
'en' :'Toy Design'
},
'220260' : {
'cn' :u'维修经理/主管',
'en' :'Maintenance Manager/Supervisor'
},
'240090' : {
'cn' :u'媒体广告设计',
'en' :'Media Advertising'
},
'100020' : {
'cn' :u'技术/研发总监',
'en' :'Technology Director'
},
'020010' : {
'cn' :u'销售总监',
'en' :'Sales Director'
},
'600' : {
'cn' :u'物流/仓储',
'en' :'Logistics/Warehouse'
},
'603' : {
'cn' :u'医学研发/临床试验',
'en' :'Medical Research /Clinical Trials'
},
'602' : {
'cn' :u'贸易',
'en' :'Trade'
},
'190010' : {
'cn' :u'酒店/宾馆管理',
'en' :'Hotel Management'
},
'270020' : {
'cn' :u'法律顾问',
'en' :'Legal Adviser'
},
'310050' : {
'cn' :u'储备干部',
'en' :'Associate Trainee'
},
'070010' : {
'cn' :u'人力资源总监',
'en' :'Director of Human Resources'
},
'240070' : {
'cn' :u'家具/家居设计',
'en' :'Furniture/Household Product Design'
},
'360060' : {
'cn' :u'SEO搜索引擎优化',
'en' :'SEO'
},
'170010' : {
'cn' :u'建筑工程师',
'en' :'Architect'
},
'130040' : {
'cn' :u'咨询顾问/咨询员',
'en' :'Consultant'
},
'210130' : {
'cn' :u'维修工程师',
'en' :'Maintenance Engineer'
},
'020127' : {
'cn' :u'客户总监',
'en' :'Account Director'
},
'020126' : {
'cn' :u'团购经理/主管',
'en' :'Team Buying Manager/Supervisor'
},
'020125' : {
'cn' :u'销售总经理/销售副总裁',
'en' :'Sales General Manager/Vice President'
},
'020122' : {
'cn' :u'客户经理/主管',
'en' :'Sales Account Manager/Supervisor'
},
'020121' : {
'cn' :u'大客户销售管理',
'en' :'Key Account Sales Management'
},
'020120' : {
'cn' :u'业务拓展经理/主管',
'en' :'Business Development Supervisor/Manager'
},
'170150' : {
'cn' :u'城市规划与设计',
'en' :'Urban Planning and Design'
},
'020080' : {
'cn' :u'售前支持工程师',
'en' :'Pre-Sales Support Engineer'
},
'090130' : {
'cn' :u'审计专员/助理',
'en' :'Audit Executive/Assistant'
},
'160260' : {
'cn' :u'单证员',
'en' :'Documentation Specialist'
},
'110130' : {
'cn' :u'集成电路IC设计/应用工程师',
'en' :'IC Design/Application Engineer'
},
'190260' : {
'cn' :u'旅游产品销售',
'en' :'Tourism Product Sales'
},
'010030' : {
'cn' :u'首席技术官CTO/首席信息官CIO',
'en' :'Chief Technology Officer/Chief Information Officer'
},
'450001' : {
'cn' :u'采购总监',
'en' :'Purchasing Director'
},
'210220' : {
'cn' :u'生产项目总监',
'en' :'Production Project Director'
},
'280120' : {
'cn' :u'针灸推拿',
'en' :'Acupuncture and Massage'
},
'190060' : {
'cn' :u'营养师',
'en' :'Dietitian'
},
'180020' : {
'cn' :u'日语翻译',
'en' :'Japanese Translator'
},
'050030' : {
'cn' :u'质量检测员/测试员',
'en' :'Quality Inspector/Tester'
},
'120150' : {
'cn' :u'能源/矿产项目管理',
'en' :'Energy/Mining Project Management'
},
'070120' : {
'cn' :u'绩效经理/主管',
'en' :'Performance Assessment Manager/Supervisor'
},
'070121' : {
'cn' :u'绩效专员/助理',
'en' :'Performance Assessment Specialist/Assistant'
},
'210180' : {
'cn' :u'工业工程师(IE)',
'en' :'Industrial Engineer'
},
'070080' : {
'cn' :u'培训专员/助理',
'en' :'Training Specialist/Assistant'
},
'010040' : {
'cn' :u'首席财务官CFO',
'en' :'Chief Financial Officer/CFO'
}
}
| followcat/predator | sources/liepin_industry.py | Python | lgpl-3.0 | 97,374 | [
"BLAST"
] | 0fec7f1aa5d7bc1b743e613cf2fcbda944cccdc84f4160ffaabbcd815bbe4a15 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.interactions
import numpy as np
class TabulatedTest(ut.TestCase):

    """Check tabulated non-bonded and bonded pair interactions against
    the linear force/energy tables they were constructed from.
    """

    system = espressomd.System(box_l=3 * [10.])
    system.time_step = 0.01
    system.cell_system.skin = 0.4

    def setUp(self):
        # Tabulation covers distances in [1, 2] sampled at 100 points.
        self.min_ = 1.
        self.max_ = 2.
        self.dx = (self.max_ - self.min_) / 99.
        samples = np.arange(100)
        # Force grows linearly with the sample index, energy shrinks.
        self.force = 5 + samples * 2.3 * self.dx
        self.energy = 5 - samples * 2.3 * self.dx
        self.system.part.add(type=0, pos=[5., 5., 5.0])
        self.system.part.add(type=0, pos=[5., 5., 5.5])

    def tearDown(self):
        self.system.part.clear()

    def check(self):
        part_a, part_b = self.system.part.all()
        # Initial separation (0.5) lies below the tabulated range, so no
        # forces may act yet.
        np.testing.assert_allclose(np.copy(self.system.part.all().f), 0.0)
        span = self.max_ - self.min_
        for offset in np.linspace(0, span, 200, endpoint=False):
            part_b.pos = [5., 5., 6. + offset]
            self.system.integrator.run(0)
            expected = 5. + offset * 2.3
            np.testing.assert_allclose(
                np.copy(part_a.f), [0., 0., -expected])
            # Newton's third law: forces on the pair are opposite.
            np.testing.assert_allclose(
                np.copy(part_a.f), -np.copy(part_b.f))
            self.assertAlmostEqual(
                self.system.analysis.energy()['total'], 5. - offset * 2.3)

    @utx.skipIfMissingFeatures("TABULATED")
    def test_non_bonded(self):
        ia = self.system.non_bonded_inter[0, 0].tabulated
        ia.set_params(min=self.min_, max=self.max_,
                      energy=self.energy, force=self.force)
        params = ia.get_params()
        np.testing.assert_allclose(params['force'], self.force)
        np.testing.assert_allclose(params['energy'], self.energy)
        self.assertAlmostEqual(params['min'], self.min_)
        self.assertAlmostEqual(params['max'], self.max_)
        self.check()
        # Switch the interaction off again so later tests are unaffected.
        ia.set_params(min=-1, max=-1, energy=[], force=[])

    @utx.skipIfMissingFeatures("TABULATED")
    def test_bonded(self):
        bond = espressomd.interactions.TabulatedDistance(
            min=self.min_, max=self.max_,
            energy=self.energy, force=self.force)
        self.system.bonded_inter.add(bond)
        np.testing.assert_allclose(bond.params['force'], self.force)
        np.testing.assert_allclose(bond.params['energy'], self.energy)
        self.assertAlmostEqual(bond.params['min'], self.min_)
        self.assertAlmostEqual(bond.params['max'], self.max_)
        part_a, part_b = self.system.part.all()
        part_a.add_bond((bond, part_b))
        self.check()
if __name__ == "__main__":
    # Run the test suite when executed as a script.
    ut.main()
| pkreissl/espresso | testsuite/python/tabulated.py | Python | gpl-3.0 | 3,393 | [
"ESPResSo"
] | a1ebe7a22379229e2543ee1ef31ede34f6bfdbc4ba6868834e49df2a546744b5 |
#!/usr/bin/env ipython -wthread
from enthought.mayavi.mlab import *
from neuron import h as nrn
from neuron import gui
from neuron3d import *
##==============================================================================
# Cell
##==============================================================================
# Load the hoc model; presumably "stn.hoc" defines the subthalamic-neuron
# morphology and mechanisms -- TODO confirm against the hoc file.
nrn.load_file("stn.hoc")
# Cell is provided by the star import from neuron3d above; it wraps the
# currently loaded NEURON model for 3D visualization.
cell = Cell(name="Subthalamic Neuron")
# Open the interactive Traits UI window for the cell.
cell.edit_traits()
| tfoutz99/Neuron3D | Examples/6_Subthalamic_Neuron.py | Python | gpl-3.0 | 395 | [
"Mayavi",
"NEURON"
] | 5b822ef280d79eb3cff75e59d8448d1c4fc23ff825ab8797a2e122154f91a5b6 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import time
from flask import Flask, render_template, request
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
app = Flask(__name__)  # WSGI application object served by App Engine
class Visit(ndb.Model):
    """Datastore entity recording one page visit.

    visitor   -- "IP: user-agent" string identifying the caller
    timestamp -- set automatically when the entity is first written
    """
    visitor = ndb.StringProperty()
    timestamp = ndb.DateTimeProperty(auto_now_add=True)  # creation time
def store_visit(remote_addr, user_agent):
    """Record a single visit as a new Visit entity in Datastore.

    The entity's ``visitor`` field combines the caller's IP address and
    user-agent string.
    """
    visitor_label = '{}: {}'.format(remote_addr, user_agent)
    entity = Visit(visitor=visitor_label)
    entity.put()
def fetch_visits(limit):
    """Return the most recent visits and enqueue deletion of older ones.

    Args:
        limit: maximum number of Visit entities to return, newest first.

    Returns:
        (visits, oldest_str) where ``visits`` is the list of fetched
        entities and ``oldest_str`` is the human-readable timestamp of
        the oldest one returned; everything older is handed to the
        ``/trim`` push task for deletion.
    """
    data = Visit.query().order(-Visit.timestamp).fetch(limit)
    # NOTE(review): assumes at least one Visit exists -- root() stores one
    # immediately before calling us; data[-1] would raise on an empty list.
    oldest = time.mktime(data[-1].timestamp.timetuple())
    oldest_str = time.ctime(oldest)
    # Lazy logging arguments instead of eager %-formatting.
    logging.info('Delete entities older than %s', oldest_str)
    taskqueue.add(url='/trim', params={'oldest': oldest})
    return data, oldest_str
@app.route('/trim', methods=['POST'])
def trim():
    """(Push) task queue handler deleting visits older than a cutoff.

    Expects a POST form field ``oldest`` holding a Unix timestamp; every
    Visit entity strictly older than it is deleted. Always returns an
    empty string so App Engine sees a 200 and does not retry the task.
    """
    oldest = request.form.get('oldest', type=float)
    keys = Visit.query(
        Visit.timestamp < datetime.fromtimestamp(oldest)
    ).fetch(keys_only=True)
    nkeys = len(keys)
    if nkeys:
        # Lazy logging arguments instead of eager %-formatting.
        logging.info('Deleting %d entities: %s',
                     nkeys, ', '.join(str(k.id()) for k in keys))
        ndb.delete_multi(keys)
    else:
        logging.info('No entities older than: %s', time.ctime(oldest))
    return ''  # need to return SOME string w/200
@app.route('/')
def root():
    """Main (GET) handler: record this visit, then render the latest ten."""
    store_visit(request.remote_addr, request.user_agent)
    recent, cutoff = fetch_visits(10)
    return render_template('index.html',
                           visits=recent, oldest=cutoff)
| googlecodelabs/migrate-python2-appengine | mod7-gaetasks/main.py | Python | apache-2.0 | 2,374 | [
"VisIt"
] | dc270f807c54e12107d584d86c937e78b9f2e28f23c1a213bd77a3f22c80f104 |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-CAs
# Author : Ricardo Graciani
########################################################################
"""
Refresh the local copy of the CA certificates and revocation lists.
Connects to the BundleDelivery service to obtain the tar balls. Needed when proxies appear to be
invalid.
Example:
$ dirac-admin-get-CAs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient
__RCSID__ = "$Id$"
def _report_sync(result, label):
    """Log the outcome of one bundle synchronization.

    Args:
        result: S_OK/S_ERROR structure returned by BundleDeliveryClient.
        label: bundle name used in the messages ("CAs" or "CRLs").

    Exits the process with status 1 on failure.
    """
    if not result["OK"]:
        DIRAC.gLogger.error("Error while updating %s" % label, result["Message"])
        DIRAC.exit(1)
    elif result["Value"]:
        DIRAC.gLogger.notice("%s got updated" % label)
    else:
        DIRAC.gLogger.notice("%s are already synchronized" % label)


@Script()
def main():
    """Refresh the local copies of the CA certificates and CRLs."""
    # CA checks must be skipped here: this script is exactly what repairs
    # a broken or outdated local CA bundle.
    Script.addDefaultOptionValue("/DIRAC/Security/SkipCAChecks", "yes")
    Script.parseCommandLine(ignoreErrors=True)
    bdc = BundleDeliveryClient()
    _report_sync(bdc.syncCAs(), "CAs")
    _report_sync(bdc.syncCRLs(), "CRLs")
    DIRAC.exit(0)
if __name__ == "__main__":
    # Script entry point.
    main()
| ic-hep/DIRAC | src/DIRAC/FrameworkSystem/scripts/dirac_admin_get_CAs.py | Python | gpl-3.0 | 1,569 | [
"DIRAC"
] | ac7f60baf49190ea2cb272085288d40dc4b0bad5e9341fe59731eb6c1a6a8314 |
""" This is a test of the FileCatalogDB
It supposes that the DB is present.
"""
# pylint: disable=invalid-name,wrong-import-position
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import unittest
import itertools
import os
import sys
from collections import defaultdict
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.DataManagementSystem.DB.FileCatalogDB import FileCatalogDB
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
# Fixed identifiers used by the test cases below.
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'

# Catalog namespace exercised by the tests: a user directory, a sub-directory
# and a file inside it, plus paths that are guaranteed not to exist.
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
testDir = parentDir + '/testdir'
testFile = testDir + '/testfile'
nonExistingDir = "/I/Dont/exist/dir"
nonExistingFile = "/I/Dont/exist"

# Host certificate chain used to build the credential dictionary.
x509Chain = (
    "<X509Chain 3 certs [/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch]"
    "[/DC=ch/DC=cern/CN=CERN Trusted Certification Authority][/DC=ch/DC=cern/CN=CERN Root CA]>"
)

# Host credentials carrying FC_MANAGEMENT, i.e. catalog-management rights.
credDict = {
    'DN': '/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch',
    'extraCredentials': 'hosts',
    'group': 'visitor',
    'CN': 'volhcb12.cern.ch',
    'x509Chain': x509Chain,
    'username': 'anonymous',
    'isLimitedProxy': False,
    'properties': [FC_MANAGEMENT],
    'isProxy': False,
}

# Expected privilege level and identity used in permission-sensitive assertions.
isAdmin = False
proxyUser = 'anonymous'
proxyGroup = 'visitor'
# TESTS WERE DESIGNED WITH THIS CONFIGURATION
# DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB',
# 'SEManager' : 'SEManagerDB',
# 'SecurityManager' : 'NoSecurityManager',
# 'DirectoryManager' : 'DirectoryLevelTree',
# 'FileManager' : 'FileManager',
# 'DirectoryMetadata' : 'DirectoryMetadata',
# 'FileMetadata' : 'FileMetadata',
# 'DatasetManager' : 'DatasetManager',
# 'UniqueGUID' : False,
# 'GlobalReadAccess' : True,
# 'LFNPFNConvention' : 'Strong',
# 'ResolvePFN' : True,
# 'DefaultUmask' : 0775,
# 'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'VisibleFileStatus' : ['AprioriGood'],
# 'VisibleReplicaStatus': ['AprioriGood'] }
# Active manager configuration handed to FileCatalogDB.setConfig() in setUp().
DATABASE_CONFIG = {
    'UserGroupManager': 'UserAndGroupManagerDB',   # UserAndGroupManagerDB, UserAndGroupManagerCS
    'SEManager': 'SEManagerDB',                    # SEManagerDB, SEManagerCS
    # NoSecurityManager, DirectorySecurityManager, FullSecurityManager
    'SecurityManager': 'NoSecurityManager',
    # DirectorySimpleTree, DirectoryFlatTree, DirectoryNodeTree, DirectoryLevelTree
    'DirectoryManager': 'DirectoryLevelTree',
    'FileManager': 'FileManager',                  # FileManagerFlat, FileManager
    'DirectoryMetadata': 'DirectoryMetadata',
    'FileMetadata': 'FileMetadata',
    'DatasetManager': 'DatasetManager',
    'UniqueGUID': True,
    'GlobalReadAccess': True,
    'LFNPFNConvention': 'Strong',
    'ResolvePFN': True,
    'DefaultUmask': 0o775,
    'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
    'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
    'VisibleFileStatus': ['AprioriGood'],
    'VisibleReplicaStatus': ['AprioriGood'],
}

# Every known implementation of each manager type (including CS-backed ones).
ALL_MANAGERS = {
    "UserGroupManager": ["UserAndGroupManagerDB", "UserAndGroupManagerCS"],
    "SEManager": ["SEManagerDB", "SEManagerCS"],
    "SecurityManager": ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
    "DirectoryManager": ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
    "FileManager": ["FileManagerFlat", "FileManager"],
}

# Same as ALL_MANAGERS but without the CS-backed implementations.
ALL_MANAGERS_NO_CS = {
    "UserGroupManager": ["UserAndGroupManagerDB"],
    "SEManager": ["SEManagerDB"],
    "SecurityManager": ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
    "DirectoryManager": ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
    "FileManager": ["FileManagerFlat", "FileManager"],
}

# The manager combination the suite is normally run against.
DEFAULT_MANAGER = {
    "UserGroupManager": ["UserAndGroupManagerDB"],
    "SEManager": ["SEManagerDB"],
    "SecurityManager": ["DirectorySecurityManagerWithDelete"],
    "DirectoryManager": ["DirectoryClosure"],
    "FileManager": ["FileManagerPs"],
}

# Alternative (legacy) combination kept for reference.
DEFAULT_MANAGER_2 = {
    "UserGroupManager": ["UserAndGroupManagerDB"],
    "SEManager": ["SEManagerDB"],
    "SecurityManager": ["NoSecurityManager"],
    "DirectoryManager": ["DirectoryLevelTree"],
    "FileManager": ["FileManager"],
}

MANAGER_TO_TEST = DEFAULT_MANAGER
class FileCatalogDBTestCase(unittest.TestCase):
    """Base fixture: each test gets a FileCatalogDB configured per DATABASE_CONFIG in self.db."""

    def setUp(self):
        # Build the catalog DB and apply the manager combination under test.
        db = FileCatalogDB()
        db.setConfig(DATABASE_CONFIG)
        self.db = db

    def tearDown(self):
        # No cleanup here: each test removes what it created.
        pass
class SECase(FileCatalogDBTestCase):
    """Storage-element registration tests."""

    def test_seOperations(self):
        """Testing SE related operation"""
        # Registering an SE is an admin-only operation.
        ret = self.db.addSE(seName, credDict)
        if isAdmin:
            self.assertTrue(ret["OK"], "addSE failed when adding new SE: %s" % ret)
            seId = ret["Value"]
            # Registering the same SE again must hand back the same identifier.
            ret = self.db.addSE(seName, credDict)
            self.assertEqual(ret["Value"], seId, "addSE failed when adding existing SE: %s" % ret)
        else:
            self.assertEqual(ret["OK"], False, "addSE should fail when adding new SE as non admin: %s" % ret)
        # Removal follows the same privilege rule.
        ret = self.db.deleteSE(seName, credDict)
        self.assertEqual(ret["OK"], True if isAdmin else False, "deleteE failed %s" % ret)
class UserGroupCase(FileCatalogDBTestCase):
    """User and group registration tests; every call's outcome depends on admin rights."""

    def test_userOperations(self):
        """Testing the user related operations"""
        if isAdmin:
            print("Running UserTest in admin mode")
        else:
            print("Running UserTest in non admin mode")
        # isAdmin is a plain bool, so it doubles as the expected 'OK' value.
        expectedRes = isAdmin

        # Add a new user, then the same user again.
        result = self.db.addUser(testUser, credDict)
        self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding new user: %s" % result)
        result = self.db.addUser(testUser, credDict)
        self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding existing user: %s" % result)

        # Fetch the list of users and check ours is present (admin only).
        result = self.db.getUsers(credDict)
        self.assertEqual(result['OK'], expectedRes, "getUsers failed: %s" % result)
        if isAdmin:
            self.assertEqual(testUser in result['Value'], expectedRes, "getUsers failed: %s" % result)

        # Remove the user we created.
        result = self.db.deleteUser(testUser, credDict)
        self.assertEqual(result['OK'], expectedRes, "deleteUser failed: %s" % result)

    def test_groupOperations(self):
        """Testing the group related operations"""
        if isAdmin:
            print("Running UserTest in admin mode")
        else:
            print("Running UserTest in non admin mode")
        expectedRes = isAdmin

        # Create a new group, then the same group again.
        result = self.db.addGroup(testGroup, credDict)
        self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding new user: %s" % result)
        result = self.db.addGroup(testGroup, credDict)
        self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding existing user: %s" % result)

        # List the groups and check ours is present (admin only).
        result = self.db.getGroups(credDict)
        self.assertEqual(result['OK'], expectedRes, "getGroups failed: %s" % result)
        if isAdmin:
            self.assertEqual(testGroup in result['Value'], expectedRes)

        # Remove the group we created.
        result = self.db.deleteGroup(testGroup, credDict)
        self.assertEqual(result['OK'], expectedRes, "deleteGroup failed: %s" % result)
class FileCase(FileCatalogDBTestCase):
    """File-level catalog operations: add, exists, status, ownership, metadata, removal."""

    def test_fileOperations(self):
        """
        Tests the File related Operations
        this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
        """
        fileData = {testFile: {'PFN': 'testfile', 'SE': 'testSE', 'Size': 123,
                               'GUID': '1000', 'Checksum': '0'}}

        # Adding a new file
        result = self.db.addFile(fileData, credDict)
        self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
        self.assertEqual(set(result['Value']['Successful']), {testFile}, "Failed to add new file %s" % result)

        # exists() accepts a bare LFN, an LFN -> GUID mapping, or an LFN -> metadata dict.
        result = self.db.exists(testFile, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile), testFile,
                         "exists( testFile) should be the same lfn %s" % result)

        result = self.db.exists({testFile: '1000'}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile), testFile,
                         "exists( testFile : 1000) should be the same lfn %s" % result)

        result = self.db.exists({testFile: {'GUID': '1000', 'PFN': 'blabla'}}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile), testFile,
                         "exists( testFile : 1000) should be the same lfn %s" % result)

        # In fact, we don't check if the GUID is correct...
        result = self.db.exists({testFile: '1001'}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile), testFile,
                         "exists( testFile : 1001) should be the same lfn %s" % result)

        # An unknown LFN carrying a known GUID resolves to the LFN that owns the GUID.
        result = self.db.exists({testFile + '2': '1000'}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile + '2'), testFile,
                         "exists( testFile2 : 1000) should return testFile %s" % result)

        # Re-adding the same file with identical parameters must succeed.
        result = self.db.addFile(fileData, credDict)
        self.assertTrue(result["OK"], "addFile failed when adding existing file with same param %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "addFile failed: it should be possible to add an existing lfn with same param %s" % result)

        # Re-adding with a different checksum must be refused.
        result = self.db.addFile({testFile: {'PFN': 'testfile', 'SE': 'testSE', 'Size': 123,
                                             'GUID': '1000', 'Checksum': '1'}}, credDict)
        self.assertTrue(result["OK"], "addFile failed when adding existing file with different parem %s" % result)
        self.assertTrue(testFile in result["Value"]["Failed"],
                        "addFile failed: it should not be possible to add an existing lfn with different param %s" % result)

        # A new LFN re-using an existing GUID must be refused.
        result = self.db.addFile({testFile + '2': {'PFN': 'testfile', 'SE': 'testSE', 'Size': 123,
                                                   'GUID': '1000', 'Checksum': '0'}}, credDict)
        self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
        self.assertTrue(testFile + '2' in result["Value"]["Failed"],
                        "addFile failed: it should not be possible to add a new lfn with existing GUID %s" % result)

        ##################################################################################
        # Setting existing status of existing file
        result = self.db.setFileStatus({testFile: "AprioriGood"}, credDict)
        self.assertTrue(result["OK"],
                        "setFileStatus failed when setting existing status of existing file %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "setFileStatus failed: %s should be in successful (%s)" % (testFile, result))

        # Setting unexisting status of existing file
        result = self.db.setFileStatus({testFile: "Happy"}, credDict)
        self.assertTrue(result["OK"],
                        "setFileStatus failed when setting un-existing status of existing file %s" % result)
        self.assertTrue(testFile in result["Value"]["Failed"], "setFileStatus should have failed %s" % result)

        # Setting existing status of unexisting file
        result = self.db.setFileStatus({nonExistingFile: "Trash"}, credDict)
        self.assertTrue(result["OK"],
                        "setFileStatus failed when setting existing status of non-existing file %s" % result)
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "setFileStatus failed: %s should be in failed (%s)" % (nonExistingFile, result))

        ##################################################################################
        result = self.db.isFile([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "isFile failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "isFile : %s should be in Successful %s" % (testFile, result))
        self.assertTrue(result["Value"]["Successful"][testFile],
                        "isFile : %s should be seen as a file %s" % (testFile, result))
        self.assertTrue(nonExistingFile in result["Value"]["Successful"],
                        "isFile : %s should be in Successful %s" % (nonExistingFile, result))
        self.assertTrue(result["Value"]["Successful"][nonExistingFile] is False,
                        "isFile : %s should be seen as a file %s" % (nonExistingFile, result))

        result = self.db.changePathOwner({testFile: "toto", nonExistingFile: "tata"}, credDict)
        self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "changePathOwner : %s should be in Successful %s" % (testFile, result))
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "changePathOwner : %s should be in Failed %s" % (nonExistingFile, result))

        result = self.db.changePathGroup({testFile: "toto", nonExistingFile: "tata"}, credDict)
        self.assertTrue(result["OK"], "changePathGroup failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "changePathGroup : %s should be in Successful %s" % (testFile, result))
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "changePathGroup : %s should be in Failed %s" % (nonExistingFile, result))

        result = self.db.changePathMode({testFile: 0o44, nonExistingFile: 0o44}, credDict)
        self.assertTrue(result["OK"], "changePathMode failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "changePathMode : %s should be in Successful %s" % (testFile, result))
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "changePathMode : %s should be in Failed %s" % (nonExistingFile, result))

        result = self.db.getFileSize([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "getFileSize failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "getFileSize : %s should be in Successful %s" % (testFile, result))
        self.assertEqual(result["Value"]["Successful"][testFile], 123,
                         "getFileSize got incorrect file size %s" % result)
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "getFileSize : %s should be in Failed %s" % (nonExistingFile, result))

        result = self.db.getFileMetadata([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "getFileMetadata failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "getFileMetadata : %s should be in Successful %s" % (testFile, result))
        self.assertEqual(result["Value"]["Successful"][testFile]["Owner"], "toto",
                         "getFileMetadata got incorrect Owner %s" % result)
        self.assertEqual(result["Value"]["Successful"][testFile]["Status"], "AprioriGood",
                         "getFileMetadata got incorrect status %s" % result)
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "getFileMetadata : %s should be in Failed %s" % (nonExistingFile, result))

        # NOTE: getFileDetails does not follow the Successful/Failed convention,
        # so it is deliberately not exercised here.
        # TODO: add coverage for file ancestors and descendents.

        result = self.db.getSEDump('testSE')
        self.assertTrue(result['OK'], "Error when getting SE dump %s" % result)
        self.assertEqual(result['Value'], ((testFile, '0', 123),),
                         "Did not get the expected SE Dump %s" % result['Value'])

        # Removal reports success for both the real file and the non-existing one.
        result = self.db.removeFile([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "removeFile failed: %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "removeFile : %s should be in Successful %s" % (testFile, result))
        self.assertTrue(result["Value"]["Successful"][testFile],
                        "removeFile : %s should be in True %s" % (testFile, result))
        self.assertTrue(result["Value"]["Successful"][nonExistingFile],
                        "removeFile : %s should be in True %s" % (nonExistingFile, result))
class ReplicaCase(FileCatalogDBTestCase):
    """Replica-level catalog operations: add, status, listing, removal."""

    def test_replicaOperations(self):
        """
        this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
        """
        # Adding a new file
        result = self.db.addFile({testFile: {'PFN': 'testfile', 'SE': 'testSE', 'Size': 123,
                                             'GUID': '1000', 'Checksum': '0'}}, credDict)
        self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)

        # Adding a new replica, then the very same replica again: both must succeed.
        for _ in range(2):
            result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
            self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
            self.assertTrue(testFile in result['Value']["Successful"],
                            "addReplica failed when adding new Replica %s" % result)

        # Adding replica of a non existing file
        result = self.db.addReplica({nonExistingFile: {"PFN": "Idontexist", "SE": "otherSE"}}, credDict)
        self.assertTrue(result['OK'],
                        "addReplica failed when adding Replica to non existing Replica %s" % result)
        self.assertTrue(nonExistingFile in result['Value']["Failed"],
                        "addReplica for non existing file should go in Failed %s" % result)

        # Setting existing status of existing Replica
        result = self.db.setReplicaStatus({testFile: {"Status": "Trash", "SE": "otherSE"}}, credDict)
        self.assertTrue(result["OK"],
                        "setReplicaStatus failed when setting existing status of existing Replica %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "setReplicaStatus failed: %s should be in successful (%s)" % (testFile, result))

        # Setting non existing status of existing Replica
        result = self.db.setReplicaStatus({testFile: {"Status": "randomStatus", "SE": "otherSE"}}, credDict)
        self.assertTrue(result["OK"],
                        "setReplicaStatus failed when setting non-existing status of existing Replica %s" % result)
        self.assertTrue(testFile in result["Value"]["Failed"],
                        "setReplicaStatus failed: %s should be in Failed (%s)" % (testFile, result))

        # Setting existing status of non-existing Replica
        result = self.db.setReplicaStatus({testFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
        self.assertTrue(result["OK"],
                        "setReplicaStatus failed when setting existing status of non-existing Replica %s" % result)
        self.assertTrue(testFile in result["Value"]["Failed"],
                        "setReplicaStatus failed: %s should be in Failed (%s)" % (testFile, result))

        # Setting existing status of non-existing File
        result = self.db.setReplicaStatus({nonExistingFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
        self.assertTrue(result["OK"],
                        "setReplicaStatus failed when setting existing status of non-existing File %s" % result)
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "setReplicaStatus failed: %s should be in Failed (%s)" % (nonExistingFile, result))

        # Getting existing status of existing Replica but not visible
        result = self.db.getReplicaStatus({testFile: "testSE"}, credDict)
        self.assertTrue(result["OK"],
                        "getReplicaStatus failed when getting existing status of existing Replica %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "getReplicaStatus failed: %s should be in Successful (%s)" % (testFile, result))

        # Getting existing status of existing Replica but not visible
        result = self.db.getReplicaStatus({testFile: "otherSE"}, credDict)
        self.assertTrue(result["OK"],
                        "getReplicaStatus failed when getting existing status of existing Replica but not visible %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "getReplicaStatus failed: %s should be in Successful (%s)" % (testFile, result))

        # Getting status of non-existing File but not visible
        result = self.db.getReplicaStatus({nonExistingFile: "testSE"}, credDict)
        self.assertTrue(result["OK"],
                        "getReplicaStatus failed when getting status of non existing File %s" % result)
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "getReplicaStatus failed: %s should be in failed (%s)" % (nonExistingFile, result))

        # Getting replicas of existing File and non existing file, seeing all replicas
        result = self.db.getReplicas([testFile, nonExistingFile], allStatus=True, credDict=credDict)
        self.assertTrue(result["OK"], "getReplicas failed %s" % result)
        self.assertTrue(testFile in result["Value"]["Successful"],
                        "getReplicas failed, %s should be in Successful %s" % (testFile, result))
        self.assertEqual(result["Value"]["Successful"][testFile], {"otherSE": "", "testSE": ""},
                         "getReplicas failed, %s should be in Successful %s" % (testFile, result))
        self.assertTrue(nonExistingFile in result["Value"]["Failed"],
                        "getReplicas failed, %s should be in Failed %s" % (nonExistingFile, result))

        # removing master replica
        result = self.db.removeReplica({testFile: {"SE": "testSE"}}, credDict)
        self.assertTrue(result['OK'], "removeReplica failed when removing master Replica %s" % result)
        self.assertTrue(testFile in result['Value']["Successful"],
                        "removeReplica failed when removing master Replica %s" % result)

        # removing non existing replica of existing File
        result = self.db.removeReplica({testFile: {"SE": "nonExistingSe2"}}, credDict)
        self.assertTrue(result['OK'], "removeReplica failed when removing non existing Replica %s" % result)
        self.assertTrue(testFile in result['Value']["Successful"],
                        "removeReplica failed when removing new Replica %s" % result)

        # removing non existing replica of non existing file
        result = self.db.removeReplica({nonExistingFile: {"SE": "nonExistingSe3"}}, credDict)
        self.assertTrue(result['OK'],
                        "removeReplica failed when removing replica of non existing File %s" % result)
        self.assertTrue(nonExistingFile in result['Value']["Successful"],
                        "removeReplica of non existing file, %s should be in Successful %s" % (nonExistingFile, result))

        # removing last replica
        result = self.db.removeReplica({testFile: {"SE": "otherSE"}}, credDict)
        self.assertTrue(result['OK'], "removeReplica failed when removing last Replica %s" % result)
        self.assertTrue(testFile in result['Value']["Successful"],
                        "removeReplica failed when removing last Replica %s" % result)

        # Cleaning after us
        result = self.db.removeFile(testFile, credDict)
        self.assertTrue(result["OK"], "removeFile failed: %s" % result)
class DirectoryCase(FileCatalogDBTestCase):
    """Directory-level catalog operations: create, size, listing, ownership, removal."""

    def test_directoryOperations(self):
        """
        Tests the Directory related Operations
        this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
        """
        # Adding a new directory and a file inside it
        result = self.db.createDirectory(testDir, credDict)
        self.assertTrue(result['OK'], "addDirectory failed when adding new directory %s" % result)

        result = self.db.addFile({testFile: {'PFN': 'testfile', 'SE': 'testSE', 'Size': 123,
                                             'GUID': '1000', 'Checksum': '0'}}, credDict)
        self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)

        # Re-adding the same directory (CAUTION, different from addFile)
        result = self.db.createDirectory(testDir, credDict)
        self.assertTrue(result["OK"], "addDirectory failed when adding existing directory %s" % result)
        self.assertTrue(testDir in result["Value"]["Successful"],
                        "addDirectory failed: it should be possible to add an existing lfn %s" % result)

        result = self.db.isDirectory([testDir, nonExistingDir], credDict)
        self.assertTrue(result["OK"], "isDirectory failed: %s" % result)
        self.assertTrue(testDir in result["Value"]["Successful"],
                        "isDirectory : %s should be in Successful %s" % (testDir, result))
        self.assertTrue(result["Value"]["Successful"][testDir],
                        "isDirectory : %s should be seen as a directory %s" % (testDir, result))
        self.assertTrue(nonExistingDir in result["Value"]["Successful"],
                        "isDirectory : %s should be in Successful %s" % (nonExistingDir, result))
        self.assertTrue(result["Value"]["Successful"][nonExistingDir] is False,
                        "isDirectory : %s should be seen as a directory %s" % (nonExistingDir, result))

        # Directory size as stored in the table...
        result = self.db.getDirectorySize([testDir, nonExistingDir], False, False, True, credDict)
        self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
        self.assertTrue(testDir in result["Value"]["Successful"],
                        "getDirectorySize : %s should be in Successful %s" % (testDir, result))
        self.assertEqual(result["Value"]["Successful"][testDir],
                         {'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123},
                         "getDirectorySize got incorrect directory size %s" % result)
        self.assertTrue(nonExistingDir in result["Value"]["Failed"],
                        "getDirectorySize : %s should be in Failed %s" % (nonExistingDir, result))

        # ... and recomputed from the file records: both must agree.
        result = self.db.getDirectorySize([testDir, nonExistingDir], False, True, True, credDict)
        self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
        self.assertTrue(testDir in result["Value"]["Successful"],
                        "getDirectorySize (calc): %s should be in Successful %s" % (testDir, result))
        self.assertEqual(result["Value"]["Successful"][testDir],
                         {'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123},
                         "getDirectorySize got incorrect directory size %s" % result)
        self.assertTrue(nonExistingDir in result["Value"]["Failed"],
                        "getDirectorySize (calc) : %s should be in Failed %s" % (nonExistingDir, result))

        result = self.db.listDirectory([parentDir, testDir, nonExistingDir], credDict)
        self.assertTrue(result["OK"], "listDirectory failed: %s" % result)
        self.assertTrue(parentDir in result["Value"]["Successful"],
                        "listDirectory : %s should be in Successful %s" % (parentDir, result))
        self.assertEqual(list(result["Value"]["Successful"][parentDir]["SubDirs"]), [testDir],
                         "listDir : incorrect content for %s (%s)" % (parentDir, result))
        self.assertTrue(testDir in result["Value"]["Successful"],
                        "listDirectory : %s should be in Successful %s" % (testDir, result))
        self.assertEqual(list(result["Value"]["Successful"][testDir]["Files"]), [testFile.split("/")[-1]],
                         "listDir : incorrect content for %s (%s)" % (testDir, result))
        self.assertTrue(nonExistingDir in result["Value"]["Failed"],
                        "listDirectory : %s should be in Failed %s" % (nonExistingDir, result))

        # We do it two times to make sure that
        # when updating something to the same value
        # returns a success if it is allowed
        for attempt in range(2):
            print("Attempt %s" % (attempt + 1))

            # Non-recursive chmod/chown/chgrp on parentDir.
            # Only admin can change path group
            resultM = self.db.changePathMode({parentDir: 0o777}, credDict)
            result = self.db.changePathOwner({parentDir: "toto"}, credDict)
            resultG = self.db.changePathGroup({parentDir: "toto"}, credDict)
            result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)

            self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
            self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)

            # Since we were the owner we should have been able to do it in any case, admin or not
            self.assertTrue(parentDir in resultM["Value"]["Successful"],
                            "changePathMode : %s should be in Successful %s" % (parentDir, resultM))
            self.assertEqual(result2['Value'].get('Successful', {}).get(parentDir, {}).get('Mode'), 0o777,
                             "parentDir should have mode %s %s" % (0o777, result2))
            self.assertEqual(result2['Value'].get('Successful', {}).get(testDir, {}).get('Mode'), 0o775,
                             "testDir should not have changed %s" % result2)

            if isAdmin:
                self.assertTrue(parentDir in result["Value"]["Successful"],
                                "changePathOwner : %s should be in Successful %s" % (parentDir, result))
                self.assertEqual(result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'), 'toto',
                                 "parentDir should belong to %s %s" % (proxyUser, result2))
                self.assertEqual(result2['Value'].get('Successful', {}).get(testDir, {}).get('Owner'), proxyUser,
                                 "testDir should not have changed %s" % result2)
                self.assertTrue(parentDir in resultG["Value"]["Successful"],
                                "changePathGroup : %s should be in Successful %s" % (parentDir, resultG))
                self.assertEqual(result2['Value'].get('Successful', {}).get(parentDir, {}).get('OwnerGroup'), 'toto',
                                 "parentDir should belong to %s %s" % (proxyUser, result2))
                self.assertEqual(result2['Value'].get('Successful', {}).get(testDir, {}).get('OwnerGroup'), proxyGroup,
                                 "testDir should not have changed %s" % result2)
            else:
                # Non-admin outcome depends on the configured policy manager: nothing asserted.
                pass

            # Recursive chmod/chown/chgrp on parentDir: testDir and testFile change too.
            # Only admin can change path group
            resultM = self.db.changePathMode({parentDir: 0o777}, credDict, True)
            result = self.db.changePathOwner({parentDir: "toto"}, credDict, True)
            resultG = self.db.changePathGroup({parentDir: "toto"}, credDict, True)
            result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
            result3 = self.db.getFileMetadata(testFile, credDict)

            self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
            self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
            self.assertTrue(result3["OK"], "getFileMetadata failed: %s" % result)

            # Since we were the owner we should have been able to do it in any case, admin or not
            self.assertTrue(parentDir in resultM["Value"]["Successful"],
                            "changePathGroup : %s should be in Successful %s" % (parentDir, resultM))
            self.assertEqual(result2['Value'].get('Successful', {}).get(parentDir, {}).get('Mode'), 0o777,
                             "parentDir should have mode %s %s" % (0o777, result2))
            self.assertEqual(result2['Value'].get('Successful', {}).get(testDir, {}).get('Mode'), 0o777,
                             "testDir should have mode %s %s" % (0o777, result2))
            self.assertEqual(result3['Value'].get('Successful', {}).get(testFile, {}).get('Mode'), 0o777,
                             "testFile should have mode %s %s" % (0o777, result3))

            if isAdmin:
                self.assertTrue(parentDir in result["Value"]["Successful"],
                                "changePathOwner : %s should be in Successful %s" % (parentDir, result))
                self.assertEqual(result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'), 'toto',
                                 "parentDir should belong to %s %s" % (proxyUser, result2))
                self.assertEqual(result2['Value'].get('Successful', {}).get(testDir, {}).get('Owner'), 'toto',
                                 "testDir should belong to %s %s" % (proxyUser, result2))
                self.assertEqual(result3['Value'].get('Successful', {}).get(testFile, {}).get('Owner'), 'toto',
                                 "testFile should belong to %s %s" % (proxyUser, result3))
                self.assertTrue(parentDir in resultG["Value"]["Successful"],
                                "changePathGroup : %s should be in Successful %s" % (parentDir, resultG))
                self.assertEqual(result2['Value'].get('Successful', {}).get(parentDir, {}).get('OwnerGroup'), 'toto',
                                 "parentDir should belong to %s %s" % (proxyGroup, result2))
                self.assertEqual(result2['Value'].get('Successful', {}).get(testDir, {}).get('OwnerGroup'), 'toto',
                                 "testDir should belong to %s %s" % (proxyGroup, result2))
                self.assertEqual(result3['Value'].get('Successful', {}).get(testFile, {}).get('OwnerGroup'), 'toto',
                                 "testFile should belong to %s %s" % (proxyGroup, result3))
            else:
                # Non-admin outcome depends on the configured policy manager: nothing asserted.
                pass

        # Cleaning after us
        result = self.db.removeFile(testFile, credDict)
        self.assertTrue(result["OK"], "removeFile failed: %s" % result)

        # Remove testDir and each of its ancestors, deepest first.
        pathParts = testDir.split('/')[1:]
        startDir = '/'
        pathToRemove = []
        for part in pathParts:
            startDir = os.path.join(startDir, part)
            pathToRemove.append(startDir)
        for toRemove in reversed(pathToRemove):
            result = self.db.removeDirectory(toRemove, credDict)
            self.assertTrue(result["OK"], "removeDirectory failed: %s" % result)
class DirectoryUsageCase (FileCatalogDBTestCase):
  """ Tests for the DirectoryUsage bookkeeping of the FileCatalogDB:
      logical file counts/sizes per directory and physical usage per SE,
      both recursive and non-recursive.
  """

  def getPhysicalSize(self, sizeDict, dirName, seName):
    """ Extract the information from a ret dictionary
        and return the tuple (files, size) for a given
        directory and a se
    """
    # sizeDict is the 'Successful' part of a getDirectorySize result;
    # raises KeyError when the SE has no entry for that directory
    val = sizeDict[dirName]['PhysicalSize'][seName]
    files = val['Files']
    size = val['Size']
    return (files, size)
def getLogicalSize(self, sizeDict, dirName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and a se
"""
files = sizeDict[dirName]['LogicalFiles']
size = sizeDict[dirName]['LogicalSize']
return (files, size)
def getAndCompareDirectorySize(self, dirList, recursiveSum=True):
""" Fetch the directory size from the DirectoryUsage table
and calculate it, compare the results, and then return
the values
"""
retTable = self.db.getDirectorySize(dirList, True, False, recursiveSum, credDict)
retCalc = self.db.getDirectorySize(dirList, True, True, recursiveSum, credDict)
self.assertTrue(retTable["OK"])
self.assertTrue(retCalc["OK"])
succTable = retTable['Value']['Successful']
succCalc = retCalc['Value']['Successful']
# Since we have simple type, the == is recursive for dict :-)
retEquals = (succTable == succCalc)
self.assertTrue(
retEquals, "Calc and table results different with recursiveSum %s: %s %s" %
(recursiveSum, succTable, succCalc))
return retTable
  def checkNonRecursiveDirectorySize(self, curDir):
    """ The tests here should be true at any point in time of the directory size testing.
        We basically make sure that the sum of the non recursive sum of subdirectories
        is the same as the recursive size of the directory (read it a few times, slowly :-)

        :param curDir: catalog path of the directory to check; the check
                       then recurses into every subdirectory.
    """
    res = self.db.listDirectory(curDir, credDict)
    subDirs = res['Value']['Successful'][curDir]['SubDirs']
    files = res['Value']['Successful'][curDir]['Files']
    # Non-recursive (this directory only) and recursive (this + children) views
    nrDirSize = self.getAndCompareDirectorySize(curDir, recursiveSum=False)['Value']['Successful'][curDir]
    rDirSize = self.getAndCompareDirectorySize(curDir, recursiveSum=True)['Value']['Successful'][curDir]
    # If there are no files, the size should be 0
    if not files:
      self.assertEqual((nrDirSize['LogicalFiles'], nrDirSize['LogicalSize']), (0, 0))
    # If there are no subdirectories, the recursive and non recursive sum should be the same
    if not subDirs:
      self.assertEqual(rDirSize, nrDirSize)
    # If there are subdir, the recursive size of the subdir + the non recursive size of curdir
    # should be equal to the recursive size of curdir
    else:
      # Get the logical size of the subdirs
      ret = self.getAndCompareDirectorySize(subDirs, recursiveSum=True)['Value']['Successful']
      subLogicalDir = 0
      subLogicalFiles = 0
      subLogicalSize = 0
      # It's a dict of SE, each of them having {Files: x, Size: y}
      physicalSizePerSE = defaultdict(lambda: defaultdict(int))
      physicalSizeTotalFiles = 0
      physicalSizeTotalSize = 0
      # Accumulate the recursive totals over all subdirectories
      for subDir, subDirDict in ret.items():
        subLogicalDir += subDirDict['LogicalDirectories']
        subLogicalFiles += subDirDict['LogicalFiles']
        subLogicalSize += subDirDict['LogicalSize']
        subDirPhys = subDirDict['PhysicalSize']
        # pop() removes the aggregate keys so only real SE names remain below
        physicalSizeTotalFiles += subDirPhys.pop('TotalFiles', 0)
        physicalSizeTotalSize += subDirPhys.pop('TotalSize', 0)
        for se, seDict in subDirPhys.items():
          physicalSizePerSE[se]['Files'] += seDict['Files']
          physicalSizePerSE[se]['Size'] += seDict['Size']
      self.assertEqual(rDirSize['LogicalDirectories'], nrDirSize['LogicalDirectories'] + subLogicalDir)
      self.assertEqual(rDirSize['LogicalFiles'], nrDirSize['LogicalFiles'] + subLogicalFiles)
      self.assertEqual(rDirSize['LogicalSize'], nrDirSize['LogicalSize'] + subLogicalSize)
      rDirPhys = rDirSize['PhysicalSize']
      nrDirPhys = nrDirSize['PhysicalSize']
      # We pop to be able to loop over the SEs later on
      self.assertEqual(rDirPhys.pop('TotalFiles', 0), nrDirPhys.pop('TotalFiles', 0) + physicalSizeTotalFiles)
      self.assertEqual(rDirPhys.pop('TotalSize', 0), nrDirPhys.pop('TotalSize', 0) + physicalSizeTotalSize)
      # Add the curDir non recursive physical SE to the subdir
      for se, seDict in nrDirPhys.items():
        physicalSizePerSE[se]['Files'] += seDict['Files']
        physicalSizePerSE[se]['Size'] += seDict['Size']
      self.assertEqual(rDirPhys, physicalSizePerSE)
      # Now do the check recursively
      for subDir in subDirs:
        self.checkNonRecursiveDirectorySize(subDir)

  # NOTE(review): the block below is dead, commented-out code kept from an
  # earlier version of the size checks; consider deleting it.
  # # There are no subdir, so the size and the recursive size of d2 and d2 should be the same
  # ret = self.getAndCompareDirectorySize([d1, d2], recursiveSum=False)
  # self.assertTrue(ret["OK"])
  # nonRecVal = ret['Value']['Successful']
  # self.assertTrue(val == nonRecVal)
  # nonRecD1s1 = self.getPhysicalSize(nonRecVal, d1, 'se1')
  # nonRecD1s2 = self.getPhysicalSize(nonRecVal, d1, 'se2')
  # nonRecD1l = self.getLogicalSize(nonRecVal, d1)
  # try:
  #   nonRecD2s1 = self.getPhysicalSize(nonRecVal, d2, 'se1')
  # except KeyError:
  #   nonRecD2s1 = (0, 0)
  # try:
  #   nonRecD2s2 = self.getPhysicalSize(nonRecVal, d2, 'se2')
  # except KeyError:
  #   nonRecD2s2 = (0, 0)
  # nonRecD2l = self.getLogicalSize(nonRecVal, d2)
  # # The size of the root dir should be zero
  # ret = self.getAndCompareDirectorySize('/sizeTest', recursiveSum=False)
  # self.assertTrue(ret["OK"])
  # nonRecStVal = ret['Value']['Successful']
  # print("CHRIS nonRecStVal %s" % nonRecStVal)
  # # There should be no physical size there
  # try:
  #   stS1 = self.getPhysicalSize(nonRecStVal, '/sizeTest', 'se1')
  #   self.assertTrue(False, "There should be no physical size")
  # except KeyError:
  #   pass
  # try:
  #   stS2 = self.getPhysicalSize(nonRecStVal, '/sizeTest', 'se2')
  #   self.assertTrue(False, "There should be no physical size")
  # except KeyError:
  #   pass
  # nonRecStl = self.getLogicalSize(nonRecStVal, '/sizeTest')
  # self.assertEqual(nonRecStl, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
  # # The sum of the non recursive sizes of /sizeTest + /sizeTest/d1 + /siteTest
  # # should be equal to the size of /sizeTest
  # ret = self.getAndCompareDirectorySize('/sizeTest', recursiveSum=True)
  # self.assertTrue(ret["OK"])
  # recVal = ret['Value']['Successful']
  # recSts1 = self.getPhysicalSize(recVal, '/sizeTest', 'se1')
  # recSts2 = self.getPhysicalSize(recVal, '/sizeTest', 'se2')
  # recStl = self.getLogicalSize(recVal, '/sizeTest')
  # self.assertEqual(recStl, tuple(sum(x) for x in zip(nonRecStl, nonRecD1l, nonRecD2l)))
  # self.assertEqual(recSts1, tuple(sum(x) for x in zip(nonRecD1s1, nonRecD2s1)))
  # self.assertEqual(recSts2, tuple(sum(x) for x in zip(nonRecD1s2, nonRecD2s2)))
  def test_directoryUsage(self):
    """Testing DirectoryUsage related operation.

       Exercises addFile/addReplica/removeFile/removeReplica and checks after
       every step that the per-SE physical usage and the logical usage of
       /sizeTest/d1 and /sizeTest/d2 stay consistent, both from the
       DirectoryUsage table and recomputed (via getAndCompareDirectorySize).
    """
    # create SE
    # Only admin can run that
    if not isAdmin:
      return
    d1 = '/sizeTest/d1'
    d2 = '/sizeTest/d2'
    f1 = d1 + '/f1'
    f2 = d1 + '/f2'
    f3 = d2 + '/f3'
    # Sizes > 2**31 so 32-bit truncation in the DB layer would be caught
    f1Size = 3000000000
    f2Size = 3000000001
    f3Size = 3000000002
    for sen in ['se1', 'se2', 'se3']:
      ret = self.db.addSE(sen, credDict)
      self.assertTrue(ret["OK"])
    for din in [d1, d2]:
      ret = self.db.createDirectory(din, credDict)
      self.assertTrue(ret["OK"])
    # Initial layout: f1 on se1, f2 on se2, both under d1
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'},
                           f2: {'PFN': 'f2se2',
                                'SE': 'se2',
                                'Size': f2Size,
                                'GUID': '1001',
                                'Checksum': '2'}}, credDict)
    self.assertTrue(ret["OK"])
    ret = self.getAndCompareDirectorySize([d1, d2], recursiveSum=True)
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1s1 = self.getPhysicalSize(val, d1, 'se1')
    d1s2 = self.getPhysicalSize(val, d1, 'se2')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
    self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Add a second replica for each file: f1 -> se2, f2 -> se3
    ret = self.db.addReplica({f1: {"PFN": "f1se2", "SE": "se2"},
                              f2: {"PFN": "f1se3", "SE": "se3"}},
                             credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1s1 = self.getPhysicalSize(val, d1, 'se1')
    d1s2 = self.getPhysicalSize(val, d1, 'se2')
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
    self.assertEqual(
        d1s2, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1s2, (2, f1Size + f2Size)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Removing f1 entirely: se1 usage of d1 should disappear
    ret = self.db.removeFile([f1], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    # Here we should have the KeyError, since there are no files left on s1 in principle
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    d1s2 = self.getPhysicalSize(val, d1, 'se2')
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Remove f2's replica on se2 -> f2 remains only on se3
    ret = self.db.removeReplica({f2: {"SE": "se2"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    # Here we should have the KeyError, since there are no files left on s1 in principle
    # NOTE(review): this queries 'se1' but stores the result in d1s2 -- probably
    # 'se2' was intended here (the replica just removed); both SEs are empty at
    # this point so the assertion passes either way. TODO confirm.
    try:
      d1s2 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s2 = (0, 0)
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Re-add f1 (se1) and add f3 (se3) so that d2 is populated too
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'},
                           f3: {'PFN': 'f3se3',
                                'SE': 'se3',
                                'Size': f3Size,
                                'GUID': '1003',
                                'Checksum': '3'}}, credDict)
    self.assertTrue(ret["OK"])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1s1 = self.getPhysicalSize(val, d1, 'se1')
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d2s3 = self.getPhysicalSize(val, d2, 'se3')
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Remove f1's only replica but keep the file in the catalog
    ret = self.db.removeReplica({f1: {"SE": "se1"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d2s3 = self.getPhysicalSize(val, d2, 'se3')
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
    # This one is silly... there are no replicas of f1, but since the file is still there,
    # the logical size does not change
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Now remove the (replica-less) f1 -> logical size of d1 drops
    ret = self.db.removeFile([f1], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d2s3 = self.getPhysicalSize(val, d2, 'se3')
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Drop the last replicas of f2 and f3 -> all physical usage gone
    ret = self.db.removeReplica({f2: {"SE": "se3"},
                                 f3: {"SE": "se3"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    try:
      d1s3 = self.getPhysicalSize(val, d1, 'se3')
    except KeyError:
      d1s3 = (0, 0)
    try:
      d2s3 = self.getPhysicalSize(val, d2, 'se3')
    except KeyError:
      d2s3 = (0, 0)
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
    self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # This one is silly... there are no replicas of f1, but since the file is still there,
    # the logical size does not change
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Remove the last files -> logical usage gone as well
    ret = self.db.removeFile([f2, f3], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    try:
      d1s3 = self.getPhysicalSize(val, d1, 'se3')
    except KeyError:
      d1s3 = (0, 0)
    try:
      d2s3 = self.getPhysicalSize(val, d2, 'se3')
    except KeyError:
      d2s3 = (0, 0)
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
    self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # This one is silly... there are no replicas of f1, but since the file is still there,
    # the logical size does not change
    self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
    self.assertEqual(d2l, (0, 0), "Unexpected size %s, expected %s" % (d2l, (0, 0)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    # Removing Replicas and Files from the same directory
    # NOTE(review): the addFile result below is not asserted before ret is
    # overwritten by removeReplica -- an addFile failure would go unnoticed.
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'},
                           f2: {'PFN': 'f2se2',
                                'SE': 'se1',
                                'Size': f2Size,
                                'GUID': '1001',
                                'Checksum': '2'}}, credDict)
    ret = self.db.removeReplica({f1: {"SE": "se1"},
                                 f2: {"SE": "se1"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
    ret = self.db.removeFile([f1, f2], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
    # Try removing a replica from a non existing SE
    # NOTE(review): addFile result not asserted here either (see above).
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'}}, credDict)
    ret = self.db.removeReplica({f1: {"SE": "se2"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s2 = self.getPhysicalSize(val, d1, 'se2')
    except KeyError:
      d1s2 = (0, 0)
    self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
    ###################
    # Non recursive tests
    self.checkNonRecursiveDirectorySize('/sizeTest')
    ###################
def _makeTestSuite():
  # In Python 3 TestSuite cannot easily be re-used as the tests are cleaned up
  # by default, hence a fresh suite is assembled on every call.
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase(SECase)
  for caseClass in (UserGroupCase, FileCase, ReplicaCase, DirectoryCase, DirectoryUsageCase):
    suite.addTest(loader.loadTestsFromTestCase(caseClass))
  return suite
if __name__ == '__main__':
  # Run the whole suite once per combination of manager implementations,
  # each combination first without and then with admin privileges.
  managerTypes = list(MANAGER_TO_TEST)
  all_combinations = list(itertools.product(*MANAGER_TO_TEST.values()))
  numberOfManager = len(managerTypes)
  for setup in all_combinations:
    print("Running with:")
    print(("".join(["\t %s : %s\n" % (managerTypes[i], setup[i]) for i in range(numberOfManager)])))
    for i in range(numberOfManager):
      DATABASE_CONFIG[managerTypes[i]] = setup[i]
    # First run without admin privileges:
    isAdmin = False
    if FC_MANAGEMENT in credDict['properties']:
      credDict['properties'].remove(FC_MANAGEMENT)
    print("Running test without admin privileges")
    testResult = unittest.TextTestRunner(verbosity=2).run(_makeTestSuite())
    # Then run with admin privileges:
    isAdmin = True
    if FC_MANAGEMENT not in credDict['properties']:
      credDict['properties'].append(FC_MANAGEMENT)
    print("Running test with admin privileges")
    testResult = unittest.TextTestRunner(verbosity=2).run(_makeTestSuite())
  # Exit status reflects only the LAST run (last combination, admin run)
  sys.exit(not testResult.wasSuccessful())
| yujikato/DIRAC | tests/Integration/DataManagementSystem/Test_FileCatalogDB.py | Python | gpl-3.0 | 64,072 | [
"DIRAC"
] | 549a0e098ffc60903965c65e0e6a9dfa72ef9e557b77f180929d70ff9e628ac1 |
import sys
import os.path
#sys.path.insert(0, '/home/andy/theano/tool_examples/theano-lstm-0.0.15')
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
from utilities import *
import dill
import argparse
#import cPickle
import pickle
import numpy
from collections import OrderedDict
import theano, theano.tensor as T
import turing_model
from theano_toolkit.parameters import Parameters
from theano.compile.nanguardmode import NanGuardMode
# Tool banner / authorship text; presumably passed to argparse as the program
# description -- confirm against the (unseen) parser construction site.
DESCRIPTION = """
Recurrent neural network based statistical language modelling toolkit
(based on LSTM algorithm)
Implemented by Daniel Soutner,
Department of Cybernetics, University of West Bohemia, Plzen, Czech rep.
dsoutner@kky.zcu.cz, 2013
"""
def parse_args(parser):
    """Register every command line option of the toolkit on *parser*
    and return the parsed ``argparse.Namespace``.
    """
    add = parser.add_argument

    # Corpus files
    add('--train', nargs=1, action="store", metavar="FILE", help='training file !')
    add('--valid', nargs=1, action="store", metavar="FILE", help='valid file !')
    add('--test', nargs=1, action="store", metavar="FILE", help='testing file for ppl!')

    # Network topology
    add('--neuron-type', action="store", dest='celltype', type=str, default='RNN',
        help='type of hidden neurons, RNN/LSTM, default: RNN')
    add('--train-method', action="store", dest='train_method', type=str, default='ALL',
        help='training method LSTM/TURING/ALL, default: ALL')
    add('--projection-size', action="store", dest='n_projection', type=int, default=100,
        help='Number of neurons in projection layer, default: 100')
    add('--hidden-size', action="store", dest='n_hidden', type=int, default=100,
        help='Number of neurons in hidden layer, default: 100')
    add('--stack', action="store", dest='n_stack', type=int, default=1,
        help='Number of hidden neurons, default: 1 ')

    # Optimisation hyper-parameters
    add('--learning-rate', action="store", dest='lr', type=float, default=0.01,
        help='learing rate at begining, default: 0.01 ')
    add('--improvement-rate', action="store", dest='improvement_rate', type=float, default=0.005,
        help='relative improvement for early stopping on ppl , default: 0.005 ')
    add('--entropy-reg', action="store", dest='entropy_reg', type=float, default=0.001,
        help='entropy regulizer, default: 0.001 ')
    add('--minibatch-size', action="store", dest='minibatch_size', type=int, default=100,
        help='minibatch size for training, default: 100')
    add('--max-epoch', action="store", dest='max_epoch', type=int, default=1000,
        help='maximum number of epoch if not early stopping, default: 1000')
    add('--early-stop', action="store", dest='early_stop', type=int, default=1,
        help='1 for early-stopping, 0 for not')

    # Model persistence
    add('--save-net', action="store", dest="save_net", default=None, metavar="FILE",
        help="Save RNN to file")
    add('--load-net', action="store", dest="load_net", default=None, metavar="FILE",
        help="Load RNN from file")

    return parser.parse_args()
def build_vocab(data_file_str):
    """Scan the corpus file *data_file_str* and return a Vocab holding
    every space-separated token (newlines are mapped to '.').
    """
    vocab = Vocab()
    with open(data_file_str) as data_file:
        for raw_line in data_file:
            vocab.add_words(raw_line.replace('\n', '.').split(" "))
    return vocab
def load_data(data_file_str, vocab, data_type):
    """Read *data_file_str*, map every sentence to word indices via *vocab*
    and return the padded index matrix together with the sentence lengths.

    For the training set ('train'), sentences longer than 50 tokens are
    dropped: they dominate the padded matrix and hurt performance.
    """
    kept_sentences = []
    with open(data_file_str) as data_file:
        for raw_line in data_file:
            # skip overly long training sentences (see docstring)
            if data_type == 'train' and len(raw_line.replace('\n', '').split(' ')) > 50:
                continue
            kept_sentences.append(raw_line.replace('\n', '.'))
    # transform into big numerical matrix of sentences:
    numerical_lines = [vocab(sentence) for sentence in kept_sentences]
    return pad_into_matrix(numerical_lines)
def softmax(x):
    """
    Wrapper for softmax, helps with
    pickling, and removing one extra
    dimension that Theano adds during
    its exponential normalization.

    Note: operates on the transpose of *x* (softmax is taken over
    what Theano treats as rows of ``x.T``).
    """
    return T.nnet.softmax(x.T)
def has_hidden(layer):
    """
    Report whether *layer* carries a trainable initial hidden state,
    i.e. whether it exposes an ``initial_hidden_state`` attribute.
    """
    return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
    """Tile *vector* into an (n x len(vector)) matrix by repeating it n
    times along a new leading axis (symbolic Theano operation)."""
    return T.repeat(T.shape_padleft(vector), n, axis=0)
def initial_state(layer, dimensions = None):
    """
    Produce the starting hidden state for *layer* in a scan recurrence.

    Layers without a trainable initial state yield None, which tells
    Theano that the step function returns a value that is not fed back
    into the next iteration. When *dimensions* is given, the state is
    tiled to a matrix so every example in the batch starts identically.
    """
    if not has_hidden(layer):
        return None
    if dimensions is None:
        return layer.initial_hidden_state
    return matrixify(layer.initial_hidden_state, dimensions)
def initial_state_with_taps(layer, dimensions = None):
    """Optionally wrap tensor variable into a dict with taps=[-1]
    (the form theano.scan expects for recurrent outputs)."""
    state = initial_state(layer, dimensions)
    return dict(initial=state, taps=[-1]) if state is not None else None
class Model:
    """
    Simple predictive model for forecasting words from
    sequence using LSTMs. Choose how many LSTMs to stack
    what size their memory should be, and how many
    words can be predicted.

    Combines a stacked RNN/LSTM language model with a neural-Turing-machine
    style output head (see ``turing_model``); both parts can be trained
    separately or jointly.
    """
    def __init__(self, hidden_size, input_size, vocab_size, entropy_reg = 0.001, stack_size=1, celltype=LSTM):
        """Build the whole symbolic graph: embedding -> stacked cells ->
        softmax classifier, plus the Turing-machine head, cost functions,
        update rules, prediction functions and perplexity functions.

        :param hidden_size: neurons per hidden layer
        :param input_size: embedding dimension fed to the first cell
        :param vocab_size: output vocabulary size
        :param entropy_reg: weight of the entropy regularizer on the final cost
        :param stack_size: number of stacked recurrent layers
        :param celltype: recurrent cell class (LSTM or RNN)
        """
        # core layer in RNN/LSTM
        self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
        # add an embedding
        self.model.layers.insert(0, Embedding(vocab_size, input_size))
        # add a classifier:
        self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
        self.entropy_reg = entropy_reg
        self.turing_params = Parameters()
        #init turing machine model
        self.turing_updates , self.turing_predict = turing_model.build(self.turing_params , hidden_size , vocab_size)
        self.hidden_size = hidden_size
        # inputs are matrices of indices,
        # each row is a sentence, each column a timestep
        self._stop_word = theano.shared(np.int32(999999999), name="stop word")
        self.for_how_long = T.ivector()
        self.mask_matrix = T.imatrix()
        self.input_mat = T.imatrix()
        self.priming_word = T.iscalar()
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        # create symbolic variables for prediction:
        #change by darong #issue : what is greedy
        self.lstm_predictions = self.create_lstm_prediction()
        self.final_predictions,self.entropy = self.create_final_prediction()
        # create symbolic variable for greedy search:
        self.greedy_predictions = self.create_lstm_prediction(greedy=True)
        # create gradient training functions:
        self.create_cost_fun()#create 2 cost func(lstm final)
        self.lstm_lr = 0.01
        self.turing_lr = 0.01
        self.all_lr = 0.01
        self.create_training_function()#create 3 functions(lstm turing all)
        self.create_predict_function()#create 2 predictions(lstm final)
        # create ppl
        self.lstm_ppl = self.create_lstm_ppl()
        self.final_ppl = self.create_final_ppl()
        self.create_ppl_function()
def save(self, save_file, vocab):
pickle.dump(self.model, open(save_file, "wb")) # pickle is for lambda function, cPickle cannot
pickle.dump(vocab, open(save_file+'.vocab', "wb")) # pickle is for lambda function, cPickle cannot
    def save_turing(self, save_file):
        """Persist the Turing-machine head parameters next to the model,
        under ``save_file + '.turing'``."""
        self.turing_params.save(save_file + '.turing')
    def load(self, load_file, lr):
        """Restore the pickled stacked-cells model from *load_file*, load the
        Turing parameters from ``load_file + '.turing'`` when present
        (otherwise seed them from the LSTM classifier weights), then rebuild
        every compiled function with learning rate *lr*.

        NOTE: Python 2 only (uses a print statement below).
        """
        self.model = pickle.load(open(load_file, "rb"))
        if os.path.isfile(load_file + '.turing') :
            self.turing_params.load(load_file + '.turing')
        else :
            # no saved Turing head: bootstrap it from the LSTM output layer
            print "no turing model!!!! pretrain with lstm param"
            self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T #not sure
            self.turing_params['W_read_hidden'] = self.model.layers[-1].params[0].get_value().T
            self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
            # second half of the first cell's initial state seeds the memory
            # (presumably the LSTM "hidden" half as opposed to the "cell" half
            # -- TODO confirm against theano_lstm's state layout)
            temp = self.model.layers[1].initial_hidden_state.get_value()[self.hidden_size:]
            self.turing_params['memory_init'] = temp.reshape((1,)+temp.shape)
        # need to compile again for calculating predictions after loading lstm
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        self.lstm_predictions = self.create_lstm_prediction()
        self.final_predictions,self.entropy = self.create_final_prediction()
        self.greedy_predictions = self.create_lstm_prediction(greedy=True)#can change to final
        self.create_cost_fun()#create 2 cost func(lstm final)
        self.lstm_lr = lr
        self.turing_lr = lr#change this
        self.all_lr = lr
        self.create_training_function()#create 3 functions(lstm turing all)
        self.create_predict_function()#create 2 predictions(lstm final)
        self.lstm_ppl = self.create_lstm_ppl()
        self.final_ppl = self.create_final_ppl()
        self.create_ppl_function()
        # print "done compile"
    def stop_on(self, idx):
        """Set the vocabulary index that terminates greedy generation."""
        self._stop_word.set_value(idx)
    @property
    def params(self):
        """Trainable parameters of the stacked-cells model (the Turing-head
        parameters live separately in ``self.turing_params``)."""
        return self.model.params
    def create_lstm_prediction(self, greedy=False):
        """Build the symbolic per-timestep softmax predictions of the pure
        LSTM/RNN stack over ``self.input_mat``.

        With ``greedy=True`` it instead builds a generation graph that feeds
        each argmax back as the next input, starting from
        ``self.priming_word`` and stopping at ``self._stop_word``.
        """
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
            else:
                return new_states[1:]

        # in sequence forecasting scenario we take everything
        # up to the before last step, and predict subsequent
        # steps ergo, 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:

        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step,
                                n_steps=200,
                                outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step,
                                sequences=[inputs.T],
                                outputs_info=outputs_info)

        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        return result[-1].transpose((2,0,1))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension
    def create_final_prediction(self, greedy=False):
        """Build the symbolic predictions of the full model: the recurrent
        stack runs as in :meth:`create_lstm_prediction`, but the last hidden
        layer (its second half) is fed through the Turing-machine head
        instead of the plain softmax classifier.

        Returns ``(predictions, entropy)`` transposed to
        (example, timestep, ...).

        NOTE(review): this is largely a copy of create_lstm_prediction; the
        ``greedy=True`` branch still returns the LSTM-style result and looks
        unused/unsupported here.
        """
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
            else:
                return new_states[1:]

        # in sequence forecasting scenario we take everything
        # up to the before last step, and predict subsequent
        # steps ergo, 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:

        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step,
                                n_steps=200,
                                outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step,
                                sequences=[inputs.T],
                                outputs_info=outputs_info)

        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        # result[-2] holds the last recurrent layer's states; keep only the
        # second half (Python 2 integer division on the symbolic shape)
        hidden_size = result[-2].shape[2]/2
        temp = self.turing_predict(result[-2][:,:,hidden_size:])
        turing_result = temp[0]
        entropy_result = temp[1]
        #the last layer do transpose before compute
        return turing_result.transpose((1,0,2)),entropy_result.transpose((1,0))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension
    def create_cost_fun (self):
        """Define the two symbolic training costs:

        - ``self.lstm_cost``: masked cross-entropy of the plain LSTM softmax
          against the next word at every timestep;
        - ``self.final_cost``: same loss on the Turing-head predictions plus
          ``entropy_reg`` times the (masked) entropy term.
        """
        # create a cost function that
        # takes each prediction at every timestep
        # and guesses next timestep's value:
        what_to_predict = self.input_mat[:, 1:]
        # because some sentences are shorter, we
        # place masks where the sentences end:
        # (for how long is zero indexed, e.g. an example going from `[2,3)`)
        # has this value set 0 (here we substract by 1):
        for_how_long = self.for_how_long - 1
        # all sentences start at T=0:
        starting_when = T.zeros_like(self.for_how_long)

        self.lstm_cost = masked_loss(self.lstm_predictions,
                                what_to_predict,
                                for_how_long,
                                starting_when).sum()

        # zero out the entropy contribution of padded positions
        zero_entropy = T.zeros_like(self.entropy)
        real_entropy = T.switch(self.mask_matrix,self.entropy,zero_entropy)

        self.final_cost = masked_loss(self.final_predictions,
                                what_to_predict,
                                for_how_long,
                                starting_when).sum()+self.entropy_reg*real_entropy.sum()
def create_predict_function(self):
    """Compile the Theano prediction functions.

    Side effects: sets ``self.lstm_pred_fun`` (plain LSTM softmaxes),
    ``self.final_pred_fun`` (Turing-augmented softmaxes) and
    ``self.greedy_fun`` (greedy decoding primed with a single word).
    """
    # plain LSTM next-step distributions
    self.lstm_pred_fun = theano.function(
        inputs=[self.input_mat],
        outputs=self.lstm_predictions,
        allow_input_downcast=True,
    )
    # distributions after the Turing-machine layer
    self.final_pred_fun = theano.function(
        inputs=[self.input_mat],
        outputs=self.final_predictions,
        allow_input_downcast=True,
    )
    # greedy decoding: prepend the priming word so the returned
    # sequence starts with it
    greedy_outputs = T.concatenate(
        [T.shape_padleft(self.priming_word), self.greedy_predictions])
    self.greedy_fun = theano.function(
        inputs=[self.priming_word],
        outputs=greedy_outputs,
        allow_input_downcast=True,
    )
def create_training_function(self):
    """Compile the three SGD training functions.

    Side effects: sets ``self.lstm_update_fun`` (LSTM weights against
    the pure LSTM cost), ``self.turing_update_fun`` (Turing-machine
    weights against the final cost, NaN/Inf guarded) and
    ``self.all_update_fun`` (both parameter sets jointly).
    """
    # 1) LSTM-only updates driven by the plain LSTM cost
    lstm_updates, _, _, _, _ = create_optimization_updates(
        self.lstm_cost, self.params, method="SGD", lr=self.lstm_lr)
    self.lstm_update_fun = theano.function(
        inputs=[self.input_mat, self.for_how_long],
        outputs=self.lstm_cost,
        updates=lstm_updates,
        allow_input_downcast=True)
    # 2) Turing-machine-only updates driven by the final cost
    turing_updates = self.turing_updates(self.final_cost, lr=self.turing_lr)
    self.turing_update_fun = theano.function(
        inputs=[self.input_mat, self.for_how_long, self.mask_matrix],
        outputs=self.final_cost,
        updates=turing_updates,
        # abort as soon as the Turing branch produces NaN/Inf values
        mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
        allow_input_downcast=True)
    # 3) joint updates: merge the (partial) LSTM updates with the
    # Turing updates into one update dictionary
    joint_updates, _, _, _, _ = create_optimization_updates(
        self.final_cost, self.params, method="SGD", lr=self.all_lr, part=True)
    for shared_var, new_value in self.turing_updates(self.final_cost, lr=self.all_lr):
        joint_updates[shared_var] = new_value
    self.all_update_fun = theano.function(
        inputs=[self.input_mat, self.for_how_long, self.mask_matrix],
        outputs=self.final_cost,
        updates=joint_updates,
        allow_input_downcast=True)
def create_lstm_ppl(self):
    """Build the perplexity expression for the plain LSTM predictions.

    Returns a Theano scalar:
    ``exp(total masked log-loss / number of scored tokens)``, where
    tokens with label id 0 are treated as out-of-vocabulary and are
    excluded from both the numerator and the denominator.
    """
    def timestep(predictions, label, len_example, total_len_example):
        # 1 for real vocabulary tokens, 0 for OOV (label id 0)
        label_binary = T.gt(label[0:len_example-1], 0)
        oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
        # negative log-likelihood of each gold token, OOV masked out
        # (the unused local `a = total_len_example` of the original was
        # dead code and has been removed; the parameter itself must stay
        # because theano.scan passes the non_sequences to fn)
        neg_log_lik = T.sum(
            T.log(1. / predictions[T.arange(len_example-1), label[0:len_example-1]])
            * label_binary)
        return neg_log_lik, oov_count
    result, _ = theano.scan(fn=timestep,
                        sequences=[self.lstm_predictions, self.input_mat[:, 1:], self.for_how_long],
                        non_sequences=T.sum(self.for_how_long))
    oov_count_total = T.sum(result[1])
    return T.exp(
        T.sum(result[0]).astype(theano.config.floatX)
        / (T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)
    ).astype(theano.config.floatX)
def create_final_ppl(self):
    """Build the perplexity expression for the Turing-augmented predictions.

    Identical to :meth:`create_lstm_ppl` except that it scores
    ``self.final_predictions``. Tokens with label id 0 count as
    out-of-vocabulary and are excluded from numerator and denominator.
    """
    def timestep(predictions, label, len_example, total_len_example):
        # 1 for real vocabulary tokens, 0 for OOV (label id 0)
        label_binary = T.gt(label[0:len_example-1], 0)
        oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
        # negative log-likelihood of each gold token, OOV masked out
        # (removed the original's dead local `a = total_len_example`)
        neg_log_lik = T.sum(
            T.log(1. / predictions[T.arange(len_example-1), label[0:len_example-1]])
            * label_binary)
        return neg_log_lik, oov_count
    result, _ = theano.scan(fn=timestep,
                        sequences=[self.final_predictions, self.input_mat[:, 1:], self.for_how_long],
                        non_sequences=T.sum(self.for_how_long))
    oov_count_total = T.sum(result[1])
    return T.exp(
        T.sum(result[0]).astype(theano.config.floatX)
        / (T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)
    ).astype(theano.config.floatX)
def create_ppl_function(self):
    """Compile perplexity evaluation functions for both model variants.

    Side effects: sets ``self.lstm_ppl_fun`` and ``self.final_ppl_fun``;
    each takes (input matrix, sentence lengths) and returns a scalar.
    """
    for attr, output in (("lstm_ppl_fun", self.lstm_ppl),
                         ("final_ppl_fun", self.final_ppl)):
        setattr(self, attr, theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=output,
            allow_input_downcast=True))
def __call__(self, x):
    """Predict next-step distributions for input matrix ``x``.

    Fix: the original returned ``self.pred_fun(x)``, but ``pred_fun``
    is never defined anywhere in this module (only ``lstm_pred_fun``
    and ``final_pred_fun`` are compiled in
    ``create_predict_function``), so calling the instance raised
    AttributeError -- the old ``#any problem??`` comment flagged
    exactly that. We dispatch to the Turing-augmented predictions,
    which are the model's final output.
    """
    return self.final_pred_fun(x)
def get_minibatch(full_data, full_lengths, minibatch_size, minibatch_idx):
    """Slice one minibatch out of the padded data matrix.

    Parameters
    ----------
    full_data : 2-D array (n_examples, max_len) of token ids
    full_lengths : sequence of per-example sentence lengths
    minibatch_size : number of examples per minibatch
    minibatch_idx : zero-based index of the minibatch to extract

    Returns
    -------
    (minibatch_data, lengths)
        The row slice ``full_data[start:start+minibatch_size, :]``
        (a view, as in the original) and the matching list of lengths.

    Note: the original also allocated a throw-away ``np.empty`` buffer
    that was immediately overwritten and computed an unused ``width`` /
    ``height``; that dead code has been removed.
    """
    start = minibatch_size * minibatch_idx
    lengths = [full_lengths[start + j] for j in range(minibatch_size)]
    minibatch_data = full_data[start:start + minibatch_size, :]
    return minibatch_data, lengths
def get_mask(minibatch_data, lengths):
    """Binary mask matching ``minibatch_data``, trimmed by one column.

    Row ``i`` carries ones on positions ``0 .. lengths[i]-2`` (the
    timesteps that have a next-token target) and zeros elsewhere; the
    last column is dropped because targets are shifted by one step.
    """
    mask = np.zeros_like(minibatch_data)
    for row, length in enumerate(lengths):
        # same cells as `for l_idx in range(length - 1)` in the original,
        # including the no-op behaviour for length <= 1
        mask[row, :max(length - 1, 0)] = 1
    return mask[:, :-1]
def training(args, vocab, train_data, train_lengths, valid_data, valid_lengths):
    """Train the language model with minibatch SGD and per-epoch validation.

    Selects the update/perplexity functions according to
    ``args.train_method`` (LSTM / TURING / ALL), runs up to
    ``args.max_epoch`` epochs, saves the best model by validation
    perplexity, halves the learning rate on small regressions and
    (optionally) early-stops on repeated larger regressions.
    """
    # training information
    print 'training information'
    print '-------------------------------------------------------'
    print 'method: %s' % args.train_method
    print 'vocab size: %d' % len(vocab)
    print 'sentences in training file: %d' % len(train_lengths)
    print 'max length in training file: %d' % max(train_lengths)
    print 'train file: %s' % args.train[0]
    print 'valid file: %s' % args.valid[0]
    print 'type: %s' % args.celltype
    print 'project: %d' % args.n_projection
    print 'hidden: %d' % args.n_hidden
    print 'stack: %d' % args.n_stack
    print 'learning rate: %f' % args.lr
    print 'minibatch size: %d' % args.minibatch_size
    print 'max epoch: %d' % args.max_epoch
    print 'improvement rate: %f' % args.improvement_rate
    print 'entropy reg: %f' % args.entropy_reg
    print 'save file: %s' % args.save_net
    print 'load_model: %s' % args.load_net
    print 'early-stop: %r' % args.early_stop
    print '-------------------------------------------------------'
    # NOTE(review): `celltype` stays undefined if args.celltype is neither
    # 'LSTM' nor 'RNN' -- the Model(...) call below would then raise
    if args.celltype == 'LSTM':
        celltype = LSTM
    elif args.celltype == 'RNN':
        celltype = RNN
    print 'start initializing model'
    # construct model & theano functions:
    model = Model(
        input_size=args.n_projection,
        hidden_size=args.n_hidden,
        vocab_size=len(vocab),
        entropy_reg = args.entropy_reg,
        stack_size=args.n_stack, # make this bigger, but makes compilation slow
        celltype=celltype # use RNN or LSTM
    )
    if args.lr :
        model.lstm_lr = args.lr
        model.turing_lr = args.lr
        model.all_lr = args.lr
    # end-of-sentence token terminates greedy decoding
    model.stop_on(vocab.word2index["."])
    if args.load_net :
        if args.lr :
            model.load(args.load_net, args.lr)# 0 is useless
        else :
            model.load(args.load_net, 0)
    # train:
    #select correct train and prediction method according to train_method(LSTM/TURING/ALL)
    if args.train_method == 'LSTM' :
        update_fun = model.lstm_update_fun
        ppl_fun = model.lstm_ppl_fun
        lr = model.lstm_lr
        print 'update lstm learning rate : %f' % model.lstm_lr
    elif args.train_method == 'TURING' :
        update_fun = model.turing_update_fun
        ppl_fun = model.final_ppl_fun
        lr = model.turing_lr
        print 'update turing learning rate : %f' % model.turing_lr
    else :
        update_fun = model.all_update_fun
        ppl_fun = model.final_ppl_fun
        lr = model.all_lr
        print 'update all learning rate : %f' % model.all_lr
    stop_count = 0 # for stop training
    change_count = 0 # for change learning rate
    print 'start training'
    min_valid_ppl = float('inf')
    for epoch in range(args.max_epoch):
        print "\nepoch %d" % epoch
        # minibatch part
        minibatch_size = args.minibatch_size # how many examples in a minibatch
        # Python 2 integer division: remainder examples handled below
        n_train_batches = len(train_lengths)/minibatch_size
        train_ppl = 0
        for minibatch_idx in range(n_train_batches):
            minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, minibatch_size, minibatch_idx)
            mask = get_mask(minibatch_train_data, lengths)
            error = update_fun(minibatch_train_data , list(lengths) ,mask)
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            # accumulate length-weighted ppl; normalized after the loop
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
            sys.stdout.write( '\n%d minibatch idx / %d total minibatch, ppl: %f '% (minibatch_idx+1, n_train_batches, minibatch_train_ppl) )
            sys.stdout.flush() # important
        # rest minibatch if exists
        # NOTE(review): get_minibatch computes start = size * idx, so the
        # call below with size=n_rest_example and idx=minibatch_idx does
        # NOT address the trailing rows -- confirm intended offset
        if (minibatch_idx + 1) * minibatch_size != len(train_lengths):
            minibatch_idx = minibatch_idx + 1
            n_rest_example = len(train_lengths) - minibatch_size * minibatch_idx
            minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, n_rest_example, minibatch_idx)
            mask = get_mask(minibatch_train_data, lengths)
            error = update_fun(minibatch_train_data , list(lengths) ,mask)
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
        train_ppl = train_ppl / sum(train_lengths)
        # print 'done training'
        # valid ppl
        minibatch_size = min(20, len(valid_lengths))
        valid_ppl = 0
        n_valid_batches = len(valid_lengths)/minibatch_size
        for minibatch_idx in range(n_valid_batches):
            minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, minibatch_size, minibatch_idx)
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        # last minibatch (same offset caveat as the training remainder above)
        if (minibatch_idx + 1) * minibatch_size != len(valid_lengths):
            minibatch_idx = minibatch_idx + 1
            n_rest_example = len(valid_lengths) - minibatch_size * minibatch_idx
            minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, n_rest_example, minibatch_idx)
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        valid_ppl = valid_ppl / sum(valid_lengths)
        print "\ntrain ppl: %f, valid ppl: %f" % (train_ppl, valid_ppl)
        # new best model: save and reset the stop / lr-change counters
        if valid_ppl < min_valid_ppl:
            min_valid_ppl = valid_ppl
            model.save(args.save_net, vocab)
            if args.train_method != 'LSTM' :
                model.save_turing(args.save_net)
            stop_count = 0
            change_count = 0
            print "save best model"
            continue
        if args.early_stop:
            # large regression: count towards stopping; small regression:
            # halve all learning rates
            if (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate:
                if stop_count > 2 or lr < 1e-6:
                    print 'stop training'
                    break
                stop_count = stop_count + 1
            elif (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate * 0.5:
                # if change_count > 2:
                print 'change learning rate from %f to %f' % (lr, lr/2)
                model.lstm_lr = model.lstm_lr / 2.
                model.turing_lr = model.turing_lr / 2.
                model.all_lr = model.all_lr / 2.
                if args.train_method == 'LSTM' :
                    lr = model.lstm_lr
                elif args.train_method == 'TURING' :
                    lr = model.turing_lr
                else :
                    lr = model.all_lr
                # change_count = change_count + 1
def testing(args, test_data, test_lengths):
    """Print the per-sentence perplexity of a saved model on test data.

    NOTE(review): this function reads the module-level ``vocab`` created
    in the ``__main__`` block (it is not a parameter) -- confirm before
    reusing it outside the script entry point.
    """
    # dummy sizes: the real shapes are restored by model_load.load() below
    model_load = Model(
        input_size=1,
        hidden_size=1,
        vocab_size=1,
        stack_size=1, # make this bigger, but makes compilation slow
        celltype=RNN # use RNN or LSTM
    )
    model_load.stop_on(vocab.word2index["."])
    # the Turing weights live in a separate '.turing' file
    if args.train_method != 'LSTM' :
        if not os.path.isfile(args.load_net + '.turing') :
            print "there is no trained turing file so we can't test by turing model!!"
            sys.exit()
    model_load.load(args.load_net, 0)
    # test ppl
    #select correct train and prediction method according to train_method(LSTM/TURING/ALL)
    if args.train_method == 'LSTM' :
        ppl_fun = model_load.lstm_ppl_fun
    else :
        ppl_fun = model_load.final_ppl_fun
    # one sentence per minibatch; print each sentence's perplexity
    minibatch_size = 1
    n_test_batches = len(test_lengths)
    for minibatch_idx in range(n_test_batches):
        minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, minibatch_size, minibatch_idx)
        minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
        print minibatch_test_ppl
# command-line entry point: train a new model or test a saved one
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    args = parse_args(parser)
    # if no args are passed
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()
    if args.train:
        # build the vocabulary from the training corpus, then train
        vocab = build_vocab(args.train[0])
        train_data, train_lengths = load_data(args.train[0], vocab, 'train')
        valid_data, valid_lengths = load_data(args.valid[0], vocab, 'valid')
        training(args, vocab, train_data, train_lengths, valid_data, valid_lengths)
    elif args.test:
        # reuse the vocabulary pickled alongside the saved network
        # (testing() reads this module-level `vocab`)
        vocab = pickle.load(open(args.load_net+'.vocab', "rb"))
        test_data, test_lengths = load_data(args.test[0], vocab, 'test')
        testing(args, test_data, test_lengths)
| darongliu/Lstm_Turing_LM | lstm-neural-turing-machines-lm/analysis/v1-one-weight-same-entropy/lm_v4.py | Python | mit | 26,033 | [
"NEURON"
] | 9e558ccfa59c44e4680df6fefb5c81713b240cb653eb92a220f7075e2b770726 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from kivy.lang import Builder
from kivy.uix.recycleview import RecycleView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.metrics import dp
from kivy.uix.label import Label
from kivy.properties import StringProperty
from kivy.properties import Property
from kivy.properties import BoundedNumericProperty
from kivy.properties import NumericProperty
from kivy.properties import AliasProperty
# noinspection PyProtectedMember
from kivy.properties import dpi2px
from kivy.graphics.opengl import GL_MAX_TEXTURE_SIZE
from ORCA.widgets.core.Label import cLabel
from ORCA.widgets.core.TouchRectangle import cTouchRectangle
from ORCA.utils.TypeConvert import ToUnicode
from ORCA.utils.TypeConvert import ToHex
from ORCA.utils.RemoveNoClassArgs import RemoveNoClassArgs
__all__ = ['cScrollableLabelLarge']
Builder.load_string('''
<cScrollableLabelLargeInner>:
RecycleBoxLayout:
default_size_hint: 1, None
size_hint: None,None
height: self.minimum_height
''')
# noinspection PyUnusedLocal
class cScrollableLabelLarge(Widget):
    """ Main Widget to display a large text

    By default, x and y scrolling is enabled.
    Horizontal scrolling can be disabled by passing noxscroll = False.
    Supports background color for the Label.
    As implementation, it is a Widget which contains a background (if a
    color is given) and a customized RecycleView.
    """

    text = StringProperty('')
    #font_size = Property('20sp')

    def __init__(self, **kwargs):
        # forward everything except geometry kwargs to the inner view
        kwargsInner = {}
        for k in kwargs:
            if k not in ["size_hint", "size", "pos", "pos_hint"]:
                kwargsInner[k] = kwargs[k]
        self.oScrollableLabelLargeInner = cScrollableLabelLargeInner(**kwargsInner)
        # bugfix: the original used super(self.__class__, self), which
        # recurses endlessly as soon as this class is subclassed -- name
        # the class explicitly instead
        super(cScrollableLabelLarge, self).__init__(**RemoveNoClassArgs(dInArgs=kwargs, oObject=Widget))
        self.oBackGround = None
        if "background_color" in kwargs:
            self.oBackGround = cTouchRectangle(size=self.size, pos=self.pos, background_color=kwargs["background_color"])
            self.add_widget(self.oBackGround)
            del kwargs["background_color"]
        self.oScrollableLabelLargeInner.size = self.size
        self.oScrollableLabelLargeInner.pos = self.pos
        self.add_widget(self.oScrollableLabelLargeInner)
        self.bind(pos=self.update_graphics_pos, size=self.update_graphics_size)

    def update_graphics_pos(self, instance, value):
        """ Updates the child widget positions (background and RecycleView) """
        if self.oBackGround is not None:
            self.oBackGround.pos = value
        self.oScrollableLabelLargeInner.pos = value

    def update_graphics_size(self, instance, value):
        """ Updates the child widget sizes (background and RecycleView) """
        if self.oBackGround is not None:
            self.oBackGround.size = value
        self.oScrollableLabelLargeInner.size = value

    def IncreaseFontSize(self, *args):
        """ Pass through function for the RecycleView """
        self.oScrollableLabelLargeInner.IncreaseFontSize(args)

    def DecreaseFontSize(self, *args):
        """ Pass through function for the RecycleView """
        self.oScrollableLabelLargeInner.DecreaseFontSize(args)

    def on_text(self, instance, value):
        """ Pass through function for the RecycleView """
        self.oScrollableLabelLargeInner.text = value

    def on_oOrcaWidget(self, instance, value):
        """ Passes the OrcaWidget to the children """
        if self.oBackGround is not None:
            self.oBackGround.oOrcaWidget = value
        self.oScrollableLabelLargeInner.oOrcaWidget = value

    def _get_font_size(self):
        """Returns the font size """
        return self.oScrollableLabelLargeInner.fFontSize

    def _set_font_size(self, value):
        """Passes the change of font size """
        self.oScrollableLabelLargeInner.font_size = value

    def EnableWidget(self, *, bEnable: bool) -> bool:
        # toggles visibility by restoring / zeroing the widgets' opacity
        # NOTE(review): declared to return bool but falls through with
        # None, matching the original behaviour -- confirm callers
        if bEnable:
            if self.oBackGround:
                self.oBackGround.opacity = self.oScrollableLabelLargeInner.oOrcaWidget.fOrgOpacity
            self.oScrollableLabelLargeInner.opacity = self.oScrollableLabelLargeInner.oOrcaWidget.fOrgOpacity
        else:
            if self.oBackGround:
                self.oBackGround.opacity = 0.0
            self.oScrollableLabelLargeInner.opacity = 0.0

    font_size = AliasProperty(_get_font_size, _set_font_size)
# noinspection PyUnusedLocal
class cLineLayoutBase(BoxLayout):
    """ embedded class to present a single line of text

    NOTE(review): instances are created from a per-widget clone of this
    class (see cScrollableLabelLargeInner.__init__, which builds
    ``cLineLayout`` via type() and injects the class attribute
    ``oScrollableLabelLargeInner``). The ``super(self.__class__, ...)``
    call below is therefore deliberate: the clone derives directly from
    BoxLayout and is NOT a subclass of this class, so the usual explicit
    class name cannot be used here.
    """

    # line text / font size, mirrored onto the inner cLabel via the
    # on_text / on_font_size handlers
    text = StringProperty("")
    font_size = NumericProperty(0)

    def __init__(self, **kwargs):
        super(self.__class__,self).__init__(**RemoveNoClassArgs(dInArgs=kwargs,oObject=BoxLayout))
        # create the label with the font arguments collected by the owner
        self.oLabel = cLabel(**self.oScrollableLabelLargeInner.kwFontArgs)
        if self.oScrollableLabelLargeInner.oOrcaWidget is not None:
            self.oLabel.oOrcaWidget = self.oScrollableLabelLargeInner.oOrcaWidget
        self.add_widget(self.oLabel)

    def on_size(self,*largs):
        """ Updates the child widget size (label) """
        self.oLabel.height = self.height
        self.oLabel.text_size = self.size

    def on_text(self,instance,value):
        """ sets the text """
        self.oLabel.text=value

    def on_font_size(self,instance,value):
        """ sets the font size """
        self.oLabel.font_size=value
# noinspection PyProtectedMember,PyUnusedLocal
class cScrollableLabelLargeInner(RecycleView):
    """ The "real" scrollable label (without background) """

    # to have similar properties as a Label
    font_size = Property('20sp')
    text = StringProperty('')
    oOrcaWidget = Property(None)
    # Internal Property which handles font resizing (not working as RecycleView can't manage change of cached widget)
    fFontSize = BoundedNumericProperty(dpi2px(20,'sp'), min=4.0, max=96.0,errorhandler=lambda x: 96.0 if x > 96.0 else 4.0)

    def __init__(self, **kwargs):
        # we create a new class on the fly to pass the font args to the
        # creation process, as the view adapter creates without arguments
        self.cLineLayout = type('cLineLayout', cLineLayoutBase.__bases__, dict(cLineLayoutBase.__dict__))
        # passes myself to the embedded class. Not good style but
        # RecycleView limits passing customized parameters
        self.cLineLayout.oScrollableLabelLargeInner = self
        self.oOrcaWidget = kwargs.get('ORCAWIDGET', None)
        # maximal len (in chars) of a single line of the given text
        self.iMaxLen = 0
        # Setting the scrolltypes / bars for the RecycleView
        self.scroll_type = ['bars', 'content']
        self.scroll_wheel_distance = dp(114)
        self.bar_width = dp(10)
        # The original passed data array
        self.aData = []
        # Internal flag to distinguish between first show and (re)setting text
        self.bInit = False
        # The maximum width of a char
        self.iMaxCharwidth = 0
        # The maximum characters per line
        self.iMaxCharsPerLine = 0
        if "font_size" in kwargs:
            self.on_font_size(None, kwargs["font_size"])
        # Retrieving the genuine font properties of a label to pass only
        # those arguments to the label (removing pos, hints, background colors, etc.)
        self.aFontProperties = Label._font_properties + ("background_color",)
        # standard font args, if nothing is given
        self.kwFontArgs = {"halign": "left", "valign": "top", "max_lines": 1, "font_size": 20}
        # add / update the font args to be passed to the Label
        for k in kwargs:
            if k in self.aFontProperties:
                self.kwFontArgs[k] = kwargs[k]
        self.kwFontArgs["font_size"] = self.fFontSize
        self.kwFontArgs.pop("text", None)
        # Parameter flag to disable horizontal scrolling
        self.bNoXScroll = kwargs.get("noxscroll", False)
        self.bMarkup = kwargs.get("markup", False)
        # A dummy label to measure the width of the widest character
        self.oLabel = Label(**RemoveNoClassArgs(dInArgs=self.kwFontArgs, oObject=Label))
        # bugfix: explicit class name instead of super(self.__class__, ...)
        # which recurses endlessly when this class is subclassed
        super(cScrollableLabelLargeInner, self).__init__(**RemoveNoClassArgs(dInArgs=kwargs, oObject=RecycleView))
        # This manages the distance between lines
        self.layout_manager.default_size = (None, self.oLabel._label.get_extents('W')[1])
        #self.layout_manager.default_size = (None, self.fFontSize*1.1)
        self.layout_manager.orientation = 'vertical'
        # we need to handle size changes
        self.bind(size=self.update_size)
        self.bind(text=self.on_textinner)
        self.text = kwargs.get("text", "")

    def on_fFontSize(self, instance, value):
        """ Will handle font size changes """
        if self.layout_manager is not None:
            self.kwFontArgs["font_size"] = self.fFontSize
            self.oLabel.font_size = self.fFontSize
            self.layout_manager.default_size = (None, self.oLabel._label.get_extents('W')[1])
            self.SetData(self.aData)

    def on_font_size(self, instance, value):
        """Helper function to manage strings with metrics passed as arguments (eg '12dp') """
        try:
            fValue = float(value)
        # bugfix: the original used a bare `except:`; only conversion
        # failures should fall back to metric-string parsing
        except (TypeError, ValueError):
            fValue = dpi2px(value[:-2], value[-2:])
        self.fFontSize = fValue

    def on_textinner(self, instance, value):
        """ helper to have a Label like functionality to set the caption """
        self.update_size(None, None)

    def IncreaseFontSize(self, *args):
        """ Increase the font size """
        self.fFontSize += 1.0

    def DecreaseFontSize(self, *args):
        """ Decrease the font size """
        self.fFontSize -= 1.0

    def SetData(self, aData):
        """ Passes the data to the RecycleView and sets the layout manager size """
        self.data = [{'text': ToUnicode(x), "font_size": self.fFontSize} for x in aData]
        if self.bNoXScroll:
            self.layout_manager.width = self.width
        else:
            self.layout_manager.width = self.iMaxCharwidth * self.iMaxCharsPerLine
        self.viewclass = self.cLineLayout
        self.refresh_from_data()

    def update_size(self, instance, value):
        """ Fits the text into layout_manager lines.

        If noxscroll, all lines will be split up to fit to the widget size.
        If x scrolling is enabled, we look if the maximum line length
        exceeds the TEXTURE SIZE. In that case we split the lines as well
        and set the scrolling window size to the texture size.
        If x scrolling is enabled and all lines fit to the texture size,
        we pass the unchanged array. """
        # [100, 100] is Kivy's default widget size: layout not done yet
        if self.size == [100, 100]:
            return
        aData = []
        bDoLineBreak = False
        self.iMaxCharwidth = self.oLabel._label.get_extents('W')[0]
        self.iMaxCharsPerLine = int(self.width / self.iMaxCharwidth)
        if not self.bNoXScroll:
            self.aData = self.text.split('\n')
            self.iMaxLen = len(max(self.aData, key=len))
            if (self.iMaxCharwidth * self.iMaxLen) > GL_MAX_TEXTURE_SIZE:
                self.iMaxCharsPerLine = int(GL_MAX_TEXTURE_SIZE / self.iMaxCharwidth)
                bDoLineBreak = True
            else:
                self.iMaxCharsPerLine = self.iMaxLen
        else:
            bDoLineBreak = True
        if bDoLineBreak:
            if self.oLabel is not None:
                if len(self.text) > 10000:
                    # huge text: cheap hard wrap at iMaxCharsPerLine,
                    # avoiding the expensive label render below
                    aData = self.text.split('\n')
                    i = 0
                    iEnd = len(aData)
                    while i < iEnd:
                        if len(aData[i]) > self.iMaxCharsPerLine:
                            aData.insert(i + 1, aData[i][self.iMaxCharsPerLine:])
                            aData[i] = aData[i][:self.iMaxCharsPerLine]
                            iEnd += 1
                        i += 1
                else:
                    # let the label engine wrap the text, then harvest the
                    # wrapped lines from its private render cache
                    self.oLabel.size = self.size
                    self.oLabel.text_size = (self.width, None)
                    self.oLabel.text = self.text
                    self.oLabel._label.render()
                    aData = []
                    for oLine in self.oLabel._label._cached_lines:
                        if len(oLine.words) > 0:
                            uText = u''
                            for oWord in oLine.words:
                                if self.bMarkup:
                                    uText += self.AddMarkUps(oWord)
                                else:
                                    uText += oWord.text
                            aData.append(uText)
                        else:
                            aData.append(u'')
                    self.oLabel.text = ""
            self.aData = aData
            self.SetData(aData)
        else:
            self.SetData(self.aData)

    def AddMarkUps(self, oWord):
        # re-applies the markup tags of a rendered word to its raw text
        uText = oWord.text
        if oWord.options["bold"]:
            uText = self.AddMarkUp(uText, "b")
        if oWord.options["italic"]:
            uText = self.AddMarkUp(uText, "i")
        if oWord.options["underline"]:
            uText = self.AddMarkUp(uText, "u")
        if oWord.options["strikethrough"]:
            uText = self.AddMarkUp(uText, "s")
        if oWord.options["font_name"] != "Roboto":
            uText = self.AddMarkUp(uText, "font", oWord.options["font_name"])
        if oWord.options["font_size"] != self.fFontSize:
            uText = self.AddMarkUp(uText, "size", ToUnicode(oWord.options["font_size"]))
        if oWord.options["color"] != [1, 1, 1, 1]:
            uHexColor = u''
            for iColor in oWord.options["color"]:
                uHexColor += ToHex(int(iColor * 255))
            uText = self.AddMarkUp(uText, "color", '#' + uHexColor)
        return uText

    # noinspection PyMethodMayBeStatic
    def AddMarkUp(self, uText, uMarkUp, uValue=None):
        # wraps uText in [tag]...[/tag] or [tag=value]...[/tag]
        if uValue is None:
            return "[{1}]{0}[/{1}]".format(uText, uMarkUp)
        else:
            return "[{1}={2}]{0}[/{1}]".format(uText, uMarkUp, uValue)
| thica/ORCA-Remote | src/ORCA/widgets/core/ScrollableLabelLarge.py | Python | gpl-3.0 | 15,619 | [
"ORCA"
] | d9233284c81ba4f6376ffd77dc94ce117e6947982dc0e021d6107e251d9082e2 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# PMDA
# Copyright (c) 2018 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
"""
LeafletFinder Analysis tool --- :mod:`pmda.leaflet`
===================================================
This module contains parallel versions of analysis tasks in
:mod:`MDAnalysis.analysis.leaflet`.
.. autoclass:: LeafletFinder
:members:
:undoc-members:
:inherited-members:
"""
from __future__ import absolute_import, division
import numpy as np
import dask.bag as db
import networkx as nx
from scipy.spatial import cKDTree
import MDAnalysis as mda
import dask
from joblib import cpu_count
from .parallel import ParallelAnalysisBase, Timing
from .util import timeit
class LeafletFinder(ParallelAnalysisBase):
"""Parallel Leaflet Finder analysis.
Identify atoms in the same leaflet of a lipid bilayer.
This class implements and parallelizes the *LeafletFinder* algorithm
[Michaud-Agrawal2011]_.
The parallelization is done based on [Paraskevakos2018]_.
Attributes
----------
Parameters
----------
Universe : :class:`~MDAnalysis.core.groups.Universe`
a :class:`MDAnalysis.core.groups.Universe` (the
`atomgroup` must belong to this Universe)
atomgroup : tuple of :class:`~MDAnalysis.core.groups.AtomGroup`
atomgroups that are iterated in parallel
Note
----
At the moment, this class has far fewer features than the serial
version :class:`MDAnalysis.analysis.leaflet.LeafletFinder`.
This version offers LeafletFinder algorithm 4 ("Tree-based Nearest
Neighbor and Parallel-Connected Components (Tree-Search)") in
[Paraskevakos2018]_.
Currently, periodic boundaries are not taken into account.
The calculation is parallelized on a per-frame basis;
at the moment, no parallelization over trajectory blocks is performed.
"""
def __init__(self, universe, atomgroups):
self._atomgroup = atomgroups
self._results = list()
super(LeafletFinder, self).__init__(universe, (atomgroups,))
def _find_connected_components(self, data, cutoff=15.0):
"""Perform the Connected Components discovery for the atoms in data.
Parameters
----------
data : Tuple of lists of Numpy arrays
This is a data and index tuple. The data are organized as
`([AtomPositions1<NumpyArray>,AtomPositions2<NumpyArray>],
[index1,index2])`. `index1` and `index2` are showing the
position of the `AtomPosition` in the adjacency matrix and
allows to correct the node number of the produced graph.
cutoff : float (optional)
head group-defining atoms within a distance of `cutoff`
Angstroms are deemed to be in the same leaflet [15.0]
Returns
-------
values : list.
A list of all the connected components of the graph that is
generated from `data`
"""
# pylint: disable=unsubscriptable-object
window, index = data[0]
num = window[0].shape[0]
i_index = index[0]
j_index = index[1]
graph = nx.Graph()
if i_index == j_index:
train = window[0]
test = window[1]
else:
train = np.vstack([window[0], window[1]])
test = np.vstack([window[0], window[1]])
tree = cKDTree(train, leafsize=40)
edges = tree.query_ball_point(test, cutoff)
edge_list = [list(zip(np.repeat(idx, len(dest_list)), dest_list))
for idx, dest_list in enumerate(edges)]
edge_list_flat = np.array([list(item) for sublist in edge_list for
item in sublist])
if i_index == j_index:
res = edge_list_flat.transpose()
res[0] = res[0] + i_index - 1
res[1] = res[1] + j_index - 1
else:
removed_elements = list()
for i in range(edge_list_flat.shape[0]):
if (edge_list_flat[i, 0] >= 0 and
edge_list_flat[i, 0] <= num - 1) and \
(edge_list_flat[i, 1] >= 0 and
edge_list_flat[i, 1] <= num - 1) or \
(edge_list_flat[i, 0] >= num and
edge_list_flat[i, 0] <= 2 * num - 1) and \
(edge_list_flat[i, 1] >= num and
edge_list_flat[i, 1] <= 2 * num - 1) or \
(edge_list_flat[i, 0] >= num and
edge_list_flat[i, 0] <= 2 * num - 1) and \
(edge_list_flat[i, 1] >= 0 and
edge_list_flat[i, 1] <= num - 1):
removed_elements.append(i)
res = np.delete(edge_list_flat, removed_elements,
axis=0).transpose()
res[0] = res[0] + i_index - 1
res[1] = res[1] - num + j_index - 1
if res.shape[1] == 0:
res = np.zeros((2, 1), dtype=np.int)
edges = [(res[0, k], res[1, k]) for k in range(0, res.shape[1])]
graph.add_edges_from(edges)
# partial connected components
subgraphs = nx.connected_components(graph)
comp = [g for g in subgraphs]
return comp
# pylint: disable=arguments-differ
def _single_frame(self, ts, atomgroups, scheduler_kwargs, n_jobs,
cutoff=15.0):
"""Perform computation on a single trajectory frame.
Must return computed values as a list. You can only **read**
from member variables stored in ``self``. Changing them during
a run will result in undefined behavior. `ts` and any of the
atomgroups can be changed (but changes will be overwritten
when the next time step is read).
Parameters
----------
scheduler_kwargs : Dask Scheduler parameters.
cutoff : float (optional)
head group-defining atoms within a distance of `cutoff`
Angstroms are deemed to be in the same leaflet [15.0]
Returns
-------
values : anything
The output from the computation over a single frame must
be returned. The `value` will be added to a list for each
block and the list of blocks is stored as :attr:`_results`
before :meth:`_conclude` is run. In order to simplify
processing, the `values` should be "simple" shallow data
structures such as arrays or lists of numbers.
"""
# Get positions of the atoms in the atomgroup and find their number.
atoms = ts.positions[atomgroups.indices]
matrix_size = atoms.shape[0]
arranged_coord = list()
part_size = int(matrix_size / n_jobs)
# Partition the data based on a 2-dimensional partitioning
for i in range(1, matrix_size + 1, part_size):
for j in range(i, matrix_size + 1, part_size):
arranged_coord.append(([atoms[i - 1:i - 1 + part_size],
atoms[j - 1:j - 1 + part_size]],
[i, j]))
# Distribute the data over the available cores, apply the map function
# and execute.
parAtoms = db.from_sequence(arranged_coord,
npartitions=len(arranged_coord))
parAtomsMap = parAtoms.map_partitions(self._find_connected_components,
cutoff=cutoff)
Components = parAtomsMap.compute(**scheduler_kwargs)
# Gather the results and start the reduction. TODO: think if it can go
# to the private _reduce method of the based class.
result = list(Components)
# Create the overall connected components of the graph
while len(result) != 0:
item1 = result[0]
result.pop(0)
ind = []
for i, item2 in enumerate(Components):
if item1.intersection(item2):
item1 = item1.union(item2)
ind.append(i)
ind.reverse()
for j in ind:
Components.pop(j)
Components.append(item1)
# Change output for and return.
indices = [np.sort(list(g)) for g in Components]
return indices
# pylint: disable=arguments-differ
def run(self,
start=None,
stop=None,
step=None,
n_jobs=-1,
cutoff=15.0):
"""Perform the calculation
Parameters
----------
start : int, optional
start frame of analysis
stop : int, optional
stop frame of analysis
step : int, optional
number of frames to skip between each analysed frame
n_jobs : int, optional
number of tasks to start, if `-1` use number of logical cpu cores.
This argument will be ignored when the distributed scheduler is
used
"""
# are we using a distributed scheduler or should we use
# multiprocessing?
scheduler = dask.config.get('scheduler', None)
if scheduler is None:
# maybe we can grab a global worker
try:
scheduler = dask.distributed.worker.get_client()
except ValueError:
pass
if n_jobs == -1:
n_jobs = cpu_count()
# we could not find a global scheduler to use and we ask for a single
# job. Therefore we run this on the single threaded scheduler for
# debugging.
if scheduler is None and n_jobs == 1:
scheduler = 'single-threaded'
# fall back to multiprocessing, we tried everything
if scheduler is None:
scheduler = 'multiprocessing'
scheduler_kwargs = {'scheduler': scheduler}
if scheduler == 'multiprocessing':
scheduler_kwargs['num_workers'] = n_jobs
with timeit() as b_universe:
universe = mda.Universe(self._top, self._traj)
start, stop, step = self._trajectory.check_slice_indices(
start, stop, step)
with timeit() as total:
with timeit() as prepare:
self._prepare()
with self.readonly_attributes():
timings = list()
times_io = []
for frame in range(start, stop, step):
with timeit() as b_io:
ts = universe.trajectory[frame]
times_io.append(b_io.elapsed)
with timeit() as b_compute:
components = self. \
_single_frame(ts=ts,
atomgroups=self._atomgroup,
scheduler_kwargs=scheduler_kwargs,
n_jobs=n_jobs,
cutoff=cutoff)
timings.append(b_compute.elapsed)
leaflet1 = self._atomgroup[components[0]]
leaflet2 = self._atomgroup[components[1]]
self._results.append([leaflet1, leaflet2])
with timeit() as conclude:
self._conclude()
self.timing = Timing(times_io,
np.hstack(timings), total.elapsed,
b_universe.elapsed, prepare.elapsed,
conclude.elapsed)
return self
def _conclude(self):
    """Finalize the analysis: publish the per-frame [leaflet1, leaflet2]
    AtomGroup pairs accumulated during run() as the public ``results``."""
    self.results = self._results
| MDAnalysis/pmda | pmda/leaflet.py | Python | gpl-2.0 | 11,864 | [
"MDAnalysis"
] | 0119322030513f679bdea7ca184094808fb60aa51b8bef2f3eab60f9ef910935 |
"""
How to reference supporting evidence for some object in the database.
See: "Metadata in PyOpenWorm" for discussion on semantics of what giving
evidence for an object means.
"""
from __future__ import absolute_import
from __future__ import print_function
import PyOpenWorm as P
from PyOpenWorm.evidence import Evidence
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.document import Document
from PyOpenWorm.data import Data
from PyOpenWorm.context import Context
# Build a dummy in-memory database configuration (ZODB backend).
conf = Data({'rdf.source': 'ZODB'})

# Connect to the database with the dummy configuration.
P.connect(conf=conf)

ctx = Context(ident='http://example.org/data')
evctx = Context(ident='http://example.org/meta')

# The Neuron object we will attach evidence to.
aval = ctx(Neuron)(name='AVAL')

# An Evidence object wrapping a Document with `doi` and `pmid` populated.
# See `PyOpenWorm/evidence.py` for other available fields.
document = evctx(Document)(key='Anonymous2011', doi='125.41.3/ploscompbiol', pmid='12345678')
evidence = evctx(Evidence)(key='Anonymous2011', reference=document)

# The Evidence object asserts something about the enclosed dataObject.
# Here we add a receptor to the Neuron made earlier and "assert it";
# per the discussion (see module docstring) this asserts the existence of
# receptor UNC-8 on neuron AVAL.
aval.receptor('UNC-8')
evidence.supports(ctx.rdf_object)

# Persist both contexts to the database.
ctx.save_context()
evctx.save_context()

# Inspect what the stored Evidence objects contain.
for stored_evidence in evctx.stored(Evidence)().load():
    print(stored_evidence.reference())
    print(stored_evidence.supports())

# Disconnect from the database.
P.disconnect()
| gsarma/PyOpenWorm | examples/add_reference.py | Python | mit | 1,637 | [
"NEURON"
] | fd3f88ad92e8b5b842b37ad483277b054aa45c9cbfd24d2b46c3a2a36920e15b |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N test functions"""
from __future__ import absolute_import, print_function
import tvm
from tvm import relay
from tvm.contrib import utils, graph_executor, download
from hashlib import md5
from itertools import zip_longest, combinations
import numpy as np
from PIL import Image
import os
from . import _infrastructure
from tvm.relay.op.contrib import get_pattern_table
def get_real_image(im_height, im_width):
    """Download the test elephant image and return it as a uint8 batch of one
    in NHWC layout with shape (1, im_height, im_width, 3)."""
    base_url = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    file_name = "elephant-299.jpg"
    local_path = download.download_testdata(
        os.path.join(base_url, file_name), file_name, module="data"
    )
    resized = Image.open(local_path).resize((im_height, im_width))
    pixels = np.array(resized).astype("uint8")
    return np.reshape(pixels, (1, im_height, im_width, 3))
def assert_lib_hash(lib, golden):
    """Check that the Ethos-N runtime modules in a library hash to the same values
    as given by the golden hash(es).

    If there's only one Ethos-N module, the golden hash may be provided as a str.
    If there's multiple, a set of golden hashes should be provided to correspond
    with each Ethos-N module that is expected.

    This function is used to ensure that no change is made which alters the output
    of a compilation. If such a change is made deliberately (eg. to fix a bug) then
    the golden hash should be updated after verifying on hardware that the behaviour
    is still correct.

    This method is used because of the lack of hardware availability in upstream CI.
    """
    # Convert str hash into a set of hashes
    if isinstance(golden, str):
        golden = {golden}

    temp = utils.tempdir()
    path = temp.relpath("lib.cmm")
    hash_set = set()
    for mod in lib.imported_modules:
        if mod.type_key == "ethos-n":
            mod.save(path)
            # Use a context manager so the handle is closed promptly;
            # the original open(path, "rb").read() leaked the file handle.
            with open(path, "rb") as saved:
                lib_hash = md5(saved.read()).hexdigest()
            hash_set.add(lib_hash)

    assert hash_set == golden, "Expected hash: {} Got hash: {}".format(golden, hash_set)
def make_module(func, params):
    """Wrap a Relay expression in an IRModule, binding `params` as constants.

    Returns the type-inferred module.
    """
    func = relay.Function(relay.analysis.free_vars(func), func)
    if params:
        # bind_params_by_name returns a new function; the original code
        # discarded the return value (cf. its use in build()), so the
        # params were silently never bound.
        func = relay.build_module.bind_params_by_name(func, params)
    mod = tvm.IRModule.from_expr(func)
    return relay.transform.InferType()(mod)
def make_ethosn_composite(ethosn_expr, name):
    """Wrap `ethosn_expr` in a Composite function named `name` and return a
    call of that function over the expression's free variables."""
    free_variables = relay.analysis.free_vars(ethosn_expr)
    composite = relay.Function([relay.Var("a")], ethosn_expr)
    composite = composite.with_attr("Composite", name)
    return relay.Call(composite, free_variables)
def make_ethosn_partition(ethosn_expr):
    """Build an IRModule whose "main" calls a single Ethos-N partition
    ("ethos-n_0") containing `ethosn_expr`."""
    # Create an Ethos-N global function
    mod = tvm.IRModule({})
    vars = relay.analysis.free_vars(ethosn_expr)
    # NB: it is illegal to reuse variables inside and outside a scope in Relay
    # if you want to duplicate types and names you must re-allocate them.
    fresh_vars = [relay.Var(v.name_hint, v.type_annotation) for v in vars]
    binds = {}
    for var, fresh_var in zip(vars, fresh_vars):
        binds[var] = fresh_var
    ethosn_expr_fresh = relay.bind(ethosn_expr, binds)
    func = relay.Function(fresh_vars, ethosn_expr_fresh)
    # Mark the function as an externally-compiled Ethos-N partition.
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", "ethos-n")
    func = func.with_attr("global_symbol", "ethos-n_0")
    g1 = relay.GlobalVar("ethos-n_0")
    mod[g1] = func
    mod = relay.transform.InferType()(mod)

    # These are the vars to call the Ethos-N partition with
    more_vars = relay.analysis.free_vars(ethosn_expr)
    # Call the Ethos-N partition in main
    call_fn1 = g1(*more_vars)
    mod["main"] = relay.Function(more_vars, call_fn1)
    return relay.transform.InferType()(mod)
def get_host_op_count(mod):
    """Count the primitive operator calls left in `mod["main"]`, i.e. the
    ops that were NOT offloaded to an Ethos-N partition."""

    class Counter(tvm.relay.ExprVisitor):
        def __init__(self):
            super().__init__()
            self.count = 0

        def visit_call(self, call):
            # Only calls to primitive ops count; calls to (partition)
            # functions do not.
            if isinstance(call.op, tvm.ir.Op):
                self.count += 1
            super().visit_call(call)

    c = Counter()
    c.visit(mod["main"])
    return c.count
def build(mod, params, npu=True, expected_host_ops=0, npu_partitions=1):
    """Build a network with or without Ethos-N offloading.

    Parameters
    ----------
    mod : IRModule
        The Relay module to build.
    params : dict of str to NDArray
        The weights to build with.
    npu : bool, optional
        Whether to build with Ethos-N offloading.
    expected_host_ops : int, optional
        The number of ops expected to remain on the host.
    npu_partitions : int, optional
        The number of Ethos-N partitions expected.

    Returns
    -------
    The result of relay.build (a factory module), possibly containing
    Ethos-N compiled partitions.
    """
    relay.backend.te_compiler.get().clear()
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.ext.ethos-n.options": {"variant": get_ethosn_variant()}}
    ):
        with tvm.target.Target("llvm"):
            if npu:
                f = relay.build_module.bind_params_by_name(mod["main"], params)
                mod = tvm.IRModule()
                mod["main"] = f
                # Standard BYOC partitioning flow: pattern-match composites,
                # annotate for the ethos-n target, merge compiler regions and
                # split the Ethos-N subgraphs out into partitions.
                pattern = get_pattern_table("ethos-n")
                mod = relay.transform.InferType()(mod)
                mod = relay.transform.MergeComposite(pattern)(mod)
                mod = relay.transform.AnnotateTarget("ethos-n")(mod)
                mod = relay.transform.InferType()(mod)
                mod = relay.transform.MergeCompilerRegions()(mod)
                mod = relay.transform.InferType()(mod)
                mod = relay.transform.PartitionGraph()(mod)
                # Sanity-check how much of the graph stayed on the host.
                host_op_count = get_host_op_count(mod)
                assert (
                    host_op_count == expected_host_ops
                ), "Got {} host operators, expected {}".format(host_op_count, expected_host_ops)

                # Count functions marked Compiler="ethos-n" to verify the
                # expected number of partitions were produced.
                attrs = [
                    mod[var.name_hint].attrs
                    for var in mod.get_global_vars()
                    if mod[var.name_hint].attrs
                ]
                partition_count = sum(
                    [
                        key == "Compiler" and value == "ethos-n"
                        for attr in attrs
                        for key, value in attr.items()
                    ]
                )
                assert (
                    npu_partitions == partition_count
                ), "Got {} ethos-n partitions, expected {}".format(partition_count, npu_partitions)

            return relay.build(mod, params=params)
def run(lib, inputs, outputs, npu=True):
    """Run a module with specified inputs.

    Parameters
    ----------
    lib : runtime.Module
        The runtime module.
    inputs : dict of str to NDArray
        The input dictionary.
    outputs : int
        The expected number of outputs.
    npu : bool
        Whether or not any part of the lib is offloaded to Ethos-N.
        If it's false (i.e. it's all running on the CPU), we set
        the mocked result equal to the output so that a subsequent
        mocked run on the NPU returns the same value.

    Returns
    -------
    out : list of NDArray
        The results.
    """
    # Export and load lib to confirm this works
    lib_name = "mod.so"
    temp = utils.tempdir()
    lib_path = temp.relpath(lib_name)
    lib.export_library(lib_path)
    lib = tvm.runtime.load_module(lib_path)
    module = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    module.set_input(**inputs)
    module.run()
    out = [module.get_output(i) for i in range(outputs)]
    if not npu:
        # Record the CPU results as the mocked NPU inference result so a
        # later mocked NPU run compares equal.
        inference_result(out)
    return out
def build_and_run(
    mod, inputs, outputs, params, device=tvm.cpu(), npu=True, expected_host_ops=0, npu_partitions=1
):
    """Convenience wrapper: build `mod` (see build()) then run it (see run()).

    NOTE(review): `device` is accepted but never used -- run() always
    executes on tvm.cpu(). Kept for interface compatibility; confirm
    before removing.
    """
    lib = build(mod, params, npu, expected_host_ops, npu_partitions)
    return run(lib, inputs, outputs, npu)
def verify(answers, dtype, atol, rtol=1e-07, verify_saturation=True):
    """Compare the array of answers. Each entry is a list of outputs.

    Every pair of entries is compared element-wise with the given
    tolerances; optionally each output is also checked for saturation
    (at least 25% of values pinned at the dtype's min or max).

    NOTE(review): with fewer than two entries this only prints a warning
    and falls through without raising.
    """
    if len(answers) < 2:
        print("No results to compare: expected at least two, found ", len(answers))
    for answer in zip_longest(*answers):
        for outs in combinations(answer, 2):
            if verify_saturation:
                # Heavily saturated outputs usually indicate bad
                # quantization parameters rather than a real match.
                assert (
                    np.count_nonzero(outs[0].numpy() == np.iinfo(dtype).max)
                    < 0.25 * outs[0].numpy().size
                ), "Output is saturated: {}".format(outs[0])
                assert (
                    np.count_nonzero(outs[0].numpy() == np.iinfo(dtype).min)
                    < 0.25 * outs[0].numpy().size
                ), "Output is saturated: {}".format(outs[0])
            tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol)
def inference_result(outputs):
    """Set the expected results of an Ethos inference, if the testing
    infrastructure is available. This assumes that the entire graph
    was offloaded to the neural processor."""
    infra_present = tvm.get_global_func("relay.ethos-n.test.infra.inference_result", True)
    if not infra_present:
        return False
    return _infrastructure.inference_result(*outputs)
def test_error(mod, params, err_msg):
    """Build `mod` and assert that compilation fails with `err_msg`.

    The build must raise a TVMError whose first argument contains
    `err_msg`; it is an assertion failure if nothing is raised.
    """
    caught = None
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.ext.ethos-n.options": {"variant": get_ethosn_variant()}}
    ):
        with tvm.target.Target("llvm"):
            try:
                mod = relay.transform.InferType()(mod)
                relay.build(mod, params)
            except tvm.error.TVMError as e:
                caught = e.args[0]
            finally:
                # Always reset the compile engine cache for the next test.
                relay.backend.te_compiler.get().clear()

    assert caught is not None
    assert err_msg in caught, caught
def get_conv2d(var, shape, dtype):
    """Standard convolution to test activation functions.

    Builds a 1x1 single-channel qnn.conv2d over `var` (NHWC input of
    `shape`) with all-ones weights, followed by bias_add (all-zero bias)
    and a requantize back to `dtype`. Returns (expr, params dict).
    """
    weight_shape = (1, 1, shape[3], 1)
    w = tvm.nd.array(np.ones(weight_shape, dtype))
    weights = relay.const(w, dtype)
    conv = relay.qnn.op.conv2d(
        var,
        weights,
        input_zero_point=relay.const(0, "int32"),
        kernel_zero_point=relay.const(0, "int32"),
        input_scale=relay.const(1.0, "float32"),
        kernel_scale=relay.const(1.0, "float32"),
        kernel_size=(1, 1),
        channels=1,
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    b = tvm.nd.array(np.zeros((shape[0],), "int32"))
    biasc = relay.const(b, "int32")
    bias = relay.nn.bias_add(conv, biasc, axis=0)
    req = relay.qnn.op.requantize(
        bias,
        relay.const(1.0, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(1.1, "float32"),  # output scale
        relay.const(0, "int32"),  # output zero point
        out_dtype=dtype,
    )
    params = {"w": w, "b": b}
    return req, params
def get_conv2d_qnn_params(
    dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, channels
):
    """Derive the requantize (zero point, scale) for a conv2d output from the
    quantization parameters of its input and kernel, assuming worst-case
    accumulation over a kernel_h x kernel_w x channels window."""
    if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
        kernel_sc = kernel_sc.numpy()
    else:
        kernel_sc = [kernel_sc]

    info = np.iinfo(dtype)
    # Real-valued extremes representable by the input and kernel.
    input_hi = input_sc * (info.max - input_zp)
    input_lo = input_sc * (info.min - input_zp)
    kernel_hi = max(kernel_sc) * (info.max - kernel_zp)
    kernel_lo = min(kernel_sc) * (info.min - kernel_zp)

    # Corner cases of the accumulated output range.
    corners = [
        kernel_hi * kernel_h * kernel_w * channels * input_hi,
        kernel_lo * kernel_h * kernel_w * channels * input_hi,
        kernel_lo * kernel_h * kernel_w * channels * input_lo,
        kernel_hi * kernel_h * kernel_w * channels * input_lo,
    ]
    out_hi = max(corners)
    out_lo = min(corners)

    output_sc = (out_hi - out_lo) / (info.max - info.min)
    output_zp = int(info.min - (out_lo / output_sc))
    return output_zp, output_sc
def get_ethosn_api_version():
    """Return the Ethos-N driver-stack API version exposed by the runtime
    through the registered global function."""
    return tvm.get_global_func("relay.ethos-n.api.version")()
def get_ethosn_variant():
    """Return the Ethos-N hardware variant to compile for.

    Overridable with the ETHOSN_VARIANT_CONFIG environment variable;
    defaults to the 1 TOPS, 2:1 PLE ratio N78 configuration.
    """
    default_variant = "Ethos-N78_1TOPS_2PLE_RATIO"
    return os.environ.get("ETHOSN_VARIANT_CONFIG", default_variant)
| dmlc/tvm | tests/python/contrib/test_ethosn/infrastructure.py | Python | apache-2.0 | 12,799 | [
"VisIt"
] | 7a04a6c53711873f1a58ec119daa4532f50bc9ff19c2d058aea9d837833a0267 |
#!/usr/bin/env python3
import os
import argparse
import sys
from time import sleep
import subprocess
import glob
# Prepend user-local tool directories so they win over system binaries.
mypath = os.environ["PATH"]
os.environ["PATH"] = "/home/wl45/python/bin:/home/wl45/opt:" + mypath
my_env = os.environ.copy()  # NOTE(review): captured but unused below -- verify before removing

parser = argparse.ArgumentParser(
    description="This is a python3 script to\
    automatically analysis the simulation")
# parser.add_argument("template", help="the name of template file")
# parser.add_argument("-n", "--number", type=int, default=20,
#                     help="Number of simulation run")
parser.add_argument("-o", "--offAuto", help="turn off from Read from \
                    config file", action="store_true")
args = parser.parse_args()
# n = args.number
# protein_name = args.template.strip('/')

folder_list = glob.glob("*")
print(folder_list)
# sys.exit()
# Hard-coded selection of run folders overrides the glob above.
#folder_list = ['T089_ha', 'T089_he', 'T120_ha', 'T120_he', 'T251_ha', 'T251_he', 'top7_ha', 'top7_he', '1UBQ_ha', '1UBQ_he']
folder_list = ['T251_lp', 'top7_lp', '1UBQ_lp', 'T120_lp']
os.system("mkdir -p weilu_jul_27")
for folder in folder_list:
    os.chdir(folder)
    if(not args.offAuto):
        # config.py is expected to define number_of_run and
        # simulation_steps in this scope.
        exec (open("config.py").read())
        n = number_of_run
        steps = simulation_steps  # NOTE(review): unused below
    os.system("mkdir -p ../weilu/"+folder+"/lowest_energy")
    os.system("mkdir -p ../weilu/"+folder+"/best_q")
    os.system("sort analysis/list_of_max_q > ../weilu/q_"+folder+".dat")
    for i in range(n):
        # move
        os.chdir("analysis/"+str(i))
        # Copy the best-Q structure of each run into the collection folder.
        os.system("cp chosen.pdb ../../../weilu/"+folder+"/best_q/"+str(i)+".pdb")
        # os.system("cp ~/opt/plot_scripts/print_chosen.pml .")
        # os.system("/usr/local/bin/pymol -qc -r print_chosen.pml")
        # os.system("cp chosen.png ../../results/chosen_"+str(i)+".png")
        # os.system("cp final.png ../../results/final_"+str(i)+".png")
        # os.system("cp final.pdb ../../results/final_"+str(i)+".pdb")
        # os.system("cp final.txt ../../results/final_"+str(i)+".txt")
        # os.system("cp lowest_energy.pdb \
        #     ../../results/lowest_energy/lowest_energy_" + str(i)+".pdb")
        os.chdir("../..")
    os.chdir("..")
| luwei0917/awsemmd_script | final_move.py | Python | mit | 2,154 | [
"PyMOL"
] | 644e950860f1d78e564a1abc6a657a500d598a774002775d992d59d7fb6e286f |
import sublime
import sublime_plugin
import os.path
is_sublime_text_3 = int(sublime.version()) >= 3000
if is_sublime_text_3:
from .progress_notifier import ProgressNotifier
from .cross_platform_codecs import CrossPlaformCodecs
else:
from progress_notifier import ProgressNotifier
from cross_platform_codecs import CrossPlaformCodecs
# A base for each command
class BaseCommand(sublime_plugin.WindowCommand):
    """Common behaviour for the Gulp plugin's window commands: settings
    loading, folder lookup, results panel/tab management and deferred
    (main-thread or background) execution.

    NOTE(review): the method literally named ``async`` below is legal only
    on the Python runtime bundled with Sublime Text (``async`` became a
    reserved keyword in Python 3.7) -- confirm before targeting newer
    runtimes.
    """

    package_name = "Gulp"

    def run(self, task_name=None, task_flag=None, silent=False, paths=[]):
        # Sublime entry point. The mutable default ``paths=[]`` is shared
        # across calls but is only read here, so it is harmless.
        self.setup_data_from_settings()
        self.task_name = task_name
        self.task_flag = task_flag if task_name is not None and task_flag is not None else self.get_flag_from_task_name()
        self.silent = silent
        self.working_dir = ""
        # Search the parents of any explicit paths, else all open folders.
        self.sercheable_folders = [os.path.dirname(path) for path in paths] if len(paths) > 0 else self.window.folders()
        self.output_view = None
        self.work()

    def setup_data_from_settings(self):
        # Pull per-user configuration for this run.
        self.settings = sublime.load_settings("Gulp.sublime-settings")
        self.results_in_new_tab = self.settings.get("results_in_new_tab", False)
        self.nonblocking = self.settings.get("nonblocking", True)
        self.exec_args = self.settings.get('exec_args', False)

    def get_flag_from_task_name(self):
        # Per-task flags configured in settings (e.g. {"build": "--prod"}).
        flags = self.settings.get("flags", {})
        return flags[self.task_name] if self.task_name in flags else ""

    # Main method, override
    def work(self):
        pass

    # Panels and message

    def show_quick_panel(self, items, on_done=None, font=sublime.MONOSPACE_FONT):
        # Quick panels must be shown from the main thread.
        self.defer_sync(lambda: self.window.show_quick_panel(items, on_done, font))

    def show_input_panel(self, caption, initial_text="", on_done=None, on_change=None, on_cancel=None):
        self.window.show_input_panel(caption, initial_text, on_done, on_change, on_cancel)

    def status_message(self, text):
        sublime.status_message("%s: %s" % (self.package_name, text))

    def error_message(self, text):
        sublime.error_message("%s: %s" % (self.package_name, text))

    # Output view

    def show_output_panel(self, text):
        # Show `text` in a scratch tab or the shared output panel,
        # depending on the results_in_new_tab setting.
        if self.silent:
            self.status_message(text)
            return

        if self.results_in_new_tab:
            new_tab_path = os.path.join(self.gulp_results_path(), "Gulp Results")
            self.output_view = self.window.open_file(new_tab_path)
            self.output_view.set_scratch(True)
        else:
            self.output_view = self.window.get_output_panel("gulp_output")
            self.show_panel()

        self.output_view.settings().set("scroll_past_end", False)
        self.add_syntax()
        self.append_to_output_view(text)

    def gulp_results_path(self):
        # First searchable folder that contains the working dir, else "".
        return next(folder_path for folder_path in self.sercheable_folders if self.working_dir.find(folder_path) != -1) if self.working_dir else ""

    def gulp_results_view(self):
        # Reuse an already-open "Gulp Results" tab when we did not create one.
        if self.output_view is None:
            gulp_results = [view for view in sublime.active_window().views() if view.file_name() and os.path.basename(view.file_name()) == "Gulp Results"]
            return gulp_results[0] if len(gulp_results) > 0 else None
        else:
            return self.output_view

    def add_syntax(self):
        syntax_file = self.settings.get("syntax", "Packages/Gulp/syntax/GulpResults.tmLanguage")
        if syntax_file:
            self.output_view.set_syntax_file(syntax_file)

    def append_to_output_view_in_main_thread(self, text):
        self.defer_sync(lambda: self.append_to_output_view(text))

    def append_to_output_view(self, text):
        if not self.silent:
            # ST2 delivers bytes; decode them before inserting.
            decoded_text = text if is_sublime_text_3 else CrossPlaformCodecs.force_decode(text)
            self._insert(self.output_view, decoded_text)

    def _insert(self, view, content):
        if view is None:
            return
        if self.results_in_new_tab and view.is_loading():
            # The tab may still be opening; retry shortly.
            self.set_timeout(lambda: self._insert(view, content), 10)
        else:
            # Temporarily lift read-only so the insert command can run.
            view.set_read_only(False)
            view.run_command("view_insert", { "size": view.size(), "content": content })
            view.set_viewport_position((0, view.size()), True)
            view.set_read_only(True)

    def set_output_close_on_timeout(self):
        timeout = self.settings.get("results_autoclose_timeout_in_milliseconds", False)
        if timeout:
            self.set_timeout(self.close_panel, timeout)

    def close_panel(self):
        if self.results_in_new_tab:
            self.output_view = self.gulp_results_view()
            if self.output_view and self.output_view.file_name():
                self.window.focus_view(self.output_view)
                self.window.run_command('close_file')
        else:
            self.window.run_command("hide_panel", { "panel": "output.gulp_output" })

    def show_panel(self):
        self.window.run_command("show_panel", { "panel": "output.gulp_output" })

    # Sync/async calls

    def defer_sync(self, fn):
        # Run `fn` on the main thread as soon as possible.
        self.set_timeout(fn, 0)

    def defer(self, fn):
        self.async(fn, 0)

    def set_timeout(self, fn, delay):
        sublime.set_timeout(fn, delay)

    def async(self, fn, delay):
        # ST3: run in a background thread with a progress indicator.
        # ST2 has no async API, so fall back to running synchronously.
        if is_sublime_text_3:
            progress = ProgressNotifier('Gulp: Working')
            sublime.set_timeout_async(lambda: self.call(fn, progress), delay)
        else:
            fn()

    def call(self, fn, progress):
        fn()
        progress.stop()
class ViewInsertCommand(sublime_plugin.TextCommand):
    """TextCommand used by BaseCommand._insert to append text to the
    (possibly read-only) results view."""

    def run(self, edit, size, content):
        # `size` may arrive as a string through Sublime's command args.
        self.view.insert(edit, int(size), content)
"GULP"
] | 8bfb19d35910d23bb2953733044c2c222b59add19dd42eccc38d4319f0e1cab0 |
#!/usr/bin/env python
# -*- coding: us-ascii -*-
#----------------------------------------------------------------------------
# Copyright (C) 2009 BigDFT group (TD)
# This file is distributed under the terms of the
# GNU General Public License, see ~abinit/COPYING
# or http://www.gnu.org/copyleft/gpl.txt .
# For the initials of contributors, see ~abinit/doc/developers/contributors.txt .
#----------------------------------------------------------------------------
# Check malloc.prc (verbose format i.e. memdebug == .true. in memory.f90)
# Date: 11/05/2009
#----------------------------------------------------------------------------
import sys
from pprint import pprint
def array_name(name):
    """Return the '%'-separated suffixes of `name`, shortest first.

    For ``a%b%c`` the result is ``['c', 'b%c']`` -- every derived-type
    suffix below the root component, built from the right.
    """
    components = name.split("%")[1:]
    suffixes = []
    tail = None
    for component in reversed(components):
        tail = component + "%" + tail if tail else component
        suffixes.append(tail)
    return suffixes
def main():
    # NOTE: Python 2 syntax (print statements, fd.next()); do not run
    # under Python 3 without porting.
    print "Read the file 'malloc.prc':"
    try:
        fd = iter(open("malloc.prc","r").readlines())
    except IOError:
        sys.stdout.write("The file 'malloc.prc' does not exist!\n")
        sys.exit(1)
    #First line not useful
    fd.next()
    #Initialized dictionary of variables
    # variables maps last-component name -> [net size, [(routine, size), ...]]
    variables = dict()
    total_size = 0
    nalloc = 0
    nzero = 0
    ndealloc = 0
    for line in fd:
        a = line.split()
        #Not used
        routine = a[0]
        #Decomment here to use the fullname and derivatives
        #name = a[1]
        #Here use the last name (after the last "%")
        name = a[1].split("%")[-1]
        if name == "routine":
            #Last line
            continue
        size = int(a[2])
        total_size += size
        # Positive sizes are allocations, negative are deallocations.
        if size < 0:
            ndealloc += 1
        elif size > 0:
            nalloc += 1
        else:
            nzero += 1
        if name in variables.keys():
            variables[name][0] += size
            variables[name][1].append((routine,size))
        else:
            variables[name] = [size,[(routine,size)]]
    #Group first
    # Fold each entry into its parent derived-type suffix when present.
    keys = variables.keys()
    for key in keys:
        for var in array_name(key):
            if var in variables.keys():
                #Group
                variables[var][0] += variables[key][0]
                variables[var][1].append(variables[key][1])
                del variables[key]
                break
    print "Remaining memory=%d, allocations=%d, deallocations=%d, zero=%d" % \
        (total_size,nalloc,ndealloc,nzero)
    #Check if 0
    # Report every variable whose net allocated size is non-zero.
    ok=0
    for (key,value) in variables.items():
        if value[0] != 0:
            ok += 1
            print key
            pprint(value)
    if ok != 0:
        print "There are %d incoherencies between allocations and deallocations." % ok
    return ok
if __name__ == "__main__":
    # Exit status is the number of variables whose allocations and
    # deallocations do not balance (0 == clean run).
    retcode = main()
    sys.exit(retcode)
| SamKChang/abinit-7.10.5_multipole | tests/Scripts/memcheck.py | Python | gpl-3.0 | 2,739 | [
"ABINIT"
] | f8002ca1553c610d846ee3737ed5bec1d76143bbf25b8ff07d2dff059389ca4a |
"""
SP3 format:
https://kb.igs.org/hc/en-us/articles/201096516-IGS-Formats
"""
import xarray
import numpy as np
import logging
from pathlib import Path
from datetime import datetime, timedelta
import typing as T
from .rio import first_nonblank_line
# for NetCDF compression. too high slows down with little space savings.
ENC = {"zlib": True, "complevel": 1, "fletcher32": True}
def load_sp3(fn: Path, outfn: Path) -> xarray.Dataset:
    """Parse an SP3 (version c or d) orbit file into an xarray.Dataset.

    Parameters
    ----------
    fn : Path
        SP3 file to read.
    outfn : Path
        if truthy, the dataset is additionally written to this NetCDF file.

    Returns
    -------
    xarray.Dataset
        ``position`` and ``clock`` (and ``velocity`` / ``dclock`` when the
        file carries velocity records) indexed by time, sv and ECEF axis.
    """
    dat: T.Dict[str, T.Any] = {}
    with fn.open("r") as f:
        ln = first_nonblank_line(f)
        assert ln[0] == "#", f"failed to read {fn} line 1"
        dat["t0"] = sp3dt(ln)
        # Nepoch != number of time steps, at least for some files
        dat["Nepoch"] = int(ln[32:39])
        dat["coord_sys"] = ln[46:51]
        dat["orbit_type"] = ln[52:55]
        dat["agency"] = ln[56:60]

        f.readline()
        ln = f.readline()
        assert ln[0] == "+", f"failed to read {fn} SV header"
        # version c : Nsv <= 85, int(ln[4:6])
        # version d : Nsv <= 999, int(len[3:6])
        # (see ftp://igs.org/pub/data/format/sp3d.pdf)
        # So this should work for both versions
        Nsv = int(ln[3:6])
        svs = get_sv(ln, Nsv)
        unread_sv = Nsv - 17
        while unread_sv > 0:
            svs += get_sv(f.readline(), unread_sv)
            unread_sv -= 17

        # let us know if you need these intermediate lines parsed
        for ln in f:
            if ln.startswith("*"):
                break

        if not ln.startswith("*"):  # EOF
            raise ValueError(f"{fn} appears to be badly malformed")

        # the rest of the file is data, punctuated by epoch lines
        ecefs = []
        clocks = []
        vels = []
        # NaN-fill so records absent from the file stay NaN.
        # The original np.empty() left arbitrary garbage, which broke the
        # "is velocity present?" isnan test below and could surface
        # garbage values for missing satellites.
        ecef = np.full((Nsv, 3), np.nan)
        clock = np.full((Nsv, 2), np.nan)
        vel = np.full((Nsv, 3), np.nan)
        i = 0

        times = [sp3dt(ln)]

        for ln in f:
            if ln[0] == "*":
                # New epoch: flush the previous epoch's arrays.
                times.append(sp3dt(ln))
                ecefs.append(ecef)
                clocks.append(clock)
                vels.append(vel)
                ecef = np.full((Nsv, 3), np.nan)
                clock = np.full((Nsv, 2), np.nan)
                vel = np.full((Nsv, 3), np.nan)
                i = 0
                continue

            if ln[0] == "P":
                ecef[i, :] = (float(ln[4:18]), float(ln[18:32]), float(ln[32:46]))
                clock[i, 0] = float(ln[46:60])
                i += 1
            elif ln[0] == "V":
                # Velocity lines follow their position line, hence i - 1.
                vel[i - 1, :] = (float(ln[4:18]), float(ln[18:32]), float(ln[32:46]))
                clock[i - 1, 1] = float(ln[46:60])
            elif ln[:2] in ("EP", "EV"):
                # let us know if you want these data types
                pass
            elif len(ln) == 0:  # blank line
                pass
            elif ln.startswith("EOF"):
                break
            else:
                logging.info(f"unknown data {ln}")

    # assemble the last time step
    ecefs.append(ecef)
    clocks.append(clock)
    vels.append(vel)
    aclock = np.asarray(clocks)
    avel = np.asarray(vels)

    # assemble into final xarray.Dataset
    ds = xarray.Dataset(coords={"time": times, "sv": svs, "ECEF": ["x", "y", "z"]})
    ds["position"] = (("time", "sv", "ECEF"), ecefs)
    ds["clock"] = (("time", "sv"), aclock[:, :, 0])

    # Velocity records are optional: include them only if any epoch had
    # V lines (the original tested only the final epoch's buffer).
    if not np.isnan(avel).all():
        ds["velocity"] = (("time", "sv", "ECEF"), vels)
        ds["dclock"] = (("time", "sv"), aclock[:, :, 1])

    ds.attrs = dat

    if outfn:
        outfn = Path(outfn).expanduser()
        enc = {k: ENC for k in ds.data_vars}
        ds.to_netcdf(outfn, mode="w", encoding=enc)

    return ds
def sp3dt(ln: str) -> datetime:
    """Parse an SP3 epoch header line into a datetime.

    Some producers (e.g. ESA Swarm) emit seconds == 60 (and likewise
    minute == 60 / hour == 24); these are rolled over explicitly since
    datetime() rejects them.
    """
    carry = timedelta(0)

    hour = int(ln[14:16])
    minute = int(ln[17:19])
    second = int(ln[20:22])

    if second == 60:
        carry += timedelta(minutes=1)
        second = 0
    if minute == 60:
        carry += timedelta(hours=1)
        minute = 0
    if hour == 24:
        carry += timedelta(days=1)
        hour = 0

    parsed = datetime(
        year=int(ln[3:7]),
        month=int(ln[8:10]),
        day=int(ln[11:13]),
        hour=hour,
        minute=minute,
        second=second,
        microsecond=int(ln[23:28]),
    )
    return parsed + carry
def get_sv(ln: str, Nsv: int) -> T.List[str]:
    """Extract up to 17 three-character SV identifiers from one '+' header line."""
    if ln[0] != "+":
        return []

    first_col = 9  # SV ids start at column 9, three characters each
    return [ln[first_col + 3 * k : first_col + 3 * k + 3] for k in range(min(Nsv, 17))]
| scienceopen/pyrinex | src/georinex/sp3.py | Python | mit | 4,540 | [
"NetCDF"
] | 85cd2d965ba798da63b27a9648938ce66b65af3d4bc5d4c612de453a575eb9b2 |
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 446 $
# $Date: 2009-01-22 20:20:21 -0700 (Thu, 22 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/agent/ClientAgent.py $
import sys
import rlglue.network.Network as Network
from rlglue.types import Action
from rlglue.types import Observation
class ClientAgent:
    """Network proxy for an RL-Glue agent.

    Receives serialized calls from rl_glue over a socket, forwards them
    to the wrapped `agent` object, and sends the serialized results back.
    Every reply packet starts with (message id, payload size) ints.
    """

    kUnknownMessage = "Unknown Message: "
    network = None
    agent = None

    # (agent) -> void
    def __init__(self, agent):
        self.agent = agent
        self.network = Network.Network()

    # () -> void
    def onAgentInit(self):
        # Read the task spec, initialise the agent and ack with an empty packet.
        taskSpec = self.network.getString()
        self.agent.agent_init(taskSpec)
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentInit)
        self.network.putInt(0) # No data following this header

    # () -> void
    def onAgentStart(self):
        # First step of an episode: observation in, action out.
        observation = self.network.getObservation()
        action = self.agent.agent_start(observation)
        size = self.network.sizeOfAction(action)
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentStart)
        self.network.putInt(size)
        self.network.putAction(action)

    # () -> void
    def onAgentStep(self):
        # Subsequent steps: (reward, observation) in, action out.
        reward = self.network.getDouble()
        observation = self.network.getObservation()
        action = self.agent.agent_step(reward, observation)
        size = self.network.sizeOfAction(action)
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentStep)
        self.network.putInt(size)
        self.network.putAction(action)

    # () -> void
    def onAgentEnd(self):
        reward = self.network.getDouble()
        self.agent.agent_end(reward)
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentEnd)
        self.network.putInt(0) # No data in this packet

    # () -> void
    def onAgentCleanup(self):
        self.agent.agent_cleanup()
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentCleanup)
        self.network.putInt(0) # No data in this packet

    # () -> void
    def onAgentMessage(self):
        message = self.network.getString()
        reply = self.agent.agent_message(message)
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentMessage)
        if reply == None:
            #Brian Tanner added payload even for empty message (IE: send that the size is 0)
            self.network.putInt(4)
            self.network.putInt(0)
        else:
            #Brian Tanner, added 4 to the payload size because we putString sends the string AND its size
            self.network.putInt(len(reply) + 4)
            self.network.putString(reply)

    # (string, int, int) -> void
    def connect(self, host, port, timeout):
        # Open the socket and announce ourselves as an agent connection.
        self.network.connect(host, port, timeout);
        self.network.clearSendBuffer()
        self.network.putInt(Network.kAgentConnection)
        self.network.putInt(0) # No body to this packet
        self.network.send()

    # () -> void
    def close(self):
        self.network.close()

    # () -> void
    def runAgentEventLoop(self):
        # Dispatch loop: read (state, size) headers plus payloads and route
        # them to the handlers above until rl_glue sends kRLTerm.
        agentState = 0
        dataSize = 0
        recvSize = 0
        remaining = 0

        while agentState != Network.kRLTerm:
            self.network.clearRecvBuffer();
            recvSize = self.network.recv(8) - 8; # We may have received the header and part of the payload
            # We need to keep track of how much of the payload was recv'd
            agentState = self.network.getInt()
            dataSize = self.network.getInt()
            remaining = dataSize - recvSize;
            if (remaining < 0):
                print("Remaining was less than 0!")
                remaining = 0

            amountReceived = self.network.recv(remaining)

            # Already read the header, discard it
            self.network.getInt()
            self.network.getInt()

            switch = {
                Network.kAgentInit: lambda self: self.onAgentInit(),
                Network.kAgentStart: lambda self: self.onAgentStart(),
                Network.kAgentStep: lambda self: self.onAgentStep(),
                Network.kAgentEnd: lambda self: self.onAgentEnd(),
                Network.kAgentCleanup: lambda self: self.onAgentCleanup(),
                Network.kAgentMessage: lambda self: self.onAgentMessage() }

            if agentState in switch:
                switch[agentState](self)
            elif agentState == Network.kRLTerm:
                pass
            else:
                sys.stderr.write(Network.kUnknownMessage % (str(agentState)))
                sys.exit(1)

            self.network.send()
| mguzdial3/MineCode | python-codec/src/rlglue/agent/ClientAgent.py | Python | apache-2.0 | 4,648 | [
"Brian"
] | 4321d544b658f820d2a8953b06d967de4d39cd99a5ac79fea444909e4f924966 |
# CREATED:2013-08-13 12:02:42 by Brian McFee <brm2132@columbia.edu>
'''
Evaluation criteria for structural segmentation fall into two categories:
boundary annotation and structural annotation. Boundary annotation is the task
of predicting the times at which structural changes occur, such as when a verse
transitions to a refrain. Metrics for boundary annotation compare estimated
segment boundaries to reference boundaries. Structural annotation is the task
of assigning labels to detected segments. The estimated labels may be
arbitrary strings - such as A, B, C, - and they need not describe functional
concepts. Metrics for structural annotation are similar to those used for
clustering data.
Conventions
-----------
Both boundary and structural annotation metrics require two dimensional arrays
with two columns, one for boundary start times and one for boundary end times.
Structural annotation further require lists of reference and estimated segment
labels which must have a length which is equal to the number of rows in the
corresponding list of boundary edges. In both tasks, we assume that
annotations express a partitioning of the track into intervals. The function
:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment
boundaries to span the duration of the entire track.
Metrics
-------
* :func:`mir_eval.segment.detection`: An estimated boundary is considered
correct if it falls within a window around a reference boundary
* :func:`mir_eval.segment.deviation`: Computes the median absolute time
difference from a reference boundary to its nearest estimated boundary, and
vice versa
* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time
instants as belonging to the same structural component
* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated
annotations and compares them by the Rand Index
* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance
* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated
labels as samples of random variables :math:`Y_R, Y_E` from which the
conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and
:math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated
* :func:`mir_eval.segment.mutual_information`: Computes the standard,
normalized, and adjusted mutual information of sampled reference and
estimated segments
'''
import collections
import warnings
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
from . import util
def validate_boundary(reference_intervals, estimated_intervals, trim):
    """Sanity-check inputs to a segment boundary estimation metric.

    Warns on empty annotations and defers interval-array validation to
    :func:`mir_eval.util.validate_intervals`, which raises on malformed input.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    trim : bool
        Will the start and end events be trimmed?
    """
    # Trimming discards the first and last boundaries, so two intervals are
    # needed for anything to remain; otherwise a single interval suffices.
    min_size = 2 if trim else 1
    if len(reference_intervals) < min_size:
        warnings.warn("Reference intervals are empty.")
    if len(estimated_intervals) < min_size:
        warnings.warn("Estimated intervals are empty.")
    util.validate_intervals(reference_intervals)
    util.validate_intervals(estimated_intervals)
def validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels):
    """Sanity-check inputs to a structure estimation metric.

    Validates both labeled interval sets (intervals plus their labels) and
    raises helpful errors when they are inconsistent.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        Reference segment labels, in the same format.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        Estimated segment labels, in the same format.
    """
    for intervals, labels in ((reference_intervals, reference_labels),
                              (estimated_intervals, estimated_labels)):
        util.validate_intervals(intervals)
        # Each interval row must carry exactly one label
        if intervals.shape[0] != len(labels):
            raise ValueError('Number of intervals does not match number '
                             'of labels')
        # Non-empty annotations must partition the track starting at time 0
        if intervals.size > 0 and not np.allclose(intervals.min(), 0.0):
            raise ValueError('Segment intervals do not start at 0')
    if reference_intervals.size == 0:
        warnings.warn("Reference intervals are empty.")
    if estimated_intervals.size == 0:
        warnings.warn("Estimated intervals are empty.")
    # End times are only comparable when both annotations are non-empty
    if reference_intervals.size > 0 and estimated_intervals.size > 0:
        if not np.allclose(reference_intervals.max(),
                           estimated_intervals.max()):
            raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
              window=0.5, beta=1.0, trim=False):
    """Boundary detection hit-rate.

    An estimated boundary counts as a hit when it lies within ``window``
    seconds of a reference boundary.  Each boundary is matched at most once,
    computed as a maximal matching between reference and estimated boundary
    points subject to the window constraint.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    window : float > 0
        Tolerance window (in seconds) around each reference boundary.
        (Default value = 0.5)
    beta : float > 0
        Weighting constant for the F-measure.
        (Default value = 1.0)
    trim : boolean
        If ``True``, ignore the first and last boundary times; these
        typically denote the start (0) and end markers of the track.
        (Default value = False)

    Returns
    -------
    precision : float
        Fraction of estimated boundaries that hit a reference boundary.
    recall : float
        Fraction of reference boundaries that were hit.
    f_measure : float
        Weighted harmonic mean of ``precision`` and ``recall``.
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    ref_bounds = util.intervals_to_boundaries(reference_intervals)
    est_bounds = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        # Drop the track start/end markers
        ref_bounds = ref_bounds[1:-1]
        est_bounds = est_bounds[1:-1]
    # With no boundaries on either side, nothing can be matched
    if len(ref_bounds) == 0 or len(est_bounds) == 0:
        return 0.0, 0.0, 0.0
    hits = util.match_events(ref_bounds, est_bounds, window)
    precision = len(hits) / float(len(est_bounds))
    recall = len(hits) / float(len(ref_bounds))
    return precision, recall, util.f_measure(precision, recall, beta=beta)
def deviation(reference_intervals, estimated_intervals, trim=False):
    """Median absolute deviations between reference and estimated boundaries.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    trim : boolean
        If ``True``, ignore the first and last intervals; these typically
        denote the start (0.0) and end-of-track markers.
        (Default value = False)

    Returns
    -------
    reference_to_estimated : float
        Median time from each reference boundary to the closest estimated
        boundary.
    estimated_to_reference : float
        Median time from each estimated boundary to the closest reference
        boundary.
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    ref_bounds = util.intervals_to_boundaries(reference_intervals)
    est_bounds = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        # Drop the track start/end markers
        ref_bounds = ref_bounds[1:-1]
        est_bounds = est_bounds[1:-1]
    # No boundaries on either side: deviations are undefined
    if len(ref_bounds) == 0 or len(est_bounds) == 0:
        return np.nan, np.nan
    # All pairwise |ref - est| time differences; rows index reference
    # boundaries, columns index estimated boundaries
    dist = np.abs(np.subtract.outer(ref_bounds, est_bounds))
    ref_to_est = np.median(dist.min(axis=1))
    est_to_ref = np.median(dist.min(axis=0))
    return ref_to_est, est_to_ref
def pairwise(reference_intervals, reference_labels,
             estimated_intervals, estimated_labels,
             frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation evaluation by pair-wise agreement.

    Both annotations are sampled on a common grid of ``frame_size``-second
    frames; pairs of frames are scored on whether both annotations place
    them in the same segment label.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        Reference segment labels, in the same format.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        Estimated segment labels, in the same format.
    frame_size : float > 0
        Length (in seconds) of frames for clustering.
        (Default value = 0.1)
    beta : float > 0
        Beta value for the F-measure.
        (Default value = 1.0)

    Returns
    -------
    precision : float > 0
        Precision of detecting whether frames belong in the same cluster.
    recall : float > 0
        Recall of detecting whether frames belong in the same cluster.
    f : float > 0
        F-measure of detecting whether frames belong in the same cluster.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Labels are guaranteed consistent with intervals by validate_structure,
    # so only the intervals need the emptiness check
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Sample each annotation on the frame grid and map labels to integers
    y_ref = util.index_labels(
        util.intervals_to_samples(reference_intervals,
                                  reference_labels,
                                  sample_size=frame_size)[-1])[0]
    y_est = util.index_labels(
        util.intervals_to_samples(estimated_intervals,
                                  estimated_labels,
                                  sample_size=frame_size)[-1])[0]
    # Boolean matrices of same-label frame pairs
    agree_ref = np.equal.outer(y_ref, y_ref)
    agree_est = np.equal.outer(y_est, y_est)
    # Count unordered pairs: remove the diagonal, halve the symmetric count
    n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
    n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
    n_both = (np.logical_and(agree_ref, agree_est).sum() - len(y_ref)) / 2.0
    precision = n_both / n_agree_est
    recall = n_both / n_agree_ref
    return precision, recall, util.f_measure(precision, recall, beta=beta)
def rand_index(reference_intervals, reference_labels,
               estimated_intervals, estimated_labels,
               frame_size=0.1, beta=1.0):
    """(Non-adjusted) Rand index.

    Both annotations are sampled on a grid of ``frame_size``-second frames;
    the Rand index counts frame pairs on which the two labelings agree
    (both same-label or both different-label), normalized by the total
    number of frame pairs.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    beta : float > 0
        beta value for F-measure; accepted for API symmetry with the other
        structure metrics but not used by this function.
        (Default value = 1.0)

    Returns
    -------
    rand_index : float > 0
        Rand index
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Check for empty annotations. Don't need to check labels because
    # validate_structure makes sure they're the same size as intervals
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        # BUGFIX: previously returned a 3-tuple (0., 0., 0.) here, but this
        # metric returns a single float on every other path, and callers
        # (e.g. evaluate()) store the result as one scalar score.
        return 0.
    # Generate the cluster labels
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    # Map to index space
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    # Build the reference label agreement matrix
    agree_ref = np.equal.outer(y_ref, y_ref)
    # Repeat for estimate
    agree_est = np.equal.outer(y_est, y_est)
    # Positive agreements: both labelings put the pair in the same segment
    matches_pos = np.logical_and(agree_ref, agree_est)
    # Negative agreements: both labelings separate the pair
    matches_neg = np.logical_and(~agree_ref, ~agree_est)
    # Total number of unordered frame pairs
    n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
    # Subtract the diagonal (self-pairs) before halving the symmetric count
    n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
    n_matches_neg = matches_neg.sum() / 2.0
    rand = (n_matches_pos + n_matches_neg) / n_pairs
    return rand
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
    """Compute the Rand index, adjusted for chance.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    ari : float
        Adjusted Rand index

    .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0 or
            (ref_classes.shape[0] == est_classes.shape[0] ==
             len(reference_indices))):
        return 1.0
    contingency = _contingency_matrix(reference_indices, estimated_indices)
    # Compute the ARI using the contingency data.
    # BUGFIX: scipy.misc.comb was removed from SciPy (deprecated 1.0,
    # removed 1.3); scipy.special.comb is the documented replacement and is
    # already imported by this module.
    sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
                     contingency.sum(axis=1))
    sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
                     contingency.sum(axis=0))
    sum_comb = sum(scipy.special.comb(n_ij, 2, exact=1) for n_ij in
                   contingency.flatten())
    # Expected index under the permutation model
    prod_comb = ((sum_comb_c * sum_comb_k) /
                 float(scipy.special.comb(n_samples, 2)))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
        estimated_intervals, estimated_labels,
        frame_size=0.1):
    """Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)

    Returns
    -------
    ari_score : float > 0
        Adjusted Rand index between segmentations.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Check for empty annotations. Don't need to check labels because
    # validate_structure makes sure they're the same size as intervals
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        # BUGFIX: previously returned a 3-tuple (0., 0., 0.) here, but this
        # metric returns a single float on every other path, and callers
        # (e.g. evaluate()) store the result as one scalar score.
        return 0.
    # Generate the cluster labels
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    # Map to index space
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
    """Compute the mutual information between two sequence labelings, adjusted for
    chance.

    AMI = (MI - E[MI]) / (max(H(ref), H(est)) - E[MI]), where E[MI] is the
    expected mutual information of two random labelings with the same
    marginals (computed exactly below via a triple summation).

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    ami : float <= 1.0
        Mutual information

    .. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
        and sklearn.metrics.cluster.expected_mutual_info_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    # Calculate the MI for the two clusterings
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    # The following code is based on
    # sklearn.metrics.cluster.expected_mutual_information
    R, C = contingency.shape
    N = float(n_samples)
    # a, b are the row/column marginals (cluster sizes) of the contingency
    a = np.sum(contingency, axis=1).astype(np.int32)
    b = np.sum(contingency, axis=0).astype(np.int32)
    # There are three major terms to the EMI equation, which are multiplied to
    # and then summed over varying nij values.
    # While nijs[0] will never be used, having it simplifies the indexing.
    nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    # Stops divide by zero warnings. As its not used, no issue.
    nijs[0] = 1
    # term1 is nij / N
    term1 = nijs / N
    # term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
    # term2 uses the outer product
    log_ab_outer = np.log(np.outer(a, b))
    # term2 uses N * nij
    log_Nnij = np.log(N * nijs)
    # term3 is large, and involved many factorials. Calculate these in log
    # space to stop overflows.
    gln_a = scipy.special.gammaln(a + 1)
    gln_b = scipy.special.gammaln(b + 1)
    gln_Na = scipy.special.gammaln(N - a + 1)
    gln_Nb = scipy.special.gammaln(N - b + 1)
    gln_N = scipy.special.gammaln(N + 1)
    gln_nij = scipy.special.gammaln(nijs + 1)
    # start and end values for nij terms for each summation.
    # start is max(1, a_i + b_j - N): the smallest feasible cell count given
    # the marginals; end is min(a_i, b_j) + 1 (exclusive upper bound).
    start = np.array([[v - N + w for w in b] for v in a], dtype='int')
    start = np.maximum(start, 1)
    end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
    # emi itself is a summation over the various values.
    emi = 0
    for i in range(R):
        for j in range(C):
            for nij in range(start[i, j], end[i, j]):
                term2 = log_Nnij[nij] - log_ab_outer[i, j]
                # Numerators are positive, denominators are negative.
                # This is the log of the hypergeometric probability of
                # observing cell count nij given the marginals a[i], b[j].
                gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -
                       gln_N - gln_nij[nij] -
                       scipy.special.gammaln(a[i] - nij + 1) -
                       scipy.special.gammaln(b[j] - nij + 1) -
                       scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
                term3 = np.exp(gln)
                emi += (term1[nij] * term2 * term3)
    # Calculate entropy for each labeling
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def _normalized_mutual_info_score(reference_indices, estimated_indices):
    """Normalized mutual information between two sequence labelings.

    NMI = MI / sqrt(H(ref) * H(est)), bounded above by 1.0.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    nmi : float <= 1.0
        Normalized mutual information

    .. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
    """
    n_ref_classes = np.unique(reference_indices).shape[0]
    n_est_classes = np.unique(estimated_indices).shape[0]
    # Degenerate limit cases (no split at all, or empty data) are treated
    # as perfect matches, following sklearn's convention
    if n_ref_classes == n_est_classes and n_ref_classes in (0, 1):
        return 1.0
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    contingency = np.array(contingency, dtype='float')
    # Mutual information of the two clusterings
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    h_ref = _entropy(reference_indices)
    h_est = _entropy(estimated_indices)
    # The max() guards against division by zero when both entropies vanish
    return mi / max(np.sqrt(h_ref * h_est), 1e-10)
def mutual_information(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels,
                       frame_size=0.1):
    """Frame-clustering segmentation: mutual information metrics.

    Both annotations are sampled on a grid of ``frame_size``-second frames,
    and three variants of mutual information are computed between the
    resulting label sequences.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        Reference segment labels, in the same format.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        Estimated segment labels, in the same format.
    frame_size : float > 0
        Length (in seconds) of frames for clustering.
        (Default value = 0.1)

    Returns
    -------
    MI : float > 0
        Mutual information between segmentations.
    AMI : float
        Adjusted mutual information between segmentations.
    NMI : float > 0
        Normalized mutual information between segmentations.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Labels are guaranteed consistent with intervals by validate_structure,
    # so only the intervals need the emptiness check
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Sample each annotation on the frame grid and map labels to integers
    y_ref = util.index_labels(
        util.intervals_to_samples(reference_intervals,
                                  reference_labels,
                                  sample_size=frame_size)[-1])[0]
    y_est = util.index_labels(
        util.intervals_to_samples(estimated_intervals,
                                  estimated_labels,
                                  sample_size=frame_size)[-1])[0]
    return (_mutual_info_score(y_ref, y_est),
            _adjusted_mutual_info_score(y_ref, y_est),
            _normalized_mutual_info_score(y_ref, y_est))
def nce(reference_intervals, reference_labels, estimated_intervals,
        estimated_labels, frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: normalized conditional entropy.

    Computes the conditional entropies of the sampled reference and
    estimated label sequences, each normalized by the corresponding
    max-entropy, yielding over- and under-segmentation scores.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        Reference segment labels, in the same format.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        Estimated segment labels, in the same format.
    frame_size : float > 0
        Length (in seconds) of frames for clustering.
        (Default value = 0.1)
    beta : float > 0
        Beta for the F-measure.
        (Default value = 1.0)

    Returns
    -------
    S_over
        Over-clustering score: ``1 - H(y_est | y_ref) / log(|y_est|)``.
        If ``|y_est|==1``, then ``S_over`` will be 0.
    S_under
        Under-clustering score: ``1 - H(y_ref | y_est) / log(|y_ref|)``.
        If ``|y_ref|==1``, then ``S_under`` will be 0.
    S_F
        F-measure for (S_over, S_under).
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Labels are guaranteed consistent with intervals by validate_structure,
    # so only the intervals need the emptiness check
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Sample each annotation on the frame grid and map labels to integers
    y_ref = util.index_labels(
        util.intervals_to_samples(reference_intervals,
                                  reference_labels,
                                  sample_size=frame_size)[-1])[0]
    y_est = util.index_labels(
        util.intervals_to_samples(estimated_intervals,
                                  estimated_labels,
                                  sample_size=frame_size)[-1])[0]
    # Joint distribution over (reference, estimated) frame labels,
    # shape = (n_ref, n_est), normalized by the number of frames
    contingency = _contingency_matrix(y_ref, y_est).astype(float)
    contingency /= len(y_ref)
    # Marginal distributions
    p_est = contingency.sum(axis=0)
    p_ref = contingency.sum(axis=1)
    # Conditional entropies in bits.  scipy.stats.entropy sums over axis=0
    # and works in nats; dividing by log(2) converts to base 2 (the `base`
    # kwarg was only introduced in scipy 0.14.0).
    true_given_est = p_est.dot(scipy.stats.entropy(contingency) / np.log(2))
    pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T) / np.log(2))
    n_ref_labels, n_est_labels = contingency.shape
    # Normalize by max-entropy; single-label cases score 0 by convention
    score_under = 0.0
    if n_ref_labels > 1:
        score_under = 1. - true_given_est / np.log2(n_ref_labels)
    score_over = 0.0
    if n_est_labels > 1:
        score_over = 1. - pred_given_ref / np.log2(n_est_labels)
    return score_over, score_under, util.f_measure(score_over, score_under,
                                                   beta=beta)
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    ref_labels : list, shape=(n,)
        Reference segment labels, in the same format.
    est_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals, in the same format.
    est_labels : list, shape=(m,)
        Estimated segment labels, in the same format.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Pad/crop both annotations so they span the same [0, T] range,
    # using the reference end time as T
    ref_intervals, ref_labels = util.adjust_intervals(
        ref_intervals, labels=ref_labels, t_min=0.0)
    est_intervals, est_labels = util.adjust_intervals(
        est_intervals, labels=est_labels, t_min=0.0,
        t_max=ref_intervals.max())
    scores = collections.OrderedDict()
    # Boundary detection at the two conventional windows; the window value
    # is forced here, overriding any user-supplied kwarg
    for window, tag in ((.5, '0.5'), (3.0, '3.0')):
        kwargs['window'] = window
        (scores['Precision@' + tag],
         scores['Recall@' + tag],
         scores['F-measure@' + tag]) = util.filter_kwargs(
             detection, ref_intervals, est_intervals, **kwargs)
    # Median boundary deviations, both directions
    (scores['Ref-to-est deviation'],
     scores['Est-to-ref deviation']) = util.filter_kwargs(
         deviation, ref_intervals, est_intervals, **kwargs)
    # Pairwise frame-clustering agreement
    (scores['Pairwise Precision'],
     scores['Pairwise Recall'],
     scores['Pairwise F-measure']) = util.filter_kwargs(
         pairwise, ref_intervals, ref_labels,
         est_intervals, est_labels, **kwargs)
    # Rand indices, raw and chance-adjusted
    scores['Rand Index'] = util.filter_kwargs(
        rand_index, ref_intervals, ref_labels,
        est_intervals, est_labels, **kwargs)
    scores['Adjusted Rand Index'] = util.filter_kwargs(
        ari, ref_intervals, ref_labels,
        est_intervals, est_labels, **kwargs)
    # Mutual information family
    (scores['Mutual Information'],
     scores['Adjusted Mutual Information'],
     scores['Normalized Mutual Information']) = util.filter_kwargs(
         mutual_information, ref_intervals, ref_labels,
         est_intervals, est_labels, **kwargs)
    # Normalized conditional entropy scores
    (scores['NCE Over'],
     scores['NCE Under'],
     scores['NCE F-measure']) = util.filter_kwargs(
         nce, ref_intervals, ref_labels,
         est_intervals, est_labels, **kwargs)
    return scores
| faroit/mir_eval | mir_eval/segment.py | Python | mit | 43,678 | [
"Brian"
] | c991e30d9c802501d301d7aca0f48bae8d2d60b5fb4607dd35f7ef79eaccd647 |
from util import *
from InputParameters import InputParameters
class Tester(object):
    """Base class for TestHarness testers.

    Holds the specification parameters for a single test in ``self.specs``
    and implements the common "should this test run?" filtering logic in
    ``checkRunnableBase``.  Derived classes customize behavior by
    overriding ``checkRunnable``, ``prepare``, ``getCommand`` and
    ``processResults``.
    """

    @staticmethod
    def getValidParams():
        """Return the InputParameters object describing every option a
        test specification may set for this Tester."""
        params = InputParameters()

        # Common Options
        params.addRequiredParam('type', "The type of test of Tester to create for this test.")
        params.addParam('max_time', 300, "The maximum in seconds that the test will be allowed to run.")
        params.addParam('min_reported_time', "The minimum time elapsed before a test is reported as taking to long to run.")
        params.addParam('skip', "Provide a reason this test will be skipped.")
        params.addParam('deleted', "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")
        params.addParam('heavy', False, "Set to True if this test should only be run when the '--heavy' option is used.")
        params.addParam('group', [], "A list of groups for which this test belongs.")
        params.addParam('prereq', [], "A list of prereq tests that need to run successfully before launching this test.")
        params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")

        # Test Filters
        params.addParam('platform', ['ALL'], "A list of platforms for which this test will run on. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
        params.addParam('compiler', ['ALL'], "A list of compilers for which this test is valid on. ('ALL', 'GCC', 'INTEL', 'CLANG')")
        params.addParam('petsc_version', ['ALL'], "A list of petsc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
        params.addParam('mesh_mode', ['ALL'], "A list of mesh modes for which this test will run ('PARALLEL', 'SERIAL')")
        params.addParam('method', ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
        params.addParam('library_mode', ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
        params.addParam('dtk', ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_ids', ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
        params.addParam('recover', True, "A test that runs with '--recover' mode enabled")
        params.addParam('vtk', ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")

        return params

    def __init__(self, name, params):
        # ``name`` is accepted for interface compatibility with the
        # TestHarness; only the specification parameters are stored.
        self.specs = params

    def checkRunnable(self, options):
        """Tell the harness whether or not this test should run.

        Returns a tuple ``(bool, reason)``.  If a reason is provided it
        will be printed and counted as skipped.  If the reason is left
        blank, the test will not be printed at all nor counted in the
        test totals.  Override this (not ``checkRunnableBase``) in
        derived classes.
        """
        return (True, '')

    def prepare(self):
        """Called prior to running the test.  Can be used to clean up
        files or do other preparations before the tester is run."""
        return

    def getCommand(self, options):
        """Return the executable command that will be executed by the
        tester."""
        return

    def processResults(self, moose_dir, retcode, options, output):
        """Process the results of running the test.  Any post-test
        processing should happen in this method."""
        return

    def checkRunnableBase(self, options, checks):
        """Base-level runnable check common to all Testers.

        DO NOT override this method in derived classes; override
        ``checkRunnable`` instead.  Returns ``(bool, reason)`` with the
        same semantics as ``checkRunnable``.

        NOTE: the original used the Python-2-only ``<>`` operator; it has
        been replaced by the equivalent ``!=`` (valid on Python 2 and 3).
        """
        reason = ''

        # Are we running only tests in a specific group?
        if options.group != 'ALL' and options.group not in self.specs['group']:
            return (False, reason)
        if options.not_group != '' and options.not_group in self.specs['group']:
            return (False, reason)

        # Store regexp for matching tests if --re is used
        if options.reg_exp:
            match_regexp = re.compile(options.reg_exp)

        # If --re then only test matching regexp. Needs to run before other SKIP methods
        if options.reg_exp and not match_regexp.search(self.specs['test_name']):
            return (False, reason)

        # Check for deleted tests
        if self.specs.isValid('deleted'):
            if options.extra_info:
                # We might want to trim the string so it formats nicely
                if len(self.specs['deleted']) >= TERM_COLS - (len(self.specs['test_name'])+21):
                    test_reason = (self.specs['deleted'])[:(TERM_COLS - (len(self.specs['test_name'])+24))] + '...'
                else:
                    test_reason = self.specs['deleted']
                reason = 'deleted (' + test_reason + ')'
            return (False, reason)

        # Check for skipped tests
        if self.specs.type('skip') is bool and self.specs['skip']:
            # Backwards compatible (no reason)
            return (False, 'skipped')
        elif self.specs.type('skip') is not bool and self.specs.isValid('skip'):
            skip_message = self.specs['skip']
            # We might want to trim the string so it formats nicely
            if len(skip_message) >= TERM_COLS - (len(self.specs['test_name'])+21):
                test_reason = (skip_message)[:(TERM_COLS - (len(self.specs['test_name'])+24))] + '...'
            else:
                test_reason = skip_message
            reason = 'skipped (' + test_reason + ')'
            return (False, reason)
        # If were testing for SCALE_REFINE, then only run tests with a SCALE_REFINE set
        elif options.store_time and self.specs['scale_refine'] == 0:
            return (False, reason)
        # If we're testing with valgrind, then skip tests that require parallel or threads or don't meet the valgrind setting
        elif options.valgrind_mode != '':
            if self.specs['valgrind'] == 'NONE':
                reason = 'skipped (Valgrind==NONE)'
            elif self.specs['valgrind'] == 'HEAVY' and options.valgrind_mode == 'NORMAL':
                reason = 'skipped (Valgrind==HEAVY)'
            elif self.specs['min_parallel'] > 1 or self.specs['min_threads'] > 1:
                reason = 'skipped (Valgrind requires serial)'
            if reason != '':
                return (False, reason)
        # If we're running in recover mode skip tests that have recover = false
        elif options.enable_recover and self.specs['recover'] == False:
            reason = 'skipped (NO RECOVER)'
            return (False, reason)

        # Check for PETSc versions
        (petsc_status, logic_reason, petsc_version) = checkPetscVersion(checks, self.specs)
        if not petsc_status:
            reason = 'skipped (using PETSc ' + str(checks['petsc_version']) + ' REQ: ' + logic_reason + ' ' + petsc_version + ')'
            return (False, reason)

        # PETSc is being explicitly checked above
        local_checks = ['platform', 'compiler', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids', 'vtk']
        for check in local_checks:
            test_platforms = set()
            for x in self.specs[check]:
                test_platforms.add(x.upper())
            if not len(test_platforms.intersection(checks[check])):
                reason = 'skipped (' + re.sub(r'\[|\]', '', check).upper() + '!=' + ', '.join(self.specs[check]) + ')'
                return (False, reason)

        # Check for heavy tests
        if options.all_tests or options.heavy_tests:
            if not self.specs['heavy'] and options.heavy_tests:
                reason = 'skipped (NOT HEAVY)'
                return (False, reason)
        elif self.specs['heavy']:
            reason = 'skipped (HEAVY)'
            return (False, reason)

        # Check for positive scale refine values when using store timing options
        # NOTE(review): this duplicates the store_time/scale_refine check in the
        # elif chain above; kept for behavioral compatibility.
        if self.specs['scale_refine'] == 0 and options.store_time:
            return (False, reason)

        # Check the return values of the derived classes
        return self.checkRunnable(options)
| amburan/moose | framework/scripts/TestHarness/testers/Tester.py | Python | lgpl-2.1 | 7,826 | [
"VTK"
] | 4d91ee5c4463c8927c28f9c4c88fdd6c3aca25142d7a91083eb7a8e68f113215 |
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])

# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
# ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# ``int`` is the documented replacement and yields the same integer dtype.
y1 = (rng.normal(size=(10)) > 0).astype(int)

# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    """GaussianNB on the 6-point toy set: fit/predict round-trip,
    proba/log-proba consistency, and class-mismatch error in partial_fit."""
    # Gaussian Naive Bayes classification.
    # This checks that GaussianNB implements fit and predict and returns
    # correct values for a simple toy dataset.
    clf = GaussianNB()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)

    # log(predict_proba) must agree with predict_log_proba to 8 decimals.
    y_pred_proba = clf.predict_proba(X)
    y_pred_log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

    # Test whether label mismatch between target y and classes raises
    # an Error
    # FIXME Remove this test once the more general partial_fit tests are merged
    assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
    """GaussianNB must estimate class priors from class frequencies."""
    model = GaussianNB()
    model.fit(X, y)
    # Each of the two classes appears 3 times among 6 samples -> 0.5 each.
    assert_array_almost_equal(np.array([3, 3]) / 6.0,
                              model.class_prior_, 8)

    # On random data the learned priors must still sum to one.
    model.fit(X1, y1)
    assert_array_almost_equal(model.class_prior_.sum(), 1)
def test_discrete_prior():
    """Discrete NB variants learn log-priors from class frequencies."""
    # Three classes, two samples each, out of six total.
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls()
        model.fit(X2, y2)
        assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
                                  model.class_log_prior_, 8)
def test_mnnb():
    """MultinomialNB on dense and sparse input: fit/predict round-trip,
    proba/log-proba consistency, and equivalence of fit with partial_fit."""
    # Test Multinomial Naive Bayes classification.
    # This checks that MultinomialNB implements fit and predict and returns
    # correct values for a simple toy dataset.
    for X in [X2, scipy.sparse.csr_matrix(X2)]:
        # Check the ability to predict the learning set.
        clf = MultinomialNB()
        # Negative feature values are invalid for a multinomial model.
        assert_raises(ValueError, clf.fit, -X, y2)
        y_pred = clf.fit(X, y2).predict(X)
        assert_array_equal(y_pred, y2)

        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        y_pred_proba = clf.predict_proba(X)
        y_pred_log_proba = clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

        # Check that incremental fitting yields the same results
        clf2 = MultinomialNB()
        clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
        clf2.partial_fit(X[2:5], y2[2:5])
        clf2.partial_fit(X[5:], y2[5:])

        y_pred2 = clf2.predict(X)
        assert_array_equal(y_pred2, y2)

        y_pred_proba2 = clf2.predict_proba(X)
        y_pred_log_proba2 = clf2.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
        assert_array_almost_equal(y_pred_proba2, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)

        # Partial fit on the whole data at once should be the same as fit too
        clf3 = MultinomialNB()
        clf3.partial_fit(X, y2, classes=np.unique(y2))

        y_pred3 = clf3.predict(X)
        assert_array_equal(y_pred3, y2)
        y_pred_proba3 = clf3.predict_proba(X)
        y_pred_log_proba3 = clf3.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
        assert_array_almost_equal(y_pred_proba3, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    """Helper: for one discrete NB class ``cls``, verify that fit and
    partial_fit accumulate the same class and feature counts, whether the
    data arrives at once or in successive calls."""
    clf1 = cls()
    clf1.fit([[0, 1], [1, 0]], [0, 1])

    # partial_fit with all the data at once must match fit.
    clf2 = cls()
    clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
    assert_array_equal(clf1.class_count_, clf2.class_count_)
    assert_array_equal(clf1.feature_count_, clf2.feature_count_)

    # ... and so must two successive partial_fit calls.
    clf3 = cls()
    clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
    clf3.partial_fit([[1, 0]], [1])
    assert_array_equal(clf1.class_count_, clf3.class_count_)
    assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
    """Yield the fit/partial_fit equivalence check for each discrete NB
    class (nose-style generator test)."""
    for cls in [MultinomialNB, BernoulliNB]:
        yield check_partial_fit, cls
def test_gnb_partial_fit():
    """GaussianNB: one-shot fit must match partial_fit, whether the data
    is given all at once or split into two halves."""
    clf = GaussianNB().fit(X, y)
    clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
    assert_array_almost_equal(clf.theta_, clf_pf.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)

    # Feed the even- and odd-indexed samples in two separate calls.
    clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
    clf_pf2.partial_fit(X[1::2], y[1::2])
    assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
    """NB estimators survive a pickle round-trip with identical
    predictions, including estimators trained via partial_fit."""
    # Test picklability of discrete naive Bayes classifiers
    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        clf = cls().fit(X2, y2)
        y_pred = clf.predict(X2)

        store = BytesIO()
        pickle.dump(clf, store)

        clf = pickle.load(BytesIO(store.getvalue()))
        assert_array_equal(y_pred, clf.predict(X2))

        if cls is not GaussianNB:
            # TODO re-enable me when partial_fit is implemented for GaussianNB
            # Test pickling of estimator trained with partial_fit
            clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
            clf2.partial_fit(X2[3:], y2[3:])
            store = BytesIO()
            pickle.dump(clf2, store)
            clf2 = pickle.load(BytesIO(store.getvalue()))
            assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
    """fit and predict must reject inputs with inconsistent shapes."""
    for estimator_cls in (BernoulliNB, MultinomialNB, GaussianNB):
        # Mismatched number of samples between X and y at fit time.
        assert_raises(ValueError, estimator_cls().fit, X2, y2[:-1])

        # Mismatched feature count between fit and predict time.
        fitted = estimator_cls().fit(X2, y2)
        assert_raises(ValueError, fitted.predict, X2[:, :-1])
def test_input_check_partial_fit():
    """partial_fit input validation for the discrete NB classes."""
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency
        assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
                      classes=np.unique(y2))

        # classes is required for first call to partial fit
        assert_raises(ValueError, cls().partial_fit, X2, y2)

        # check consistency of consecutive classes values
        clf = cls()
        clf.partial_fit(X2, y2, classes=np.unique(y2))
        assert_raises(ValueError, clf.partial_fit, X2, y2,
                      classes=np.arange(42))

        # check consistency of input shape for partial_fit
        assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)

        # check consistency of input shape for predict
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
    """Probability outputs of the discrete NB classes: correct shapes for
    binary and multiclass targets, rows and priors summing to one."""
    # Test discrete NB classes' probability scores
    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]

    # test binary case (1-d output)
    y = [0, 0, 2]  # 2 is regression test for binary case, 02e673
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict(X[-1]), 2)
        assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
        # Each probability row must sum to one.
        assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
                                  np.array([1., 1.]), 6)

    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
        assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
        assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
        # Class priors (and the intercept_ alias) must also sum to one.
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    """With fit_prior=False and no class_prior, the prior must be uniform
    regardless of class frequencies in the training data."""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls()
        model.set_params(fit_prior=False)
        # Class 0 appears twice, class 1 once -- prior must still be 50/50.
        model.fit([[0], [0], [1]], [0, 0, 1])
        assert_array_equal(np.exp(model.class_log_prior_),
                           np.array([.5, .5]))
def test_discretenb_provide_prior():
    """An explicitly provided class_prior is used verbatim and its length
    is validated against the number of classes."""
    # Test whether discrete NB classes use provided prior
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls(class_prior=[0.5, 0.5])
        clf.fit([[0], [0], [1]], [0, 0, 1])
        prior = np.exp(clf.class_log_prior_)
        assert_array_equal(prior, np.array([.5, .5]))

        # Inconsistent number of classes with prior
        assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
        assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
                      classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
    """fit and chained partial_fit calls must learn the same class
    log-prior, with or without an explicit class_prior (iris split)."""
    # Test whether discrete NB classes use provided prior
    # when using partial_fit
    iris = load_iris()
    iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=415)

    for cls in [BernoulliNB, MultinomialNB]:
        for prior in [None, [0.3, 0.3, 0.4]]:
            clf_full = cls(class_prior=prior)
            clf_full.fit(iris.data, iris.target)
            clf_partial = cls(class_prior=prior)
            clf_partial.partial_fit(iris_data1, iris_target1,
                                    classes=[0, 1, 2])
            clf_partial.partial_fit(iris_data2, iris_target2)
            assert_array_almost_equal(clf_full.class_log_prior_,
                                      clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
    """Yield the sample_weight check for each discrete NB class
    (nose-style generator test)."""
    for cls in [BernoulliNB, MultinomialNB]:
        yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
    """Helper: check that ``sample_weight`` is honored by both fit and
    partial_fit for a three-class problem on one discrete NB class
    ``cls``.  The up-weighted class-1 sample flips the prediction of the
    second (ambiguous) row from 0 to 1."""
    X = [
        [0, 0, 1],
        [0, 1, 1],
        [0, 1, 1],
        [1, 0, 0],
    ]
    y = [0, 0, 1, 2]
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``float`` produces the same float64 dtype.
    sample_weight = np.array([1, 1, 2, 2], dtype=float)
    sample_weight /= sample_weight.sum()
    clf = cls().fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])

    # Check sample weight using the partial_fit method
    clf = cls()
    clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
                    sample_weight=sample_weight[:2])
    clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
    clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    """MultinomialNB: sample_weight shifts both the prediction and the
    learned class prior."""
    clf = MultinomialNB()
    clf.fit([[1, 2], [1, 2], [1, 0]],
            [0, 0, 1],
            sample_weight=[1, 1, 4])
    assert_array_equal(clf.predict([1, 0]), [1])
    # Class 1 carries weight 4 of a total of 6 -> prior of 2/3.
    positive_prior = np.exp(clf.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    """coef_ and intercept_ follow the linear-model shape convention for
    binary problems (non-regression test for issue #2127)."""
    data = [[1, 0, 0], [1, 1, 1]]
    target = [1, 2]  # binary classification
    for model in (MultinomialNB(), BernoulliNB()):
        model.fit(data, target)
        assert_equal(model.coef_.shape, (1, 3))
        assert_equal(model.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    """Regression floors for NB cross-validated accuracy on digits."""
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    # Binary sub-problem: only the (hard to separate) digits 3 and 8.
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]

    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)

    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)

    # Bernoulli NB (features binarized by thresholding at 4)
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)

    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)

    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)

    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
    """BernoulliNB feature log-probabilities match the closed form from
    Manning, Raghavan & Schuetze when alpha=1.0 (issue #4268)."""
    # Test for issue #4268.
    # Tests that the feature log prob value computed by BernoulliNB when
    # alpha=1.0 is equal to the expression given in Manning, Raghavan,
    # and Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])

    # Fit Bernoulli NB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Manually form the (log) numerator and denominator that
    # constitute P(feature presence | class)
    num = np.log(clf.feature_count_ + 1.0)
    denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T

    # Check manual estimate matches
    assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
    """BernoulliNB with alpha=1.0 reproduces the worked example from
    Manning, Raghavan & Schuetze's IR book: priors, feature probabilities,
    and predictive probabilities for the China/Japan toy corpus."""
    # Tests that BernoulliNB when alpha=1.0 gives the same values as
    # those given for the toy example in Manning, Raghavan, and
    # Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)

    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])

    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])

    # Fit BernoulliBN w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Check the class prior is correct
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)

    # Check the feature probabilities are correct
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)

    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([0, 1, 1, 0, 0, 1])

    # Check the predictive probabilities are correct
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/tests/test_naive_bayes.py | Python | mit | 16,259 | [
"Gaussian"
] | 0c98ad5abac8962f86873e97e5d8949d24866e0562ec52ce5a3d1f749cf9590c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.