text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
import coverage.plugin
import hy.importer
import ast
import os.path
class HyCoveragePlugin(coverage.plugin.CoveragePlugin):
    """Entry point for coverage.py support of Hy source files.

    Decides which files this plugin understands (all ``*.hy`` modules)
    and acts as an abstract factory, producing the tracer and reporter
    objects coverage.py uses for those files.
    """

    def file_tracer(self, filename):
        # Only Hy sources are handled by this plugin; any other file is
        # left to coverage.py's default machinery (return None).
        return HyFileTracer(filename) if filename.endswith('.hy') else None

    def file_reporter(self, filename):
        return HyFileReporter(filename)

    def sys_info(self):
        # Nothing plugin-specific to report for ``coverage debug sys``.
        return []
class HyFileTracer(coverage.plugin.FileTracer):
    """Runtime metadata holder for a single Hy source file."""

    def __init__(self, filename):
        # Path of the .hy file this tracer is responsible for.
        self.filename = filename

    def source_filename(self):
        # coverage.py asks which file the traced frames belong to; for
        # Hy that is simply the .hy file itself.
        return self.filename
class ASTLineCollector(ast.NodeVisitor):
    """AST visitor that records the line number of every visited node.

    Each node carrying a ``lineno`` attribute contributes that line to
    the set supplied at construction time; traversal is delegated to
    :class:`ast.NodeVisitor`.
    """

    def __init__(self, line_set):
        # The caller owns this set; we only ever add to it.
        self.line_set = line_set

    def generic_visit(self, node):
        lineno = getattr(node, 'lineno', None)
        if lineno is not None:
            self.line_set.add(lineno)
        super(ASTLineCollector, self).generic_visit(node)
class HyFileReporter(coverage.plugin.FileReporter):
    """Analysis-phase information about hittable lines in a Hy module.

    coverage.py uses this during reporting to learn which lines of a
    ``.hy`` file are executable so it can compute coverage percentages.
    """

    def __init__(self, filename):
        super(HyFileReporter, self).__init__(filename)

    def lines(self):
        """Return the set of executable line numbers in this file.

        The Hy source is compiled to a Python AST and every source line
        owning at least one AST node is considered hittable.
        """
        # Named `tree` (not `ast`) so the module-level `ast` import is
        # not shadowed, as it previously was.
        tree = hy.importer.import_file_to_ast(
            self.filename,
            self.module_name_from_filename(self.filename))
        hittable_lines = set()
        line_collector = ASTLineCollector(hittable_lines)
        line_collector.visit(tree)
        return hittable_lines

    def module_name_from_filename(self, filename):
        """Derive a dotted module name from a file path.

        Walks upward for as long as each parent directory is a package
        (contains ``__init__.py`` or ``__init__.hy``), prefixing each
        package name onto the module name.
        """
        directory_name, basename = os.path.split(filename)
        module_name = os.path.splitext(basename)[0]
        while True:
            if (not os.path.exists(os.path.join(directory_name, "__init__.py"))
                    and not os.path.exists(
                        os.path.join(directory_name, "__init__.hy"))):
                return module_name
            directory_name, package = os.path.split(directory_name)
            if not package:
                # Reached the filesystem root: os.path.split("/") keeps
                # returning "/", so bail out instead of looping forever.
                return module_name
            module_name = package + '.' + module_name
| timmartin/hy-coverage | hy_coverage_plugin/hy_coverage.py | Python | mit | 2,500 | [
"VisIt"
] | fa51aae832940bcee7839a2cb800bf24bad106e066ab88a1e7e867933db7a7f8 |
# -*- coding: utf-8 -*-
"""
Shortest path algorithms for weighed graphs.
"""
__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>',
'Loïc Séguin-C. <loicseguin@gmail.com>',
'Dan Schult <dschult@colgate.edu>'])
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['dijkstra_path',
'dijkstra_path_length',
'bidirectional_dijkstra',
'single_source_dijkstra',
'single_source_dijkstra_path',
'single_source_dijkstra_path_length',
'all_pairs_dijkstra_path',
'all_pairs_dijkstra_path_length',
'dijkstra_predecessor_and_distance',
'bellman_ford',
'negative_edge_cycle',
'goldberg_radzik']
from collections import deque
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.utils import generate_unique_node
def dijkstra_path(G, source, target, weight='weight'):
    """Return the shortest path from source to target in a weighted graph G.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node.
    target : node
        Ending node.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    path : list
        List of nodes in a shortest path.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical; distances are calculated
    as sums of weighted edges traversed.

    See Also
    --------
    bidirectional_dijkstra()
    """
    # Run a single-source search pruned at `target`; only the path
    # dictionary is needed here, the lengths are discarded.
    (length, path) = single_source_dijkstra(G, source, target=target,
                                            weight=weight)
    if target not in path:
        raise nx.NetworkXNoPath(
            "node %s not reachable from %s" % (source, target))
    return path[target]
def dijkstra_path_length(G, source, target, weight='weight'):
    """Return the shortest path length from source to target
    in a weighted graph.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path.
    target : node label
        Ending node for path.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    length : number
        Shortest path length.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical; distances are calculated
    as sums of weighted edges traversed.

    See Also
    --------
    bidirectional_dijkstra()
    """
    # Compute distances to every reachable node, then look up `target`.
    lengths = single_source_dijkstra_path_length(G, source, weight=weight)
    if target not in lengths:
        raise nx.NetworkXNoPath(
            "node %s not reachable from %s" % (source, target))
    return lengths[target]
def single_source_dijkstra_path(G, source, cutoff=None, weight='weight'):
    """Compute shortest paths from source to all other reachable nodes.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node for path.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    cutoff : integer or float, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    paths : dictionary
        Dictionary of shortest paths keyed by target node.

    Notes
    -----
    Edge weight attributes must be numerical; distances are calculated
    as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra()
    """
    # single_source_dijkstra returns (lengths, paths); only the second
    # element is of interest here.
    return single_source_dijkstra(G, source, cutoff=cutoff, weight=weight)[1]
def single_source_dijkstra_path_length(G, source, cutoff=None,
                                       weight='weight'):
    """Compute shortest path lengths from source to all reachable nodes.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    cutoff : integer or float, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    length : dictionary
        Dictionary of shortest path lengths keyed by target node.

    Notes
    -----
    Edge weight attributes must be numerical; distances are calculated
    as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra()
    """
    dist = {}              # finalized shortest distances
    seen = {source: 0}     # best tentative distance per discovered node
    order = count()        # heap tie-breaker: insertion order
    heap = [(0, next(order), source)]
    while heap:
        d, _, v = heappop(heap)
        if v in dist:
            continue       # v was already finalized via a shorter entry
        dist[v] = d
        # For multigraphs, collapse parallel edges to the cheapest one;
        # plain graphs expose the edge-data dict directly.
        if G.is_multigraph():
            edata = [(w, {weight: min(dd.get(weight, 1)
                                      for dd in keydata.values())})
                     for w, keydata in G[v].items()]
        else:
            edata = G[v].items()
        for w, edgedata in edata:
            vw_dist = d + edgedata.get(weight, 1)
            if cutoff is not None and vw_dist > cutoff:
                continue
            if w in dist:
                if vw_dist < dist[w]:
                    raise ValueError('Contradictory paths found:',
                                     'negative weights?')
            elif w not in seen or vw_dist < seen[w]:
                seen[w] = vw_dist
                heappush(heap, (vw_dist, next(order), w))
    return dist
def single_source_dijkstra(G, source, target=None, cutoff=None, weight='weight'):
    """Compute shortest paths and lengths in a weighted graph G.

    Uses Dijkstra's algorithm for shortest paths.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path.
    target : node label, optional
        Ending node for path; the search stops once it is finalized.
    cutoff : integer or float, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    distance, path : dictionaries
        Tuple of two dictionaries keyed by node: distance from the
        source, and the path from the source to that node.

    Notes
    -----
    Edge weight attributes must be numerical; distances are calculated
    as sums of weighted edges traversed.  This algorithm is not
    guaranteed to work if edge weights are negative or are floating
    point numbers (overflows and roundoff errors can cause problems).

    See Also
    --------
    single_source_dijkstra_path()
    single_source_dijkstra_path_length()
    """
    if source == target:
        return ({source: 0}, {source: [source]})
    dist = {}                       # finalized distances
    paths = {source: [source]}      # best known path per discovered node
    seen = {source: 0}              # tentative distances
    order = count()                 # heap tie-breaker
    heap = [(0, next(order), source)]
    while heap:
        d, _, v = heappop(heap)
        if v in dist:
            continue                # already finalized
        dist[v] = d
        if v == target:
            break                   # no need to settle further nodes
        # Collapse a multigraph's parallel edges to the cheapest one.
        if G.is_multigraph():
            edata = [(w, {weight: min(dd.get(weight, 1)
                                      for dd in keydata.values())})
                     for w, keydata in G[v].items()]
        else:
            edata = G[v].items()
        for w, edgedata in edata:
            vw_dist = d + edgedata.get(weight, 1)
            if cutoff is not None and vw_dist > cutoff:
                continue
            if w in dist:
                if vw_dist < dist[w]:
                    raise ValueError('Contradictory paths found:',
                                     'negative weights?')
            elif w not in seen or vw_dist < seen[w]:
                # Relaxation: record the improved distance and extend
                # the path that produced it.
                seen[w] = vw_dist
                heappush(heap, (vw_dist, next(order), w))
                paths[w] = paths[v] + [w]
    return (dist, paths)
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight='weight'):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    cutoff : integer or float, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    pred, distance : dictionaries
        ``pred`` maps each reachable node to the list of its
        predecessors on shortest paths; ``distance`` maps each node to
        its shortest distance from the source.

    Notes
    -----
    Edge weight attributes must be numerical.  A node's predecessor
    list contains more than one element exactly when several distinct
    shortest paths reach it.
    """
    dist = {}                  # finalized distances
    pred = {source: []}        # shortest-path predecessor lists
    seen = {source: 0}         # tentative distances
    order = count()            # heap tie-breaker
    heap = [(0, next(order), source)]
    while heap:
        d, _, v = heappop(heap)
        if v in dist:
            continue           # already finalized
        dist[v] = d
        # Collapse a multigraph's parallel edges to the cheapest one.
        if G.is_multigraph():
            edata = [(w, {weight: min(dd.get(weight, 1)
                                      for dd in keydata.values())})
                     for w, keydata in G[v].items()]
        else:
            edata = G[v].items()
        for w, edgedata in edata:
            vw_dist = d + edgedata.get(weight, 1)
            if cutoff is not None and vw_dist > cutoff:
                continue
            if w in dist:
                if vw_dist < dist[w]:
                    raise ValueError('Contradictory paths found:',
                                     'negative weights?')
            elif w not in seen or vw_dist < seen[w]:
                # Strictly better path: w's predecessors reset to [v].
                seen[w] = vw_dist
                heappush(heap, (vw_dist, next(order), w))
                pred[w] = [v]
            elif vw_dist == seen[w]:
                # Equally short path: v is an additional predecessor.
                pred[w].append(v)
    return (pred, dist)
def all_pairs_dijkstra_path_length(G, cutoff=None, weight='weight'):
    """Compute shortest path lengths between all nodes in a weighted graph.

    Parameters
    ----------
    G : NetworkX graph
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    cutoff : integer or float, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    distance : dictionary
        Dictionary, keyed by source and target, of shortest path
        lengths.

    Notes
    -----
    Edge weight attributes must be numerical.  The returned dictionary
    only has keys for reachable node pairs.
    """
    # One single-source run per node.
    return dict(
        (n, single_source_dijkstra_path_length(G, n, cutoff=cutoff,
                                               weight=weight))
        for n in G)
def all_pairs_dijkstra_path(G, cutoff=None, weight='weight'):
    """Compute shortest paths between all nodes in a weighted graph.

    Parameters
    ----------
    G : NetworkX graph
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    cutoff : integer or float, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    distance : dictionary
        Dictionary, keyed by source and target, of shortest paths.

    Notes
    -----
    Edge weight attributes must be numerical.

    See Also
    --------
    floyd_warshall()
    """
    # One single-source run per node.
    return dict(
        (n, single_source_dijkstra_path(G, n, cutoff=cutoff, weight=weight))
        for n in G)
def bellman_ford(G, source, weight='weight'):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    The algorithm has a running time of O(mn) where n is the number of
    nodes and m is the number of edges.  It is slower than Dijkstra but
    can handle negative edge weights.

    Parameters
    ----------
    G : NetworkX graph
        The algorithm works for all types of graphs, including directed
        graphs and multigraphs.

    source: node label
        Starting node for path

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    pred, dist : dictionaries
        Returns two dictionaries keyed by node to predecessor in the
        path and to the distance from the source respectively.

    Raises
    ------
    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle.  Note: any negative weight edge in an
        undirected graph is a negative cost cycle.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.path_graph(5, create_using = nx.DiGraph())
    >>> pred, dist = nx.bellman_ford(G, 0)
    >>> sorted(pred.items())
    [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

    >>> from nose.tools import assert_raises
    >>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
    >>> G[1][2]['weight'] = -7
    >>> assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, 0)

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    The dictionaries returned only have keys for nodes reachable from
    the source.

    In the case where the (di)graph is not connected, if a component
    not containing the source contains a negative cost (di)cycle, it
    will not be detected.
    """
    if source not in G:
        raise KeyError("Node %s is not found in the graph" % source)
    # A negative self-loop is by itself a negative cycle; catch it up
    # front since the relaxation loop below never relaxes u against u.
    for u, v, attr in G.selfloop_edges(data=True):
        if attr.get(weight, 1) < 0:
            raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    dist = {source: 0}
    pred = {source: None}
    if len(G) == 1:
        return pred, dist
    # The effective weight of (u, v) in a multigraph is the cheapest of
    # the parallel edges between them; missing weights default to 1.
    if G.is_multigraph():
        def get_weight(edge_dict):
            return min(eattr.get(weight, 1) for eattr in edge_dict.values())
    else:
        def get_weight(edge_dict):
            return edge_dict.get(weight, 1)
    # Relax along out-edges for digraphs, along all incident edges for
    # undirected graphs (G.adj is symmetric).
    if G.is_directed():
        G_succ = G.succ
    else:
        G_succ = G.adj
    inf = float('inf')
    n = len(G)
    count = {}              # times each node has been (re)queued
    q = deque([source])     # FIFO work queue (SPFA-style Bellman-Ford)
    in_q = set([source])    # membership mirror of q for O(1) lookups
    while q:
        u = q.popleft()
        in_q.remove(u)
        # Skip relaxations if the predecessor of u is in the queue.
        if pred[u] not in in_q:
            dist_u = dist[u]
            for v, e in G_succ[u].items():
                dist_v = dist_u + get_weight(e)
                if dist_v < dist.get(v, inf):
                    if v not in in_q:
                        q.append(v)
                        in_q.add(v)
                        # On a graph without reachable negative cycles a
                        # node can be enqueued at most n-1 times; the
                        # n-th enqueue proves a negative cycle.
                        count_v = count.get(v, 0) + 1
                        if count_v == n:
                            raise nx.NetworkXUnbounded(
                                "Negative cost cycle detected.")
                        count[v] = count_v
                    dist[v] = dist_v
                    pred[v] = u
    return pred, dist
def goldberg_radzik(G, source, weight='weight'):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    The algorithm has a running time of O(mn) where n is the number of
    nodes and m is the number of edges.  It is slower than Dijkstra but
    can handle negative edge weights.

    Parameters
    ----------
    G : NetworkX graph
        The algorithm works for all types of graphs, including directed
        graphs and multigraphs.

    source: node label
        Starting node for path

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    pred, dist : dictionaries
        Returns two dictionaries keyed by node to predecessor in the
        path and to the distance from the source respectively.

    Raises
    ------
    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle.  Note: any negative weight edge in an
        undirected graph is a negative cost cycle.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.path_graph(5, create_using = nx.DiGraph())
    >>> pred, dist = nx.goldberg_radzik(G, 0)
    >>> sorted(pred.items())
    [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

    >>> from nose.tools import assert_raises
    >>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
    >>> G[1][2]['weight'] = -7
    >>> assert_raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 0)

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    The dictionaries returned only have keys for nodes reachable from
    the source.

    In the case where the (di)graph is not connected, if a component
    not containing the source contains a negative cost (di)cycle, it
    will not be detected.
    """
    if source not in G:
        raise KeyError("Node %s is not found in the graph" % source)
    # A negative self-loop is itself a negative cycle; detect it early.
    for u, v, attr in G.selfloop_edges(data=True):
        if attr.get(weight, 1) < 0:
            raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    if len(G) == 1:
        return {source: None}, {source: 0}
    # Effective weight of (u, v) in a multigraph is the cheapest of the
    # parallel edges; missing weights default to 1.
    if G.is_multigraph():
        def get_weight(edge_dict):
            return min(attr.get(weight, 1) for attr in edge_dict.values())
    else:
        def get_weight(edge_dict):
            return edge_dict.get(weight, 1)
    if G.is_directed():
        G_succ = G.succ
    else:
        G_succ = G.adj
    inf = float('inf')
    d = dict((u, inf) for u in G)   # tentative distances, mutated below
    d[source] = 0
    pred = {source: None}

    def topo_sort(relabeled):
        """Topologically sort nodes relabeled in the previous round and detect
        negative cycles.
        """
        # List of nodes to scan in this round.  Denoted by A in Goldberg and
        # Radzik's paper.
        to_scan = []
        # In the DFS in the loop below, neg_count records for each node the
        # number of edges of negative reduced costs on the path from a DFS root
        # to the node in the DFS forest.  The reduced cost of an edge (u, v) is
        # defined as d[u] + weight[u][v] - d[v].
        #
        # neg_count also doubles as the DFS visit marker array.
        neg_count = {}
        for u in relabeled:
            # Skip visited nodes.
            if u in neg_count:
                continue
            d_u = d[u]
            # Skip nodes without out-edges of negative reduced costs.
            if all(d_u + get_weight(e) >= d[v] for v, e in G_succ[u].items()):
                continue
            # Nonrecursive DFS that inserts nodes reachable from u via edges of
            # nonpositive reduced costs into to_scan in (reverse) topological
            # order.
            stack = [(u, iter(G_succ[u].items()))]
            in_stack = set([u])
            neg_count[u] = 0
            while stack:
                u, it = stack[-1]
                try:
                    v, e = next(it)
                except StopIteration:
                    # u's out-edges exhausted: emit it (reverse topo order).
                    to_scan.append(u)
                    stack.pop()
                    in_stack.remove(u)
                    continue
                t = d[u] + get_weight(e)
                d_v = d[v]
                if t <= d_v:
                    # Edge (u, v) has nonpositive reduced cost; strict
                    # inequality means a negative reduced cost.
                    is_neg = t < d_v
                    d[v] = t
                    pred[v] = u
                    if v not in neg_count:
                        neg_count[v] = neg_count[u] + int(is_neg)
                        stack.append((v, iter(G_succ[v].items())))
                        in_stack.add(v)
                    elif (v in in_stack and
                          neg_count[u] + int(is_neg) > neg_count[v]):
                        # (u, v) is a back edge, and the cycle formed by the
                        # path v to u and (u, v) contains at least one edge of
                        # negative reduced cost.  The cycle must be of negative
                        # cost.
                        raise nx.NetworkXUnbounded(
                            'Negative cost cycle detected.')
        to_scan.reverse()
        return to_scan

    def relax(to_scan):
        """Relax out-edges of relabeled nodes.
        """
        relabeled = set()
        # Scan nodes in to_scan in topological order and relax incident
        # out-edges.  Add the relabled nodes to labeled.
        for u in to_scan:
            d_u = d[u]
            for v, e in G_succ[u].items():
                w_e = get_weight(e)
                if d_u + w_e < d[v]:
                    d[v] = d_u + w_e
                    pred[v] = u
                    relabeled.add(v)
        return relabeled

    # Set of nodes relabled in the last round of scan operations.  Denoted by B
    # in Goldberg and Radzik's paper.
    relabeled = set([source])
    while relabeled:
        to_scan = topo_sort(relabeled)
        relabeled = relax(to_scan)
    # Restrict distances to nodes actually reached (pred's key set).
    d = dict((u, d[u]) for u in pred)
    return pred, d
def negative_edge_cycle(G, weight='weight'):
    """Return True if there exists a negative edge cycle anywhere in G.

    Parameters
    ----------
    G : NetworkX graph
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    negative_cycle : bool
        True if a negative edge cycle exists, otherwise False.

    Notes
    -----
    A temporary node connected to every node of G is added so that a
    single :func:`bellman_ford` run from it reaches every component;
    the extra node is removed again before returning.
    """
    probe = generate_unique_node()
    # Materialize the edge list first: G must not be mutated while its
    # node view is being iterated.
    G.add_edges_from([(probe, n) for n in G])
    try:
        try:
            bellman_ford(G, probe, weight)
        except nx.NetworkXUnbounded:
            # Bellman-Ford aborting is exactly the negative-cycle signal.
            return True
        return False
    finally:
        # Always restore G, whatever bellman_ford did.
        G.remove_node(probe)
def bidirectional_dijkstra(G, source, target, weight='weight'):
    """Dijkstra's algorithm for shortest paths using bidirectional search.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node.
    target : node
        Ending node.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    length, path : number, list
        Shortest path length and the corresponding list of nodes.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical.

    Ordinary Dijkstra expands nodes in a sphere-like manner from the
    source; the radius of this sphere eventually equals the shortest
    path length.  Bidirectional Dijkstra expands nodes from both the
    source and the target, making two spheres of half this radius, so
    in practice it is much more than twice as fast.

    This algorithm is not guaranteed to work if edge weights are
    negative or are floating point numbers (overflows and roundoff
    errors can cause problems).

    See Also
    --------
    shortest_path
    shortest_path_length
    """
    if source == target:
        return (0, [source])
    push = heappush
    pop = heappop
    # Index 0 is the forward search (from source); index 1 is the
    # backward search (from target).
    dists = [{}, {}]                                  # final distances
    paths = [{source: [source]}, {target: [target]}]  # discovered paths
    fringe = [[], []]               # heaps of (distance, tiebreak, node)
    seen = [{source: 0}, {target: 0}]                 # tentative distances
    c = count()
    push(fringe[0], (0, next(c), source))
    push(fringe[1], (0, next(c), target))
    # Neighbor iterators per direction: successors going forward,
    # predecessors going backward (identical for undirected graphs).
    if G.is_directed():
        neighs = [G.successors_iter, G.predecessors_iter]
    else:
        neighs = [G.neighbors_iter, G.neighbors_iter]
    # Best source->target path discovered so far (finaldist is only
    # read once finalpath is non-empty, see the guard below).
    finaldist = None
    finalpath = []
    # NOTE: renamed from `dir`, which shadowed the builtin.
    direction = 1
    while fringe[0] and fringe[1]:
        # Alternate directions: 0 is forward, 1 is backward.
        direction = 1 - direction
        # Extract the closest unfinalized node on this side.
        (dist, _, v) = pop(fringe[direction])
        if v in dists[direction]:
            # Shortest path to v has already been found.
            continue
        dists[direction][v] = dist  # equal to seen[direction][v]
        if v in dists[1 - direction]:
            # v finalized from both sides: the best candidate path
            # recorded while the frontiers overlapped is optimal.
            return (finaldist, finalpath)
        for w in neighs[direction](v):
            # Edge weight must respect orientation; for a multigraph
            # take the cheapest of the parallel edges.
            if direction == 0:  # forward: edge v -> w
                if G.is_multigraph():
                    minweight = min(dd.get(weight, 1)
                                    for dd in G[v][w].values())
                else:
                    minweight = G[v][w].get(weight, 1)
            else:  # backward: edge w -> v
                if G.is_multigraph():
                    minweight = min(dd.get(weight, 1)
                                    for dd in G[w][v].values())
                else:
                    minweight = G[w][v].get(weight, 1)
            vw_length = dists[direction][v] + minweight
            if w in dists[direction]:
                if vw_length < dists[direction][w]:
                    raise ValueError(
                        "Contradictory paths found: negative weights?")
            elif w not in seen[direction] or vw_length < seen[direction][w]:
                # Relaxation: record the improved distance and path.
                seen[direction][w] = vw_length
                push(fringe[direction], (vw_length, next(c), w))
                paths[direction][w] = paths[direction][v] + [w]
                if w in seen[0] and w in seen[1]:
                    # w lies on a candidate source->target path; keep
                    # the best such path seen so far.
                    totaldist = seen[0][w] + seen[1][w]
                    if finalpath == [] or finaldist > totaldist:
                        finaldist = totaldist
                        revpath = paths[1][w][:]
                        revpath.reverse()
                        finalpath = paths[0][w] + revpath[1:]
    raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
| ReganBell/QReview | networkx/algorithms/shortest_paths/weighted.py | Python | bsd-3-clause | 30,414 | [
"VisIt"
] | a9e05399c9840be57184766af38a48a7e84d8ec871dc046db644d30fdced80d9 |
# coding: utf-8
"""
Integration Tests
~~~~~~~~~~~~~~~~~
"""
import sys
import re
import argparse
import iocapture
import mock
import pytest
import argh
from argh.exceptions import AssemblingError
from .base import DebugArghParser, get_usage_string, run, CmdResult as R
@pytest.mark.xfail(reason='TODO')
def test_guessing_integration():
    "guessing is used in dispatching"
    assert 0


def test_set_default_command_integration():
    # A function with only keyword arguments becomes the parser's
    # default command; each kwarg turns into an optional CLI argument.
    def cmd(foo=1):
        return foo

    p = DebugArghParser()
    p.set_default_command(cmd)

    assert run(p, '') == R(out='1\n', err='')
    assert run(p, '--foo 2') == R(out='2\n', err='')
    # --help makes argparse exit; run(..., exit=True) then yields None.
    assert run(p, '--help', exit=True) == None


def test_set_default_command_integration_merging():
    # Metadata from @arg decorators is merged into the signature that
    # argh infers (here: the custom help text for --foo).
    @argh.arg('--foo', help='bar')
    def cmd(foo=1):
        return foo

    p = DebugArghParser()
    p.set_default_command(cmd)

    assert run(p, '') == R(out='1\n', err='')
    assert run(p, '--foo 2') == R(out='2\n', err='')
    assert 'bar' in p.format_help()


#
# Function can be added to parser as is
#
def test_simple_function_no_args():
    # A zero-argument generator command: its yielded values become the
    # command's output lines.
    def cmd():
        yield 1

    p = DebugArghParser()
    p.set_default_command(cmd)

    assert run(p, '') == R(out='1\n', err='')


def test_simple_function_positional():
    # A parameter without a default maps to a required positional.
    def cmd(x):
        yield x

    p = DebugArghParser()
    p.set_default_command(cmd)

    # argparse's wording for a missing required argument changed in 3.3.
    if sys.version_info < (3,3):
        msg = 'too few arguments'
    else:
        msg = 'the following arguments are required: x'
    assert run(p, '', exit=True) == msg
    assert run(p, 'foo') == R(out='foo\n', err='')


def test_simple_function_defaults():
    # A parameter with a default maps to an option, not a positional.
    def cmd(x='foo'):
        yield x

    p = DebugArghParser()
    p.set_default_command(cmd)

    assert run(p, '') == R(out='foo\n', err='')
    assert run(p, 'bar', exit=True) == 'unrecognized arguments: bar'
    assert run(p, '--x bar') == R(out='bar\n', err='')


def test_simple_function_varargs():

    def func(*file_paths):
        # `paths` is the single positional argument with nargs='*'
        yield ', '.join(file_paths)

    p = DebugArghParser()
    p.set_default_command(func)

    assert run(p, '') == R(out='\n', err='')
    assert run(p, 'foo') == R(out='foo\n', err='')
    assert run(p, 'foo bar') == R(out='foo, bar\n', err='')


def test_simple_function_kwargs():

    @argh.arg('foo')
    @argh.arg('--bar')
    def cmd(**kwargs):
        # `kwargs` contain all arguments not fitting ArgSpec.args and .varargs.
        # if ArgSpec.keywords in None, all @arg()'s will have to fit ArgSpec.args
        for k in sorted(kwargs):
            yield '{0}: {1}'.format(k, kwargs[k])

    p = DebugArghParser()
    p.set_default_command(cmd)

    # argparse's wording for a missing required argument changed in 3.3.
    if sys.version_info < (3,3):
        msg = 'too few arguments'
    else:
        msg = 'the following arguments are required: foo'
    assert run(p, '', exit=True) == msg
    assert run(p, 'hello') == R(out='bar: None\nfoo: hello\n', err='')
    assert run(p, '--bar 123', exit=True) == msg
    assert run(p, 'hello --bar 123') == R(out='bar: 123\nfoo: hello\n', err='')


@pytest.mark.xfail
def test_simple_function_multiple():
    raise NotImplementedError


@pytest.mark.xfail
def test_simple_function_nested():
    raise NotImplementedError


@pytest.mark.xfail
def test_class_method_as_command():
    raise NotImplementedError
def test_all_specs_in_one():
    # Exercises every argument source at once: positionals from @arg,
    # a kwarg with default, *args and **kwargs in one signature.
    @argh.arg('foo')
    @argh.arg('--bar')
    @argh.arg('fox')
    @argh.arg('--baz')
    def cmd(foo, bar=1, *args, **kwargs):
        yield 'foo: {0}'.format(foo)
        yield 'bar: {0}'.format(bar)
        yield '*args: {0}'.format(args)
        for k in sorted(kwargs):
            yield '** {0}: {1}'.format(k, kwargs[k])

    p = DebugArghParser()
    p.set_default_command(cmd)

    # 1) bar=1 is treated as --bar so positionals from @arg that go **kwargs
    #    will still have higher priority than bar.
    # 2) *args, a positional with nargs='*', sits between two required
    #    positionals (foo and fox), so it gets nothing.
    assert run(p, 'one two') == R(out=
        'foo: one\n'
        'bar: 1\n'
        '*args: ()\n'
        '** baz: None\n'
        '** fox: two\n', err='')

    # two required positionals (foo and fox) get an argument each and one extra
    # is left; therefore the middle one is given to *args.
    assert run(p, 'one two three') == R(out=
        'foo: one\n'
        'bar: 1\n'
        "*args: ('two',)\n"
        '** baz: None\n'
        '** fox: three\n', err='')

    # two required positionals (foo and fox) get an argument each and two extra
    # are left; both are given to *args (it's greedy).
    assert run(p, 'one two three four') == R(out=
        'foo: one\n'
        'bar: 1\n'
        "*args: ('two', 'three')\n"
        '** baz: None\n'
        '** fox: four\n', err='')
def test_arg_merged():
    """ @arg merges into function signature.
    """
    @argh.arg('my', help='a moose once bit my sister')
    @argh.arg('-b', '--brain', help='i am made entirely of wood')
    def gumby(my, brain=None):
        return my, brain, 'hurts'

    p = DebugArghParser('PROG')
    p.set_default_command(gumby)
    # Help text from both decorators must survive the merge.
    help_msg = p.format_help()

    assert 'a moose once bit my sister' in help_msg
    assert 'i am made entirely of wood' in help_msg


def test_arg_mismatch_positional():
    """ An `@arg('positional')` must match function signature.
    """
    @argh.arg('bogus-argument')
    def confuse_a_cat(vet, funny_things=123):
        return vet, funny_things

    p = DebugArghParser('PROG')
    # Assembling must fail: the decorated name matches no parameter.
    with pytest.raises(AssemblingError) as excinfo:
        p.set_default_command(confuse_a_cat)
    msg = ("confuse_a_cat: argument bogus-argument does not fit "
           "function signature: vet, -f/--funny-things")
    assert msg in str(excinfo.value)


def test_arg_mismatch_flag():
    """ An `@arg('--flag')` must match function signature.
    """
    @argh.arg('--bogus-argument')
    def confuse_a_cat(vet, funny_things=123):
        return vet, funny_things

    p = DebugArghParser('PROG')
    # Assembling must fail: the decorated option matches no parameter.
    with pytest.raises(AssemblingError) as excinfo:
        p.set_default_command(confuse_a_cat)
    msg = ("confuse_a_cat: argument --bogus-argument does not fit "
           "function signature: vet, -f/--funny-things")
    assert msg in str(excinfo.value)


def test_arg_mismatch_positional_vs_flag():
    """ An `@arg('arg')` must match a positional arg in function signature.
    """
    @argh.arg('foo')
    def func(foo=123):
        return foo

    p = DebugArghParser('PROG')
    # foo has a default (optional) but the decorator declares it
    # positional — that contradiction must be reported.
    with pytest.raises(AssemblingError) as excinfo:
        p.set_default_command(func)
    msg = ('func: argument "foo" declared as optional (in function signature)'
           ' and positional (via decorator)')
    assert msg in str(excinfo.value)


def test_arg_mismatch_flag_vs_positional():
    """ An `@arg('--flag')` must match a keyword in function signature.
    """
    @argh.arg('--foo')
    def func(foo):
        return foo

    p = DebugArghParser('PROG')
    # foo is required (positional) but the decorator declares it an
    # option — that contradiction must be reported.
    with pytest.raises(AssemblingError) as excinfo:
        p.set_default_command(func)
    msg = ('func: argument "foo" declared as positional (in function signature)'
           ' and optional (via decorator)')
    assert msg in str(excinfo.value)
class TestErrorWrapping:
    # Tests for argh.wrap_errors: selected exception types raised inside a
    # command are converted to messages on stderr instead of propagating.
    def _get_parrot(self):
        # Helper: builds a command that raises ValueError when --dead is given.
        def parrot(dead=False):
            if dead:
                raise ValueError('this parrot is no more')
            else:
                return 'beautiful plumage'
        return parrot
    def test_error_raised(self):
        # Without wrapping, the exception propagates out of the dispatcher.
        parrot = self._get_parrot()
        p = DebugArghParser()
        p.set_default_command(parrot)
        assert run(p, '') == R('beautiful plumage\n', '')
        with pytest.raises(ValueError) as excinfo:
            run(p, '--dead')
        assert re.match('this parrot is no more', str(excinfo.value))
    def test_error_wrapped(self):
        # With wrap_errors([ValueError]) the error is rendered on stderr
        # as "<ExceptionType>: <message>" and nothing is raised.
        parrot = self._get_parrot()
        wrapped_parrot = argh.wrap_errors([ValueError])(parrot)
        p = DebugArghParser()
        p.set_default_command(wrapped_parrot)
        assert run(p, '') == R('beautiful plumage\n', '')
        assert run(p, '--dead') == R('', 'ValueError: this parrot is no more\n')
    def test_processor(self):
        # A custom `processor` callable reformats the wrapped error message.
        parrot = self._get_parrot()
        wrapped_parrot = argh.wrap_errors([ValueError])(parrot)
        def failure(err):
            return 'ERR: ' + str(err) + '!'
        processed_parrot = argh.wrap_errors(processor=failure)(wrapped_parrot)
        p = argh.ArghParser()
        p.set_default_command(processed_parrot)
        assert run(p, '--dead') == R('', 'ERR: this parrot is no more!\n')
    def test_stderr_vs_stdout(self):
        # Wrapped errors go to stderr; normal results go to stdout.
        @argh.wrap_errors([KeyError])
        def func(key):
            db = {'a': 1}
            return db[key]
        p = argh.ArghParser()
        p.set_default_command(func)
        assert run(p, 'a') == R(out='1\n', err='')
        assert run(p, 'b') == R(out='', err="KeyError: 'b'\n")
def test_argv():
    """When `run` is called with None instead of an argument string, the
    command line is taken from ``sys.argv[1:]``.
    """
    def echo(text):
        return 'you said {0}'.format(text)
    p = DebugArghParser()
    p.add_commands([echo])
    _argv = sys.argv
    sys.argv = sys.argv[:1] + ['echo', 'hi there']
    # Restore sys.argv even if the assertion fails; the original version
    # only restored it after the assert, leaking the patched value into
    # any subsequently-run tests on failure.
    try:
        assert run(p, None) == R('you said hi there\n', '')
    finally:
        sys.argv = _argv
def test_commands_not_defined():
    # A parser with no commands prints its usage string instead of failing.
    p = DebugArghParser()
    assert run(p, '', {'raw_output': True}).out == p.format_usage()
    assert run(p, '').out == p.format_usage() + '\n'
    # Any actual arguments are necessarily unrecognized.
    assert 'unrecognized arguments' in run(p, 'foo', exit=True)
    assert 'unrecognized arguments' in run(p, '--foo', exit=True)
def test_command_not_chosen():
    # Commands exist but none is named on the command line.
    def cmd(args):
        return 1
    p = DebugArghParser()
    p.add_commands([cmd])
    if sys.version_info < (3,3):
        # Python before 3.3 exits with an error
        assert 'too few arguments' in run(p, '', exit=True)
    else:
        # Python since 3.3 returns a help message and doesn't exit
        assert 'usage:' in run(p, '').out
def test_invalid_choice():
    # A name that is not a registered command triggers argparse's
    # "invalid choice" error, both at root and inside a namespace.
    def cmd(args):
        return 1
    # root level command
    p = DebugArghParser()
    p.add_commands([cmd])
    assert run(p, 'bar', exit=True).startswith('invalid choice')
    if sys.version_info < (3,3):
        # Python before 3.3 exits with a less informative error
        assert 'too few arguments' in run(p, '--bar', exit=True)
    else:
        # Python since 3.3 exits with a more informative error
        assert run(p, '--bar', exit=True) == 'unrecognized arguments: --bar'
    # nested command
    p = DebugArghParser()
    p.add_commands([cmd], namespace='nest')
    assert run(p, 'nest bar', exit=True).startswith('invalid choice')
    if sys.version_info < (3,3):
        # Python before 3.3 exits with a less informative error
        assert 'too few arguments' in run(p, 'nest --bar', exit=True)
    else:
        # Python since 3.3 exits with a more informative error
        assert run(p, 'nest --bar', exit=True) == 'unrecognized arguments: --bar'
def test_unrecognized_arguments():
    # Extra args must be rejected for both single- and multi-command parsers.
    def cmd():
        return 1
    # single-command parser
    p = DebugArghParser()
    p.set_default_command(cmd)
    assert run(p, '--bar', exit=True) == 'unrecognized arguments: --bar'
    assert run(p, 'bar', exit=True) == 'unrecognized arguments: bar'
    # multi-command parser
    p = DebugArghParser()
    p.add_commands([cmd])
    assert run(p, 'cmd --bar', exit=True) == 'unrecognized arguments: --bar'
    assert run(p, 'cmd bar', exit=True) == 'unrecognized arguments: bar'
def test_echo():
    "A simple command is resolved to a function."
    def echo(text):
        return 'you said {0}'.format(text)
    p = DebugArghParser()
    p.add_commands([echo])
    assert run(p, 'echo foo') == R(out='you said foo\n', err='')
def test_bool_action():
    "Action `store_true`/`store_false` is inferred from default value."
    def parrot(dead=False):
        return 'this parrot is no more' if dead else 'beautiful plumage'
    p = DebugArghParser()
    p.add_commands([parrot])
    # default False -> flag absent means False, presence means True
    assert run(p, 'parrot').out == 'beautiful plumage\n'
    assert run(p, 'parrot --dead').out == 'this parrot is no more\n'
def test_bare_namespace():
    "A command can be resolved to a function, not a namespace."
    def hello():
        return 'hello world'
    p = DebugArghParser()
    p.add_commands([hello], namespace='greet')
    # without arguments
    if sys.version_info < (3,3):
        # Python before 3.3 exits with an error
        assert run(p, 'greet', exit=True) == 'too few arguments'
    else:
        # Python since 3.3 returns a help message and doesn't exit
        assert 'usage:' in run(p, 'greet', exit=True).out
    # with an argument
    if sys.version_info < (3,3):
        # Python before 3.3 exits with a less informative error
        message = 'too few arguments'
    else:
        # Python since 3.3 exits with a more informative error
        message = 'unrecognized arguments: --name=world'
    assert run(p, 'greet --name=world', exit=True) == message
def test_namespaced_function():
    "A subcommand is resolved to a function."
    def hello(name='world'):
        return 'Hello {0}!'.format(name or 'world')
    def howdy(buddy):
        return 'Howdy {0}?'.format(buddy)
    p = DebugArghParser()
    p.add_commands([hello, howdy], namespace='greet')
    # optional --name flag of `hello`
    assert run(p, 'greet hello').out == 'Hello world!\n'
    assert run(p, 'greet hello --name=John').out == 'Hello John!\n'
    assert run(p, 'greet hello John', exit=True) == 'unrecognized arguments: John'
    # required positional `buddy` of `howdy`
    if sys.version_info < (3,3):
        # Python before 3.3 exits with a less informative error
        message = 'too few arguments'
    else:
        # Python since 3.3 exits with a more informative error
        message = 'the following arguments are required: buddy'
    assert message in run(p, 'greet howdy --name=John', exit=True)
    assert run(p, 'greet howdy John').out == 'Howdy John?\n'
def test_explicit_cmd_name():
    # @argh.named overrides the command name derived from the function name.
    @argh.named('new-name')
    def orig_name():
        return 'ok'
    p = DebugArghParser()
    p.add_commands([orig_name])
    assert run(p, 'orig-name', exit=True).startswith('invalid choice')
    assert run(p, 'new-name').out == 'ok\n'
def test_aliases():
    # @argh.aliases registers extra names for the same command.
    @argh.aliases('alias2', 'alias3')
    def alias1():
        return 'ok'
    p = DebugArghParser()
    p.add_commands([alias1])
    # NOTE(review): this test is silently a no-op when the installed
    # argparse has no alias support (SUPPORTS_ALIASES is False).
    if argh.assembling.SUPPORTS_ALIASES:
        assert run(p, 'alias1').out == 'ok\n'
        assert run(p, 'alias2').out == 'ok\n'
        assert run(p, 'alias3').out == 'ok\n'
def test_help_alias():
    # `help ...` behaves like `--help` at every level.
    p = DebugArghParser()
    # assert the commands don't fail
    assert None == run(p, '--help', exit=True)
    assert None == run(p, 'greet --help', exit=True)
    assert None == run(p, 'greet hello --help', exit=True)
    assert None == run(p, 'help', exit=True)
    assert None == run(p, 'help greet', exit=True)
    assert None == run(p, 'help greet hello', exit=True)
def test_arg_order():
    """Positional arguments are resolved in the order in which they are
    declared in the function signature.
    """
    def cmd(foo, bar):
        return foo, bar
    p = DebugArghParser()
    p.set_default_command(cmd)
    # first CLI token -> foo, second -> bar
    assert run(p, 'foo bar').out == 'foo\nbar\n'
def test_raw_output():
    "If the raw_output flag is set, no extra whitespace is added"
    def cmd(foo, bar):
        return foo, bar
    p = DebugArghParser()
    p.set_default_command(cmd)
    assert run(p, 'foo bar').out == 'foo\nbar\n'
    assert run(p, 'foo bar', {'raw_output': True}).out == 'foobar'
def test_output_file():
    # output_file=None must behave the same as the default (stdout).
    def cmd():
        return 'Hello world!'
    p = DebugArghParser()
    p.set_default_command(cmd)
    assert run(p, '').out == 'Hello world!\n'
    assert run(p, '', {'output_file': None}).out == 'Hello world!\n'
def test_command_error():
    # CommandError is caught by the dispatcher and reported on stderr;
    # output yielded before the error is still written to stdout.
    def whiner_plain():
        raise argh.CommandError('I feel depressed.')
    def whiner_iterable():
        yield 'Hello...'
        raise argh.CommandError('I feel depressed.')
    p = DebugArghParser()
    p.add_commands([whiner_plain, whiner_iterable])
    assert run(p, 'whiner-plain') == R(
        out='', err='CommandError: I feel depressed.\n')
    assert run(p, 'whiner-iterable') == R(
        out='Hello...\n', err='CommandError: I feel depressed.\n')
def test_custom_namespace():
    # With @expects_obj the function receives the argparse namespace
    # object; a pre-populated namespace can be passed to dispatching.
    @argh.expects_obj
    def cmd(args):
        return args.custom_value
    p = DebugArghParser()
    p.set_default_command(cmd)
    namespace = argparse.Namespace()
    namespace.custom_value = 'foo'
    assert run(p, '', {'namespace': namespace}).out == 'foo\n'
def test_normalized_keys():
    """ Underscores in function args are converted to dashes and back.
    """
    def cmd(a_b):
        return a_b
    p = DebugArghParser()
    p.set_default_command(cmd)
    assert run(p, 'hello').out == 'hello\n'
@mock.patch('argh.assembling.COMPLETION_ENABLED', True)
def test_custom_argument_completer():
    "Issue #33: Enable custom per-argument shell completion"
    @argh.arg('foo', completer='STUB')
    def func(foo):
        pass
    p = argh.ArghParser()
    p.set_default_command(func)
    # The completer object is attached verbatim to the argparse action.
    assert p._actions[-1].completer == 'STUB'
def test_class_members():
    "Issue #34: class members as commands"
    class Controller:
        var = 123
        def instance_meth(self, value):
            return value, self.var
        @classmethod
        def class_meth(cls, value):
            return value, cls.var
        @staticmethod
        def static_meth(value):
            return value, 'w00t?'
        @staticmethod
        def static_meth2(value):
            return value, 'huh!'
    controller = Controller()
    p = DebugArghParser()
    # Bound, class and static methods can all be registered as commands;
    # static methods work both via an instance and via the class.
    p.add_commands([
        controller.instance_meth,
        controller.class_meth,
        controller.static_meth,
        Controller.static_meth2,
    ])
    assert run(p, 'instance-meth foo').out == 'foo\n123\n'
    assert run(p, 'class-meth foo').out == 'foo\n123\n'
    assert run(p, 'static-meth foo').out == 'foo\nw00t?\n'
    assert run(p, 'static-meth2 foo').out == 'foo\nhuh!\n'
def test_kwonlyargs():
    "Correct dispatch in presence of keyword-only arguments"
    # Keyword-only arguments are a syntax error on Python 2, so the
    # command is compiled via exec() and the whole test skipped on 2.x.
    if sys.version_info < (3,0):
        pytest.skip('unsupported configuration')
    ns = {}
    exec("""def cmd(*args, foo='1', bar, baz='3', **kwargs):
        return ' '.join(args), foo, bar, baz, len(kwargs)
    """, None, ns)
    cmd = ns['cmd']
    p = DebugArghParser()
    p.set_default_command(cmd)
    # kw-only args with defaults become optional flags; `bar` (no
    # default) becomes a required flag.
    assert (run(p, '--baz=done test this --bar=do').out ==
            'test this\n1\ndo\ndone\n0\n')
    if sys.version_info < (3,3):
        message = 'argument --bar is required'
    else:
        message = 'the following arguments are required: --bar'
    assert run(p, 'test --foo=do', exit=True) == message
def test_default_arg_values_in_help():
    "Argument defaults should appear in the help message implicitly"
    @argh.arg('name', default='Basil')
    @argh.arg('--task', default='hang the Moose')
    @argh.arg('--note', help='why is it a remarkable animal?')
    def remind(name, task=None, reason='there are creatures living in it',
               note='it can speak English'):
        return "Oh what is it now, can't you leave me in peace..."
    p = DebugArghParser()
    p.set_default_command(remind)
    # Render the help text once instead of once per assertion
    # (consistent with test_arg_merged; format_help re-renders each call).
    help_msg = p.format_help()
    # defaults declared via @arg
    assert 'Basil' in help_msg
    assert 'Moose' in help_msg
    # default declared only in the function signature
    assert 'creatures' in help_msg
    # explicit help message is not obscured by the implicit one...
    assert 'remarkable animal' in help_msg
    # ...but is still present
    assert 'it can speak' in help_msg
def test_default_arg_values_in_help__regression():
    "Empty string as default value → empty help string → broken argparse"
    def foo(bar=''):
        return bar
    p = DebugArghParser()
    p.set_default_command(foo)
    # doesn't break
    p.format_help()
    # now check details: the empty default is shown as its repr ('')
    assert "-b BAR, --bar BAR ''" in p.format_help()
    # note the empty str repr           ^^^
def test_help_formatting_is_preserved():
    "Formatting of docstrings should not be messed up in help messages"
    def func():
        """
        Sample function.
        Parameters:
            foo: float
                An example argument.
            bar: bool
                Another argument.
        """
        return 'hello'
    p = DebugArghParser()
    p.set_default_command(func)
    # The docstring must appear verbatim (indentation intact) in --help.
    assert func.__doc__ in p.format_help()
def test_prog():
    "Program name propagates from sys.argv[0]"
    def cmd(foo=1):
        return foo
    p = DebugArghParser()
    p.add_commands([cmd])
    usage = get_usage_string()
    with iocapture.capture() as captured:
        assert run(p, '-h', exit=True) == None
    assert captured.stdout.startswith(usage)
def test_unknown_args():
    def cmd(foo=1):
        return foo
    p = DebugArghParser()
    p.set_default_command(cmd)
    usage = get_usage_string('[-f FOO]')
    assert run(p, '--foo 1') == R(out='1\n', err='')
    assert run(p, '--bar 1', exit=True) == 'unrecognized arguments: --bar 1'
    # With skip_unknown_args=True dispatching does not abort; the parser
    # emits its usage string instead.
    assert run(p, '--bar 1', exit=False,
               kwargs={'skip_unknown_args': True}) == R(out=usage, err='')
| neithere/argh | test/test_integration.py | Python | gpl-3.0 | 21,166 | [
"MOOSE"
] | 72cb057c17b038fd93a38e27c20dd6bbd04fe43f91cde01fbea1673e56cfdf0c |
# -*- coding: utf-8 -*-
"""
pygments.lexers.boa
~~~~~~~~~~~~~~~~~~~~
Lexers for the Boa language.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import *
# Public API of this module.
__all__ = ['BoaLexer']
# NOTE(review): line_re is not referenced anywhere in this module —
# possibly left over from a lexer template; confirm before removing.
line_re = re.compile('.*?\n')
class BoaLexer(RegexLexer):
    """
    Lexer for the `Boa <http://boa.cs.iastate.edu/docs/>`_ language.
    .. versionadded:: 2.4
    """
    name = 'Boa'
    aliases = ['boa']
    filenames = ['*.boa']
    # Control-flow / declaration keywords. \b on both sides so that
    # identifiers merely containing a keyword are not matched.
    reserved = words(
        ('input', 'output', 'of', 'weight', 'before', 'after', 'stop', 'ifall', 'foreach', 'exists', 'function',
         'break', 'switch', 'case', 'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
        suffix=r'\b', prefix=r'\b')
    # Built-in type and aggregator names.
    keywords = words(
        ('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum', 'top', 'string', 'int', 'bool', 'float',
         'time', 'false', 'true', 'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
    # Domain-specific AST / repository types provided by the Boa runtime.
    classes = words(
        ('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind', 'ChangedFile', 'FileKind', 'ASTRoot',
         'Namespace', 'Declaration', 'Type', 'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
         'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility', 'TypeKind', 'Person', 'ChangeKind'),
        suffix=r'\b', prefix=r'\b')
    operators = ('->', ':=', ':', '=', '<<', '!', '++', '||', '&&', '+', '-', '*', ">", "<")
    string_sep = ('`', '\"')
    # Built-ins are recognized together with the opening parenthesis
    # (suffix=r'\(') to distinguish calls from plain identifiers.
    built_in_functions = words(
        (
            # Array functions
            'new', 'sort',
            # Date & Time functions
            'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now', 'addday', 'addmonth', 'addweek', 'addyear',
            'dayofmonth', 'dayofweek', 'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
            'trunctomonth', 'trunctosecond', 'trunctoyear',
            # Map functions
            'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
            # Math functions
            'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'cos', 'cosh', 'exp', 'floor',
            'highbit', 'isfinite', 'isinf', 'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow', 'rand',
            'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
            # Other functions
            'def', 'hash', 'len',
            # Set functions
            'add', 'contains', 'remove',
            # String functions
            'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex', 'split', 'splitall', 'splitn',
            'strfind', 'strreplace', 'strrfind', 'substring', 'trim', 'uppercase',
            # Type Conversion functions
            'bool', 'float', 'int', 'string', 'time',
            # Domain-Specific functions
            'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind', 'isliteral',
        ),
        prefix=r'\b',
        suffix=r'\(')
    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            # NOTE(review): '.' does not match newlines here, so this only
            # catches /* ... */ comments on a single line — confirm intended.
            (r'/\*.*?\*/', Comment.Multiline),
            (reserved, Keyword.Reserved),
            (built_in_functions, Name.Function),
            (keywords, Keyword.Type),
            # NOTE(review): Name.Classes and String.Delimeter (sic) are
            # non-standard token names; pygments creates them on the fly,
            # but stock styles assign them no specific colors.
            (classes, Name.Classes),
            (words(operators), Operator),
            (r'[][(),;{}\\.]', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'`(\\\\|\\`|[^`])*`', String),
            (words(string_sep), String.Delimeter),
            (r'[a-zA-Z_]+', Name.Variable),
            (r'[0-9]+', Number.Integer),
            (r'\s+?', Text),  # Whitespace
        ]
    }
| unreal666/outwiker | plugins/source/source/pygments/lexers/boa.py | Python | gpl-3.0 | 3,862 | [
"VisIt"
] | ffedf7339d9f430f69b05e8fd0ff1f2f5784cb6a1507654bfd953f2c05cf9e18 |
#!/usr/bin/python
# GOTM-GUI batch driver (Python 2): simulates a scenario and/or writes a
# result file or HTML report without starting the GUI.
# Import standard Python modules
import os,sys
# Debug info
print 'Python version: %s' % unicode(sys.version_info)
# In order to find our custom data files, make sure that we are in the directory
# containing the executable.
oldworkingdir = os.getcwdu()
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
# Import MatPlotLib to configure key parameters
import matplotlib
# Select the Qt4 backend before any figure is created.
matplotlib.use('Qt4Agg')
#matplotlib.rcParams['numerix'] = 'numpy'
# Now import our custom modules
import xmlstore
import core.common, core.scenario, core.result, core.report
# Without arguments: print the usage text and bail out.
if len(sys.argv)==1:
    print \
"""
=============================================================================
GOTM-GUI batch simulation command line utility
=============================================================================
This utility allows you to process the scenario and result files of GOTM-GUI
automatically, without ever showing the GUI. It may be used to simulate a
scenario and generate a result file and/or report, or to generate a report
from an existing result.
-----------------------------------------------------------------------------
Syntax (arguments between square brackets are optional):
batch <path> [-writeresult <resultfile> [-cdf]] [-writereport <reportdir>]
      [-gotmoutput]
-----------------------------------------------------------------------------
<path>
    Path to an existing GOTM-GUI scenario or result. This can be a
    .gotmscenario or .gotmresult file created with GOTM-GUI, or a directory
    that contains the extracted contents of one of these files (.gotmscenario
    and .gotmresult are actually ZIP archives). If a scenario is specified,
    it will first be simulated; if a result is specified the existing data
    will be used.
-writeresult <resultfile>
    Specifies that a result file must be written to the path <resultfile>
-cdf
    Specifies that the result must be written in NetCDF format, rather than
    the GOTM-GUI .gotmresult format. Only used if -writeresult <resultfile>
    is specified.
-writereport <reportdir>
    Specifies that a report must be written to the directory <reportdir>.
    If this directory does not exist, it will be created.
-gotmoutput
    Specifies that the original output of GOTM must be shown, rather than
    percentages and time remaining. Only used if a path to a scenario is
    specified as first argument.
=============================================================================
"""
    sys.exit(1)
# Parse command line arguments
cdf = core.common.getSwitchArgument('-cdf')
gotmoutput = core.common.getSwitchArgument('-gotmoutput')
resultpath = core.common.getNamedArgument('-writeresult')
reportpath = core.common.getNamedArgument('-writereport')
# The input path is resolved relative to the directory the user ran us
# from, not the script directory we chdir'ed into above.
path = os.path.normpath(os.path.join(oldworkingdir, sys.argv[1]))
del sys.argv[1]
# Warn for remaining (i.e., unused) command line arguments.
if len(sys.argv)>1:
    print '\n'
    for arg in sys.argv[1:]:
        print 'WARNING: command line argument "%s" is unknown and will be ignored.' % arg
    print 'Run "batch" without arguments to see a list of accepted arguments.\n'
container = None
try:
    # Open specified path as data container.
    container = xmlstore.datatypes.DataContainer.fromPath(path)
    try:
        if core.scenario.Scenario.canBeOpened(container):
            # Try to load scenario.
            scen = core.scenario.Scenario.fromSchemaName(core.scenario.guiscenarioversion)
            scen.loadAll(container)
            res = None
        elif core.result.Result.canBeOpened(container):
            # Try to load result.
            res = core.result.Result()
            res.load(container)
            scen = res.scenario.addref()
        else:
            raise Exception('"%s" does not contain a scenario or result.' % path)
    finally:
        container.release()
except Exception,e:
    print 'Cannot open "%s". Error: %s' % (path,e)
    sys.exit(1)
# Callback for simulation progress notifications.
def printprogress(progress,remaining):
    print '%5.1f %% done, %.0f seconds remaining...' % (progress*100,remaining)
# Simulate (only when a scenario was given; a loaded result is used as-is).
if res is None:
    if gotmoutput:
        progcallback = None
    else:
        progcallback = printprogress
    import core.simulator
    res = core.simulator.simulate(scen,progresscallback=progcallback,redirect=not gotmoutput)
    if res.returncode==0:
        print 'Simulation completed successfully.'
    elif res.returncode==1:
        print 'Simulation failed. Error: %s.\n\nGOTM output:\n%s' % (res.errormessage,res.stderr)
    elif res.returncode==2:
        print 'Simulation was cancelled by user.'
    else:
        assert False, 'GOTM simulator returned unknown code %i.' % res.returncode
# NOTE(review): for a result loaded from file, res.returncode is assumed
# to exist and be 0 on success — confirm against core.result.Result.
if res.returncode==0:
    # Write result to file, if requested.
    if resultpath is not None:
        resultpath = os.path.normpath(os.path.join(oldworkingdir, resultpath))
        if cdf:
            print 'Writing NetCDF result to "%s".' % resultpath
            res.saveNetCDF(resultpath)
        else:
            print 'Writing result to "%s".' % resultpath
            res.save(resultpath)
    # Generate report, if requested.
    if reportpath is not None:
        # Callback for report-generation progress notifications.
        def reportprogress(progress,description):
            print '%5.1f %% done, %s' % (progress*100,description)
        reportpath = os.path.normpath(os.path.join(oldworkingdir, reportpath))
        reptemplates = core.report.Report.getTemplates()
        rep = core.report.Report()
        # Use report settings stored within the result (if any)
        rep.store.root.copyFrom(res.store['ReportSettings'])
        # Add all possible output variables
        treestore = res.getVariableTree(os.path.join(core.common.getDataRoot(),'schemas/outputtree.schema'),plottableonly=True)
        selroot = rep.store['Figures/Selection']
        for node in treestore.root.getDescendants():
            if node.canHaveValue() and not node.isHidden():
                ch = selroot.addChild('VariablePath')
                ch.setValue('/'.join(node.location))
        treestore.unlink()
        print 'Creating report in "%s".' % reportpath
        rep.generate(res,reportpath,reptemplates['default'],callback=reportprogress)
        rep.release()
# Capture the exit code before releasing the result object, instead of
# reading res.returncode after res.release() as the original did.
exitcode = res.returncode if res is not None else 1
# Clean-up: release the reference-counted scenario/result objects.
if scen is not None: scen.release()
if res is not None: res.release()
# Restore the working directory saved at startup. The original called
# os.chdir(os.path.dirname(oldworkingdir)) — i.e. it changed into the
# *parent* of the saved directory, contradicting the stated intent of
# resetting the previous working directory.
os.chdir(oldworkingdir)
sys.exit(exitcode)
| BoldingBruggeman/gotm | gui.py/batch.py | Python | gpl-2.0 | 6,457 | [
"NetCDF"
] | 9809f1df7d7b64f14fb81f19bd9ff0a562356a905502525de6c2b27d51622681 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the InvolvementType model and
    # gives issueprek/issueprimary a deterministic default ordering.
    dependencies = [
        ('visit', '0054_auto_20150811_1304'),
    ]
    operations = [
        migrations.CreateModel(
            name='InvolvementType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Stable external identifier, independent of the auto PK.
                ('key', models.UUIDField(default=uuid.uuid4, unique=True)),
                ('name', models.CharField(max_length=255)),
                # Manual sort position; lower values come first (Meta.ordering).
                ('index', models.PositiveSmallIntegerField(default=10)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ('index',),
                'abstract': False,
            },
        ),
        migrations.AlterModelOptions(
            name='issueprek',
            options={'ordering': ('index',)},
        ),
        migrations.AlterModelOptions(
            name='issueprimary',
            options={'ordering': ('index',)},
        ),
    ]
| koebbe/homeworks | visit/migrations/0055_auto_20150815_0213.py | Python | mit | 1,129 | [
"VisIt"
] | a183be4bd0f5af66845b207f1985b23898db7b4e139e52470bb9a8efa7bf38c5 |
#!/usr/bin/env python
from active_worker.task import task
import yaml
import matplotlib
# Select a non-interactive backend before any pyplot import so figures
# can be rendered without a display (the task runs headless).
matplotlib.use('Agg')
import time
import os
import glob
import numpy as np
from task_types import TaskTypes as tt
import helper_functions
@task
def microcircuit_task(configuration_file,
                      simulation_duration,
                      thalamic_input,
                      threads):
    # NOTE: the docstring below is a Task Manifest that is parsed by the
    # task framework — its content and formatting must not be changed.
    '''
        Task Manifest Version: 1
        Full Name: microcircuit_task
        Caption: Cortical microcircuit simulation
        Author: NEST Developers
        Description: |
            Multi-layer microcircuit model of early sensory cortex
            (Potjans, T. C., & Diesmann, M. (2014)
            Cerebral Cortex 24(3):785-806, code available at
            http://www.opensourcebrain.org/projects/potjansdiesmann2014),
            originally implemented in NEST (http://nest-simulator.org).
            PyNN version modified to run as task in the Collaboratory.
            Simulation parameters are defined in microcircuit.yaml, which needs
            to be passed as a configuration file. A template can be downloaded
            from https://github.com/INM-6/UP-Tasks.
            It is possible to provide an empty or partial configuration file.
            For the missing parameters, default values will be used.
            After uploading the YAML file, its content type needs to be changed
            to 'application/vnd.juelich.simulation.config'. Parameters defined
            in the WUI overwrite values defined in the configuration file.
            For running the full model, 8 CPU cores and 15360 MB memory should
            be requested.
        Categories:
            - NEST
        Compatible_queues: ['cscs_viz', 'cscs_bgq', 'epfl_viz']
        Accepts:
            configuration_file:
                type: application/vnd.juelich.simulation.config
                description: YAML file, specifying parameters of the simulation.
                    Point to an empty file to use default parameters.
            simulation_duration:
                type: double
                description: Simulation duration in ms [default=1000].
            thalamic_input:
                type: bool
                description: If True, a transient thalamic input is applied to
                    the network [default=False].
            threads:
                type: long
                description: Number of threads NEST uses [default=1].
                    Needs to be set to the same value as 'CPU cores'.
        Returns:
            res: application/vnd.juelich.bundle.nest.data
    '''
    # load config file provided by user
    user_cfile = microcircuit_task.task.uri.get_file(configuration_file)
    with open(user_cfile, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary tags; the config comes from a user upload.
        user_conf = yaml.load(f)
    # load default config file shipped next to this module
    default_cfile = 'microcircuit.yaml'
    yaml_path = os.path.join(os.path.dirname(__file__), default_cfile)
    with open(yaml_path) as f:
        default_conf = yaml.load(f)
    # create config by merging user and default dicts
    # (user values override defaults; a missing/empty user file yields None)
    conf = default_conf.copy()
    if user_conf is not None:
        conf.update(user_conf)
    # update dict with parameters given in webinterface; these take
    # precedence over those in the configuration file
    conf['simulator_params']['nest']['sim_duration'] = simulation_duration
    conf['simulator_params']['nest']['threads'] = threads
    conf['thalamic_input'] = thalamic_input
    plot_filename = 'spiking_activity.png'
    # create bundle & export bundle, mime type for nest simulation output
    my_bundle_mimetype = "application/vnd.juelich.bundle.nest.data"
    bundle = microcircuit_task.task.uri.build_bundle(my_bundle_mimetype)
    # run the actual simulation; returns a list of (filename, mimetype)
    results = _run_microcircuit(plot_filename, conf)
    # print and return bundle
    print "results = ", results
    for file_name, file_mimetype in results:
        bundle.add_file(src_path=file_name,
                        dst_path=file_name,
                        bundle_path=file_name,
                        mime_type=file_mimetype)
    my_bundle_name = 'microcircuit_model_bundle'
    return bundle.save(my_bundle_name)
def _run_microcircuit(plot_filename, conf):
    """Build and simulate the microcircuit, then collect output files.

    Returns a list of (filename, mimetype) tuples: merged spike/voltage
    files per recording device, optionally covariances.dat and a raster
    plot PNG.
    """
    import plotting
    import logging
    simulator = conf['simulator']
    # we here only need nest as simulator, simulator = 'nest'
    import pyNN.nest as sim
    # prepare simulation
    logging.basicConfig()
    # extract parameters from config file
    master_seed = conf['params_dict']['nest']['master_seed']
    layers = conf['layers']
    pops = conf['pops']
    plot_spiking_activity = conf['plot_spiking_activity']
    raster_t_min = conf['raster_t_min']
    raster_t_max = conf['raster_t_max']
    frac_to_plot = conf['frac_to_plot']
    record_corr = conf['params_dict']['nest']['record_corr']
    tau_max = conf['tau_max']
    # Numbers of neurons from which to record spikes
    n_rec = helper_functions.get_n_rec(conf)
    sim.setup(**conf['simulator_params'][simulator])
    if simulator == 'nest':
        # Seed NEST's global and per-virtual-process RNGs deterministically
        # from master_seed so runs are reproducible for a fixed VP count.
        n_vp = sim.nest.GetKernelStatus('total_num_virtual_procs')
        if sim.rank() == 0:
            print 'n_vp: ', n_vp
            print 'master_seed: ', master_seed
        sim.nest.SetKernelStatus({'print_time': False,
                                  'dict_miss_is_error': False,
                                  'grng_seed': master_seed,
                                  'rng_seeds': range(master_seed + 1,
                                                     master_seed + n_vp + 1),
                                  'data_path': conf['system_params'] \
                                      ['output_path']})
    import network
    # result of export-files
    results = []
    # create network
    start_netw = time.time()
    n = network.Network(sim)
    # contains the GIDs of the spike detectors and voltmeters needed for
    # retrieving filenames later
    device_list = n.setup(sim, conf)
    end_netw = time.time()
    if sim.rank() == 0:
        print 'Creating the network took ', end_netw - start_netw, ' s'
    # simulate
    if sim.rank() == 0:
        print "Simulating..."
    start_sim = time.time()
    sim.run(conf['simulator_params'][simulator]['sim_duration'])
    end_sim = time.time()
    if sim.rank() == 0:
        print 'Simulation took ', end_sim - start_sim, ' s'
    # extract filename from device_list (spikedetector/voltmeter),
    # gid of neuron and thread. merge outputs from all threads
    # into a single file which is then added to the task output.
    for dev in device_list:
        label = sim.nest.GetStatus(dev)[0]['label']
        gid = sim.nest.GetStatus(dev)[0]['global_id']
        # use the file extension to distinguish between spike and voltage
        # output
        extension = sim.nest.GetStatus(dev)[0]['file_extension']
        if extension == 'gdf': # spikes
            data = np.empty((0, 2))
        elif extension == 'dat': # voltages
            data = np.empty((0, 3))
        for thread in xrange(conf['simulator_params']['nest']['threads']):
            filenames = glob.glob(conf['system_params']['output_path']
                                  + '%s-*%d-%d.%s' % (label, gid, thread, extension))
            assert(
                len(filenames) == 1), 'Multiple input files found. Use a clean output directory.'
            data = np.vstack([data, np.loadtxt(filenames[0])])
            # delete original files
            os.remove(filenames[0])
        # sort merged rows by time (column 1)
        order = np.argsort(data[:, 1])
        data = data[order]
        outputfile_name = 'collected_%s-%d.%s' % (label, gid, extension)
        outputfile = open(outputfile_name, 'w')
        # the outputfile should have same format as output from NEST.
        # i.e., [int, float] for spikes and [int, float, float] for voltages,
        # hence we write it line by line and assign the corresponding filetype
        if extension == 'gdf': # spikes
            for line in data:
                outputfile.write('%d\t%.3f\n' % (line[0], line[1]))
            outputfile.close()
            filetype = 'application/vnd.juelich.nest.spike_times'
        elif extension == 'dat': # voltages
            for line in data:
                outputfile.write(
                    '%d\t%.3f\t%.3f\n' % (line[0], line[1], line[2]))
            outputfile.close()
            filetype = 'application/vnd.juelich.nest.analogue_signal'
        res = (outputfile_name, filetype)
        results.append(res)
    if record_corr and simulator == 'nest':
        # Dump pairwise spike-count covariances from the correlomatrix
        # detector on the rank that owns it.
        start_corr = time.time()
        if sim.nest.GetStatus(n.corr_detector, 'local')[0]:
            print 'getting count_covariance on rank ', sim.rank()
            cov_all = sim.nest.GetStatus(
                n.corr_detector, 'count_covariance')[0]
            delta_tau = sim.nest.GetStatus(n.corr_detector, 'delta_tau')[0]
            cov = {}
            # Assemble the full (negative and positive lag) covariance
            # trace for every source/target population pair.
            for target_layer in np.sort(layers.keys()):
                for target_pop in pops:
                    target_index = conf['structure'][target_layer][target_pop]
                    cov[target_index] = {}
                    for source_layer in np.sort(layers.keys()):
                        for source_pop in pops:
                            source_index = conf['structure'][
                                source_layer][source_pop]
                            cov[target_index][source_index] = \
                                np.array(list(
                                    cov_all[target_index][source_index][::-1])
                                    + list(cov_all[source_index][target_index][1:]))
            f = open(conf['system_params'][
                     'output_path'] + '/covariances.dat', 'w')
            print >>f, 'tau_max: ', tau_max
            print >>f, 'delta_tau: ', delta_tau
            print >>f, 'simtime: ', conf['simulator_params'][
                simulator]['sim_duration'], '\n'
            for target_layer in np.sort(layers.keys()):
                for target_pop in pops:
                    target_index = conf['structure'][target_layer][target_pop]
                    for source_layer in np.sort(layers.keys()):
                        for source_pop in pops:
                            source_index = conf['structure'][
                                source_layer][source_pop]
                            print >>f, target_layer, target_pop, '-', source_layer, source_pop
                            print >>f, 'n_events_target: ', sim.nest.GetStatus(
                                n.corr_detector, 'n_events')[0][target_index]
                            print >>f, 'n_events_source: ', sim.nest.GetStatus(
                                n.corr_detector, 'n_events')[0][source_index]
                            for i in xrange(len(cov[target_index][source_index])):
                                print >>f, cov[target_index][source_index][i]
                            print >>f, ''
            f.close()
            # add file covariances.dat into bundle
            res_cov = ('covariances.dat',
                       'text/plain')
            results.append(res_cov)
        end_corr = time.time()
        print "Writing covariances took ", end_corr - start_corr, " s"
    if plot_spiking_activity and sim.rank() == 0:
        plotting.plot_raster_bars(raster_t_min, raster_t_max, n_rec,
                                  frac_to_plot, n.pops,
                                  conf['system_params']['output_path'],
                                  plot_filename, conf)
        res_plot = (plot_filename, 'image/png')
        results.append(res_plot)
    sim.end()
    return results
if __name__ == '__main__':
    # Manual invocation for local testing, outside the task framework.
    configuration_file = 'user_config.yaml' #'microcircuit.yaml'
    simulation_duration = 1000.
    thalamic_input = True
    threads = 4
    filename = tt.URI(
        'application/vnd.juelich.simulation.config', configuration_file)
    result = microcircuit_task(
        filename, simulation_duration, thalamic_input, threads)
    print result
| INM-6/UP-Tasks | NEST/microcircuit_task/microcircuit_task.py | Python | gpl-2.0 | 12,026 | [
"NEURON"
] | fd56b77e063fbc94c8ad8e65d8c14ff7fa9585bebf7f292e03333afa0350bca7 |
# AvsP - an AviSynth editor
# Copyright 2007 Peter Jang
# http://avisynth.nl/users/qwerpoi
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
# http://www.gnu.org/copyleft/gpl.html .
# pyavi - AVI functions in Python
# Dependencies:
# Python (tested with v2.4.2)
# ctypes (tested with v0.9.6) - note that ctypes is included with Python 2.5+
import ctypes
import sys
import os
import codecs
# Filesystem encoding, used as a fallback when writing scripts with
# non-ASCII characters (see MakePreviewScriptFile).
encoding = sys.getfilesystemencoding()
# Define C types and constants
DWORD = ctypes.c_ulong
UINT = ctypes.c_uint
WORD = ctypes.c_ushort
LONG = ctypes.c_long
# NOTE(review): Win32 BYTE is unsigned; ctypes.c_byte is signed.
# ctypes.c_ubyte would match the Windows definition -- confirm before changing.
BYTE = ctypes.c_byte
CHAR = ctypes.c_char
HANDLE = ctypes.c_ulong
NULL = 0
# FOURCC 'vids' (0x73646976 little-endian): video stream type for AVIStream*.
streamtypeVIDEO = DWORD(1935960438)
OF_READ = UINT(0)
BI_RGB = 0
# Win32 CreateFile flags, used by AvsClip._x_SaveFrame.
GENERIC_WRITE = 0x40000000L
CREATE_ALWAYS = 2
FILE_ATTRIBUTE_NORMAL = 0x00000080
# Fall back to an identity translation function when gettext's _ has not
# been installed in builtins (e.g. when this module is used standalone).
try: _
except NameError:
    def _(s): return s
# Define C structures
class RECT(ctypes.Structure):
    """Win32 RECT: a rectangle given by its edge coordinates."""
    _fields_ = [("left", LONG),
                ("top", LONG),
                ("right", LONG),
                ("bottom", LONG)]
class BITMAPINFOHEADER(ctypes.Structure):
    """Win32 BITMAPINFOHEADER: dimensions and pixel format of a DIB."""
    _fields_ = [("biSize", DWORD),
                ("biWidth", LONG),
                ("biHeight", LONG),
                ("biPlanes", WORD),
                ("biBitCount", WORD),
                ("biCompression", DWORD),
                ("biSizeImage", DWORD),
                ("biXPelsPerMeter", LONG),
                ("biYPelsPerMeter", LONG),
                ("biClrUsed", DWORD),
                ("biClrImportant", DWORD)]
class RGBQUAD(ctypes.Structure):
    """Win32 RGBQUAD: one palette entry in blue/green/red/reserved order."""
    _fields_ = [("rgbBlue", BYTE),
                ("rgbGreen", BYTE),
                ("rgbRed", BYTE),
                ("rgbReserved", BYTE)]
class BITMAPINFO(ctypes.Structure):
    """Win32 BITMAPINFO: header plus the first palette entry.

    NOTE(review): Windows declares bmiColors as a variable-length array;
    this single-entry form only matches bitmaps with at most one palette
    color -- confirm if palette bitmaps are ever used.
    """
    _fields_ = [("bmiHeader", BITMAPINFOHEADER),
                ("bmiColors", RGBQUAD)]
class BITMAPFILEHEADER(ctypes.Structure):
    """Win32 BITMAPFILEHEADER: the 14-byte header at the start of a .bmp file."""
    # The on-disk header is byte-packed (Windows declares it with
    # #pragma pack(2)).  Without _pack_, ctypes would insert two alignment
    # bytes after bfType and sizeof() would report 16 instead of 14, so any
    # direct write of this struct would produce a corrupt BMP.
    _pack_ = 2
    _fields_ = [
        ("bfType", WORD),       # magic number, must be 0x4d42 ('BM')
        ("bfSize", DWORD),      # total file size in bytes
        ("bfReserved1", WORD),
        ("bfReserved2", WORD),
        ("bfOffBits", DWORD)]   # offset from file start to the pixel data
class AVISTREAMINFO(ctypes.Structure):
    """Win32 AVISTREAMINFO: metadata for one stream of an AVI file.

    Filled in by AVIStreamInfo(); dwRate/dwScale give the frame rate and
    rcFrame gives the video frame rectangle.
    """
    _fields_ = [("fccType", DWORD),
                ("fccHandler", DWORD),
                ("dwFlags", DWORD),
                ("dwCaps", DWORD),
                ("wPriority", WORD),
                ("wLanguage", WORD),
                ("dwScale", DWORD),
                ("dwRate", DWORD),
                ("dwStart", DWORD),
                ("dwLength", DWORD),
                ("dwInitialFrames", DWORD),
                ("dwSuggestedBufferSize", DWORD),
                ("dwQuality", DWORD),
                ("dwSampleSize", DWORD),
                ("rcFrame", RECT),
                ("dwEditCount", DWORD),
                ("dwFormatChangeCount", DWORD),
                ("szName", CHAR * 64)]
# Define C functions
AVIFileInit = ctypes.windll.avifil32.AVIFileInit
# Older avifil32 builds export only a single AVIStreamOpenFromFile entry
# point; newer ones export explicit ANSI (A) and wide-character (W)
# variants.  Bind both when available so unicode preview filenames can be
# passed to the W version (see AvsClip.__init__).
try:
    AVIStreamOpenFromFileA = ctypes.windll.avifil32.AVIStreamOpenFromFileA
    AVIStreamOpenFromFileW = ctypes.windll.avifil32.AVIStreamOpenFromFileW
except AttributeError:
    AVIStreamOpenFromFileA = ctypes.windll.avifil32.AVIStreamOpenFromFile
AVIStreamInfo = ctypes.windll.avifil32.AVIStreamInfo
AVIStreamReadFormat = ctypes.windll.avifil32.AVIStreamReadFormat
AVIStreamGetFrameOpen = ctypes.windll.avifil32.AVIStreamGetFrameOpen
AVIStreamGetFrameClose = ctypes.windll.avifil32.AVIStreamGetFrameClose
AVIStreamRelease = ctypes.windll.avifil32.AVIStreamRelease
AVIFileRelease = ctypes.windll.avifil32.AVIFileRelease
AVIFileExit = ctypes.windll.avifil32.AVIFileExit
AVIStreamGetFrame = ctypes.windll.avifil32.AVIStreamGetFrame
SetDIBitsToDevice = ctypes.windll.gdi32.SetDIBitsToDevice
CreateFile = ctypes.windll.kernel32.CreateFileA
WriteFile = ctypes.windll.kernel32.WriteFile
CloseHandle = ctypes.windll.kernel32.CloseHandle
DrawDibOpen = ctypes.windll.msvfw32.DrawDibOpen
DrawDibClose = ctypes.windll.msvfw32.DrawDibClose
DrawDibDraw = ctypes.windll.msvfw32.DrawDibDraw
# Shared DrawDib handle, boxed in a one-element list so module-level
# functions can rebind it (set in InitRoutines, released in ExitRoutines).
handleDib = [None]
def InitRoutines():
    """Initialize the AVIFile library and open the shared DrawDib context."""
    AVIFileInit()
    handleDib[0] = DrawDibOpen()
def ExitRoutines():
    """Release the AVIFile library and the shared DrawDib context."""
    AVIFileExit()
    DrawDibClose(handleDib[0])
def MakePreviewScriptFile(script, filename):
    """Write *script* to a uniquely named temporary Avisynth preview file.

    The file is named preview.avs (or preview1.avs, preview2.avs, ... if
    taken) in the directory of *filename*, falling back to the current
    working directory when that directory is missing or not writable.
    Returns the path of the file that was written.
    """
    # Construct the filename of the temporary avisynth script
    dirname = os.path.dirname(filename)
    if not os.path.isdir(dirname) or not os.access(dirname, os.W_OK):
        dirname = os.getcwd()
    previewname = os.path.join(dirname, 'preview.avs')
    i = 1
    while os.path.exists(previewname):
        previewname = os.path.join(dirname, 'preview%i.avs' % i)
        i = i + 1
    # Write the file.  Context managers guarantee the handle is closed even
    # if the write raises (the original leaked the handle in that case).
    try:
        with open(previewname, 'w') as f:
            f.write(script)
    except UnicodeEncodeError:
        # The script contains characters outside the default codec; retry
        # with the filesystem encoding.
        with codecs.open(previewname, 'w', encoding) as f:
            f.write(script)
    return previewname
class AvsClip:
    """Wraps an Avisynth script as a decodable video clip via avifil32.

    The script text is written to a temporary preview .avs file, opened
    through the VFW AVIFile API as a video stream, and individual frames
    can then be decoded (_GetFrame) and blitted to a device context
    (DrawFrame).  Windows-only: relies on avifil32/msvfw32 via ctypes.
    """
    def __init__(self, script, filename='', fitHeight=None, fitWidth=None, oldFramecount=None, keepRaw=False):
        """Open *script* through a temporary preview file.

        fitHeight/fitWidth, when given, re-open the clip through a
        BicubicResize() wrapper script so decoded frames come back
        pre-scaled (aspect ratio preserved).
        NOTE(review): oldFramecount and keepRaw are accepted but never
        used anywhere in this class -- confirm they can be dropped.
        """
        self.initialized = False
        self.error_message = None
        self.current_frame = -1
        self.pvidstream = LONG() # = PAVISTREAM()
        self.bmih = BITMAPINFOHEADER()
        self.pgf = LONG()
        self.pBits = None
        self.pInfo = None
        psi = AVISTREAMINFO()
        # Avisynth script properties
        self.Width = -1
        self.Height = -1
        self.Framecount = -1
        self.Framerate = -1.0
        self.FramerateNumerator = -1
        self.FramerateDenominator = -1
        self.Audiorate = -1.0
        self.Audiolength = -1
        #~ self.AudiolengthF = None
        self.Audiochannels = -1
        self.Audiobits = -1
        self.IsAudioFloat = None
        self.IsAudioInt = None
        self.IsRGB = None
        self.IsRGB24 = None
        self.IsRGB32 = None
        self.IsYUY2 = None
        self.IsYV12 = None
        self.IsYUV = None
        self.IsPlanar = None
        self.IsInterleaved = None
        self.IsFieldBased = None
        self.IsFrameBased = None
        self.GetParity = None
        self.HasAudio = None
        self.HasVideo = None
        self.Colorspace = 'RGB32'
        # Open the avi file
        previewname = MakePreviewScriptFile(script, filename)
        # Prefer the wide-character entry point for unicode filenames; it
        # may not exist on older avifil32 builds (see module bindings).
        AVIStreamOpenFromFile = AVIStreamOpenFromFileA
        if type(previewname) == type(u''):
            try:
                AVIStreamOpenFromFile = AVIStreamOpenFromFileW
            except NameError:
                pass
        if (AVIStreamOpenFromFile(ctypes.byref(self.pvidstream), previewname, streamtypeVIDEO, 0, OF_READ, NULL)!=0):
            if __debug__:
                print>>sys.stderr, _("Failed to open the AVI file")
                #~ print>>sys.stderr, filename
            #~ AVIFileExit()
            return
        else:
            if __debug__:
                print "AVI file opened successfully"
            pass
        # Read basic data from the avi file
        AVIStreamInfo(self.pvidstream, ctypes.byref(psi), ctypes.sizeof(psi))
        self.Framecount = psi.dwLength
        self.Width = psi.rcFrame.right-psi.rcFrame.left
        self.Height = psi.rcFrame.bottom-psi.rcFrame.top
        self.WidthActual, self.HeightActual = self.Width, self.Height
        self.Framerate = psi.dwRate/(psi.dwScale+0.0)
        # Optionally re-open the stream through a resizing wrapper script so
        # frames come back scaled to fit the requested bounding box.
        if fitHeight is not None:
            fitWidthTemp = int(round(fitHeight * (self.Width/float(self.Height))))
            if fitWidth is None:
                fitWidth = fitWidthTemp
            elif fitWidthTemp > fitWidth:
                fitHeight = int(round(fitWidth * (self.Height/float(self.Width))))
            else:
                fitWidth = fitWidthTemp
            if fitHeight >= 4 and fitWidth >= 4:
                resizeScript = 'Import("%s").ConvertToRGB().BicubicResize(%i,%i)' % (previewname, fitWidth, fitHeight)
                previewname2 = MakePreviewScriptFile(resizeScript, filename)
                AVIStreamRelease(self.pvidstream)
                if (AVIStreamOpenFromFile(ctypes.byref(self.pvidstream), previewname2, streamtypeVIDEO, 0, OF_READ, NULL)!=0):
                    if __debug__:
                        print>>sys.stderr, _("Failed to open the AVI file")
                    return
                else:
                    if __debug__:
                        print "AVI file opened successfully"
                    pass
                # Set internal width and height variables appropriately
                self.Width, self.Height = fitWidth, fitHeight
                os.remove(previewname2)
        # Define the desired image format
        self.bmih.biSize = ctypes.sizeof(BITMAPINFOHEADER)
        self.bmih.biPlanes = 1
        self.bmih.biBitCount = 24
        self.bmih.biWidth = self.Width
        self.bmih.biHeight = self.Height
        self.bmih.biCompression = BI_RGB
        self.bmih.biSizeImage = 0
        self.bmih.biClrUsed = 0
        self.bmih.biClrImportant = 0
        # Change desired format to 32 bit (RGBA) if necessary
        bmihtemp = BITMAPINFOHEADER()
        bmihtemp_size = LONG(ctypes.sizeof(bmihtemp))
        AVIStreamReadFormat(self.pvidstream,0,ctypes.byref(bmihtemp),ctypes.byref(bmihtemp_size))
        if(bmihtemp.biBitCount==32):
            self.bmih.biBitCount = 32
        # Open the video stream
        self.pgf = AVIStreamGetFrameOpen(self.pvidstream,ctypes.byref(self.bmih))
        if self.pgf==-1:
            AVIStreamRelease(self.pvidstream)
            if __debug__:
                print>>sys.stderr, _("Failed to open the AVI frame")
            #~ AVIFileExit()
            return
        else:
            if __debug__:
                print "AVI frame opened successfully"
            pass
        # Keep bound references to the release functions so __del__ can
        # still call them during interpreter shutdown.
        self.AVIStreamGetFrameClose = AVIStreamGetFrameClose
        self.AVIStreamRelease = AVIStreamRelease
        self.AVIFileRelease = AVIFileRelease
        #~ self.AVIFileExit = AVIFileExit
        self.initialized = True
        os.remove(previewname)
    def __del__(self):
        """Release the frame decoder and the video stream, if opened."""
        if self.initialized:
            if __debug__:
                print "Deleting allocated video memory..."
            self.AVIStreamGetFrameClose(self.pgf)
            self.AVIStreamRelease(self.pvidstream)
    def _GetFrame(self, frame):
        """Decode *frame* (clamped to the valid range) into an in-memory DIB.

        On success, self.pInfo points at the frame's BITMAPINFOHEADER and
        self.pBits at its pixel data; returns True.  Returns False when the
        clip is not initialized or the frame cannot be retrieved.
        """
        if self.initialized:
            if(frame<0):
                frame = 0
            if(frame>=self.Framecount):
                frame = self.Framecount-1
            try:
                self.lpbi = AVIStreamGetFrame(self.pgf, frame) #Grab Data From The AVI Stream
            except WindowsError:
                print>>sys.stderr, _("Failed to retrieve AVI frame")
                return False
            self.pInfo = LONG(self.lpbi)
            # Pixel data follows the header and the biClrUsed palette entries.
            self.pBits = LONG(self.lpbi + self.bmih.biSize + self.bmih.biClrUsed * ctypes.sizeof(RGBQUAD))
            return True
        else:
            return False
    def DrawFrame(self, frame, dc=None, offset=(0,0), size=None):
        """Decode *frame* and, when a device context is given, blit it there."""
        if not self._GetFrame(frame):
            return
        if dc:
            hdc = dc.GetHDC()
            if size is None:
                w = self.Width
                h = self.Height
            else:
                w, h = size
            # NOTE(review): hdc is fetched but DrawDibDraw is passed dc (the
            # DC wrapper object) rather than hdc -- looks wrong; confirm
            # against a working caller before changing.
            #~ SetDIBitsToDevice(hdc, offset[0], offset[1], w, h, 0, 0, 0, h, self.pBits, self.pInfo, 0)
            DrawDibDraw(handleDib[0], dc, offset[0], offset[1], w, h, self.pInfo, self.pBits, 0, 0, -1, -1, 0)
    def GetPixelYUV(self, x, y):
        """Stub: per-pixel access is not implemented; always (-1,-1,-1)."""
        return (-1,-1,-1)
    def GetPixelRGB(self, x, y):
        """Stub: per-pixel access is not implemented; always (-1,-1,-1)."""
        return (-1,-1,-1)
    def GetPixelRGBA(self, x, y):
        """Stub: per-pixel access is not implemented; always (-1,-1,-1,-1)."""
        return (-1,-1,-1,-1)
    def GetVarType(self, strVar):
        """Stub: script variable types are not queried; always 'unknown'."""
        return 'unknown'
    def IsErrorClip(self):
        """Return True when an error message has been recorded for this clip."""
        return self.error_message is not None
    def _x_SaveFrame(self, filename, frame=None):
        """Write a frame (the given one, else the current/first) as a .bmp.

        Uses raw Win32 CreateFile/WriteFile; the _x_ prefix suggests this
        method is currently disabled.
        """
        # Get the frame to display
        if frame == None:
            if self.pInfo == None or self.pBits == None:
                self._GetFrame(0)
        else:
            self._GetFrame(frame)
        # Create the file for writing
        buffer = ctypes.create_string_buffer(filename)
        hFile = CreateFile(
            ctypes.byref(buffer),
            GENERIC_WRITE,
            0,
            NULL,
            CREATE_ALWAYS,
            FILE_ATTRIBUTE_NORMAL,
            NULL
        )
        # Write the bitmap file header
        fileheadersize = 14
        bmpheadersize = 40
        # BMP scanlines are padded to a multiple of 4 bytes.
        extrabytes = (4 - self.bmih.biWidth % 4) % 4
        widthPadded = self.bmih.biWidth + extrabytes
        bitmapsize = (widthPadded * self.bmih.biHeight * self.bmih.biBitCount) / 8
        bfType = WORD(0x4d42)
        bfSize = DWORD(fileheadersize + bmpheadersize + bitmapsize)
        bfReserved1 = WORD(0)
        bfReserved2 = WORD(0)
        bfOffBits = DWORD(fileheadersize + bmpheadersize)
        dwBytesWritten = DWORD()
        # The five header fields are written one at a time to get the packed
        # 14-byte on-disk layout without relying on struct padding.
        WriteFile(
            hFile,
            ctypes.byref(bfType),
            2,
            ctypes.byref(dwBytesWritten),
            NULL
        )
        WriteFile(
            hFile,
            ctypes.byref(bfSize),
            4,
            ctypes.byref(dwBytesWritten),
            NULL
        )
        WriteFile(
            hFile,
            ctypes.byref(bfReserved1),
            2,
            ctypes.byref(dwBytesWritten),
            NULL
        )
        WriteFile(
            hFile,
            ctypes.byref(bfReserved2),
            2,
            ctypes.byref(dwBytesWritten),
            NULL
        )
        WriteFile(
            hFile,
            ctypes.byref(bfOffBits),
            4,
            ctypes.byref(dwBytesWritten),
            NULL
        )
        # Write the bitmap info header and (unused) color table
        WriteFile(
            hFile,
            self.pInfo,
            bmpheadersize, #(self.bmih.biSize + self.bmih.biClrUsed * ctypes.sizeof(RGBQUAD)), # + bitmapsize),
            ctypes.byref(dwBytesWritten),
            NULL
        )
        # Write the bitmap bits
        WriteFile(
            hFile,
            self.pBits,
            bitmapsize,
            ctypes.byref(dwBytesWritten),
            NULL
        )
        CloseHandle(hFile)
if __name__ == '__main__':
    # NOTE(review): stale demo code -- this module defines AvsClip (with
    # _x_SaveFrame), not PyAVIFile/SaveFrame, so running this block would
    # raise NameError.  Confirm the intended class before fixing/removing.
    AVI = PyAVIFile("D:\\test.avs")
    AVI.SaveFrame("D:\\test_save_frame.bmp", 100)
    print "Exit program."
| AvsPmod/AvsPmod | pyavs_avifile.py | Python | gpl-2.0 | 15,204 | [
"VisIt"
] | 7b6af904f5d2b62f63eee0c3482dec46f93df0c7c419cdb26aa5da8fcedc473a |
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from .ChiggerResult import ChiggerResult
class ResultGroup(ChiggerResult):
    """
    A ChiggerResult that groups multiple ChiggerResult objects sharing one renderer.
    """

    @staticmethod
    def getOptions():
        """Return the default options (identical to ChiggerResult's)."""
        return ChiggerResult.getOptions()

    def __init__(self, **kwargs):
        super(ResultGroup, self).__init__(**kwargs)
        self._results = []

    def getResults(self):
        """Return the list of grouped ChiggerResult objects."""
        return self._results

    def __iter__(self):
        """Iterate over the grouped results."""
        return iter(self._results)

    def __getitem__(self, index):
        """Provide operator[] access to the grouped results."""
        return self._results[index]

    def update(self, **kwargs):
        """Intentionally a no-op: grouped results are updated individually."""
        pass

    def setOptions(self, *args, **kwargs):
        """Forward the option changes to every grouped result."""
        for res in self._results:
            res.setOptions(*args, **kwargs)

    def needsUpdate(self):
        """Return True if any grouped result requires an update."""
        for res in self._results:
            if res.needsUpdate():
                return True
        return False

    def add(self, result, *args, **kwargs):
        """
        Instantiate and store a new ChiggerResult.

        Args:
            result: A ChiggerResult class (not instance) to create.
            args: Positional arguments forwarded to the class.
            kwargs: Keyword arguments forwarded to the class; 'renderer'
                defaults to this group's shared VTK renderer.
        """
        kwargs.setdefault('renderer', self.getVTKRenderer())
        self._results.append(result(*args, **kwargs))
| nuclear-wizard/moose | python/chigger/base/ResultGroup.py | Python | lgpl-2.1 | 2,059 | [
"MOOSE"
] | cec0fb77e0e42b8169c86a8ceb26e959f2fd5b611c2c5442bfcc3f3baf000b4f |
import numpy as np
import mdtraj as md
from mdtraj.testing import eq
random = np.random.RandomState(0)
def compute_neighbors_reference(traj, cutoff, query_indices, haystack_indices=None):
    """Brute-force reference implementation of md.compute_neighbors.

    Returns, per frame, the sorted unique haystack atom indices that lie
    within *cutoff* of any query atom.
    """
    if haystack_indices is None:
        haystack_indices = range(traj.n_atoms)
    # Explicitly enumerate every (query, haystack) pair, skipping self-pairs.
    pairs = np.array(
        [(q, h) for h in haystack_indices for q in query_indices if h != q]
    )
    dists = md.compute_distances(traj, pairs)
    # A haystack atom may fall within the cutoff of several query atoms;
    # np.unique collapses those duplicates for each frame.
    return [
        np.unique(pairs[dists[frame] < cutoff, 1])
        for frame in range(traj.n_frames)
    ]
def test_compute_neighbors_1():
    """compute_neighbors matches the brute-force reference on random coordinates."""
    n_frames, n_atoms, cutoff = 2, 20, 2
    traj = md.Trajectory(xyz=random.randn(n_frames, n_atoms, 3), topology=None)
    query_indices = [0, 1]
    actual = md.compute_neighbors(traj, cutoff, query_indices)
    expected = compute_neighbors_reference(traj, cutoff, query_indices)
    for frame_actual, frame_expected in zip(actual, expected):
        eq(frame_actual, frame_expected)
def test_compute_neighbors_2(get_fn):
    """compute_neighbors matches the reference implementation on a PDB structure."""
    traj = md.load(get_fn('4ZUO.pdb'))
    cutoff = 1.0
    query_indices = traj.top.select('residue 1')
    actual = md.compute_neighbors(traj, cutoff, query_indices)
    expected = compute_neighbors_reference(traj, cutoff, query_indices)
    for frame_actual, frame_expected in zip(actual, expected):
        eq(frame_actual, frame_expected)
def test_compute_neighbors_3(get_fn):
    """compute_neighbors matches the reference on an Amber NetCDF trajectory."""
    traj = md.load(get_fn('test_good.nc'), top=get_fn('test.parm7'))
    cutoff = 1.0
    query_indices = traj.top.select('residue 1')
    actual = md.compute_neighbors(traj, cutoff, query_indices)
    expected = compute_neighbors_reference(traj, cutoff, query_indices)
    for frame_actual, frame_expected in zip(actual, expected):
        eq(frame_actual, frame_expected)
| dwhswenson/mdtraj | tests/test_neighbors.py | Python | lgpl-2.1 | 1,848 | [
"MDTraj"
] | 7a7d790e52e732105ec932a911dcf86a7fff4fac135f6fdb40ae038229729d3f |
#!/usr/bin/env python
import sys
import os
import os.path
# Require exactly one argument: the simulation output directory.
if len(sys.argv)!=2:
    print("Usage:", sys.argv[0], "simu_directory")
    sys.exit(0)
import numpy as np
from io import StringIO
# Open lammps log file to extract thermodynamic observables
logfile = open(os.path.join(os.getcwd(),sys.argv[1], 'log.lammps')).readlines()
# Each thermo table starts at a 'Step Time Temp' header line and ends at
# the matching 'Loop time' line; record both sets of line indices.
start_indices = [(i,l) for (i,l) in enumerate(logfile) if l.startswith('Step Time Temp')]
stop_indices = [(i,l) for (i,l) in enumerate(logfile) if l.startswith('Loop time')]
def from_log(idx=-1):
    """Return the idx-th thermo table of the log as unpacked numpy columns."""
    begin = start_indices[idx][0]
    end = stop_indices[idx][0]
    # Skip the header line itself; stop just before the 'Loop time' line.
    table_text = u''.join(logfile[begin + 1:end])
    return np.loadtxt(StringIO(table_text), unpack=True)
# Columns follow the 'Step Time Temp ...' header, so Step is the first
# column and Time the second (the original assignment had these two names
# swapped; only temp is used below, so the result was unaffected).
step, time, temp, e_kin, e_vdw, press, vol, rho = from_log()
target = 0.85
# Mean and fluctuation of the instantaneous temperature over the run.
T_av = temp.mean()
T_std = temp.std()
print("Temperature: {} +/- {}".format(T_av, T_std))
# Accept the run only when the mean is within 0.002 of the target.
if (T_av-target)**2>0.002**2:
    print("Temperature too far off")
    sys.exit(1)
else:
    print("Temperature OK")
    sys.exit(0)
| pdebuyl/ljrr | check_T.py | Python | bsd-3-clause | 966 | [
"LAMMPS"
] | f101d283c15fad6f993cba9ccfc0391959c6706e41c8844dbfc72fa5cbd0ad0c |
"""
Tests the forum notification views.
"""
import json
import logging
from datetime import datetime
from unittest.mock import ANY, Mock, call, patch
import ddt
import pytest
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import translation
from edx_django_utils.cache import RequestCache
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.roles import CourseStaffRole, UserBasedRole
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from common.djangoapps.util.testing import EventTestMixin, UrlResetMixin
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.discussion import views
from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from lms.djangoapps.discussion.django_comment_client.permissions import get_team
from lms.djangoapps.discussion.django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
GroupIdAssertionMixin,
NonCohortedTopicGroupIdTestMixin
)
from lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin
from lms.djangoapps.discussion.django_comment_client.tests.utils import (
CohortedTestCase,
ForumsEnableMixin,
config_course_discussions,
topic_name_to_id
)
from lms.djangoapps.discussion.django_comment_client.utils import strip_none
from lms.djangoapps.discussion.views import _get_discussion_default_topic_id, course_discussions_settings_handler
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from openedx.core.djangoapps.course_groups.tests.test_views import CohortViewsTestCase
from openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientPaginatedResult
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_STUDENT,
CourseDiscussionSettings,
ForumsConfig
)
from openedx.core.djangoapps.django_comment_common.utils import ThreadContext, seed_permissions_roles
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES
from openedx.core.lib.teams_config import TeamsConfig
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseTestConsentRequired
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import ( # lint-amnesty, pylint: disable=wrong-import-order
TEST_DATA_MONGO_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger(__name__)
# Waffle tables are excluded from assertNumQueries counts in these tests.
QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    """Verify that profile/followed-threads views return 404 for unknown users."""
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        """Create a course, an enrolled student, and a logged-in test client."""
        # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super().setUp()
        # create a course
        self.course = CourseFactory.create(org='MITx', course='999',
                                           display_name='Robot Super Course')
        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('common.djangoapps.student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            password = 'test'
            # Create the student
            self.student = UserFactory(username=uname, password=password, email=email)
            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
            # Log the student in
            self.client = Client()
            assert self.client.login(username=uname, password=password)
        # Enable the forums feature so the discussion URLs are active.
        config = ForumsConfig.current()
        config.enabled = True
        config.save()
    @patch('common.djangoapps.student.models.cc.User.from_django_user')
    @patch('common.djangoapps.student.models.cc.User.active_threads')
    def test_user_profile_exception(self, mock_threads, mock_from_django_user):
        """The user_profile view 404s when the profiled user does not exist."""
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = [], 1, 1
        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()
        url = reverse('user_profile',
                      kwargs={'course_id': str(self.course.id), 'user_id': '12345'})  # There is no user 12345
        response = self.client.get(url)
        assert response.status_code == 404
    @patch('common.djangoapps.student.models.cc.User.from_django_user')
    @patch('common.djangoapps.student.models.cc.User.subscribed_threads')
    def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
        """The followed_threads view 404s when the profiled user does not exist."""
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)
        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()
        url = reverse('followed_threads',
                      kwargs={'course_id': str(self.course.id), 'user_id': '12345'})  # There is no user 12345
        response = self.client.get(url)
        assert response.status_code == 404
def make_mock_thread_data(
    course,
    text,
    thread_id,
    num_children,
    group_id=None,
    group_name=None,
    commentable_id=None,
    is_commentable_divided=None,
):
    """Build a dict shaped like a comments-service thread response."""
    # Fall back to the course's 'General' topic id, then to a dummy id.
    topic_id = commentable_id
    if not topic_id:
        topic_id = course.discussion_topics.get('General', {}).get('id')
    if not topic_id:
        topic_id = "dummy_commentable_id"
    # Team-linked topics are standalone; everything else is course context.
    context = ThreadContext.COURSE if get_team(topic_id) is None else ThreadContext.STANDALONE
    thread = {
        "id": thread_id,
        "type": "thread",
        "title": text,
        "body": text,
        "commentable_id": topic_id,
        "resp_total": 42,
        "resp_skip": 25,
        "resp_limit": 5,
        "group_id": group_id,
        "context": context,
    }
    if group_id is not None:
        thread['group_name'] = group_name
    if is_commentable_divided is not None:
        thread['is_commentable_divided'] = is_commentable_divided
    if num_children is not None:
        children = []
        for i in range(num_children):
            children.append({
                "id": f"dummy_comment_id_{i}",
                "type": "comment",
                "body": text,
            })
        thread["children"] = children
    return thread
def make_mock_collection_data(
    course,
    text,
    thread_id,
    num_children=None,
    group_id=None,
    commentable_id=None,
    thread_list=None
):
    """Build a list of mock thread dicts for a collection-style response."""
    if thread_list:
        # Each entry supplies its own per-thread kwargs (thread_id, group_id, ...).
        return [
            make_mock_thread_data(course=course, text=text, num_children=num_children, **entry)
            for entry in thread_list
        ]
    # Otherwise return a single mocked thread built from the scalar arguments.
    single = make_mock_thread_data(
        course=course,
        text=text,
        thread_id=thread_id,
        num_children=num_children,
        group_id=group_id,
        commentable_id=commentable_id,
    )
    return [single]
def make_mock_perform_request_impl(
    course,
    text,
    thread_id="dummy_thread_id",
    group_id=None,
    commentable_id=None,
    num_thread_responses=1,
    thread_list=None
):
    """Create a fake perform_request that answers comments-service URLs by pattern."""
    def _impl(*args, **kwargs):
        url = args[1]
        # Thread listings and user-profile pages return a paginated collection.
        if url.endswith("threads") or url.endswith("user_profile"):
            return {
                "collection": make_mock_collection_data(
                    course, text, thread_id, None, group_id, commentable_id, thread_list
                )
            }
        # A request for the mocked thread itself returns its full data.
        if thread_id and url.endswith(thread_id):
            return make_mock_thread_data(
                course=course,
                text=text,
                thread_id=thread_id,
                num_children=num_thread_responses,
                group_id=group_id,
                commentable_id=commentable_id
            )
        # User endpoints return preference/engagement stubs.
        if "/users/" in url:
            user_data = {
                "default_sort_key": "date",
                "upvoted_ids": [],
                "downvoted_ids": [],
                "subscribed_thread_ids": [],
            }
            # comments service adds these attributes when course_id param is present
            if kwargs.get('params', {}).get('course_id'):
                user_data.update({
                    "threads_count": 1,
                    "comments_count": 2
                })
            return user_data
        # Any other URL is unrecognized.
        return None
    return _impl
def make_mock_request_impl(
    course,
    text,
    thread_id="dummy_thread_id",
    group_id=None,
    commentable_id=None,
    num_thread_responses=1,
    thread_list=None,
):
    """Create a fake requests.request returning mocked comments-service responses."""
    payload_for = make_mock_perform_request_impl(
        course,
        text,
        thread_id=thread_id,
        group_id=group_id,
        commentable_id=commentable_id,
        num_thread_responses=num_thread_responses,
        thread_list=thread_list
    )
    def _request(*args, **kwargs):
        payload = payload_for(*args, **kwargs)
        # Recognized URLs come back as 200 with a JSON body; everything
        # else behaves like a comments-service 404.
        if payload:
            return Mock(status_code=200, text=json.dumps(payload), json=Mock(return_value=payload))
        return Mock(status_code=404)
    return _request
class StringEndsWithMatcher:
    """Equality matcher: compares equal to any string ending with *suffix*.

    Used as an argument matcher in mock assert_called_with checks.
    """
    def __init__(self, suffix):
        self.suffix = suffix

    def __eq__(self, other):
        return other.endswith(self.suffix)

    def __repr__(self):
        # Shown in assert_called_with failure messages instead of the
        # default object address, making mismatches easier to read.
        return "<ends with %r>" % (self.suffix,)
class PartialDictMatcher:
    """Equality matcher: compares equal to any dict containing all expected pairs.

    Extra keys in the compared dict are ignored; every expected key must be
    present with an equal value.
    """
    def __init__(self, expected_values):
        self.expected_values = expected_values

    def __eq__(self, other):
        return all(
            key in other and other[key] == value
            for key, value in self.expected_values.items()
        )

    def __repr__(self):
        # Readable output in mock assertion failure messages.
        return "<dict containing %r>" % (self.expected_values,)
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ForumsEnableMixin, ModuleStoreTestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    """Tests for views.single_thread: ajax retrieval, paging, method and error handling."""
    CREATE_USER = False
    def setUp(self):
        """Create a course with a dummy discussion topic and an enrolled student."""
        super().setUp()
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
    def test_ajax(self, mock_request):
        """An XHR GET returns the thread data and queries the comments service correctly."""
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            str(self.course.id),
            "dummy_discussion_id",
            "test_thread_id"
        )
        assert response.status_code == 200
        response_data = json.loads(response.content.decode('utf-8'))
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        assert response_data['content'] == strip_none(make_mock_thread_data(
            course=self.course,
            text=text,
            thread_id=thread_id,
            num_children=1
        ))
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
            headers=ANY,
            timeout=ANY
        )
    def test_skip_limit(self, mock_request):
        """resp_skip/resp_limit query parameters are forwarded to the comments service."""
        text = "dummy content"
        thread_id = "test_thread_id"
        response_skip = "45"
        response_limit = "15"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get(
            "dummy_url",
            {"resp_skip": response_skip, "resp_limit": response_limit},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            str(self.course.id),
            "dummy_discussion_id",
            "test_thread_id"
        )
        assert response.status_code == 200
        response_data = json.loads(response.content.decode('utf-8'))
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        assert response_data['content'] == strip_none(make_mock_thread_data(
            course=self.course,
            text=text,
            thread_id=thread_id,
            num_children=1
        ))
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({
                "mark_as_read": True,
                "user_id": 1,
                "recursive": True,
                "resp_skip": response_skip,
                "resp_limit": response_limit,
            }),
            headers=ANY,
            timeout=ANY
        )
    def test_post(self, _mock_request):
        """POST is not allowed on the single_thread view (405)."""
        request = RequestFactory().post("dummy_url")
        response = views.single_thread(
            request,
            str(self.course.id),
            "dummy_discussion_id",
            "dummy_thread_id"
        )
        assert response.status_code == 405
    def test_not_found(self, mock_request):
        """A missing thread results in Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        # Mock request to return 404 for thread request
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
        self.assertRaises(
            Http404,
            views.single_thread,
            request,
            str(self.course.id),
            "test_discussion_id",
            "test_thread_id"
        )
    def test_private_team_thread_html(self, mock_request):
        """Non-members of a private team see an access-denied message in the HTML."""
        discussion_topic_id = 'dummy_discussion_id'
        thread_id = 'test_thread_id'
        CourseTeamFactory.create(discussion_topic_id=discussion_topic_id)
        user_not_in_team = UserFactory.create()
        CourseEnrollmentFactory.create(user=user_not_in_team, course_id=self.course.id)
        self.client.login(username=user_not_in_team.username, password='test')
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy",
            thread_id=thread_id,
            commentable_id=discussion_topic_id
        )
        with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:
            mocked.return_value = True
            response = self.client.get(
                reverse('single_thread', kwargs={
                    'course_id': str(self.course.id),
                    'discussion_id': discussion_topic_id,
                    'thread_id': thread_id,
                })
            )
            assert response.status_code == 200
            assert response['Content-Type'] == 'text/html; charset=utf-8'
            html = response.content.decode('utf-8')
            # Verify that the access denied error message is in the HTML
            assert 'This is a private discussion. You do not have permissions to view this discussion' in html
class AllowPlusOrMinusOneInt(int):
    """
    A workaround for the fact that assertNumQueries doesn't let you
    specify a range or any tolerance. An 'int' that is 'equal to' its value,
    but also its value +/- 1
    """
    def __init__(self, value):
        super().__init__()
        self.value = value
        self.values = (value, value - 1, value + 1)

    def __eq__(self, other):
        return other in self.values

    # Defining __eq__ implicitly sets __hash__ to None; restore the int
    # hash so these values remain usable in sets and as dict keys.
    __hash__ = int.__hash__

    def __repr__(self):
        return f"({self.value} +/- 1)"
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Ensures the number of modulestore queries and number of sql queries are
    independent of the number of responses retrieved for a given discussion thread.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE
    @ddt.data(
        # Old mongo with cache. There is an additional SQL query for old mongo
        # because the first time that disabled_xblocks is queried is in call_single_thread,
        # vs. the creation of the course (CourseFactory.create). The creation of the
        # course is outside the context manager that is verifying the number of queries,
        # and with split mongo, that method ends up querying disabled_xblocks (which is then
        # cached and hence not queried as part of call_single_thread).
        (ModuleStoreEnum.Type.mongo, False, 1, 5, 2, 21, 7),
        (ModuleStoreEnum.Type.mongo, False, 50, 5, 2, 21, 7),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, False, 1, 2, 2, 21, 8),
        (ModuleStoreEnum.Type.split, False, 50, 2, 2, 21, 8),
        # Enabling Enterprise integration should have no effect on the number of mongo queries made.
        (ModuleStoreEnum.Type.mongo, True, 1, 5, 2, 21, 7),
        (ModuleStoreEnum.Type.mongo, True, 50, 5, 2, 21, 7),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, True, 1, 2, 2, 21, 8),
        (ModuleStoreEnum.Type.split, True, 50, 2, 2, 21, 8),
    )
    @ddt.unpack
    def test_number_of_mongo_queries(
        self,
        default_store,
        enterprise_enabled,
        num_thread_responses,
        num_uncached_mongo_calls,
        num_cached_mongo_calls,
        num_uncached_sql_queries,
        num_cached_sql_queries,
        mock_request
    ):
        """Query counts must not scale with the number of thread responses."""
        ContentTypeGatingConfig.objects.create(enabled=True, enabled_as_of=datetime(2018, 1, 1))
        with modulestore().default_store(default_store):
            course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        student = UserFactory.create()
        CourseEnrollmentFactory.create(user=student, course_id=course.id)
        test_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
        )
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = student
        def call_single_thread():
            """
            Call single_thread and assert that it returns what we expect.
            """
            with patch.dict("django.conf.settings.FEATURES", dict(ENABLE_ENTERPRISE_INTEGRATION=enterprise_enabled)):
                response = views.single_thread(
                    request,
                    str(course.id),
                    "dummy_discussion_id",
                    test_thread_id
                )
            assert response.status_code == 200
            assert len(json.loads(response.content.decode('utf-8'))['content']['children']) == num_thread_responses
        # Test uncached first, then cached now that the cache is warm.
        cached_calls = [
            [num_uncached_mongo_calls, num_uncached_sql_queries],
            # Sometimes there will be one more or fewer sql call than expected, because the call to
            # CourseMode.modes_for_course sometimes does / doesn't get cached and does / doesn't hit the DB.
            # EDUCATOR-5167
            [num_cached_mongo_calls, AllowPlusOrMinusOneInt(num_cached_sql_queries)],
        ]
        for expected_mongo_calls, expected_sql_queries in cached_calls:
            with self.assertNumQueries(expected_sql_queries, table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
                with check_mongo_calls(expected_mongo_calls):
                    call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
    """
    Exercises views.single_thread for a thread living in a divided (cohorted)
    topic, over both the AJAX and plain-HTML entry points.
    """
    def _create_mock_cohorted_thread(self, mock_request):
        """Point the comments-service mock at one cohorted thread; return (text, thread_id)."""
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=text,
            thread_id=thread_id,
            group_id=self.student_cohort.id,
            commentable_id="cohorted_topic",
        )
        return text, thread_id

    def test_ajax(self, mock_request):
        """An AJAX request returns the full thread data, including cohort group info."""
        text, thread_id = self._create_mock_cohorted_thread(mock_request)
        ajax_request = RequestFactory().get("dummy_url", HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        ajax_request.user = self.student
        response = views.single_thread(
            ajax_request,
            str(self.course.id),
            "cohorted_topic",
            thread_id
        )
        assert response.status_code == 200
        payload = json.loads(response.content.decode('utf-8'))
        expected_thread = make_mock_thread_data(
            course=self.course,
            commentable_id='cohorted_topic',
            text=text,
            thread_id=thread_id,
            num_children=1,
            group_id=self.student_cohort.id,
            group_name=self.student_cohort.name,
            is_commentable_divided=True
        )
        assert payload['content'] == expected_thread

    def test_html(self, mock_request):
        """A regular request renders HTML that includes the cohort's group name."""
        _text, thread_id = self._create_mock_cohorted_thread(mock_request)
        self.client.login(username=self.student.username, password='test')
        thread_url = reverse('single_thread', kwargs={
            'course_id': str(self.course.id),
            'discussion_id': "cohorted_topic",
            'thread_id': thread_id,
        })
        response = self.client.get(thread_url)
        assert response.status_code == 200
        assert response['Content-Type'] == 'text/html; charset=utf-8'
        html = response.content.decode('utf-8')
        # Verify that the group name is correctly included in the HTML
        self.assertRegex(html, r'"group_name": "student_cohort"')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
    """
    Access-control tests for views.single_thread: students can reach global
    threads and threads in their own cohort; moderators can reach threads in
    any cohort; non-members of a private team discussion get a 403.
    """
    def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
        """
        Call views.single_thread as ``user`` against a mocked thread
        (optionally owned by ``thread_group_id``) and return the response.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
        )
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.single_thread(
            request,
            str(self.course.id),
            commentable_id,
            thread_id
        )

    def test_student_non_cohorted(self, mock_request):
        """A student can read a thread in a non-cohorted topic."""
        resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
        assert resp.status_code == 200

    def test_student_same_cohort(self, mock_request):
        """A student can read a thread belonging to their own cohort."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        assert resp.status_code == 200

    # this test ensures that a thread response from the cs with group_id: null
    # behaves the same as a thread response without a group_id (see: TNL-444)
    def test_student_global_thread_in_cohorted_topic(self, mock_request):
        """A global (group-less) thread in a cohorted topic is readable by any student."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=None
        )
        assert resp.status_code == 200

    def test_student_different_cohort(self, mock_request):
        """A student reading another cohort's thread gets a 404."""
        pytest.raises(Http404, (lambda: self.call_view(
            mock_request,
            'cohorted_topic',
            self.student,
            self.student_cohort.id,
            thread_group_id=self.moderator_cohort.id
        )))

    def test_moderator_non_cohorted(self, mock_request):
        """A moderator can read a thread in a non-cohorted topic."""
        resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
        assert resp.status_code == 200

    def test_moderator_same_cohort(self, mock_request):
        """A moderator can read a thread in their own cohort."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.moderator_cohort.id
        )
        assert resp.status_code == 200

    def test_moderator_different_cohort(self, mock_request):
        """A moderator can read a thread from a cohort other than their own."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        assert resp.status_code == 200

    def test_private_team_thread(self, mock_request):
        """A user outside the team is denied (403) when the team discussion is private."""
        CourseTeamFactory.create(discussion_topic_id='dummy_discussion_id')
        user_not_in_team = UserFactory.create()
        CourseEnrollmentFactory(user=user_not_in_team, course_id=self.course.id)
        with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:
            mocked.return_value = True
            response = self.call_view(
                mock_request,
                'non_cohorted_topic',
                user_not_in_team,
                ''
            )
            assert 403 == response.status_code
            assert views.TEAM_PERMISSION_MESSAGE == response.content.decode('utf-8')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, GroupIdAssertionMixin):
    """Checks that cohort group information is surfaced by the single_thread view."""
    cs_endpoint = "/threads/dummy_thread_id"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Fetch the single_thread page as ``user``, optionally passing group_id and/or as AJAX."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", group_id=self.student_cohort.id
        )
        query = {"group_id": group_id} if pass_group_id else {}
        extra_headers = {'HTTP_X_REQUESTED_WITH': "XMLHttpRequest"} if is_ajax else {}
        self.client.login(username=user.username, password='test')
        thread_url = reverse(
            'single_thread',
            args=[str(self.course.id), commentable_id, "dummy_thread_id"]
        )
        return self.client.get(thread_url, data=query, **extra_headers)

    def test_group_info_in_html_response(self, mock_request):
        """The rendered HTML embeds the cohort group info."""
        response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id, is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        """The AJAX payload's content dict carries the cohort group info."""
        response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id, is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['content']
        )
@patch('requests.request', autospec=True)
class ForumFormDiscussionContentGroupTestCase(ForumsEnableMixin, ContentGroupTestCase):
    """
    Tests `forum_form_discussion api` works with different content groups.
    Discussion modules are setup in ContentGroupTestCase class i.e
    alpha_module => alpha_group_discussion => alpha_cohort => alpha_user/community_ta
    beta_module => beta_group_discussion => beta_cohort => beta_user
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        """Build the fixture thread list: one general, one global, one per content group."""
        super().setUp()
        self.thread_list = [
            {"thread_id": "test_general_thread_id"},
            {"thread_id": "test_global_group_thread_id", "commentable_id": self.global_module.discussion_id},
            {"thread_id": "test_alpha_group_thread_id", "group_id": self.alpha_module.group_access[0][0],
             "commentable_id": self.alpha_module.discussion_id},
            {"thread_id": "test_beta_group_thread_id", "group_id": self.beta_module.group_access[0][0],
             "commentable_id": self.beta_module.discussion_id}
        ]

    def assert_has_access(self, response, expected_discussion_threads):
        """
        Verify that a users have access to the threads in their assigned
        cohorts and non-cohorted modules.
        """
        discussion_data = json.loads(response.content.decode('utf-8'))['discussion_data']
        assert len(discussion_data) == expected_discussion_threads

    def call_view(self, mock_request, user):
        """GET the forum_form_discussion page as ``user`` (AJAX) and return the response."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy content",
            thread_list=self.thread_list
        )
        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse("forum_form_discussion", args=[str(self.course.id)]),
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )

    def test_community_ta_user(self, mock_request):
        """
        Verify that community_ta user has access to all threads regardless
        of cohort.
        """
        response = self.call_view(
            mock_request,
            self.community_ta
        )
        self.assert_has_access(response, 4)

    def test_alpha_cohort_user(self, mock_request):
        """
        Verify that alpha_user has access to alpha_cohort and non-cohorted
        threads.
        """
        response = self.call_view(
            mock_request,
            self.alpha_user
        )
        self.assert_has_access(response, 3)

    def test_beta_cohort_user(self, mock_request):
        """
        Verify that beta_user has access to beta_cohort and non-cohorted
        threads.
        """
        response = self.call_view(
            mock_request,
            self.beta_user
        )
        self.assert_has_access(response, 3)

    def test_global_staff_user(self, mock_request):
        """
        Verify that global staff user has access to all threads regardless
        of cohort.
        """
        response = self.call_view(
            mock_request,
            self.staff_user
        )
        self.assert_has_access(response, 4)
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ForumsEnableMixin, UrlResetMixin, ContentGroupTestCase):
    """
    Content-group access tests for the single_thread view: users may only read
    threads in discussion modules whose content group they belong to, except
    for standalone-context (team) threads, which skip that check.
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Exists only so the feature flag is active while UrlResetMixin
        # re-resolves the discussion URLs during super().setUp().
        super().setUp()

    def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
        """
        Verify that a user has access to a thread within a given
        discussion_id when should_have_access is True, otherwise
        verify that the user does not have access to that thread.
        """
        def call_single_thread():
            # Re-login as the target user each time; return the raw response.
            self.client.login(username=user.username, password='test')
            return self.client.get(
                reverse('single_thread', args=[str(self.course.id), discussion_id, thread_id])
            )
        if should_have_access:
            assert call_single_thread().status_code == 200
        else:
            assert call_single_thread().status_code == 404

    def test_staff_user(self, mock_request):
        """
        Verify that the staff user can access threads in the alpha,
        beta, and global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
        for discussion_xblock in [self.alpha_module, self.beta_module, self.global_module]:
            self.assert_can_access(self.staff_user, discussion_xblock.discussion_id, thread_id, True)

    def test_alpha_user(self, mock_request):
        """
        Verify that the alpha user can access threads in the alpha and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
        for discussion_xblock in [self.alpha_module, self.global_module]:
            self.assert_can_access(self.alpha_user, discussion_xblock.discussion_id, thread_id, True)
        self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)

    def test_beta_user(self, mock_request):
        """
        Verify that the beta user can access threads in the beta and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
        for discussion_xblock in [self.beta_module, self.global_module]:
            self.assert_can_access(self.beta_user, discussion_xblock.discussion_id, thread_id, True)
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_non_cohorted_user(self, mock_request):
        """
        Verify that the non-cohorted user can access threads in just the
        global discussion module.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
        self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
        self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
        self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)

    def test_course_context_respected(self, mock_request):
        """
        Verify that course threads go through discussion_category_id_access method.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id
        )
        # Beta user does not have access to alpha_module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_standalone_context_respected(self, mock_request):
        """
        Verify that standalone threads don't go through discussion_category_id_access method.
        """
        # For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
        # to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
        thread_id = "test_thread_id"
        CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.alpha_module.discussion_id
        )
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id,
            commentable_id=self.alpha_module.discussion_id
        )
        # If a thread returns context other than "course", the access check is not done, and the beta user
        # can see the alpha discussion module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """Tests the thread ``context`` reported by inline_discussion for team topics."""

    def setUp(self):
        super().setUp()
        self.course = CourseFactory.create()
        CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
        self.discussion_topic_id = "dummy_topic"
        self.team = CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.discussion_topic_id
        )
        self.team.add_user(self.user)
        self.user_not_in_team = UserFactory.create()

    def test_context_can_be_standalone(self, mock_request):
        """Threads tied to a team topic come back with STANDALONE context."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )
        member_request = RequestFactory().get("dummy_url")
        member_request.user = self.user
        response = views.inline_discussion(
            member_request,
            str(self.course.id),
            self.discussion_topic_id,
        )
        payload = json.loads(response.content.decode('utf-8'))
        assert payload['discussion_data'][0]['context'] == ThreadContext.STANDALONE

    def test_private_team_discussion(self, mock_request):
        """A non-member gets a 403 when the team discussion is private."""
        # First set the team discussion to be private
        CourseEnrollmentFactory(user=self.user_not_in_team, course_id=self.course.id)
        outsider_request = RequestFactory().get("dummy_url")
        outsider_request.user = self.user_not_in_team
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )
        with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:
            mocked.return_value = True
            response = views.inline_discussion(
                outsider_request,
                str(self.course.id),
                self.discussion_topic_id,
            )
        assert response.status_code == 403
        assert response.content.decode('utf-8') == views.TEAM_PERMISSION_MESSAGE
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
    CohortedTestCase,
    CohortedTopicGroupIdTestMixin,
    NonCohortedTopicGroupIdTestMixin
):
    """
    Group-id handling tests for the inline_discussion view, over both
    cohorted and non-cohorted topics (assertions come from the mixins).
    """
    cs_endpoint = "/threads"

    def setUp(self):
        super().setUp()
        self.cohorted_commentable_id = 'cohorted_topic'

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """
        Call views.inline_discussion as ``user``, optionally passing
        ``group_id`` as a request parameter, and return the response.
        """
        kwargs = {'commentable_id': self.cohorted_commentable_id}
        if group_id:
            # avoid causing a server error when the LMS chokes attempting
            # to find a group name for the group_id, when we're testing with
            # an invalid one.
            try:
                CourseUserGroup.objects.get(id=group_id)
                kwargs['group_id'] = group_id
            except CourseUserGroup.DoesNotExist:
                pass
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data
        )
        request.user = user
        return views.inline_discussion(
            request,
            str(self.course.id),
            commentable_id
        )

    def test_group_info_in_ajax_response(self, mock_request):
        """The first thread in the AJAX payload carries its group info."""
        response = self.call_view(
            mock_request,
            self.cohorted_commentable_id,
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """Group-id propagation tests for the forum_form_discussion view."""
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):  # pylint: disable=arguments-differ
        """GET forum_form_discussion as ``user``, optionally passing group_id and/or AJAX headers."""
        cs_kwargs = {'group_id': group_id} if group_id else {}
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **cs_kwargs)
        query = {"group_id": group_id} if pass_group_id else {}
        extra_headers = {'HTTP_X_REQUESTED_WITH': "XMLHttpRequest"} if is_ajax else {}
        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse("forum_form_discussion", args=[str(self.course.id)]),
            data=query,
            **extra_headers
        )

    def test_group_info_in_html_response(self, mock_request):
        """The rendered HTML embeds the student's cohort group info."""
        response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        """Each thread in the AJAX payload carries its group info."""
        response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id, is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the user_profile view: group info must appear
    in the profile response, and group_id must only be forwarded to the
    comments service under the correct privilege / parameter combinations.
    """
    cs_endpoint = "/active_threads"

    def call_view_for_profiled_user(
        self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
    ):
        """
        Calls "user_profile" view method on behalf of "requesting_user" to get information about
        the user "profiled_user".
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        self.client.login(username=requesting_user.username, password='test')
        return self.client.get(
            reverse('user_profile', args=[str(self.course.id), profiled_user.id]),
            data=request_data,
            **headers
        )

    def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):  # pylint: disable=arguments-differ
        """Profile a user on their own behalf (mixin-compatible signature)."""
        return self.call_view_for_profiled_user(
            mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
        )

    def test_group_info_in_html_response(self, mock_request):
        """The rendered profile HTML embeds the cohort group info."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        """Each thread in the AJAX profile payload carries its group info."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )

    def _test_group_id_passed_to_user_profile(
        self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
    ):
        """
        Helper method for testing whether or not group_id was passed to the user_profile request.
        """
        def get_params_from_user_info_call(for_specific_course):
            """
            Returns the request parameters for the user info call with either course_id specified or not,
            depending on value of 'for_specific_course'.
            """
            # There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
            # tested. The other 2 calls are for user info; one of those calls is for general information about the user,
            # and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
            # have discussion moderator privileges, it should also contain a group_id.
            for r_call in mock_request.call_args_list:
                if not r_call[0][1].endswith(self.cs_endpoint):
                    params = r_call[1]["params"]
                    has_course_id = "course_id" in params
                    if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
                        return params
            # BUG FIX: the original concatenated a str with the bool
            # `for_specific_course`, which raised TypeError and masked the
            # intended failure message; use an f-string instead.
            pytest.fail(f"Did not find appropriate user_profile call for 'for_specific_course'={for_specific_course}")

        mock_request.reset_mock()
        self.call_view_for_profiled_user(
            mock_request,
            requesting_user,
            profiled_user,
            group_id,
            pass_group_id=pass_group_id,
            is_ajax=False
        )
        # Should never have a group_id if course_id was not included in the request.
        params_without_course_id = get_params_from_user_info_call(False)
        assert 'group_id' not in params_without_course_id
        params_with_course_id = get_params_from_user_info_call(True)
        if expect_group_id_in_request:
            assert 'group_id' in params_with_course_id
            assert group_id == params_with_course_id['group_id']
        else:
            assert 'group_id' not in params_with_course_id

    def test_group_id_passed_to_user_profile_student(self, mock_request):
        """
        Test that the group id is always included when requesting user profile information for a particular
        course if the requester does not have discussion moderation privileges.
        """
        def verify_group_id_always_present(profiled_user, pass_group_id):
            """
            Helper method to verify that group_id is always present for student in course
            (non-privileged user).
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
            )
        # In all these test cases, the requesting_user is the student (non-privileged user).
        # The profile returned on behalf of the student is for the profiled_user.
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)

    def test_group_id_user_profile_moderator(self, mock_request):
        """
        Test that the group id is only included when a privileged user requests user profile information for a
        particular course and user if the group_id is explicitly passed in.
        """
        def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is not present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )
        # In all these test cases, the requesting_user is the moderator (privileged user).
        # If the group_id is explicitly passed, it will be present in the request.
        verify_group_id_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_present(
            profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
        )
        # If the group_id is not explicitly passed, it will not be present because the requesting_user
        # has discussion moderator privileges.
        verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """Group-id handling tests for the followed_threads view."""
    cs_endpoint = "/subscribed_threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Call views.followed_threads for ``user``, optionally passing group_id."""
        cs_kwargs = {'group_id': group_id} if group_id else {}
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **cs_kwargs)
        query = {"group_id": group_id} if pass_group_id else {}
        request = RequestFactory().get(
            "dummy_url",
            data=query,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.followed_threads(request, str(self.course.id), user.id)

    def test_group_info_in_ajax_response(self, mock_request):
        """The first followed thread in the AJAX payload carries its group info."""
        response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """Tests of the inline_discussion view and the thread context it requests."""

    def setUp(self):
        super().setUp()
        teams_config = TeamsConfig({
            'topics': [{
                'id': 'topic_id',
                'name': 'A topic',
                'description': 'A topic',
            }]
        })
        self.course = CourseFactory.create(
            org="TestX",
            number="101",
            display_name="Test Course",
            teams_configuration=teams_config
        )
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.discussion1 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion1",
            display_name='Discussion1',
            discussion_category="Chapter",
            discussion_target="Discussion1"
        )

    def send_request(self, mock_request, params=None):
        """
        Creates and returns a request with params set, and configures
        mock_request to return appropriate values.
        """
        student_request = RequestFactory().get("dummy_url", params or {})
        student_request.user = self.student
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
        )
        return views.inline_discussion(
            student_request, str(self.course.id), self.discussion1.discussion_id
        )

    def test_context(self, mock_request):
        """Threads in a team-linked discussion are requested with STANDALONE context."""
        team = CourseTeamFactory(
            name='Team Name',
            topic_id='topic_id',
            course_id=self.course.id,
            discussion_topic_id=self.discussion1.discussion_id
        )
        team.add_user(self.student)
        self.send_request(mock_request)
        requested_context = mock_request.call_args[1]['params']['context']
        assert requested_context == ThreadContext.STANDALONE
@patch('requests.request', autospec=True)
class UserProfileTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Tests of the user_profile view: HTML and AJAX rendering of another
    enrolled user's discussion activity, plus 404 and method-not-allowed
    error paths.
    """
    TEST_THREAD_TEXT = 'userprofile-test-text'
    TEST_THREAD_ID = 'userprofile-test-thread-id'

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super().setUp()
        self.course = CourseFactory.create()
        self.student = UserFactory.create()
        self.profiled_user = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        CourseEnrollmentFactory.create(user=self.profiled_user, course_id=self.course.id)

    def get_response(self, mock_request, params, **headers):
        """
        GET the profiled user's profile page as the student, assert the
        comments service was asked for the active threads, and return the
        response.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        self.client.login(username=self.student.username, password='test')
        response = self.client.get(
            reverse('user_profile', kwargs={
                'course_id': str(self.course.id),
                'user_id': self.profiled_user.id,
            }),
            data=params,
            **headers
        )
        mock_request.assert_any_call(
            "get",
            StringEndsWithMatcher(f'/users/{self.profiled_user.id}/active_threads'),
            data=None,
            params=PartialDictMatcher({
                "course_id": str(self.course.id),
                "page": params.get("page", 1),
                "per_page": views.THREADS_PER_PAGE
            }),
            headers=ANY,
            timeout=ANY
        )
        return response

    def check_html(self, mock_request, **params):
        """Assert the profile renders as HTML containing the thread data."""
        response = self.get_response(mock_request, params)
        assert response.status_code == 200
        assert response['Content-Type'] == 'text/html; charset=utf-8'
        html = response.content.decode('utf-8')
        self.assertRegex(html, r'data-page="1"')
        self.assertRegex(html, r'data-num-pages="1"')
        self.assertRegex(html, r'<span class="discussion-count">1</span> discussion started')
        self.assertRegex(html, r'<span class="discussion-count">2</span> comments')
        # BUG FIX: the original four patterns were syntactically invalid
        # f-strings (unescaped nested quotes, e.g. f''id': '...''); restored
        # as valid patterns matching 'key': 'value' pairs in the rendered page.
        # NOTE(review): the template may HTML-escape quotes (&#39;) — confirm
        # against the rendered output.
        self.assertRegex(html, f"'id': '{self.TEST_THREAD_ID}'")
        self.assertRegex(html, f"'title': '{self.TEST_THREAD_TEXT}'")
        self.assertRegex(html, f"'body': '{self.TEST_THREAD_TEXT}'")
        self.assertRegex(html, f"'username': '{self.student.username}'")

    def check_ajax(self, mock_request, **params):
        """Assert the profile renders as JSON with the expected thread payload."""
        response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json; charset=utf-8'
        response_data = json.loads(response.content.decode('utf-8'))
        assert sorted(response_data.keys()) == ['annotated_content_info', 'discussion_data', 'num_pages', 'page']
        assert len(response_data['discussion_data']) == 1
        assert response_data['page'] == 1
        assert response_data['num_pages'] == 1
        assert response_data['discussion_data'][0]['id'] == self.TEST_THREAD_ID
        assert response_data['discussion_data'][0]['title'] == self.TEST_THREAD_TEXT
        assert response_data['discussion_data'][0]['body'] == self.TEST_THREAD_TEXT

    def test_html(self, mock_request):
        self.check_html(mock_request)

    def test_ajax(self, mock_request):
        self.check_ajax(mock_request)

    def test_404_non_enrolled_user(self, __):
        """
        Test that when student try to visit un-enrolled students' discussion profile,
        the system raises Http404.
        """
        unenrolled_user = UserFactory.create()
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with pytest.raises(Http404):
            views.user_profile(
                request,
                str(self.course.id),
                unenrolled_user.id
            )

    def test_404_profiled_user(self, _mock_request):
        """A nonexistent profiled user id raises Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with pytest.raises(Http404):
            views.user_profile(
                request,
                str(self.course.id),
                -999
            )

    def test_404_course(self, _mock_request):
        """A nonexistent course id raises Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with pytest.raises(Http404):
            views.user_profile(
                request,
                "non/existent/course",
                self.profiled_user.id
            )

    def test_post(self, mock_request):
        """POST is not allowed on the profile view (405)."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().post("dummy_url")
        request.user = self.student
        response = views.user_profile(
            request,
            str(self.course.id),
            self.profiled_user.id
        )
        assert response.status_code == 405
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Verify that every request the LMS makes to the comments service carries
    the expected headers: the client's Accept-Language is passed through, and
    the configured API key is sent as X-Edx-Api-Key.
    """
    CREATE_USER = False

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # A single super().setUp() runs the whole mixin chain (including
        # UrlResetMixin's URL reset). The original called it twice, which
        # redundantly re-ran all base-class setup.
        super().setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        assert self.client.login(username=username, password=password)
        self.addCleanup(translation.deactivate)

    def assert_all_calls_have_header(self, mock_request, key, value):
        """Assert every comments-service request carried header ``key: value``."""
        expected = call(
            ANY,  # method
            ANY,  # url
            data=ANY,
            params=ANY,
            headers=PartialDictMatcher({key: value}),
            timeout=ANY
        )
        for actual in mock_request.call_args_list:
            assert expected == actual

    def test_accept_language(self, mock_request):
        """The browser's Accept-Language header is forwarded to the comments service."""
        lang = "eo"
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        self.client.get(
            reverse(
                "single_thread",
                kwargs={
                    "course_id": str(self.course.id),
                    "discussion_id": "dummy_discussion_id",
                    "thread_id": thread_id,
                }
            ),
            HTTP_ACCEPT_LANGUAGE=lang,
        )
        self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)

    @override_settings(COMMENTS_SERVICE_KEY="test_api_key")
    def test_api_key(self, mock_request):
        """The configured comments-service key is sent as X-Edx-Api-Key."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")
        self.client.get(
            reverse(
                "forum_form_discussion",
                kwargs={"course_id": str(self.course.id)}
            ),
        )
        self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the inline discussion view round-trips unicode thread text."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super().setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Fetch the inline discussion and check the text survives intact."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        req = RequestFactory().get("dummy_url")
        req.user = self.student
        response = views.inline_discussion(
            req, str(self.course.id), self.course.discussion_topics['General']['id']
        )
        assert response.status_code == 200
        payload = json.loads(response.content.decode('utf-8'))
        first_thread = payload['discussion_data'][0]
        assert first_thread['title'] == text
        assert first_thread['body'] == text
class ForumFormDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the forum discussion view round-trips unicode thread text."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super().setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Fetch the forum discussion over AJAX and check the text is unmangled."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        req = RequestFactory().get("dummy_url")
        req.user = self.student
        req.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # make request.is_ajax() return True
        response = views.forum_form_discussion(req, str(self.course.id))
        assert response.status_code == 200
        payload = json.loads(response.content.decode('utf-8'))
        first_thread = payload['discussion_data'][0]
        assert first_thread['title'] == text
        assert first_thread['body'] == text
@ddt.ddt
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """Verify that attacker-supplied query parameters are never echoed into the HTML."""

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super().setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create()
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        assert self.client.login(username=username, password=password)

    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('common.djangoapps.student.models.cc.User.from_django_user')
    def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req):
        """
        Test that XSS attack is prevented
        """
        mock_user.return_value.to_dict.return_value = {}
        mock_req.return_value.status_code = 200
        base_url = reverse(
            "forum_form_discussion",
            kwargs={"course_id": str(self.course.id)})
        # The payload goes in via the sort_key query parameter and must not
        # appear verbatim in the rendered page.
        resp = self.client.get(f"{base_url}/forum_form_discussion?sort_key={malicious_code}")
        self.assertNotContains(resp, malicious_code)

    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('common.djangoapps.student.models.cc.User.from_django_user')
    @patch('common.djangoapps.student.models.cc.User.active_threads')
    def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
        """
        Test that XSS attack is prevented
        """
        mock_threads.return_value = [], 1, 1
        mock_from_django_user.return_value.to_dict.return_value = {
            'upvoted_ids': [],
            'downvoted_ids': [],
            'subscribed_thread_ids': []
        }
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        profile_url = reverse(
            'user_profile',
            kwargs={'course_id': str(self.course.id), 'user_id': str(self.student.id)})
        # The payload goes in via the page query parameter.
        resp = self.client.get(f"{profile_url}?page={malicious_code}")
        self.assertNotContains(resp, malicious_code)
class ForumDiscussionSearchUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that forum search round-trips unicode query/thread text."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super().setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Run an AJAX search for ``text`` and check it comes back unmangled."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        query = {
            "ajax": 1,
            "text": text,
        }
        req = RequestFactory().get("dummy_url", query)
        req.user = self.student
        req.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # make request.is_ajax() return True
        response = views.forum_form_discussion(req, str(self.course.id))
        assert response.status_code == 200
        payload = json.loads(response.content.decode('utf-8'))
        first_thread = payload['discussion_data'][0]
        assert first_thread['title'] == text
        assert first_thread['body'] == text
class SingleThreadUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the single-thread view round-trips unicode thread text."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super().setUpClassAndTestData():
            cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Fetch a single thread over AJAX and check its text is unmangled."""
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        req = RequestFactory().get("dummy_url")
        req.user = self.student
        req.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # make request.is_ajax() return True
        response = views.single_thread(req, str(self.course.id), "dummy_discussion_id", thread_id)
        assert response.status_code == 200
        content = json.loads(response.content.decode('utf-8'))['content']
        assert content['title'] == text
        assert content['body'] == text
class UserProfileUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the user-profile view round-trips unicode thread text."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super().setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Fetch the student's own profile over AJAX and check the thread text."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        req = RequestFactory().get("dummy_url")
        req.user = self.student
        req.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # make request.is_ajax() return True
        response = views.user_profile(req, str(self.course.id), str(self.student.id))
        assert response.status_code == 200
        payload = json.loads(response.content.decode('utf-8'))
        first_thread = payload['discussion_data'][0]
        assert first_thread['title'] == text
        assert first_thread['body'] == text
class FollowedThreadsUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verify that the followed-threads view round-trips unicode thread text."""

    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super().setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Fetch the followed-threads view over AJAX and check the thread text."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        req = RequestFactory().get("dummy_url")
        req.user = self.student
        req.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # make request.is_ajax() return True
        response = views.followed_threads(req, str(self.course.id), str(self.student.id))
        assert response.status_code == 200
        payload = json.loads(response.content.decode('utf-8'))
        first_thread = payload['discussion_data'][0]
        assert first_thread['title'] == text
        assert first_thread['body'] == text
class EnrollmentTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests for the behavior of views depending on if the student is enrolled
    in the course
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super().setUp()
        self.course = CourseFactory.create()
        # Note: the user is deliberately NOT enrolled in the course.
        self.student = UserFactory.create()

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def test_unenrolled(self, mock_request):
        """An unenrolled user is redirected instead of being shown the forum."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        req = RequestFactory().get('dummy_url')
        req.user = self.student
        with pytest.raises(CourseAccessRedirect):
            views.forum_form_discussion(req, course_id=str(self.course.id))  # pylint: disable=no-value-for-parameter, unexpected-keyword-arg
@patch('requests.request', autospec=True)
class EnterpriseConsentTestCase(EnterpriseTestConsentRequired, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Ensure that the Enterprise Data Consent redirects are in place only when consent is required.
    """
    CREATE_USER = False

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Invoke UrlResetMixin setUp
        super().setUp()
        username = "foo"
        password = "bar"
        self.discussion_id = 'dummy_discussion_id'
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': self.discussion_id}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        assert self.client.login(username=username, password=password)
        self.addCleanup(translation.deactivate)

    @patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')
    def test_consent_required(self, mock_enterprise_customer_for_request, mock_request):
        """
        Test that enterprise data sharing consent is required when enabled for the various discussion views.
        """
        # ENT-924: Temporary solution to replace sensitive SSO usernames.
        mock_enterprise_customer_for_request.return_value = None
        thread_id = 'dummy'
        course_id = str(self.course.id)
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy', thread_id=thread_id)
        urls_requiring_consent = [
            reverse('forum_form_discussion', kwargs={'course_id': course_id}),
            reverse(
                'single_thread',
                kwargs={
                    'course_id': course_id,
                    'discussion_id': self.discussion_id,
                    'thread_id': thread_id,
                },
            ),
        ]
        for url in urls_requiring_consent:
            self.verify_consent_required(self.client, url)  # pylint: disable=no-value-for-parameter
class DividedDiscussionsTestCase(CohortViewsTestCase):
    """Shared fixture helpers for divided-discussion view tests."""

    def create_divided_discussions(self):
        """
        Set up a divided discussion in the system, complete with all the fixings
        """
        inline_topics = ['Topic A']
        course_wide_topics = ["Topic B"]
        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id=topic_name_to_id(self.course, "Topic A"),
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=datetime.now()
        )
        # course-wide discussion
        config_course_cohorts(
            self.course,
            is_cohorted=True,
        )
        config_course_discussions(
            self.course,
            discussion_topics={"Topic B": {"id": "Topic B"}},
            divided_discussions=inline_topics + course_wide_topics
        )
        return inline_topics, course_wide_topics
class CourseDiscussionTopicsTestCase(DividedDiscussionsTestCase):
    """
    Tests the `divide_discussion_topics` view.
    """
    def test_non_staff(self):
        """
        Verify that we cannot access divide_discussion_topics if we're a non-staff user.
        """
        self._verify_non_staff_cannot_access(views.discussion_topics, "GET", [str(self.course.id)])
    def test_get_discussion_topics(self):
        """
        Verify that discussion_topics is working for HTTP GET.
        """
        # create inline & course-wide discussion to verify the different map.
        self.create_divided_discussions()
        response = self.get_handler(self.course, handler=views.discussion_topics)
        # Start dates are generated at fixture-creation time (datetime.now()),
        # so read them back out of the response rather than hard-coding values.
        start_date = response['inline_discussions']['subcategories']['Chapter']['start_date']
        expected_response = {
            "course_wide_discussions": {
                'children': [['Topic B', TYPE_ENTRY]],
                'entries': {
                    'Topic B': {
                        'sort_key': 'A',
                        'is_divided': True,
                        'id': topic_name_to_id(self.course, "Topic B"),
                        'start_date': response['course_wide_discussions']['entries']['Topic B']['start_date']
                    }
                }
            },
            "inline_discussions": {
                'subcategories': {
                    'Chapter': {
                        'subcategories': {},
                        'children': [['Discussion', TYPE_ENTRY]],
                        'entries': {
                            'Discussion': {
                                'sort_key': None,
                                'is_divided': True,
                                'id': topic_name_to_id(self.course, "Topic A"),
                                'start_date': start_date
                            }
                        },
                        'sort_key': 'Chapter',
                        'start_date': start_date
                    }
                },
                'children': [['Chapter', TYPE_SUBCATEGORY]]
            }
        }
        assert response == expected_response
class CourseDiscussionsHandlerTestCase(DividedDiscussionsTestCase):
    """
    Tests the course_discussion_settings_handler
    """

    def get_expected_response(self):
        """
        Returns the static response dict.
        """
        return {
            'always_divide_inline_discussions': False,
            'divided_inline_discussions': [],
            'divided_course_wide_discussions': [],
            'id': 1,
            'division_scheme': 'cohort',
            'available_division_schemes': ['cohort']
        }

    def test_non_staff(self):
        """
        Verify that we cannot access course_discussions_settings_handler if we're a non-staff user.
        """
        # Consistency fix: refer to the handler through the `views` module,
        # exactly as every other test in this class does.
        self._verify_non_staff_cannot_access(
            views.course_discussions_settings_handler, "GET", [str(self.course.id)]
        )
        self._verify_non_staff_cannot_access(
            views.course_discussions_settings_handler, "PATCH", [str(self.course.id)]
        )

    def test_update_always_divide_inline_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for always_divide_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        assert response == expected_response
        expected_response['always_divide_inline_discussions'] = True
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )
        assert response == expected_response

    def test_update_course_wide_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for divided_course_wide_discussions via HTTP PATCH.
        """
        # course-wide discussion
        discussion_topics = {
            "Topic B": {"id": "Topic B"},
        }
        config_course_cohorts(self.course, is_cohorted=True)
        config_course_discussions(self.course, discussion_topics=discussion_topics)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        assert response == expected_response
        expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, "Topic B")]
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )
        assert response == expected_response

    def test_update_inline_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for divided_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        assert response == expected_response
        # Clear cached request data so the new discussion module is picked up.
        RequestCache.clear_all_namespaces()
        now = datetime.now()
        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="Topic_A",
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=now
        )
        expected_response['divided_inline_discussions'] = ["Topic_A"]
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )
        assert response == expected_response

    def test_get_settings(self):
        """
        Verify that course_discussions_settings_handler is working for HTTP GET.
        """
        divided_inline_discussions, divided_course_wide_discussions = self.create_divided_discussions()
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        expected_response['divided_inline_discussions'] = [topic_name_to_id(self.course, name)
                                                           for name in divided_inline_discussions]
        expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, name)
                                                                for name in divided_course_wide_discussions]
        assert response == expected_response

    def test_update_settings_with_invalid_field_data_type(self):
        """
        Verify that course_discussions_settings_handler return HTTP 400 if field data type is incorrect.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.patch_handler(
            self.course,
            data={'always_divide_inline_discussions': ''},
            expected_response_code=400,
            handler=views.course_discussions_settings_handler
        )
        assert 'Incorrect field type for `{}`. Type must be `{}`'.format(
            'always_divide_inline_discussions',
            bool.__name__
        ) == response.get('error')

    def test_available_schemes(self):
        """The available division schemes track cohort config and enrollment modes."""
        # Cohorts disabled, single enrollment mode.
        config_course_cohorts(self.course, is_cohorted=False)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        expected_response['available_division_schemes'] = []
        assert response == expected_response
        # Add 2 enrollment modes
        CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
        CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response['available_division_schemes'] = [CourseDiscussionSettings.ENROLLMENT_TRACK]
        assert response == expected_response
        # Enable cohorts
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response['available_division_schemes'] = [
            CourseDiscussionSettings.COHORT, CourseDiscussionSettings.ENROLLMENT_TRACK
        ]
        assert response == expected_response
class DefaultTopicIdGetterTestCase(ModuleStoreTestCase):
    """
    Tests the `_get_discussion_default_topic_id` helper.
    """

    def test_no_default_topic(self):
        """Without a topic flagged 'default', the helper returns None."""
        course = CourseFactory.create(discussion_topics={
            'dummy discussion': {
                'id': 'dummy_discussion_id',
            },
        })
        assert _get_discussion_default_topic_id(course) is None

    def test_default_topic_id(self):
        """The topic carrying 'default': True wins."""
        course = CourseFactory.create(discussion_topics={
            'dummy discussion': {
                'id': 'dummy_discussion_id',
            },
            'another discussion': {
                'id': 'another_discussion_id',
                'default': True,
            },
        })
        assert _get_discussion_default_topic_id(course) == 'another_discussion_id'
class ThreadViewedEventTestCase(EventTestMixin, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Forum thread views are expected to launch analytics events. Test these here.
    """
    CATEGORY_ID = 'i4x-edx-discussion-id'
    CATEGORY_NAME = 'Discussion 1'
    PARENT_CATEGORY_NAME = 'Chapter 1'
    DUMMY_THREAD_ID = 'dummythreadids'
    DUMMY_TITLE = 'Dummy title'
    DUMMY_URL = 'https://example.com/dummy/url/'

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):  # pylint: disable=arguments-differ
        super().setUp('eventtracking.tracker')
        self.course = CourseFactory.create(
            teams_configuration=TeamsConfig({
                'topics': [{
                    'id': 'arbitrary-topic-id',
                    'name': 'arbitrary-topic-name',
                    'description': 'arbitrary-topic-desc'
                }]
            })
        )
        seed_permissions_roles(self.course.id)
        PASSWORD = 'test'
        self.student = UserFactory.create(password=PASSWORD)
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.staff = UserFactory.create(is_staff=True)
        UserBasedRole(user=self.staff, role=CourseStaffRole.ROLE).add_course(self.course.id)
        self.category = ItemFactory.create(
            parent_location=self.course.location,
            category='discussion',
            discussion_id=self.CATEGORY_ID,
            discussion_category=self.PARENT_CATEGORY_NAME,
            discussion_target=self.CATEGORY_NAME,
        )
        self.team = CourseTeamFactory.create(
            name='Team 1',
            course_id=self.course.id,
            topic_id='arbitrary-topic-id',
            discussion_topic_id=self.category.discussion_id,
        )
        CourseTeamMembershipFactory.create(team=self.team, user=self.student)
        self.client.login(username=self.student.username, password=PASSWORD)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.perform_request')
    def test_thread_viewed_event(self, mock_perform_request):
        """
        Viewing a thread emits exactly one edx.forum.thread.viewed event whose
        payload contains all the expected key/value pairs.
        """
        mock_perform_request.side_effect = make_mock_perform_request_impl(
            course=self.course,
            text=self.DUMMY_TITLE,
            thread_id=self.DUMMY_THREAD_ID,
            commentable_id=self.category.discussion_id,
        )
        url = '/courses/{}/discussion/forum/{}/threads/{}'.format(
            str(self.course.id),
            self.category.discussion_id,
            self.DUMMY_THREAD_ID
        )
        self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        expected_event = {
            'id': self.DUMMY_THREAD_ID,
            'title': self.DUMMY_TITLE,
            'commentable_id': self.category.discussion_id,
            'category_id': self.category.discussion_id,
            'category_name': self.category.discussion_target,
            'user_forums_roles': [FORUM_ROLE_STUDENT],
            'user_course_roles': [],
            'target_username': self.student.username,
            'team_id': self.team.id,
            'url': self.DUMMY_URL,
        }
        self.assert_event_emission_count('edx.forum.thread.viewed', 1)
        _, event = self.get_latest_call_args()
        event_items = list(event.items())
        # Bug fix: the original `assert ((kv in event_items) for kv in ...)`
        # asserted a generator object, which is always truthy, so the check
        # never actually ran. Assert each expected pair individually.
        for kv_pair in expected_event.items():
            assert kv_pair in event_items, kv_pair
| edx/edx-platform | lms/djangoapps/discussion/tests/test_views.py | Python | agpl-3.0 | 91,300 | [
"VisIt"
] | d6506f971cd52545c25b670970f1afe0ba57f9a41207b0346f0d4f15ca1e6ea0 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's name-resolution helpers, used by the generated
# respond() method below to look up template placeholders at render time.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler when this module was generated
# from mediaplayeradd.tmpl; purely informational.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.058389
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/web/mediaplayeradd.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run against a Cheetah runtime older than the compiler that
# produced this module; the generated API would not match.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.' % (
            __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class mediaplayeradd(Template):
    """Cheetah-generated template rendering the <e2simplexmlresult> XML for
    the OpenWebif media-player "add" action. Regenerate from
    mediaplayeradd.tmpl rather than editing the render logic by hand.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(mediaplayeradd, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            # Forward only the keyword arguments Cheetah's init understands.
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k, v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction  # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        _orig_filter_85247273 = _filter
        filterName = u'WebSafe'
        # Bug fix: dict.has_key() was removed in Python 3; use the `in`
        # operator (and the filterName variable instead of a repeated literal).
        if filterName in self._CHEETAH__filters:
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
        _v = VFFSL(SL, "result", True)  # u'$result' on line 4, col 11
        if _v is not None: write(_filter(_v, rawExpr=u'$result'))  # from line 4, col 11.
        write(u'''</e2state>
\t<e2statetext>''')
        _v = VFFSL(SL, "message", True)  # u'$message' on line 5, col 15
        if _v is not None: write(_filter(_v, rawExpr=u'$message'))  # from line 5, col 15.
        write(u'''</e2statetext>
</e2simplexmlresult>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_85247273

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_mediaplayeradd = 'respond'
## END CLASS DEFINITION

# Attach Cheetah's runtime plumbing (compile/respond support methods) to the
# generated class exactly once per process.
if not hasattr(mediaplayeradd, '_initCheetahAttributes'):
    templateAPIClass = getattr(mediaplayeradd, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayeradd)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=mediaplayeradd()).run()
| MOA-2011/e2openplugin-OpenWebif | plugin/controllers/views/web/mediaplayeradd.py | Python | gpl-2.0 | 5,225 | [
"VisIt"
] | 90680c198fdf2710b78d0be9683cd77ce61b74be73ad7c9d81e5657f2441a7f6 |
#!/usr/bin/env python
"""Command-line tool for sorting/organizing files in a Google Drive folder."""
__author__ = 'Xiaocheng Tang and Ted Ralphs'
__maintainer__ = 'Ted Ralphs'
__email__ = 'ted@lehigh.edu'
# Bug fix: this was misspelled '__version_' (missing the trailing underscore),
# so the conventional '__version__' module attribute was never defined.
__version__ = '1.0.0'
__url__ = 'https://github.com/tkralphs/GoogleDriveScripts'
# Last modified 2/17/2016 Ted Ralphs
# Visit this URL to download client secret file
# https://console.developers.google.com/start/api?id=drive
import httplib2
import pprint
import sys, os
from os.path import expanduser, join
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
import argparse
class DriveSort:
def __init__(self):
    """Parse command-line options, then obtain Drive API credentials.

    Parsed options are stored on ``self.flags``; ``self.authorize()`` then
    builds ``self.drive_service``.
    """
    HOME = expanduser("~")
    # Inherit oauth2client's standard OAuth flags (e.g. --noauth_local_webserver)
    # by parenting on its argparser.
    parser = argparse.ArgumentParser(parents=[argparser])
    parser.add_argument('--folder-name', dest='folder_name',
                        help='Name of folder on Google Drive',
                        required=True)
    parser.add_argument('--credentials-file', dest='credentials_file',
                        help='Name of file to get/store credentials',
                        default=join(HOME, '.gdrive_credentials'))
    parser.add_argument('--dry-run', dest='dry_run', action='store_true',
                        help='Do dry run')
    parser.add_argument('--user_agent', dest='user_agent',
                        help='Name of app under which to run the script')
    parser.add_argument('--client_secret', dest='client_secret',
                        help='File in which client secret is stored',
                        default=join(HOME, '.client_secret.json'))
    parser.add_argument('--email-domain', dest='email_domain',
                        help='Domain for e-mail addresses',
                        default=None)
    parser.add_argument('--create-subfolders', dest='create_subfolders',
                        action='store_true',
                        help='Create subfolders for each file owner')
    parser.add_argument('--move-files', dest='move_files',
                        action='store_true',
                        help='Move files to subfolders')
    parser.add_argument('--change-permissions', dest='change_permissions',
                        action='store_true',
                        help='Change permissions on subfolders')
    parser.add_argument('--list', dest='list_contents', action='store_true',
                        help='List all files in folder')
    self.flags = parser.parse_args()
    # May open a browser window for the OAuth consent flow on first run.
    self.authorize()
def authorize(self):
# Check https://developers.google.com/drive/scopes for all available
# scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
# Run through the OAuth flow and retrieve credentials
# Create a Storage object. This object holds the credentials that your
# application needs to authorize access to the user's data. The name of
# the credentials file is provided. If the file does not exist, it is
# created. This object can only hold credentials for a single user, so
# as-written, this script can only handle a single user.
storage = Storage(self.flags.credentials_file)
# The get() function returns the credentials for the Storage object.
# If no credentials were found, None is returned.
credentials = storage.get()
# If no credentials are found or the credentials are invalid due to
# expiration, new credentials need to be obtained from the authorization
# server. The oauth2client.tools.run() function attempts to open an
# authorization server page in your default web browser. The server
# asks the user to grant your application access to the user's data.
# If the user grants access, the run() function returns new credentials.
# The new credentials are also stored in the supplied Storage object,
# which updates the credentials.dat file.
if credentials is None or credentials.invalid:
flow = flow_from_clientsecrets(client_secret, OAUTH_SCOPE)
flow.user_agent = self.flags.user_agent
credentials = run_flow(flow, storage, self.flags)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
self.drive_service = build('drive', 'v3', http=http)
#http://stackoverflow.com/questions/13558653/
def createRemoteFolder(self, folderName, parentID = None):
# Create a folder on Drive, returns the newly created folders ID
body = {
'name': folderName,
'mimeType': "application/vnd.google-apps.folder"
}
if parentID:
body['parents'] = [parentID]
root_folder = self.drive_service.files().create(body = body).execute()
return root_folder['id']
def getFilesInFolder(self, folderName = None):
if folderName == None:
folderName = self.flags.folder_name
q = r"mimeType = 'application/vnd.google-apps.folder'"
pageToken = None
while True:
fields = "nextPageToken, "
fields += "files(id, name)"
results = self.drive_service.files().list(q=q, pageSize=1000,
pageToken=pageToken,
fields=fields).execute()
folders = results['files']
try:
folder_id = filter(lambda x: x['name'] == folderName,
folders)[0]['id']
except IndexError:
pageToken = results.get('nextPageToken')
if not results.get('nextPageToken'):
print "ERROR: Specified folder does not exist."
sys.exit()
else:
break
# search for all files under that folder
q = r"'{}' in parents".format(folder_id)
fields = ("files(id, name, mimeType, permissions, sharingUser, " +
"owners, parents)")
return (folder_id, self.drive_service.files().list(q=q, pageSize=1000,
fields=fields).execute()['files'])
def createSubFolders(self, folderName = None):
if folderName == None:
folderName = self.flags.folder_name
folder_id, files = self.getFilesInFolder(folderName)
print folder_id
user_ids = []
for f in files:
if f['mimeType'] != 'application/vnd.google-apps.folder':
# Google Drive seems to not change ownership sometimes...
try:
user_id = f['sharingUser']['emailAddress'].split('@')[0]
except KeyError:
user_id = f['owners'][0]['emailAddress'].split('@')[0]
if user_id not in user_ids:
user_ids.append(user_id)
self.folderIds = {}
for user_id in user_ids:
print "Creating folder", user_id
# Check to see if it's a dry run or folder is already there
if (self.flags.dry_run == False or
filter(lambda x: x['name'] == user_id, files) != []):
self.folderIds['user_id'] = self.createRemoteFolder(user_id,
folder_id)
def moveFiles(self, folderName = None):
if folderName == None:
folderName = self.flags.folder_name
folder_id, files = self.getFilesInFolder(folderName)
for f in files:
if f['mimeType'] != 'application/vnd.google-apps.folder':
# Google Drive seems to not change ownership sometimes...
try:
user_id = f['sharingUser']['emailAddress'].split('@')[0]
except KeyError:
user_id = f['owners'][0]['emailAddress'].split('@')[0]
print "Moving", f['name'], 'to', user_id
parents = f['parents']
if not self.flags.dry_run:
try:
new_parent = filter(lambda x: x['name'] ==
user_id, files)[0]['id']
except KeyError:
print "Folder not found. Maybe",
print "run creatFolders() again?"
self.drive_service.files().update(fileId=f['id'],
removeParents=parents[0],
addParents=new_parent
).execute()
def changePermissions(self, domain = None, folderName = None):
if folderName == None:
folderName = self.flags.folder_name
if domain == None:
if self.flags.email_domain:
domain = self.flags.email_domain
else:
print "ERROR: Must specify e-mail domain to change permissions."
sys.exit()
folder_id, files = self.getFilesInFolder(folderName)
for f in files:
if f['mimeType'] == 'application/vnd.google-apps.folder':
print 'Sharing', f['name'], 'with', '%s@%s'% (f['name'],
domain)
emailAddress = f['name']+"@"+domain
permissionId = None
for perms in f['permissions']:
if perms['emailAddress'] == emailAddress:
permissionId = perms['id']
if not self.flags.dry_run:
if permissionId:
new_perm = {
'role' : 'commenter'
}
try:
self.drive_service.permissions().update(
fileId=f['id'],
permissionId=permissionId,
body = new_perm).execute()
except:
print "Could not change permissions on", f['name']
else:
new_perm = {
'emailAddress' : emailAddress,
'type' : 'user',
'role' : 'commenter'
}
self.drive_service.permissions().create(fileId=f['id'],
permissionId=permissionId,
body = new_perm).execute()
if __name__ == '__main__':
# Parse arguments and authorize connection
drive = DriveSort()
# Print names of all files in folder
if drive.flags.list_contents:
print "Folder contents:"
for f in drive.getFilesInFolder()[1]:
print f['name']
#Create subfolder with same name as e-mail user ID of last modifying user
if drive.flags.create_subfolders:
drive.createSubFolders()
# Move files into folders
if drive.flags.move_files:
drive.moveFiles()
# Grant permission to original owner
if drive.flags.change_permissions:
drive.changePermissions()
| tkralphs/GoogleDriveScripts | DriveSort.py | Python | mit | 11,500 | [
"VisIt"
] | f16fb7b8cf1f7f7bc6e37f5a4e81bfa58fa7aafd0670463c8b8ba53afa3b7950 |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
from collections import namedtuple
import pytest
import torch
import pyro.distributions as dist
from pyro.contrib.gp.kernels import RBF, Cosine, Matern32, WhiteNoise
from pyro.contrib.gp.likelihoods import Gaussian
from pyro.contrib.gp.models import (
GPLVM,
GPRegression,
SparseGPRegression,
VariationalGP,
VariationalSparseGP,
)
from pyro.contrib.gp.util import train
from pyro.infer.mcmc.api import MCMC
from pyro.infer.mcmc.hmc import HMC
from pyro.nn.module import PyroSample
from tests.common import assert_equal
logger = logging.getLogger(__name__)
# One test case bundles a GP model class with its training data, kernel and
# likelihood (regression models take a noise tensor instead of a likelihood).
T = namedtuple("TestGPModel", ["model_class", "X", "y", "kernel", "likelihood"])
# Two 3-dimensional training inputs.
X = torch.tensor([[1.0, 5.0, 3.0], [4.0, 3.0, 7.0]])
# 1D targets (single output) and 2D targets (4 latent functions).
y1D = torch.tensor([2.0, 1.0])
y2D = torch.tensor([[1.0, 2.0], [3.0, 3.0], [1.0, 4.0], [-1.0, 1.0]])
# Near-zero observation noise for the regression models.
noise = torch.tensor(1e-7)
def _kernel():
    # Fresh RBF kernel per test case so parameters are not shared.
    return RBF(input_dim=3, variance=torch.tensor(3.0), lengthscale=torch.tensor(2.0))
def _likelihood():
    # Fresh Gaussian likelihood with near-zero variance per test case.
    return Gaussian(torch.tensor(1e-7))
def _TEST_CASES():
    # Built lazily (not at module import) so each parametrized test gets
    # independent kernel/likelihood instances.
    TEST_CASES = [
        T(GPRegression, X, y1D, _kernel(), noise),
        T(GPRegression, X, y2D, _kernel(), noise),
        T(SparseGPRegression, X, y1D, _kernel(), noise),
        T(SparseGPRegression, X, y2D, _kernel(), noise),
        T(VariationalGP, X, y1D, _kernel(), _likelihood()),
        T(VariationalGP, X, y2D, _kernel(), _likelihood()),
        T(VariationalSparseGP, X, y1D, _kernel(), _likelihood()),
        T(VariationalSparseGP, X, y2D, _kernel(), _likelihood()),
    ]
    return TEST_CASES
# Human-readable pytest IDs, e.g. "GPRegression_y1D".
TEST_IDS = [t[0].__name__ + "_y{}D".format(str(t[2].dim())) for t in _TEST_CASES()]
# Smoke-test .model() for every model class with y=None: the prior mean is
# zero and the marginal variance matches the kernel diagonal (variational
# models report unit variance instead).
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_model(model_class, X, y, kernel, likelihood):
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        # Sparse models additionally take inducing inputs Xu (reuse X here).
        gp = model_class(X, None, kernel, X, likelihood)
    else:
        gp = model_class(X, None, kernel, likelihood)
    loc, var = gp.model()
    if model_class is VariationalGP or model_class is VariationalSparseGP:
        assert_equal(loc.norm().item(), 0)
        assert_equal(var, torch.ones(var.shape[-1]).expand(var.shape))
    else:
        assert_equal(loc.norm().item(), 0)
        assert_equal(var, kernel(X).diag())
# Check prediction shapes and several analytic properties of the posterior:
# full_cov/diagonal consistency, trivial prediction at the training inputs,
# identical predictions for repeated query points, and white-noise kernels.
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_forward(model_class, X, y, kernel, likelihood):
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)
    # test shape
    Xnew = torch.tensor([[2.0, 3.0, 1.0]])
    loc0, cov0 = gp(Xnew, full_cov=True)
    loc1, var1 = gp(Xnew, full_cov=False)
    assert loc0.dim() == y.dim()
    assert loc0.shape[-1] == Xnew.shape[0]
    # test latent shape
    assert loc0.shape[:-1] == y.shape[:-1]
    assert cov0.shape[:-2] == y.shape[:-1]
    assert cov0.shape[-1] == cov0.shape[-2]
    assert cov0.shape[-1] == Xnew.shape[0]
    assert_equal(loc0, loc1)
    # The diagonal of the full covariance must equal the full_cov=False
    # variance for every latent function.
    n = Xnew.shape[0]
    cov0_diag = torch.stack([mat.diag() for mat in cov0.view(-1, n, n)]).reshape(
        var1.shape
    )
    assert_equal(cov0_diag, var1)
    # test trivial forward: Xnew = X
    loc, cov = gp(X, full_cov=True)
    if model_class is VariationalGP or model_class is VariationalSparseGP:
        assert_equal(loc.norm().item(), 0)
        assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape))
    else:
        assert_equal(loc, y)
        assert_equal(cov.norm().item(), 0)
    # test same input forward: Xnew[0,:] = Xnew[1,:] = ...
    Xnew = torch.tensor([[2.0, 3.0, 1.0]]).expand(10, 3)
    loc, cov = gp(Xnew, full_cov=True)
    loc_diff = loc - loc[..., :1].expand(y.shape[:-1] + (10,))
    assert_equal(loc_diff.norm().item(), 0)
    cov_diff = cov - cov[..., :1, :1].expand(y.shape[:-1] + (10, 10))
    assert_equal(cov_diff.norm().item(), 0)
    # test noise kernel forward: kernel = WhiteNoise
    gp.kernel = WhiteNoise(input_dim=3, variance=torch.tensor(10.0))
    loc, cov = gp(X, full_cov=True)
    assert_equal(loc.norm().item(), 0)
    assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape) * 10)
# Same forward checks but with an explicitly empty latent_shape, which only
# applies to the variational model classes.
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_forward_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
    # regression models don't use latent_shape, no need for test
    if model_class is GPRegression or model_class is SparseGPRegression:
        return
    elif model_class is VariationalGP:
        gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
    else:  # model_class is VariationalSparseGP
        gp = model_class(X, y, kernel, X, likelihood, latent_shape=torch.Size([]))
    # test shape
    Xnew = torch.tensor([[2.0, 3.0, 1.0]])
    loc0, cov0 = gp(Xnew, full_cov=True)
    loc1, var1 = gp(Xnew, full_cov=False)
    assert loc0.shape[-1] == Xnew.shape[0]
    assert cov0.shape[-1] == cov0.shape[-2]
    assert cov0.shape[-1] == Xnew.shape[0]
    # test latent shape
    assert loc0.shape[:-1] == torch.Size([])
    assert cov0.shape[:-2] == torch.Size([])
    assert_equal(loc0, loc1)
    assert_equal(cov0.diag(), var1)
# Fit kernel hyperparameters on data drawn from a known kernel and check
# the learned covariance matches the generating one.
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
@pytest.mark.init(rng_seed=0)
def test_inference(model_class, X, y, kernel, likelihood):
    # skip variational GP models because variance/lengthscale highly
    # depend on variational parameters
    if model_class is VariationalGP or model_class is VariationalSparseGP:
        return
    elif model_class is GPRegression:
        gp = model_class(X, y, RBF(input_dim=3), likelihood)
    else:  # model_class is SparseGPRegression
        gp = model_class(X, y, RBF(input_dim=3), X, likelihood)
        # fix inducing points because variance/lengthscale highly depend on it
        gp.Xu.requires_grad_(False)
    generator = dist.MultivariateNormal(torch.zeros(X.shape[0]), kernel(X))
    target_y = generator(sample_shape=torch.Size([1000])).detach()
    gp.set_data(X, target_y)
    train(gp)
    y_cov = gp.kernel(X)
    target_y_cov = kernel(X)
    assert_equal(y_cov, target_y_cov, prec=0.15)
# End-to-end regression on noisy sin data with a sparse GP: the predictive
# mean should track the underlying function.
@pytest.mark.init(rng_seed=0)
def test_inference_sgpr():
    N = 1000
    X = dist.Uniform(torch.zeros(N), torch.ones(N) * 5).sample()
    y = (
        0.5 * torch.sin(3 * X)
        + dist.Normal(torch.zeros(N), torch.ones(N) * 0.5).sample()
    )
    kernel = RBF(input_dim=1)
    Xu = torch.arange(0.0, 5.5, 0.5)
    sgpr = SparseGPRegression(X, y, kernel, Xu)
    train(sgpr)
    Xnew = torch.arange(0.0, 5.05, 0.05)
    loc, var = sgpr(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3 * Xnew)
    assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
# Same end-to-end check for the variational sparse GP.
@pytest.mark.init(rng_seed=0)
def test_inference_vsgp():
    N = 1000
    X = dist.Uniform(torch.zeros(N), torch.ones(N) * 5).sample()
    y = (
        0.5 * torch.sin(3 * X)
        + dist.Normal(torch.zeros(N), torch.ones(N) * 0.5).sample()
    )
    kernel = RBF(input_dim=1)
    Xu = torch.arange(0.0, 5.5, 0.5)
    vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian())
    optimizer = torch.optim.Adam(vsgp.parameters(), lr=0.03)
    train(vsgp, optimizer)
    Xnew = torch.arange(0.0, 5.05, 0.05)
    loc, var = vsgp(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3 * Xnew)
    assert_equal((loc - target).abs().mean().item(), 0, prec=0.06)
# Same end-to-end check with the whitened variational parameterization.
@pytest.mark.init(rng_seed=0)
def test_inference_whiten_vsgp():
    N = 1000
    X = dist.Uniform(torch.zeros(N), torch.ones(N) * 5).sample()
    y = (
        0.5 * torch.sin(3 * X)
        + dist.Normal(torch.zeros(N), torch.ones(N) * 0.5).sample()
    )
    kernel = RBF(input_dim=1)
    Xu = torch.arange(0.0, 5.5, 0.5)
    vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian(), whiten=True)
    train(vsgp)
    Xnew = torch.arange(0.0, 5.05, 0.05)
    loc, var = vsgp(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3 * Xnew)
    assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
# One training step with an empty latent_shape (variational models only):
# just exercises the code path without checking convergence.
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_inference_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
    # regression models don't use latent_shape (default=torch.Size([]))
    if model_class is GPRegression or model_class is SparseGPRegression:
        return
    elif model_class is VariationalGP:
        gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
    else:  # model_class is SparseVariationalGP
        gp = model_class(
            X, y, kernel, X.clone(), likelihood, latent_shape=torch.Size([])
        )
    train(gp, num_steps=1)
# One training step with the whitened parameterization (variational only).
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_inference_with_whiten(model_class, X, y, kernel, likelihood):
    # regression models don't use whiten
    if model_class is GPRegression or model_class is SparseGPRegression:
        return
    elif model_class is VariationalGP:
        gp = model_class(X, y, kernel, likelihood, whiten=True)
    else:  # model_class is SparseVariationalGP
        gp = model_class(X, y, kernel, X.clone(), likelihood, whiten=True)
    train(gp, num_steps=1)
# Run HMC over kernel hyperparameters given uniform priors; only logs the
# posterior means (smoke test for MCMC integration with gp.model).
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_hmc(model_class, X, y, kernel, likelihood):
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X.clone(), likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)
    kernel.variance = PyroSample(dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
    kernel.lengthscale = PyroSample(dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))
    hmc_kernel = HMC(gp.model, step_size=1)
    mcmc = MCMC(hmc_kernel, num_samples=10)
    mcmc.run()
    for name, param in mcmc.get_samples().items():
        param_mean = torch.mean(param, 0)
        logger.info("Posterior mean - {}".format(name))
        logger.info(param_mean)
# Stack two GPs (the first layer's output feeds the second) and run one
# training step to exercise composed model/guide pairs.
def test_inference_deepGP():
    gp1 = GPRegression(
        X,
        None,
        RBF(input_dim=3, variance=torch.tensor(3.0), lengthscale=torch.tensor(2.0)),
    )
    Z, _ = gp1.model()
    gp2 = VariationalSparseGP(
        Z, y2D, Matern32(input_dim=3), Z.clone(), Gaussian(torch.tensor(1e-6))
    )
    class DeepGP(torch.nn.Module):
        def __init__(self, gp1, gp2):
            super().__init__()
            self.gp1 = gp1
            self.gp2 = gp2
        def model(self):
            # First layer samples latent inputs for the second layer.
            Z, _ = self.gp1.model()
            self.gp2.set_data(Z, y2D)
            self.gp2.model()
        def guide(self):
            self.gp1.guide()
            self.gp2.guide()
    deepgp = DeepGP(gp1, gp2)
    train(deepgp, num_steps=1)
# Wrap every model class in a GPLVM (latent X) and exercise one training
# step plus a forward pass.
@pytest.mark.parametrize(
    "model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS
)
def test_gplvm(model_class, X, y, kernel, likelihood):
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X.clone(), likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)
    gplvm = GPLVM(gp)
    # test inference
    train(gplvm, num_steps=1)
    # test forward
    gplvm(Xnew=X)
def _pre_test_mean_function():
    """Build the shared fixture for the mean-function tests.

    Returns:
        (X, y, Xnew, ynew, kernel, trend): training inputs/targets, held-out
        inputs/targets, a Cosine kernel, and a learnable linear Trend module
        to be used as the GP mean function.
    """
    def f(x):
        # Linear trend (2x + 3) plus a sinusoidal residual.
        return 2 * x + 3 + 5 * torch.sin(7 * x)

    # IDIOM FIX: torch.Tensor().dtype allocated an empty tensor just to read
    # the default dtype; torch.get_default_dtype() returns the same value
    # directly.
    X = torch.arange(100, dtype=torch.get_default_dtype())
    y = f(X)
    Xnew = torch.arange(100, 150, dtype=torch.get_default_dtype())
    ynew = f(Xnew)
    kernel = Cosine(input_dim=1)

    class Trend(torch.nn.Module):
        """Learnable linear mean function a * x + b."""

        def __init__(self):
            super().__init__()
            self.a = torch.nn.Parameter(torch.tensor(0.0))
            self.b = torch.nn.Parameter(torch.tensor(1.0))

        def forward(self, x):
            return self.a * x + self.b

    trend = Trend()
    return X, y, Xnew, ynew, kernel, trend
def _mape(y_true, y_pred):
    """Mean absolute percentage error between two tensors."""
    relative_error = (y_pred - y_true) / y_true
    return relative_error.abs().mean()
def _post_test_mean_function(gpmodule, Xnew, y_true):
    # After training, the learnable trend should recover the generating
    # coefficients (a=2, b=3) and predict held-out targets accurately.
    assert_equal(gpmodule.mean_function.a.item(), 2, prec=0.03)
    assert_equal(gpmodule.mean_function.b.item(), 3, prec=0.03)
    y_pred, _ = gpmodule(Xnew)
    assert_equal(_mape(y_true, y_pred).item(), 0, prec=0.02)
# Mean-function recovery for exact GP regression.
def test_mean_function_GPR():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    gpmodule = GPRegression(X, y, kernel, mean_function=mean_fn)
    train(gpmodule)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Mean-function recovery for sparse GP regression (default VFE approx).
def test_mean_function_SGPR():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    Xu = X[::20].clone()
    gpmodule = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn)
    train(gpmodule)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Same, with the DTC approximation.
def test_mean_function_SGPR_DTC():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    Xu = X[::20].clone()
    gpmodule = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn, approx="DTC")
    train(gpmodule)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Same, with the FITC approximation.
def test_mean_function_SGPR_FITC():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    Xu = X[::20].clone()
    gpmodule = SparseGPRegression(
        X, y, kernel, Xu, mean_function=mean_fn, approx="FITC"
    )
    train(gpmodule)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Mean-function recovery for the (non-sparse) variational GP.
def test_mean_function_VGP():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    likelihood = Gaussian()
    gpmodule = VariationalGP(X, y, kernel, likelihood, mean_function=mean_fn)
    train(gpmodule)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Same, with the whitened parameterization (needs a larger learning rate).
def test_mean_function_VGP_whiten():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    likelihood = Gaussian()
    gpmodule = VariationalGP(
        X, y, kernel, likelihood, mean_function=mean_fn, whiten=True
    )
    optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.1)
    train(gpmodule, optimizer)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Mean-function recovery for the variational sparse GP.
def test_mean_function_VSGP():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    Xu = X[::20].clone()
    likelihood = Gaussian()
    gpmodule = VariationalSparseGP(X, y, kernel, Xu, likelihood, mean_function=mean_fn)
    optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.02)
    train(gpmodule, optimizer)
    _post_test_mean_function(gpmodule, Xnew, ynew)
# Same, with the whitened parameterization.
def test_mean_function_VSGP_whiten():
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    Xu = X[::20].clone()
    likelihood = Gaussian()
    gpmodule = VariationalSparseGP(
        X, y, kernel, Xu, likelihood, mean_function=mean_fn, whiten=True
    )
    optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.1)
    train(gpmodule, optimizer)
    _post_test_mean_function(gpmodule, Xnew, ynew)
| uber/pyro | tests/contrib/gp/test_models.py | Python | apache-2.0 | 15,042 | [
"Gaussian"
] | ce7c7ccaed8c095c26da5a30966f0075812901b67c7b6fde61a5f8909bbe4c57 |
# coding: utf-8
from __future__ import unicode_literals
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
import warnings
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
class StandardTransmuter(object):
    """
    An example of a Transmuter object, which performs a sequence of
    transformations on many structures to generate TransformedStructures.

    .. attribute: transformed_structures

        List of all transformed structures.
    """

    def __init__(self, transformed_structures, transformations=None,
                 extend_collection=0, ncores=None):
        """
        Initializes a transmuter from an initial list of
        :class:`pymatgen.alchemy.materials.TransformedStructure`.

        Args:
            transformed_structures ([TransformedStructure]): Input transformed
                structures
            transformations ([Transformations]): New transformations to be
                applied to all structures.
            extend_collection (int): Whether to use more than one output
                structure from one-to-many transformations. extend_collection
                can be an int, which determines the maximum branching for each
                transformation.
            ncores (int): Number of cores to use for applying transformations.
                Uses multiprocessing.Pool. Default is None, which implies
                serial.
        """
        self.transformed_structures = transformed_structures
        self.ncores = ncores
        if transformations is not None:
            for trans in transformations:
                self.append_transformation(trans,
                                           extend_collection=extend_collection)

    def get_transformed_structures(self):
        """
        Returns all TransformedStructures.

        .. deprecated:: v2.1.0

            Use transformed_structures attribute instead. Will be removed in
            next version.
        """
        warnings.warn("Use transformed_structures attribute instead.",
                      DeprecationWarning)
        return self.transformed_structures

    def __getitem__(self, index):
        return self.transformed_structures[index]

    def __getattr__(self, name):
        # Delegate unknown attribute access to the contained structures,
        # returning the attribute gathered from each of them as a list.
        return [getattr(x, name) for x in self.transformed_structures]

    def undo_last_change(self):
        """
        Undo the last transformation in the TransformedStructure.

        Raises:
            IndexError if already at the oldest change.
        """
        for x in self.transformed_structures:
            x.undo_last_change()

    def redo_next_change(self):
        """
        Redo the last undone transformation in the TransformedStructure.

        Raises:
            IndexError if already at the latest change.
        """
        for x in self.transformed_structures:
            x.redo_next_change()

    def __len__(self):
        return len(self.transformed_structures)

    def append_transformation(self, transformation, extend_collection=False,
                              clear_redo=True):
        """
        Appends a transformation to all TransformedStructures.

        Args:
            transformation: Transformation to append
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
            clear_redo (bool): Whether to clear the redo list. By default,
                this is True, meaning any appends clears the history of
                undoing. However, when using append_transformation to do a
                redo, the redo list should not be cleared to allow multiple
                redos.

        Returns:
            List of booleans corresponding to initial transformed structures
            each boolean describes whether the transformation altered the
            structure
        """
        if self.ncores and transformation.use_multiprocessing:
            # need to condense arguments into single tuple to use map
            z = map(
                lambda x: (x, transformation, extend_collection, clear_redo),
                self.transformed_structures)
            # BUG FIX: the Pool was never closed/joined, leaking worker
            # processes on every call. Ensure cleanup even if map() raises.
            p = Pool(self.ncores)
            try:
                new_tstructs = p.map(_apply_transformation, z, 1)
            finally:
                p.close()
                p.join()
            self.transformed_structures = []
            for ts in new_tstructs:
                self.transformed_structures.extend(ts)
        else:
            new_structures = []
            for x in self.transformed_structures:
                new = x.append_transformation(transformation,
                                              extend_collection,
                                              clear_redo=clear_redo)
                if new is not None:
                    new_structures.extend(new)
            self.transformed_structures.extend(new_structures)

    def extend_transformations(self, transformations):
        """
        Extends a sequence of transformations to the TransformedStructure.

        Args:
            transformations: Sequence of Transformations
        """
        for t in transformations:
            self.append_transformation(t)

    def apply_filter(self, structure_filter):
        """
        Applies a structure_filter to the list of TransformedStructures
        in the transmuter.

        Args:
            structure_filter: StructureFilter to apply.
        """
        def test_transformed_structure(ts):
            return structure_filter.test(ts.final_structure)
        self.transformed_structures = list(filter(test_transformed_structure,
                                                  self.transformed_structures))
        # Record the filter in each surviving structure's history.
        for ts in self.transformed_structures:
            ts.append_filter(structure_filter)

    def write_vasp_input(self, vasp_input_set, output_dir,
                         create_directory=True, subfolder=None,
                         include_cif=False):
        """
        Batch write vasp input for a sequence of transformed structures to
        output_dir, following the format output_dir/{formula}_{number}.

        Args:
            vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to create
                vasp input files from structures
            output_dir: Directory to output files
            create_directory (bool): Create the directory if not present.
                Defaults to True.
            subfolder: Callable to create subdirectory name from
                transformed_structure. e.g.,
                lambda x: x.other_parameters["tags"][0] to use the first tag.
            include_cif (bool): Whether to output a CIF as well. CIF files
                are generally better supported in visualization programs.
        """
        batch_write_vasp_input(self.transformed_structures, vasp_input_set,
                               output_dir, create_directory, subfolder,
                               include_cif)

    def set_parameter(self, key, value):
        """
        Add parameters to the transmuter. Additional parameters are stored in
        the as_dict() output.

        Args:
            key: The key for the parameter.
            value: The value for the parameter.
        """
        for x in self.transformed_structures:
            x.other_parameters[key] = value

    def add_tags(self, tags):
        """
        Add tags for the structures generated by the transmuter.

        Args:
            tags: A sequence of tags. Note that this should be a sequence of
                strings, e.g., ["My awesome structures", "Project X"].
        """
        self.set_parameter("tags", tags)

    def __str__(self):
        output = ["Current structures", "------------"]
        for x in self.transformed_structures:
            output.append(str(x.final_structure))
        return "\n".join(output)

    def append_transformed_structures(self, tstructs_or_transmuter):
        """
        Method is overloaded to accept either a list of transformed structures
        or transmuter, it which case it appends the second transmuter"s
        structures.

        Args:
            tstructs_or_transmuter: A list of transformed structures or a
                transmuter.
        """
        if isinstance(tstructs_or_transmuter, self.__class__):
            self.transformed_structures.extend(tstructs_or_transmuter
                                               .transformed_structures)
        else:
            for ts in tstructs_or_transmuter:
                assert isinstance(ts, TransformedStructure)
            self.transformed_structures.extend(tstructs_or_transmuter)

    @staticmethod
    def from_structures(structures, transformations=None, extend_collection=0):
        """
        Alternative constructor from structures rather than
        TransformedStructures.

        Args:
            structures: Sequence of structures
            transformations: New transformations to be applied to all
                structures
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.

        Returns:
            StandardTransmuter
        """
        tstruct = [TransformedStructure(s, []) for s in structures]
        return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """
    Generates a Transmuter from a cif string, possibly containing multiple
    structures.
    """

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """
        Generates a Transmuter from a cif string, possibly
        containing multiple structures.

        Args:
            cif_string: A string containing a cif or a series of cifs
            transformations: New transformations to be applied to all
                structures
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
        """
        transformed_structures = []
        lines = cif_string.split("\n")
        structure_data = []
        read_data = False
        # Split the input into individual CIF blocks: each "data_..." header
        # starts a new structure.
        for line in lines:
            # BUG FIX: the pattern must be a raw string -- "\s" in a plain
            # string is an invalid escape sequence in Python 3.
            if re.match(r"^\s*data", line):
                structure_data.append([])
                read_data = True
            if read_data:
                structure_data[-1].append(line)
        for data in structure_data:
            tstruct = TransformedStructure.from_cif_string("\n".join(data), [],
                                                           primitive)
            transformed_structures.append(tstruct)
        super(CifTransmuter, self).__init__(transformed_structures,
                                            transformations, extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """
        Generates a TransformedStructureCollection from a cif, possibly
        containing multiple structures.

        Args:
            filenames: List of strings of the cif files
            transformations: New transformations to be applied to all
                structures
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        allcifs = []
        for fname in filenames:
            with open(fname, "r") as f:
                allcifs.append(f.read())
        return CifTransmuter("\n".join(allcifs), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """
    Generates a transmuter from a sequence of POSCARs.

    Args:
        poscar_string: List of POSCAR strings
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        # Despite the plural wording above, a single POSCAR string is parsed
        # here; use from_filenames() for multiple POSCARs.
        tstruct = TransformedStructure.from_poscar_string(poscar_string, [])
        super(PoscarTransmuter, self).__init__([tstruct], transformations,
                                               extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """
        Convenient constructor to generates a POSCAR transmuter from a list of
        POSCAR filenames.

        Args:
            poscar_filenames: List of POSCAR filenames
            transformations: New transformations to be applied to all
                structures.
            extend_collection:
                Same meaning as in __init__.
        """
        # Returns a StandardTransmuter (not a PoscarTransmuter) holding one
        # TransformedStructure per file.
        tstructs = []
        for filename in poscar_filenames:
            with open(filename, "r") as f:
                tstructs.append(TransformedStructure
                                .from_poscar_string(f.read(), []))
        return StandardTransmuter(tstructs, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set, output_dir,
                           create_directory=True, subfolder=None,
                           include_cif=False):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
            vasp input files from structures.
        output_dir: Directory to output files
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Boolean indication whether to output a CIF as
            well. CIF files are generally better supported in visualization
            programs.
    """
    for i, s in enumerate(transformed_structures):
        # BUG FIX: use a raw string -- "\s" in a plain string literal is an
        # invalid escape sequence in Python 3.
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory)
        if include_cif:
            # Imported lazily so the CIF writer is only required when used.
            from pymatgen.io.cif import CifWriter
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection,
clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
| rousseab/pymatgen | pymatgen/alchemy/transmuters.py | Python | mit | 16,781 | [
"VASP",
"pymatgen"
] | 25718fe3915ea2633f8b6b0fb307e79162cbb2b3736a01ecc1ad0883dca7bfa5 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Regression test for vtkTriangleFilter: build a polydata containing every
# cell kind (verts, lines, polys, strips), triangulate it, and render.
# 22 points laid out on an integer grid; x/y vary, z is always 0.
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(22)
pts.SetPoint(0,0,0,0)
pts.SetPoint(1,1,0,0)
pts.SetPoint(2,2,0,0)
pts.SetPoint(3,3,0,0)
pts.SetPoint(4,4,0,0)
pts.SetPoint(5,5,0,0)
pts.SetPoint(6,6,0,0)
pts.SetPoint(7,7,0,0)
pts.SetPoint(8,8,0,0)
pts.SetPoint(9,9,0,0)
pts.SetPoint(10,1,1,0)
pts.SetPoint(11,2,1,0)
pts.SetPoint(12,3,1,0)
pts.SetPoint(13,4,1,0)
pts.SetPoint(14,6,1,0)
pts.SetPoint(15,8,1,0)
pts.SetPoint(16,9,1,0)
pts.SetPoint(17,3,2,0)
pts.SetPoint(18,6,2,0)
pts.SetPoint(19,7,2,0)
pts.SetPoint(20,8,2,0)
pts.SetPoint(21,9,2,0)
pd = vtk.vtkPolyData()
pd.SetPoints(pts)
# Vertex cells: one single vertex, one two-point poly-vertex.
verts = vtk.vtkCellArray()
verts.InsertNextCell(1)
verts.InsertCellPoint(0)
verts.InsertNextCell(2)
verts.InsertCellPoint(1)
verts.InsertCellPoint(10)
pd.SetVerts(verts)
# Line cells: a two-point line and a three-point polyline.
lines = vtk.vtkCellArray()
lines.InsertNextCell(2)
lines.InsertCellPoint(2)
lines.InsertCellPoint(11)
lines.InsertNextCell(3)
lines.InsertCellPoint(3)
lines.InsertCellPoint(12)
lines.InsertCellPoint(17)
pd.SetLines(lines)
# Polygon cells: a triangle and a five-sided polygon.
polys = vtk.vtkCellArray()
polys.InsertNextCell(3)
polys.InsertCellPoint(4)
polys.InsertCellPoint(5)
polys.InsertCellPoint(13)
polys.InsertNextCell(5)
polys.InsertCellPoint(6)
polys.InsertCellPoint(7)
polys.InsertCellPoint(19)
polys.InsertCellPoint(18)
polys.InsertCellPoint(14)
pd.SetPolys(polys)
# One triangle strip over six points.
strips = vtk.vtkCellArray()
strips.InsertNextCell(6)
strips.InsertCellPoint(8)
strips.InsertCellPoint(9)
strips.InsertCellPoint(15)
strips.InsertCellPoint(16)
strips.InsertCellPoint(20)
strips.InsertCellPoint(21)
pd.SetStrips(strips)
# One RGBA color per cell (7 cells total) so each input cell is visually
# distinguishable after triangulation.
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(4)
colors.SetNumberOfTuples(7)
colors.SetTuple4(0,255,0,0,255)
colors.SetTuple4(1,0,255,0,255)
colors.SetTuple4(2,0,0,255,255)
colors.SetTuple4(3,255,255,0,255)
colors.SetTuple4(4,255,0,255,255)
colors.SetTuple4(5,0,255,0,255)
colors.SetTuple4(6,0,255,255,255)
pd.GetCellData().SetScalars(colors)
# The filter under test: decompose the mixed cells into triangles
# (and lines/verts), then render the result.
tf = vtk.vtkTriangleFilter()
tf.SetInputData(pd)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tf.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create the RenderWindow, Renderer and interactive renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(actor)
ren1.SetBackground(1,1,1)
renWin.SetSize(300,150)
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(2.5)
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Core/Testing/Python/TestTriangleFilter.py | Python | gpl-3.0 | 2,623 | [
"VTK"
] | d94be52e1a7b69d715dfdf8fb578e63be87c451b6deb4f3631a64735d28b9689 |
from __future__ import print_function
import ast
import os
import codecs
from setuptools import setup
class VersionFinder(ast.NodeVisitor):
    """AST visitor that records the string assigned to ``__version__``.

    Used to read a package version without importing the package.
    """

    def __init__(self):
        # Stays None if no ``__version__ = "..."`` assignment is found.
        self.version = None

    def visit_Assign(self, node):
        # Only handle simple name targets. The previous unconditional
        # ``node.targets[0].id`` raised AttributeError on tuple or
        # attribute assignments (e.g. ``a, b = ...`` or ``obj.x = ...``).
        target = node.targets[0]
        if isinstance(target, ast.Name) and target.id == '__version__':
            self.version = node.value.s
def read(*parts):
    """Return the UTF-8 contents of the file at *parts*, relative to this file."""
    path = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(path, encoding='utf-8') as handle:
        return handle.read()
def find_version(*parts):
    """Extract ``__version__`` from the source file located by *parts*."""
    visitor = VersionFinder()
    visitor.visit(ast.parse(read(*parts)))
    return visitor.version
setup(
name="django-configurations",
version=find_version("configurations", "__init__.py"),
url='https://django-configurations.readthedocs.io/',
license='BSD',
description="A helper for organizing Django settings.",
long_description=read('README.rst'),
author='Jannis Leidel',
author_email='jannis@leidel.info',
packages=['configurations'],
entry_points={
'console_scripts': [
'django-cadmin = configurations.management:execute_from_command_line',
],
},
extras_require={
'cache': ['django-cache-url'],
'database': ['dj-database-url'],
'email': ['dj-email-url'],
'search': ['dj-search-url'],
'testing': [
'django-discover-runner',
'mock',
'django-cache-url>=1.0.0',
'dj-database-url',
'dj-email-url',
'dj-search-url',
'six',
'Sphinx>=1.4',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
],
zip_safe=False,
)
| jezdez/django-configurations | setup.py | Python | bsd-3-clause | 2,117 | [
"VisIt"
] | 3b47a2d990c81fb33f3bb7a973d5608c33f2f75a4450c2f46871ff0ac79ac8a5 |
import unittest
import itertools
from functools import partial
import numpy
from tvtk.api import tvtk
from simphony.cuds.mesh import Mesh, Point, Face, Edge, Cell
from simphony.core.data_container import DataContainer
from simphony.core.cuba import CUBA
from simphony.testing.abc_check_mesh import (
CheckMeshContainer, CheckMeshItemOperations,
CheckMeshPointOperations, CheckMeshEdgeOperations,
CheckMeshFaceOperations, CheckMeshCellOperations)
from simphony.testing.utils import (create_points, compare_data_containers,
compare_points, compare_elements)
from simphony_mayavi.cuds.api import VTKMesh
from simphony_mayavi.core.api import supported_cuba as mayavi_supported_cuba
def vtk_compare_points(point, reference, msg=None, testcase=None):
    ''' use numpy.allclose to compare point coordinates retrieved
    from vtk dataset with the reference as vtk casts coordinates to
    double-precision floats and precision errors may be introduced
    during casting
    '''
    self = testcase
    self.assertEqual(point.uid, reference.uid)
    if not numpy.allclose(point.coordinates, reference.coordinates):
        error_message = "{} != {}"
        # BUG FIX: the failure exception was previously constructed but
        # never raised, so coordinate mismatches passed silently.
        raise self.failureException(error_message.format(point, reference))
    compare_data_containers(point.data, reference.data, testcase=self)
class TestVTKMeshContainer(unittest.TestCase, CheckMeshContainer) if False else TestVTKMeshContainer  # placeholder
class TestVTKMeshPointOperations(CheckMeshPointOperations, unittest.TestCase):
    """Run the generic point-operation checks against VTKMesh."""

    def setUp(self):
        CheckMeshItemOperations.setUp(self)
        # Compare VTK-roundtripped points with a float tolerance: VTK
        # stores coordinates as double-precision floats.
        self.addTypeEqualityFunc(
            Point, partial(vtk_compare_points, testcase=self))

    def create_items(self):
        # for simphony-common < 0.2.4
        # https://github.com/simphony/simphony-common/issues/217
        return create_points(restrict=self.supported_cuba())

    def supported_cuba(self):
        return mayavi_supported_cuba()

    def container_factory(self, name):
        return VTKMesh(name=name)
class TestVTKMeshEdgeOperations(CheckMeshEdgeOperations, unittest.TestCase):
    """Run the generic edge-operation checks against VTKMesh."""

    def supported_cuba(self):
        return mayavi_supported_cuba()

    def container_factory(self, name):
        container = VTKMesh(name=name)
        return container
class TestVTKMeshFaceOperations(CheckMeshFaceOperations, unittest.TestCase):
    """Run the generic face-operation checks against VTKMesh."""

    def supported_cuba(self):
        return mayavi_supported_cuba()

    def container_factory(self, name):
        container = VTKMesh(name=name)
        return container
class TestVTKMeshCellOperations(CheckMeshCellOperations, unittest.TestCase):
    """Run the generic cell-operation checks against VTKMesh."""

    def supported_cuba(self):
        return mayavi_supported_cuba()

    def container_factory(self, name):
        container = VTKMesh(name=name)
        return container
class TestVTKMesh(unittest.TestCase):
    """Construction of VTKMesh from CUDS meshes and from raw tvtk datasets."""

    def setUp(self):
        # Register element-wise comparators so assertEqual understands
        # CUDS items.
        self.addTypeEqualityFunc(
            Point, partial(compare_points, testcase=self))
        self.addTypeEqualityFunc(
            Edge, partial(compare_elements, testcase=self))
        self.addTypeEqualityFunc(
            Face, partial(compare_elements, testcase=self))
        self.addTypeEqualityFunc(
            Cell, partial(compare_elements, testcase=self))
        # 12 points: indices 0-3 form a tetrahedron, 4-11 a hexahedron.
        self.points = numpy.array([
            [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
            [2, 0, 0], [3, 0, 0], [3, 1, 0], [2, 1, 0],
            [2, 0, 1], [3, 0, 1], [3, 1, 1], [2, 1, 1]],
            'f')
        self.cells = [
            [0, 1, 2, 3],  # tetra
            [4, 5, 6, 7, 8, 9, 10, 11]]  # hex
        self.faces = [[2, 7, 11]]
        self.edges = [[1, 4], [3, 8]]

    def test_initialization_from_cuds(self):
        # given: a CUDS Mesh populated with points/edges/faces/cells whose
        # TEMPERATURE data encodes their creation order.
        count = itertools.count()
        points = [
            Point(coordinates=point, data=DataContainer(TEMPERATURE=index))
            for index, point in enumerate(self.points)]
        container = Mesh('test')
        container.add(points)
        faces = [
            Face(
                points=[points[index].uid for index in face],
                data=DataContainer(TEMPERATURE=next(count)))
            for face in self.faces]
        edges = [
            Edge(
                points=[points[index].uid for index in edge],
                data=DataContainer(TEMPERATURE=next(count)))
            for edge in self.edges]
        cells = [
            Cell(
                points=[points[index].uid for index in cell],
                data=DataContainer(TEMPERATURE=next(count)))
            for cell in self.cells]
        container.add(edges)
        container.add(faces)
        container.add(cells)
        # when
        vtk_container = VTKMesh.from_mesh(container)
        # then: counts and per-item equality are preserved.
        self.assertEqual(vtk_container.name, container.name)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.POINT)), 12)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.EDGE
        )), 2)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.FACE
        )), 1)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.CELL
        )), 2)
        for point in points:
            self.assertEqual(vtk_container.get(point.uid), point)
        for edge in edges:
            self.assertEqual(vtk_container.get(edge.uid), edge)
        for face in faces:
            self.assertEqual(vtk_container.get(face.uid), face)
        for cell in cells:
            self.assertEqual(vtk_container.get(cell.uid), cell)

    def test_initialization_from_empty_cuds(self):
        # given
        container = Mesh('test')
        # when
        vtk_container = VTKMesh.from_mesh(container)
        # then: an empty mesh stays empty for every item type.
        self.assertEqual(vtk_container.name, container.name)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.POINT
        )), 0)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.EDGE
        )), 0)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.FACE
        )), 0)
        self.assertEqual(sum(1 for _ in vtk_container.iter(
            item_type=CUBA.CELL
        )), 0)

    def test_initialization_from_data_set(self):
        # given: a tvtk UnstructuredGrid holding 2 edges, 1 triangle,
        # 1 tetra and 1 hex, with TEMPERATURE arrays on points and cells.
        data_set = tvtk.UnstructuredGrid()
        # set points
        data_set.points = self.points
        # set cells
        cell_array = tvtk.CellArray()
        tetra_type = tvtk.Tetra().cell_type
        hex_type = tvtk.Hexahedron().cell_type
        edge_type = tvtk.Line().cell_type
        triangle_type = tvtk.Triangle().cell_type
        # VTK flat cell layout: each cell is its point count followed by
        # its point indices.
        cells = (
            [2] + self.edges[0] + [2] + self.edges[1] + [3] + self.faces[0] +
            [4] + self.cells[0] + [8] + self.cells[1])
        cell_types = numpy.array(
            [edge_type, edge_type, triangle_type, tetra_type, hex_type])
        offset = numpy.array([0, 3, 6, 10, 15])
        cell_array.set_cells(5, cells)
        data_set.set_cells(cell_types, offset, cell_array)
        # set point data
        index = data_set.point_data.add_array(numpy.arange(len(self.points)))
        data_set.point_data.get_array(index).name = CUBA.TEMPERATURE.name
        # set cell data
        index = data_set.cell_data.add_array(numpy.arange(len(cells)))
        data_set.cell_data.get_array(index).name = CUBA.TEMPERATURE.name
        # when
        vtk_container = VTKMesh.from_dataset('test', data_set=data_set)
        # then
        self.assertEqual(vtk_container.count_of(CUBA.POINT), 12)
        self.assertEqual(vtk_container.count_of(CUBA.EDGE), 2)
        self.assertEqual(vtk_container.count_of(CUBA.FACE), 1)
        self.assertEqual(vtk_container.count_of(CUBA.CELL), 2)
        index2point = vtk_container.index2point
        point2index = vtk_container.point2index
        for index, uid in index2point.items():
            point = vtk_container.get(uid)
            point.uid = None
            self.assertEqual(
                point,
                Point(
                    coordinates=self.points[index],
                    data=DataContainer(TEMPERATURE=index)))
        for edge in vtk_container.iter(item_type=CUBA.EDGE):
            edge.uid = None
            links = [point2index[uid] for uid in edge.points]
            index = self.edges.index(links)
            self.assertEqual(
                edge,
                Edge(
                    points=[index2point[i] for i in self.edges[index]],
                    data=DataContainer(TEMPERATURE=index)))
        for face in vtk_container.iter(item_type=CUBA.FACE):
            face.uid = None
            # The triangle is the third cell, hence TEMPERATURE == 2.0.
            self.assertEqual(
                face,
                Face(
                    points=[index2point[i] for i in self.faces[0]],
                    data=DataContainer(TEMPERATURE=2.0)))
        for cell in vtk_container.iter(item_type=CUBA.CELL):
            cell.uid = None
            links = [point2index[uid] for uid in cell.points]
            index = self.cells.index(links)
            # Volume cells follow the 2 edges + 1 face, hence the +3 offset.
            self.assertEqual(
                cell,
                Cell(
                    points=[index2point[i] for i in self.cells[index]],
                    data=DataContainer(TEMPERATURE=index + 3.0)))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| simphony/simphony-mayavi | simphony_mayavi/cuds/tests/test_vtk_mesh.py | Python | bsd-2-clause | 9,374 | [
"VTK"
] | 9dd8d28fa48687b2bed921bc685856f34b2fdc1f7461b4bc032e55eb20f08998 |
########################################################################
# File : CLI.py
# Author : Andrei Tsaregorodtsev
########################################################################
""" CLI is the base class for all the DIRAC consoles ( CLIs ). It contains
several utilities and signal handlers of general purpose.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import six
import cmd
import sys
import os
import signal
from DIRAC import gLogger
gColors = {'red': 1, 'green': 2, 'yellow': 3, 'blue': 4}
def colorEnabled():
    """Return True when the terminal type advertises ANSI color support."""
    terminal = os.environ.get('TERM')
    return terminal in ('xterm', 'xterm-color')
def colorize(text, color):
    """Return colorized text

    :param str text: the text to decorate
    :param color: a color name from ``gColors`` or an integer ANSI color
                  number
    :returns: the text wrapped in ANSI escape codes, or unchanged when
              colors are disabled or the color is unknown
    """
    if not colorEnabled():
        return text
    startCode = '\033[;3'
    endCode = '\033[0m'
    if isinstance(color, six.integer_types):
        return "%s%sm%s%s" % (startCode, color, text, endCode)
    try:
        return "%s%sm%s%s" % (startCode, gColors[color], text, endCode)
    except Exception:
        # Unknown color name: return the text undecorated.
        return text
class CLI(cmd.Cmd):
    """Base class for the DIRAC interactive consoles.

    Provides the shared commands (quit/exit/help/execfile), signal
    handlers and small output-formatting helpers.
    """

    def __init__(self):
        cmd.Cmd.__init__(self)
        # Column at which command descriptions are aligned in help output.
        self.indentSpace = 20
        self._initSignals()

    def _handleSignal(self, sig, frame):
        """Announce the received signal and terminate the console."""
        print("\nReceived signal", sig, ", exiting ...")
        self.do_quit(self)

    def _initSignals(self):
        """
        Registers signal handlers
        """
        # NOTE(review): SIGKILL can never be caught; registering it always
        # raises and is silently swallowed by the except clause below.
        for sigNum in (signal.SIGINT, signal.SIGQUIT, signal.SIGKILL, signal.SIGTERM):
            try:
                signal.signal(sigNum, self._handleSignal)
            except Exception:
                pass

    def _errMsg(self, errMsg):
        """
        Print out a colorized error log message

        :param str errMsg: error message string
        :return: nothing
        """
        gLogger.error("%s %s" % (colorize("[ERROR]", "red"), errMsg))

    def emptyline(self):
        # Override cmd.Cmd default: do NOT repeat the last command on an
        # empty input line.
        pass

    def do_exit(self, args):
        """ Exit the shell.

        usage: exit
        """
        self.do_quit(self)

    def do_quit(self, args):
        """ Exit the shell.

        usage: quit
        """
        gLogger.notice('')
        sys.exit(0)

    def do_EOF(self, args):
        """ Handler for EOF ( Ctrl D ) signal - perform quit
        """
        self.do_quit(args)

    def do_execfile(self, args):
        """ Execute a series of CLI commands from a given file

        usage:
            execfile <filename>
        """
        argss = args.split()
        fname = argss[0]
        if not os.path.exists(fname):
            print("Error: File not found %s" % fname)
            return
        with open(fname, "r") as input_cmd:
            contents = input_cmd.readlines()
        # Execute line by line; stop at the first failing command.
        for line in contents:
            try:
                gLogger.notice("\n--> Executing %s\n" % line)
                self.onecmd(line)
            except Exception as error:
                self._errMsg(str(error))
                break
        return

    def printPair(self, key, value, separator=":"):
        # Print "key : value" with multi-line values aligned under the
        # separator column (self.indentSpace).
        valueList = value.split("\n")
        print("%s%s%s %s" % (key, " " * (self.indentSpace - len(key)), separator, valueList[0].strip()))
        for valueLine in valueList[1:-1]:
            print("%s %s" % (" " * self.indentSpace, valueLine.strip()))

    def do_help(self, args):
        """
        Shows help information

        Usage: help <command>

        If no command is specified all commands are shown
        """
        if len(args) == 0:
            print("\nAvailable commands:\n")
            attrList = sorted(dir(self))
            for attribute in attrList:
                if attribute.startswith("do_"):
                    # NOTE(review): assumes every do_* method has a
                    # docstring; a missing one raises TypeError here.
                    self.printPair(attribute[3:], getattr(self, attribute).__doc__[1:])
            print("")
        else:
            command = args.split()[0].strip()
            try:
                obj = getattr(self, "do_%s" % command)
            except Exception:
                print("There's no such %s command" % command)
                return
            self.printPair(command, obj.__doc__[1:])
| yujikato/DIRAC | src/DIRAC/Core/Base/CLI.py | Python | gpl-3.0 | 3,675 | [
"DIRAC"
] | 00aaac4323daaeb2904bae6efb8270776d4fe37010331cce1ff0e80d11d7be78 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Brian McClure
#
# django-media-helper is free software under terms of the MIT License.
#
# Fork of django-cleanup
from setuptools import setup, find_packages
# Long description shown on PyPI (Markdown).
description = """# **django-media-helper** #
When dealing with content from unacquainted sources(e.g., clients or designers)
one often gets images with absurd dimensions and/or filesizes: A 3000px-wide
play-button, a 10MB logo, etc. Media-helper attempts to mitigate this problem
by automating image-resizing, delivering the most appropriately sized image to
the browser.
It is also designed to be dropped into existing projects with minimal effort.
It's still in the alpha stage, but if you're careful it might make your life a
little bit easier while also speeding up your load times and reducing data
transfer."""

# Distribution metadata.
# NOTE(review): author_email looks like a placeholder — confirm before release.
setup(
    name = 'django-media-helper',
    version = '0.3.2',
    packages = find_packages(),
    include_package_data=True,
    requires = ['python (>= 2.7)', 'django (>= 1.8)', 'Pillow (>= 2.1.0)'],
    description = 'An image resizing and management app for Django',
    long_description = description,
    author = 'Brian McClure',
    author_email = 'django@jetbrains.com',
    url = 'https://github.com/brmc/django-media-helper',
    download_url = 'https://github.com/brmc/django-media-helper.git',
    license = 'MIT License',
    keywords = 'django, imaging, ajax',
    classifiers = [
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
| brmc/django-media-helper | setup.py | Python | mit | 1,794 | [
"Brian"
] | 76d0e0cb84bdd800be36c4c8b5569ff771ad37ad062c11c70303104add928140 |
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import glob
import itertools
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib2
import xmlrpclib
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.tools.translate import _
from .. import http
openerpweb = http
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
def rjsmin(script):
    """ Minify js with a clever regex.
        Taken from http://opensource.perlig.de/rjsmin
        Apache License, Version 2.0 """
    def subber(match):
        """ Substitution callback """
        groups = match.groups()
        # Exactly one alternative of the big regex matched; emit the
        # corresponding replacement (kept literal, newline, single space,
        # or nothing).
        return (
            groups[0] or
            groups[1] or
            groups[2] or
            groups[3] or
            (groups[4] and '\n') or
            (groups[5] and ' ') or
            (groups[6] and ' ') or
            (groups[7] and ' ') or
            ''
        )
    # Single-pass minification: the alternation recognizes (in order)
    # plain tokens, string literals, regex literals (after operators or
    # ``return``), removable comments and collapsible whitespace.
    # Do not reformat: the pattern is whitespace-sensitive.
    result = re.sub(
        r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
        r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
        r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
        r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
        r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
        r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
        r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
        r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
        r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
        r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
        r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
        r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
        r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
        r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
        r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
        r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
        r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
        r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
        r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
        r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
        r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
    ).strip()
    return result
def db_list(req):
    """List the databases visible to this request.

    The raw list from the ``db`` service is narrowed by the ``dbfilter``
    server option, in which ``%h`` is replaced by the request host name
    and ``%d`` by its first dot-separated component.
    """
    available = req.session.proxy("db").list()
    host = req.httprequest.environ['HTTP_HOST'].split(':')[0]
    domain = host.split('.')[0]
    pattern = openerp.tools.config['dbfilter'].replace('%h', host).replace('%d', domain)
    return [name for name in available if re.match(pattern, name)]
def db_monodb(req):
    """Return the database name when exactly one database matches the
    current filter; False otherwise (including on access errors)."""
    try:
        names = db_list(req)
    except xmlrpclib.Fault:
        # Listing may be forbidden for this user: treat as "not mono-db".
        return False
    if len(names) == 1:
        return names[0]
    return False
def module_topological_sort(modules):
    """ Return a list of module names sorted so that their dependencies of the
    modules are listed before the module itself

    modules is a dict of {module_name: dependencies}

    :param modules: modules to sort
    :type modules: dict
    :returns: list(str)
    """
    # Every module that appears as a dependency of some other module.
    # FIX: ``itervalues()`` (Python 2 only) replaced by ``values()`` so the
    # helper also runs under Python 3; behavior is unchanged.
    dependencies = set(itertools.chain.from_iterable(modules.values()))
    # Depth-first topological sort.
    # [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
    # L: sorted output; S: roots, i.e. modules nothing else depends on.
    L = []
    S = set(module for module in modules if module not in dependencies)
    visited = set()

    def visit(n):
        if n not in visited:
            visited.add(n)
            # Dependencies on modules we do not know about (non-web
            # modules) cannot be resolved: ignore them.
            if n not in modules:
                return
            for m in modules[n]:
                visit(m)
            L.append(n)

    for n in S:
        visit(n)
    return L
def module_installed(req):
    """Return the names of installed, loadable web addons for the request's
    database, topologically sorted so dependencies come before dependants."""
    # Candidates module the current heuristic is the /static dir
    loadable = openerpweb.addons_manifest.keys()
    modules = {}
    # Retrieve database installed modules
    # TODO The following code should move to ir.module.module.list_installed_modules()
    Modules = req.session.model('ir.module.module')
    domain = [('state','=','installed'), ('name','in', loadable)]
    for module in Modules.search_read(domain, ['name', 'dependencies_id']):
        modules[module['name']] = []
        deps = module.get('dependencies_id')
        if deps:
            # Resolve dependency record ids to module names.
            deps_read = req.session.model('ir.module.module.dependency').read(deps, ['name'])
            dependencies = [i['name'] for i in deps_read]
            modules[module['name']] = dependencies
    sorted_modules = module_topological_sort(modules)
    return sorted_modules
def module_installed_bypass_session(dbname):
    """Return installed, loadable module names for ``dbname``, sorted so
    dependencies come first.

    Unlike module_installed() this queries the registry directly with the
    superuser id, without an authenticated web session.  Any error while
    reading the registry (e.g. unknown database) is deliberately swallowed
    and yields an empty module set.
    """
    loadable = openerpweb.addons_manifest.keys()
    modules = {}
    try:
        registry = openerp.modules.registry.RegistryManager.get(dbname)
        with registry.cursor() as cr:
            m = registry.get('ir.module.module')
            # TODO The following code should move to ir.module.module.list_installed_modules()
            ids = m.search(cr, 1, [('state','=','installed'), ('name','in', loadable)])
            for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
                modules[module['name']] = []
                deps = module.get('dependencies_id')
                if deps:
                    deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
                    dependencies = [i['name'] for i in deps_read]
                    modules[module['name']] = dependencies
    # FIX: was ``except Exception,e`` — Python-2-only syntax with an unused
    # binding; ``except Exception:`` is valid on both Python 2.6+ and 3.
    except Exception:
        # Best-effort: fall through with whatever was collected (usually {}).
        pass
    sorted_modules = module_topological_sort(modules)
    return sorted_modules
def module_boot(req, db=None):
    """Compute the ordered list of modules the web client must load:
    server-wide modules first, then database-installed modules (when a
    single database can be determined)."""
    server_wide = openerp.conf.server_wide_modules or ['web']
    serverside = [name for name in server_wide
                  if name in openerpweb.addons_manifest]
    dbside = []
    monodb = db or db_monodb(req)
    if monodb:
        dbside = [name for name in module_installed_bypass_session(monodb)
                  if name not in serverside]
    return serverside + dbside
def concat_xml(file_list):
    """Concatenate xml files

    All children of every file's root element are merged under a single
    root named after the first file's root tag; a SHA1 checksum of the raw
    file contents is computed along the way.

    :param list(str) file_list: list of files to check
    :returns: (concatenation_result, checksum)
    :rtype: (str, str)
    """
    checksum = hashlib.new('sha1')
    if not file_list:
        return '', checksum.hexdigest()

    root = None
    for fname in file_list:
        with open(fname, 'rb') as fp:
            contents = fp.read()
            checksum.update(contents)
            fp.seek(0)
            xml = ElementTree.parse(fp).getroot()

        if root is None:
            root = ElementTree.Element(xml.tag)
        #elif root.tag != xml.tag:
        #    raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))

        # FIX: ``getchildren()`` is deprecated (removed in Python 3.9);
        # iterating the element yields the same direct children.
        for child in xml:
            root.append(child)
    return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def concat_files(file_list, reader=None, intersperse=""):
    """ Concatenates contents of all provided files

    :param list(str) file_list: list of files to check
    :param function reader: reading procedure for each file
    :param str intersperse: string to intersperse between file contents
    :returns: (concatenation_result, checksum)
    :rtype: (str, str)
    """
    checksum = hashlib.new('sha1')
    if not file_list:
        return '', checksum.hexdigest()

    if reader is None:
        def reader(f):
            with open(f, 'rb') as fp:
                return fp.read()

    pieces = []
    for fname in file_list:
        data = reader(fname)
        checksum.update(data)
        pieces.append(data)
    return intersperse.join(pieces), checksum.hexdigest()
# Cache of minified bundles, keyed by the SHA1 of the unminified content.
concat_js_cache = {}

def concat_js(file_list):
    """Concatenate the given javascript files (separated by ';') and
    minify the result, memoizing minified output by content checksum."""
    content, checksum = concat_js_cache and (None, None) or (None, None)  # placeholder
def fs2web(path):
    """convert FS path into web path"""
    return path.replace(os.path.sep, '/')
def manifest_glob(req, extension, addons=None, db=None):
    """Resolve the ``extension`` glob patterns (e.g. 'js', 'css') declared
    in each addon's manifest to concrete files.

    When ``addons`` is None the boot module list for the request/database
    is used; otherwise it is parsed as a comma-separated addon name string.

    :returns: list of (filesystem path, web path) pairs
    """
    if addons is None:
        addons = module_boot(req, db=db)
    else:
        addons = addons.split(',')
    r = []
    for addon in addons:
        manifest = openerpweb.addons_manifest.get(addon, None)
        if not manifest:
            continue
        # ensure does not ends with /
        addons_path = os.path.join(manifest['addons_path'], '')[:-1]
        globlist = manifest.get(extension, [])
        for pattern in globlist:
            for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
                # Web path = filesystem path with the addons dir prefix
                # stripped and OS separators replaced by '/'.
                r.append((path, fs2web(path[len(addons_path):])))
    return r
def manifest_list(req, extension, mods=None, db=None):
    """Return the list of web paths to load for ``extension``.

    Outside debug mode a single bundled URL is returned; in debug mode
    each individual file is listed with a cache-busting ``?debug=<mtime>``
    suffix, unless the request opts out via ``no_sugar``.
    """
    if not req.debug:
        path = '/web/webclient/' + extension
        if mods is not None:
            path += '?mods=' + mods
        elif db:
            path += '?db=' + db
        return [path]
    files = manifest_glob(req, extension, addons=mods, db=db)
    # "no_sugar" in the query string (or the referrer) disables the
    # cache-busting suffix.
    i_am_diabetic = req.httprequest.environ["QUERY_STRING"].count("no_sugar") >= 1 or \
                    req.httprequest.environ.get('HTTP_REFERER', '').count("no_sugar") >= 1
    if i_am_diabetic:
        return [wp for _fp, wp in files]
    else:
        return ['%s?debug=%s' % (wp, os.path.getmtime(fp)) for fp, wp in files]
def get_last_modified(files):
    """ Returns the modification time of the most recently modified
    file provided

    :param list(str) files: names of files to check
    :return: most recent modification time amongst the fileset
    :rtype: datetime.datetime
    """
    mtimes = [datetime.datetime.fromtimestamp(os.path.getmtime(f))
              for f in list(files)]
    if mtimes:
        return max(mtimes)
    # No files at all: fall back to the epoch.
    return datetime.datetime(1970, 1, 1)
def make_conditional(req, response, last_modified=None, etag=None):
    """ Makes the provided response conditional based upon the request,
    and mandates revalidation from clients

    Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
    setting ``last_modified`` and ``etag`` correctly on the response object

    :param req: OpenERP request
    :type req: web.common.http.WebRequest
    :param response: Werkzeug response
    :type response: werkzeug.wrappers.Response
    :param datetime.datetime last_modified: last modification date of the response content
    :param str etag: some sort of checksum of the content (deep etag)
    :return: the response object provided
    :rtype: werkzeug.wrappers.Response
    """
    # Force clients to revalidate on every request (max-age=0) while still
    # allowing 304 Not Modified answers via the validators set below.
    response.cache_control.must_revalidate = True
    response.cache_control.max_age = 0
    if last_modified:
        response.last_modified = last_modified
    if etag:
        response.set_etag(etag)
    return response.make_conditional(req.httprequest)
def login_and_redirect(req, db, login, key, redirect_url='/'):
    """Authenticate the web session against ``db`` with the given
    credentials, then redirect to ``redirect_url`` with the session
    cookie set."""
    wsgienv = req.httprequest.environ
    # Minimal WSGI environment forwarded to the authentication layer.
    env = dict(
        base_location=req.httprequest.url_root.rstrip('/'),
        HTTP_HOST=wsgienv['HTTP_HOST'],
        REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
    )
    req.session.authenticate(db, login, key, env)
    return set_cookie_and_redirect(req, redirect_url)
def set_cookie_and_redirect(req, redirect_url):
    """Respond with a 303 redirect to ``redirect_url`` carrying the current
    session id in the ``instance0|session_id`` cookie."""
    redirect = werkzeug.utils.redirect(redirect_url, 303)
    # Keep the Location header exactly as given (possibly relative).
    redirect.autocorrect_location_header = False
    cookie_val = urllib2.quote(simplejson.dumps(req.session_id))
    redirect.set_cookie('instance0|session_id', cookie_val)
    return redirect
def load_actions_from_ir_values(req, key, key2, models, meta):
    """Fetch the actions bound to ``models`` through ``ir.values`` and
    normalize each one for the web client."""
    raw_actions = req.session.model('ir.values').get(
        key, key2, models, meta, req.context)
    return [(action_id, name, clean_action(req, action))
            for action_id, name, action in raw_actions]
def clean_action(req, action):
    """Normalize an action descriptor in place: ensure the 'flags' and
    'type' keys exist, and fold view_type into view_mode for window
    actions."""
    action.setdefault('flags', {})
    action_type = action.setdefault('type', 'ir.actions.act_window_close')
    if action_type != 'ir.actions.act_window':
        return action
    return fix_view_modes(action)
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
    """
    Fill in ``action['views']`` for hand-built action dictionaries.

    Server-generated window actions already carry a ``views`` sequence, but
    actions returned on the fly (e.g. from buttons) may only provide
    ``view_mode`` and optionally ``view_id``.  Since the web client relies
    on ``action['views']``, derive it here.

    Handled cases:

    * no view_id, one or more view modes
    * a single view_id with a single view mode

    :param dict action: action descriptor dictionary to generate a views key for
    """
    view_id = action.get('view_id') or False
    if isinstance(view_id, (list, tuple)):
        # Database-style (id, name) pair: keep only the id.
        view_id = view_id[0]

    # providing at least one view mode is a requirement, not an option
    modes = action['view_mode'].split(',')
    if len(modes) == 1:
        action['views'] = [(view_id, modes[0])]
        return
    if view_id:
        raise ValueError('Non-db action dictionaries should provide '
                         'either multiple view modes or a single view '
                         'mode and an optional view id.\n\n Got view '
                         'modes %r and view id %r for action %r' % (
                             modes, view_id, action))
    action['views'] = [(False, mode) for mode in modes]
    return
def fix_view_modes(action):
    """ For historical reasons, OpenERP has weird dealings in relation to
    view_mode and the view_type attribute (on window actions):

    * one of the view modes is ``tree``, which stands for both list views
      and tree views
    * the choice is made by checking ``view_type``, which is either
      ``form`` for a list view or ``tree`` for an actual tree view

    This methods simply folds the view_type into view_mode by adding a
    new view mode ``list`` which is the result of the ``tree`` view_mode
    in conjunction with the ``form`` view_type.

    TODO: this should go into the doc, some kind of "peculiarities" section

    :param dict action: an action descriptor
    :returns: nothing, the action is modified in place
    """
    if not action.get('views'):
        generate_views(action)

    if action.pop('view_type', 'form') != 'form':
        # An actual tree view: nothing to fold.
        return action

    if 'view_mode' in action:
        folded = ['list' if mode == 'tree' else mode
                  for mode in action['view_mode'].split(',')]
        action['view_mode'] = ','.join(folded)
    action['views'] = [
        [view, 'list' if mode == 'tree' else mode]
        for view, mode in action['views']
    ]
    return action
def _local_web_translations(trans_file):
    """Extract "openerp-web" messages from a local PO file.

    :param trans_file: path of the ``.po`` file to read
    :returns: list of ``{'id': source, 'string': translation}`` dicts for
              entries whose auto-comments contain ``openerp-web``; an
              empty list when the file cannot be read or parsed
    """
    messages = []
    try:
        with open(trans_file) as t_file:
            po = babel.messages.pofile.read_po(t_file)
    except Exception:
        # Unreadable or corrupt PO file: degrade to "no translations".
        # Previously returned None here, which was inconsistent with the
        # list returned on success (callers embed this under 'messages').
        return messages
    for x in po:
        if x.id and x.string and "openerp-web" in x.auto_comments:
            messages.append({'id': x.id, 'string': x.string})
    return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
    """Convert an ElementTree element into a JSON-serializable dict.

    Based on xml2json-direct, a simple and straightforward XML-to-JSON
    converter in Python (New BSD Licensed),
    http://code.google.com/p/xml2json-direct/

    :param el: the element to convert
    :param preserve_whitespaces: keep whitespace-only text/tail nodes
    :returns: dict with ``tag``, optional ``namespace``, ``attrs`` and
              ``children`` keys
    """
    result = {}
    tag = el.tag
    if tag.startswith("{"):
        # Clark notation "{uri}local": split off the namespace URI
        namespace, local_name = tag.rsplit("}", 1)
        result["tag"] = local_name
        result["namespace"] = namespace[1:]
    else:
        result["tag"] = tag

    result["attrs"] = dict(el.items())

    children = []
    if el.text and (preserve_whitespaces or el.text.strip()):
        children.append(el.text)
    for child in el:
        children.append(xml2json_from_elementtree(child, preserve_whitespaces))
        if child.tail and (preserve_whitespaces or child.tail.strip()):
            children.append(child.tail)
    result["children"] = children
    return result
def content_disposition(filename, req):
    """Build a Content-Disposition header value for *filename*, working
    around per-browser quirks in attachment-name encoding.

    IE before version 9 wants the percent-encoded name without the
    RFC 5987 ``filename*`` syntax, Safari only understands the raw
    (unencoded) filename, and everything else gets the
    standards-compliant ``filename*=UTF-8''...`` form.
    """
    raw = filename.encode('utf8')
    quoted = urllib2.quote(raw)
    agent = req.httprequest.user_agent
    major = int((agent.version or '0').split('.')[0])

    if agent.browser == 'msie' and major < 9:
        return "attachment; filename=%s" % quoted
    if agent.browser == 'safari':
        return "attachment; filename=%s" % raw
    return "attachment; filename*=UTF-8''%s" % quoted
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
# Skeleton page served by Home.index: the web client's bootstrap HTML.
# Interpolated keys: %(css)s and %(js)s are pre-rendered <link>/<script>
# asset tags, %(modules)s is a JSON list of module names to initialize,
# and %(init)s is the javascript snippet that boots the client.
# '%%' occurrences are literal percent signs escaped for %-formatting.
html_template = """<!DOCTYPE html>
<html style="height: 100%%">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>OpenERP</title>
<link rel="shortcut icon" href="/web/static/src/img/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" href="/web/static/src/css/full.css" />
%(css)s
%(js)s
<script type="text/javascript">
$(function() {
var s = new openerp.init(%(modules)s);
%(init)s
});
</script>
</head>
<body>
<!--[if lte IE 8]>
<script src="http://ajax.googleapis.com/ajax/libs/chrome-frame/1/CFInstall.min.js"></script>
<script>CFInstall.check({mode: "overlay"});</script>
<![endif]-->
</body>
</html>
"""
class Home(openerpweb.Controller):
    # Root controller: serves the web client's entry page and a direct
    # login-and-redirect endpoint.
    _cp_path = '/'

    @openerpweb.httprequest
    def index(self, req, s_action=None, db=None, **kw):
        """Render the web client's bootstrap HTML page, inlining <script>
        and <link> tags for every js/css asset of the available modules
        and a snippet that instantiates the WebClient on document body."""
        js = "\n ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list(req, 'js', db=db))
        css = "\n ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list(req, 'css', db=db))

        r = html_template % {
            'js': js,
            'css': css,
            'modules': simplejson.dumps(module_boot(req, db=db)),
            'init': 'var wc = new s.web.WebClient();wc.appendTo($(document.body));'
        }
        return r

    @openerpweb.httprequest
    def login(self, req, db, login, key):
        # Authenticate with the given credentials and redirect into the client.
        return login_and_redirect(req, db, login, key)
class WebClient(openerpweb.Controller):
    # Serves the web client's concatenated static assets (javascript, css,
    # qweb templates) and its translations.
    _cp_path = "/web/webclient"

    @openerpweb.jsonrequest
    def csslist(self, req, mods=None):
        # URLs of all css files for the requested (comma-separated) addons
        return manifest_list(req, 'css', mods=mods)

    @openerpweb.jsonrequest
    def jslist(self, req, mods=None):
        # URLs of all javascript files for the requested addons
        return manifest_list(req, 'js', mods=mods)

    @openerpweb.jsonrequest
    def qweblist(self, req, mods=None):
        # URLs of all qweb template files for the requested addons
        return manifest_list(req, 'qweb', mods=mods)

    @openerpweb.httprequest
    def css(self, req, mods=None, db=None):
        """Serve all addon css concatenated into a single response,
        rewriting relative @import and url() references so they still
        resolve from the bundled location. Replies 304 when the client's
        If-Modified-Since is current."""
        files = list(manifest_glob(req, 'css', addons=mods, db=db))
        last_modified = get_last_modified(f[0] for f in files)
        if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        file_map = dict(files)

        # relative @import targets (not absolute paths or full URLs)
        rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
        # relative url() targets (not absolute, external, or data: URIs)
        rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)

        def reader(f):
            """read the a css file and absolutify all relative uris"""
            with open(f, 'rb') as fp:
                data = fp.read().decode('utf-8')

            path = file_map[f]
            # directory of the file relative to the web root: prefix for
            # the relative references found in the file
            web_dir = os.path.dirname(path)

            data = re.sub(
                rx_import,
                r"""@import \1%s/""" % (web_dir,),
                data,
            )

            data = re.sub(
                rx_url,
                r"""url(\1%s/""" % (web_dir,),
                data,
            )
            return data.encode('utf-8')

        content, checksum = concat_files((f[0] for f in files), reader)

        return make_conditional(
            req, req.make_response(content, [('Content-Type', 'text/css')]),
            last_modified, checksum)

    @openerpweb.httprequest
    def js(self, req, mods=None, db=None):
        """Serve all addon javascript concatenated into a single response,
        honoring If-Modified-Since for client-side caching."""
        files = [f[0] for f in manifest_glob(req, 'js', addons=mods, db=db)]
        last_modified = get_last_modified(files)
        if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        content, checksum = concat_js(files)

        return make_conditional(
            req, req.make_response(content, [('Content-Type', 'application/javascript')]),
            last_modified, checksum)

    @openerpweb.httprequest
    def qweb(self, req, mods=None, db=None):
        """Serve all addon qweb XML templates concatenated into a single
        response, honoring If-Modified-Since for client-side caching."""
        files = [f[0] for f in manifest_glob(req, 'qweb', addons=mods, db=db)]
        last_modified = get_last_modified(files)
        if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        content, checksum = concat_xml(files)

        return make_conditional(
            req, req.make_response(content, [('Content-Type', 'text/xml')]),
            last_modified, checksum)

    @openerpweb.jsonrequest
    def bootstrap_translations(self, req, mods):
        """ Load local translations from *.po files, as a temporary solution
        until we have established a valid session. This is meant only
        for translating the login page and db management chrome, using
        the browser's language. """
        # For performance reasons we only load a single translation, so for
        # sub-languages (that should only be partially translated) we load the
        # main language PO instead - that should be enough for the login screen.
        lang = req.lang.split('_')[0]

        translations_per_module = {}
        for addon_name in mods:
            # only addons that opted into bootstrap translations
            if openerpweb.addons_manifest[addon_name].get('bootstrap'):
                addons_path = openerpweb.addons_manifest[addon_name]['addons_path']
                f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
                if not os.path.exists(f_name):
                    continue
                translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}

        return {"modules": translations_per_module,
                "lang_parameters": None}

    @openerpweb.jsonrequest
    def translations(self, req, mods, lang):
        """Load "openerp-web" translations for ``mods`` in ``lang`` from
        the database (ir.translation), along with the language's
        formatting parameters (direction, date/number formats)."""
        res_lang = req.session.model('res.lang')
        ids = res_lang.search([("code", "=", lang)])
        lang_params = None
        if ids:
            lang_params = res_lang.read(ids[0], ["direction", "date_format", "time_format",
                                                 "grouping", "decimal_point", "thousands_sep"])

        # Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
        # done server-side when the language is loaded, so we only need to load the user's lang.
        ir_translation = req.session.model('ir.translation')
        translations_per_module = {}
        # search_read is ordered by module so groupby sees contiguous groups
        messages = ir_translation.search_read([('module','in',mods),('lang','=',lang),
                                               ('comments','like','openerp-web'),('value','!=',False),
                                               ('value','!=','')],
                                              ['module','src','value','lang'], order='module')
        for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
            translations_per_module.setdefault(mod,{'messages':[]})
            translations_per_module[mod]['messages'].extend({'id': m['src'],
                                                             'string': m['value']} \
                                                            for m in msg_group)
        return {"modules": translations_per_module,
                "lang_parameters": lang_params}

    @openerpweb.jsonrequest
    def version_info(self, req):
        # server protocol/version descriptor
        return openerp.service.web_services.RPC_VERSION_1
class Proxy(openerpweb.Controller):
    _cp_path = '/web/proxy'

    @openerpweb.jsonrequest
    def load(self, req, path):
        """ Proxies an HTTP request through a JSON request.

        It is strongly recommended to not request binary files through this,
        as the result will be a binary data blob as well.

        :param req: OpenERP request
        :param path: actual request path
        :return: file content
        """
        from werkzeug.test import Client
        from werkzeug.wrappers import BaseResponse

        # Replay the request against our own WSGI app and hand back the body.
        client = Client(req.httprequest.app, BaseResponse)
        response = client.get(path)
        return response.data
class Database(openerpweb.Controller):
_cp_path = "/web/database"
@openerpweb.jsonrequest
def get_list(self, req):
return db_list(req)
@openerpweb.jsonrequest
def create(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
return req.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
@openerpweb.jsonrequest
def duplicate(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
return req.session.proxy("db").duplicate_database(
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'])
@openerpweb.jsonrequest
def duplicate(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return req.session.proxy("db").duplicate_database(*duplicate_attrs)
@openerpweb.jsonrequest
def drop(self, req, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return req.session.proxy("db").drop(password, db)
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
return {'error': e.faultCode, 'title': 'Drop Database'}
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@openerpweb.httprequest
def backup(self, req, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
req.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return req.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename, req))],
{'fileToken': int(token)}
)
except xmlrpclib.Fault, e:
return simplejson.dumps([[],[{'error': e.faultCode, 'title': _('Backup Database')}]])
@openerpweb.httprequest
def restore(self, req, db_file, restore_pwd, new_db):
try:
data = base64.b64encode(db_file.read())
req.session.proxy("db").restore(restore_pwd, new_db, data)
return ''
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
raise Exception("AccessDenied")
@openerpweb.jsonrequest
def change_password(self, req, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return req.session.proxy("db").change_admin_password(old_password, new_password)
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
return {'error': e.faultCode, 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(openerpweb.Controller):
    # Session lifecycle endpoints: authentication, user password change,
    # session-scoped action storage and miscellaneous session info.
    _cp_path = "/web/session"

    def session_info(self, req):
        """Return a dict describing the current session (id, uid, db,
        login and user context). Ensures the session is valid first."""
        req.session.ensure_valid()
        return {
            "session_id": req.session_id,
            "uid": req.session._uid,
            "user_context": req.session.get_context() if req.session._uid else {},
            "db": req.session._db,
            "username": req.session._login,
        }

    @openerpweb.jsonrequest
    def get_session_info(self, req):
        # JSON-RPC wrapper around session_info()
        return self.session_info(req)

    @openerpweb.jsonrequest
    def authenticate(self, req, db, login, password, base_location=None):
        """Authenticate against database ``db`` and return the resulting
        session's description."""
        wsgienv = req.httprequest.environ
        # forward the host/client address to the authentication layer
        env = dict(
            base_location=base_location,
            HTTP_HOST=wsgienv['HTTP_HOST'],
            REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
        )
        req.session.authenticate(db, login, password, env)

        return self.session_info(req)

    @openerpweb.jsonrequest
    def change_password (self,req,fields):
        """Change the current user's password.

        ``fields`` is a list of {'name':..., 'value':...} form fields with
        ``old_pwd``, ``new_password`` and ``confirm_pwd`` entries. Returns
        ``{'new_password': ...}`` on success, otherwise an
        ``{'error':..., 'title':...}`` descriptor.
        """
        old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
                dict(map(operator.itemgetter('name', 'value'), fields)))
        if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
            return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
        if new_password != confirm_password:
            return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
        try:
            if req.session.model('res.users').change_password(
                old_password, new_password):
                return {'new_password':new_password}
        except Exception:
            return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
        return {'error': _('Error, password not changed !'), 'title': _('Change Password')}

    @openerpweb.jsonrequest
    def sc_list(self, req):
        # the user's menu shortcuts
        return req.session.model('ir.ui.view_sc').get_sc(
            req.session._uid, "ir.ui.menu", req.context)

    @openerpweb.jsonrequest
    def get_lang_list(self, req):
        # languages available for new-database creation
        try:
            return req.session.proxy("db").list_lang() or []
        except Exception, e:
            return {"error": e, "title": _("Languages")}

    @openerpweb.jsonrequest
    def modules(self, req):
        # return all installed modules. Web client is smart enough to not load a module twice
        return module_installed(req)

    @openerpweb.jsonrequest
    def save_session_action(self, req, the_action):
        """
        This method store an action object in the session object and returns an integer
        identifying that action. The method get_session_action() can be used to get
        back the action.

        :param the_action: The action to save in the session.
        :type the_action: anything
        :return: A key identifying the saved action.
        :rtype: integer
        """
        saved_actions = req.httpsession.get('saved_actions')
        if not saved_actions:
            saved_actions = {"next":0, "actions":{}}
            req.httpsession['saved_actions'] = saved_actions
        # we don't allow more than 10 stored actions
        if len(saved_actions["actions"]) >= 10:
            # evict the oldest entry (smallest key)
            del saved_actions["actions"][min(saved_actions["actions"])]
        key = saved_actions["next"]
        saved_actions["actions"][key] = the_action
        saved_actions["next"] = key + 1
        return key

    @openerpweb.jsonrequest
    def get_session_action(self, req, key):
        """
        Gets back a previously saved action. This method can return None if the action
        was saved since too much time (this case should be handled in a smart way).

        :param key: The key given by save_session_action()
        :type key: integer
        :return: The saved action or None.
        :rtype: anything
        """
        saved_actions = req.httpsession.get('saved_actions')
        if not saved_actions:
            return None
        return saved_actions["actions"].get(key)

    @openerpweb.jsonrequest
    def check(self, req):
        # raises if the session is no longer valid
        req.session.assert_valid()
        return None

    @openerpweb.jsonrequest
    def destroy(self, req):
        # flag the session for termination
        req.session._suicide = True
class Menu(openerpweb.Controller):
    # Loads the application menus (ir.ui.menu) for the web client.
    _cp_path = "/web/menu"

    @openerpweb.jsonrequest
    def get_user_roots(self, req):
        """ Return all root menu ids visible for the session user.

        :param req: A request object, with an OpenERP session attribute
        :type req: < session -> OpenERPSession >
        :return: the root menu ids
        :rtype: list(int)
        """
        s = req.session
        Menus = s.model('ir.ui.menu')
        # If a menu action is defined use its domain to get the root menu items
        user_menu_id = s.model('res.users').read([s._uid], ['menu_id'],
                                                 req.context)[0]['menu_id']

        menu_domain = [('parent_id', '=', False)]
        if user_menu_id:
            domain_string = s.model('ir.actions.act_window').read(
                [user_menu_id[0]], ['domain'],req.context)[0]['domain']
            if domain_string:
                # action domains are stored as python literal strings
                menu_domain = ast.literal_eval(domain_string)

        return Menus.search(menu_domain, 0, False, False, req.context)

    @openerpweb.jsonrequest
    def load(self, req):
        """ Loads all menu items (all applications and their sub-menus).

        :param req: A request object, with an OpenERP session attribute
        :type req: < session -> OpenERPSession >
        :return: the menu root
        :rtype: dict('children': menu_nodes)
        """
        Menus = req.session.model('ir.ui.menu')

        fields = ['name', 'sequence', 'parent_id', 'action']
        menu_root_ids = self.get_user_roots(req)
        menu_roots = Menus.read(menu_root_ids, fields, req.context) if menu_root_ids else []
        # synthetic root node holding all applications as children
        menu_root = {
            'id': False,
            'name': 'root',
            'parent_id': [-1, ''],
            'children': menu_roots,
            'all_menu_ids': menu_root_ids,
        }
        if not menu_roots:
            return menu_root

        # menus are loaded fully unlike a regular tree view, cause there are a
        # limited number of items (752 when all 6.1 addons are installed)
        menu_ids = Menus.search([('id', 'child_of', menu_root_ids)], 0, False, False, req.context)
        menu_items = Menus.read(menu_ids, fields, req.context)
        # adds roots at the end of the sequence, so that they will overwrite
        # equivalent menu items from full menu read when put into id:item
        # mapping, resulting in children being correctly set on the roots.
        menu_items.extend(menu_roots)
        menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!

        # make a tree using parent_id
        menu_items_map = dict(
            (menu_item["id"], menu_item) for menu_item in menu_items)
        for menu_item in menu_items:
            # parent_id is either False or a (id, display_name) pair
            if menu_item['parent_id']:
                parent = menu_item['parent_id'][0]
            else:
                parent = False
            if parent in menu_items_map:
                menu_items_map[parent].setdefault(
                    'children', []).append(menu_item)

        # sort by sequence a tree using parent_id
        for menu_item in menu_items:
            menu_item.setdefault('children', []).sort(
                key=operator.itemgetter('sequence'))

        return menu_root

    @openerpweb.jsonrequest
    def load_needaction(self, req, menu_ids):
        """ Loads needaction counters for specific menu ids.

        :return: needaction data
        :rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
        """
        return req.session.model('ir.ui.menu').get_needaction_data(menu_ids, req.context)

    @openerpweb.jsonrequest
    def action(self, req, menu_id):
        # still used by web_shortcut
        actions = load_actions_from_ir_values(req,'action', 'tree_but_open',
                                              [('ir.ui.menu', menu_id)], False)
        return {"action": actions}
class DataSet(openerpweb.Controller):
    # Generic model access: search/read, arbitrary method calls and
    # workflow signals, proxied over JSON-RPC.
    _cp_path = "/web/dataset"

    @openerpweb.jsonrequest
    def search_read(self, req, model, fields=False, offset=0, limit=False, domain=None, sort=None):
        # JSON-RPC wrapper around do_search_read()
        return self.do_search_read(req, model, fields, offset, limit, domain, sort)

    def do_search_read(self, req, model, fields=False, offset=0, limit=False, domain=None
                       , sort=None):
        """ Performs a search() followed by a read() (if needed) using the
        provided search criteria

        :param req: a JSON-RPC request object
        :type req: openerpweb.JsonRequest
        :param str model: the name of the model to search on
        :param fields: a list of the fields to return in the result records
        :type fields: [str]
        :param int offset: from which index should the results start being returned
        :param int limit: the maximum number of records to return
        :param list domain: the search domain for the query
        :param list sort: sorting directives
        :returns: A structure (dict) with two keys: ids (all the ids matching
                  the (domain, context) pair) and records (paginated records
                  matching fields selection set)
        :rtype: list
        """
        Model = req.session.model(model)

        ids = Model.search(domain, offset or 0, limit or False, sort or False,
                           req.context)
        if limit and len(ids) == limit:
            # the page is full: fetch the real match count for the pager
            length = Model.search_count(domain, req.context)
        else:
            length = len(ids) + (offset or 0)
        if fields and fields == ['id']:
            # shortcut read if we only want the ids
            return {
                'length': length,
                'records': [{'id': id} for id in ids]
            }

        records = Model.read(ids, fields or False, req.context)
        # read() does not guarantee ordering; restore the search() order
        records.sort(key=lambda obj: ids.index(obj['id']))
        return {
            'length': length,
            'records': records
        }

    @openerpweb.jsonrequest
    def load(self, req, model, id, fields):
        """Read a single record; returns {'value': {}} when missing."""
        m = req.session.model(model)
        value = {}
        r = m.read([id], False, req.context)
        if r:
            value = r[0]
        return {'value': value}

    def call_common(self, req, model, method, args, domain_id=None, context_id=None):
        # legacy entry point: positional-args-only model method call
        return self._call_kw(req, model, method, args, {})

    def _call_kw(self, req, model, method, args, kwargs):
        # Temporary implements future display_name special field for model#read()
        if method == 'read' and kwargs.get('context', {}).get('future_display_name'):
            if 'display_name' in args[1]:
                # emulate display_name with name_get(); note this mutates
                # args[1] by removing the pseudo-field before read()
                names = dict(req.session.model(model).name_get(args[0], **kwargs))
                args[1].remove('display_name')
                records = req.session.model(model).read(*args, **kwargs)
                for record in records:
                    record['display_name'] = \
                        names.get(record['id']) or "%s#%d" % (model, (record['id']))
                return records

        return getattr(req.session.model(model), method)(*args, **kwargs)

    @openerpweb.jsonrequest
    def call(self, req, model, method, args, domain_id=None, context_id=None):
        # positional-arguments model method call
        return self._call_kw(req, model, method, args, {})

    @openerpweb.jsonrequest
    def call_kw(self, req, model, method, args, kwargs):
        # model method call with keyword arguments
        return self._call_kw(req, model, method, args, kwargs)

    @openerpweb.jsonrequest
    def call_button(self, req, model, method, args, domain_id=None, context_id=None):
        """Invoke a button handler; return its follow-up action cleaned
        for the client, or False when there is none."""
        action = self._call_kw(req, model, method, args, {})
        if isinstance(action, dict) and action.get('type') != '':
            return clean_action(req, action)
        return False

    @openerpweb.jsonrequest
    def exec_workflow(self, req, model, id, signal):
        # send workflow signal ``signal`` to record ``id``
        return req.session.exec_workflow(model, id, signal)

    @openerpweb.jsonrequest
    def resequence(self, req, model, ids, field='sequence', offset=0):
        """ Re-sequences a number of records in the model, by their ids

        The re-sequencing starts at the first model of ``ids``, the sequence
        number is incremented by one after each record and starts at ``offset``

        :param ids: identifiers of the records to resequence, in the new sequence order
        :type ids: list(id)
        :param str field: field used for sequence specification, defaults to
                          "sequence"
        :param int offset: sequence number for first record in ``ids``, allows
                           starting the resequencing from an arbitrary number,
                           defaults to ``0``
        """
        m = req.session.model(model)
        if not m.fields_get([field]):
            # the model has no such field: nothing to resequence
            return False
        # python 2.6 has no start parameter
        for i, id in enumerate(ids):
            m.write(id, { field: i + offset })
        return True
class View(openerpweb.Controller):
    _cp_path = "/web/view"

    @openerpweb.jsonrequest
    def add_custom(self, req, view_id, arch):
        """Save ``arch`` as the current user's customized version of view
        ``view_id`` (stored as an ir.ui.view.custom record)."""
        req.session.model('ir.ui.view.custom').create({
            'user_id': req.session._uid,
            'ref_id': view_id,
            'arch': arch
        }, req.context)
        return {'result': True}

    @openerpweb.jsonrequest
    def undo_custom(self, req, view_id, reset=False):
        """Discard the user's customizations of view ``view_id``.

        With ``reset`` set, every customization record is removed;
        otherwise only the most recent one is. Returns whether anything
        was found to remove.
        """
        CustomView = req.session.model('ir.ui.view.custom')
        found = CustomView.search(
            [('user_id', '=', req.session._uid), ('ref_id', '=', view_id)],
            0, False, False, req.context)
        if not found:
            return {'result': False}

        to_unlink = found if reset else [found[0]]
        CustomView.unlink(to_unlink, req.context)
        return {'result': True}
class TreeView(View):
    _cp_path = "/web/treeview"

    @openerpweb.jsonrequest
    def action(self, req, model, id):
        """Return the 'tree_but_open' actions bound to record (model, id)."""
        return load_actions_from_ir_values(
            req, 'action', 'tree_but_open', [(model, id)], False)
class Binary(openerpweb.Controller):
    # Serves and stores binary fields: record images, file downloads,
    # uploads and the company logo.
    _cp_path = "/web/binary"

    @openerpweb.httprequest
    def image(self, req, model, id, field, **kw):
        """Stream the image stored in ``field`` of record ``id`` as PNG,
        with ETag-based caching and optional server-side resizing via a
        ``resize=width,height`` parameter (capped at 500x500)."""
        last_update = '__last_update'
        Model = req.session.model(model)
        headers = [('Content-Type', 'image/png')]
        etag = req.httprequest.headers.get('If-None-Match')
        # default-value images are tagged with the session id, record
        # images with their last modification date
        hashed_session = hashlib.md5(req.session_id).hexdigest()
        id = None if not id else simplejson.loads(id)
        if type(id) is list:
            id = id[0] # m2o
        if etag:
            if not id and hashed_session == etag:
                return werkzeug.wrappers.Response(status=304)
            else:
                date = Model.read([id], [last_update], req.context)[0].get(last_update)
                if hashlib.md5(date).hexdigest() == etag:
                    return werkzeug.wrappers.Response(status=304)

        retag = hashed_session
        try:
            if not id:
                # no record id: serve the field's default image
                res = Model.default_get([field], req.context).get(field)
                image_base64 = res
            else:
                res = Model.read([id], [last_update, field], req.context)[0]
                retag = hashlib.md5(res.get(last_update)).hexdigest()
                image_base64 = res.get(field)

            if kw.get('resize'):
                resize = kw.get('resize').split(',')
                if len(resize) == 2 and int(resize[0]) and int(resize[1]):
                    width = int(resize[0])
                    height = int(resize[1])
                    # resize maximum 500*500
                    if width > 500: width = 500
                    if height > 500: height = 500
                    image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')

            image_data = base64.b64decode(image_base64)

        except (TypeError, xmlrpclib.Fault):
            # missing or unreadable image: fall back to the placeholder
            image_data = self.placeholder(req)
        headers.append(('ETag', retag))
        headers.append(('Content-Length', len(image_data)))
        try:
            ncache = int(kw.get('cache'))
            headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
        except:
            # NOTE(review): bare except silently ignores a missing or
            # non-numeric 'cache' parameter (int(None) raises TypeError);
            # consider narrowing to (TypeError, ValueError).
            pass
        return req.make_response(image_data, headers)

    def placeholder(self, req, image='placeholder.png'):
        # raw bytes of a static fallback image shipped with the web addon
        addons_path = openerpweb.addons_manifest['web']['addons_path']
        return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()

    @openerpweb.httprequest
    def saveas(self, req, model, field, id=None, filename_field=None, **kw):
        """ Download link for files stored as binary fields.

        If the ``id`` parameter is omitted, fetches the default value for the
        binary field (via ``default_get``), otherwise fetches the field for
        that precise record.

        :param req: OpenERP request
        :type req: :class:`web.common.http.HttpRequest`
        :param str model: name of the model to fetch the binary from
        :param str field: binary field
        :param str id: id of the record from which to fetch the binary
        :param str filename_field: field holding the file's name, if any
        :returns: :class:`werkzeug.wrappers.Response`
        """
        Model = req.session.model(model)
        fields = [field]
        if filename_field:
            fields.append(filename_field)
        if id:
            res = Model.read([int(id)], fields, req.context)[0]
        else:
            res = Model.default_get(fields, req.context)
        filecontent = base64.b64decode(res.get(field, ''))
        if not filecontent:
            return req.not_found()
        else:
            # fallback filename is derived from the model and record id
            filename = '%s_%s' % (model.replace('.', '_'), id)
            if filename_field:
                filename = res.get(filename_field, '') or filename
            return req.make_response(filecontent,
                [('Content-Type', 'application/octet-stream'),
                 ('Content-Disposition', content_disposition(filename, req))])

    @openerpweb.httprequest
    def saveas_ajax(self, req, data, token):
        """Variant of saveas() driven by a JSON payload (model, field and
        optionally data/id/filename_field/context); the ``token`` is
        echoed in a ``fileToken`` cookie so the client can detect
        download completion."""
        jdata = simplejson.loads(data)
        model = jdata['model']
        field = jdata['field']
        data = jdata['data']
        id = jdata.get('id', None)
        filename_field = jdata.get('filename_field', None)
        context = jdata.get('context', {})

        Model = req.session.model(model)
        fields = [field]
        if filename_field:
            fields.append(filename_field)
        if data:
            # inline payload takes precedence over a database read
            res = { field: data }
        elif id:
            res = Model.read([int(id)], fields, context)[0]
        else:
            res = Model.default_get(fields, context)
        filecontent = base64.b64decode(res.get(field, ''))
        if not filecontent:
            raise ValueError(_("No content found for field '%s' on '%s:%s'") %
                (field, model, id))
        else:
            filename = '%s_%s' % (model.replace('.', '_'), id)
            if filename_field:
                filename = res.get(filename_field, '') or filename
            return req.make_response(filecontent,
                headers=[('Content-Type', 'application/octet-stream'),
                         ('Content-Disposition', content_disposition(filename, req))],
                cookies={'fileToken': int(token)})

    @openerpweb.httprequest
    def upload(self, req, callback, ufile):
        # TODO: might be useful to have a configuration flag for max-length file uploads
        # Returns a script notifying the opener window of the uploaded
        # file's metadata (or of the failure).
        out = """<script language="javascript" type="text/javascript">
            var win = window.top.window;
            win.jQuery(win).trigger(%s, %s);
        </script>"""
        try:
            data = ufile.read()
            args = [len(data), ufile.filename,
                    ufile.content_type, base64.b64encode(data)]
        except Exception, e:
            args = [False, e.message]
        return out % (simplejson.dumps(callback), simplejson.dumps(args))

    @openerpweb.httprequest
    def upload_attachment(self, req, callback, model, id, ufile):
        """Create an ir.attachment from the uploaded file, bound to record
        (model, id), and notify the opener window via ``callback``."""
        Model = req.session.model('ir.attachment')
        out = """<script language="javascript" type="text/javascript">
            var win = window.top.window;
            win.jQuery(win).trigger(%s, %s);
        </script>"""
        try:
            attachment_id = Model.create({
                'name': ufile.filename,
                'datas': base64.encodestring(ufile.read()),
                'datas_fname': ufile.filename,
                'res_model': model,
                'res_id': int(id)
            }, req.context)
            args = {
                'filename': ufile.filename,
                'id': attachment_id
            }
        except xmlrpclib.Fault, e:
            args = {'error':e.faultCode }
        return out % (simplejson.dumps(callback), simplejson.dumps(args))

    @openerpweb.httprequest
    def company_logo(self, req, dbname=None):
        # TODO add etag, refactor to use /image code for etag
        """Serve the logo of the current user's company — or, without a
        session, of ``dbname``'s admin company — falling back to a static
        placeholder when unavailable."""
        uid = None
        if req.session._db:
            dbname = req.session._db
            uid = req.session._uid
        elif dbname is None:
            dbname = db_monodb(req)

        if uid is None:
            uid = openerp.SUPERUSER_ID

        if not dbname:
            image_data = self.placeholder(req, 'logo.png')
        else:
            registry = openerp.modules.registry.RegistryManager.get(dbname)
            with registry.cursor() as cr:
                user = registry.get('res.users').browse(cr, uid, uid)
                if user.company_id.logo_web:
                    image_data = user.company_id.logo_web.decode('base64')
                else:
                    image_data = self.placeholder(req, 'nologo.png')
        headers = [
            ('Content-Type', 'image/png'),
            ('Content-Length', len(image_data)),
        ]
        return req.make_response(image_data, headers)
class Action(openerpweb.Controller):
    _cp_path = "/web/action"

    @openerpweb.jsonrequest
    def load(self, req, action_id, do_not_eval=False):
        """Read action ``action_id`` (numeric id or "module.xmlid"
        external id) and return it cleaned up for the web client, or
        False when it cannot be read."""
        try:
            action_id = int(action_id)
        except ValueError:
            # not an integer: resolve it as a "module.xmlid" external id
            try:
                module, xmlid = action_id.split('.', 1)
                model, action_id = req.session.model('ir.model.data').get_object_reference(module, xmlid)
                assert model.startswith('ir.actions.')
            except Exception:
                action_id = 0   # force failed read

        Actions = req.session.model('ir.actions.actions')
        base_action = Actions.read([action_id], ['type'], req.context)
        if not base_action:
            return False

        action_type = base_action[0]['type']
        ctx = {}
        if action_type == 'ir.actions.report.xml':
            ctx.update({'bin_size': True})
        ctx.update(req.context)

        records = req.session.model(action_type).read([action_id], False, ctx)
        if not records:
            return False
        return clean_action(req, records[0])

    @openerpweb.jsonrequest
    def run(self, req, action_id):
        """Execute server action ``action_id``; return its follow-up
        action cleaned for the client, or False when there is none."""
        result = req.session.model('ir.actions.server').run(
            [action_id], req.context)
        return clean_action(req, result) if result else False
class Export(View):
_cp_path = "/web/export"
@openerpweb.jsonrequest
def formats(self, req):
""" Returns all valid export formats
:returns: for each export format, a pair of identifier and printable name
:rtype: [(str, str)]
"""
return sorted([
controller.fmt
for path, controller in openerpweb.controllers_path.iteritems()
if path.startswith(self._cp_path)
if hasattr(controller, 'fmt')
], key=operator.itemgetter("label"))
def fields_get(self, req, model):
Model = req.session.model(model)
fields = Model.fields_get(False, req.context)
return fields
    @openerpweb.jsonrequest
    def get_fields(self, req, model, prefix='', parent_name= '',
                   import_compat=True, parent_field_type=None,
                   exclude=None):
        """Return the exportable fields of ``model`` as nodes for the
        export dialog's field tree.

        Each node carries an ``id``/``value`` (slash-separated field
        path), a printable ``string`` and, for relational fields, a
        ``children`` flag plus the ``params`` needed to lazily expand the
        subtree.
        """
        # in import-compatible mode, m2o subfields are not expanded
        if import_compat and parent_field_type == "many2one":
            fields = {}
        else:
            fields = self.fields_get(req, model)

        # 'id' is hidden in import mode, renamed '.id' (db id) otherwise
        if import_compat:
            fields.pop('id', None)
        else:
            fields['.id'] = fields.pop('id', {'string': 'ID'})

        # sort alphabetically on the user-visible label
        fields_sequence = sorted(fields.iteritems(),
            key=lambda field: field[1].get('string', ''))

        records = []
        for field_name, field in fields_sequence:
            if import_compat:
                if exclude and field_name in exclude:
                    continue
                if field.get('readonly'):
                    # If none of the field's states unsets readonly, skip the field
                    if all(dict(attrs).get('readonly', True)
                           for attrs in field.get('states', {}).values()):
                        continue

            id = prefix + (prefix and '/'or '') + field_name
            name = parent_name + (parent_name and '/' or '') + field['string']
            record = {'id': id, 'string': name,
                      'value': id, 'children': False,
                      'field_type': field.get('type'),
                      'required': field.get('required'),
                      'relation_field': field.get('relation_field')}
            records.append(record)

            # relational fields are expandable, but only up to two levels deep
            if len(name.split('/')) < 3 and 'relation' in field:
                ref = field.pop('relation')
                record['value'] += '/id'
                record['params'] = {'model': ref, 'prefix': id, 'name': name}

                if not import_compat or field['type'] == 'one2many':
                    # m2m field in import_compat is childless
                    record['children'] = True

        return records
@openerpweb.jsonrequest
def namelist(self,req, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = req.session.model("ir.exports").read([export_id])[0]
export_fields_list = req.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
req, model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, req, model, export_fields):
    """Map each field path in ``export_fields`` to its display label.

    Paths with a sub-field component (``base/rest``) are resolved
    recursively through :meth:`graft_subfields` on the relation's model,
    batching all sub-fields of one base field into a single call.
    """
    info = {}
    fields = self.fields_get(req, model)
    if ".id" in export_fields:
        fields['.id'] = fields.pop('id', {'string': 'ID'})
    # To make fields retrieval more efficient, fetch all sub-fields of a
    # given field at the same time. Because the order in the export list is
    # arbitrary, this requires ordering all sub-fields of a given field
    # together so they can be fetched at the same time
    #
    # Works the following way:
    # * sort the list of fields to export, the default sorting order will
    #   put the field itself (if present, for xmlid) and all of its
    #   sub-fields right after it
    # * then, group on: the first field of the path (which is the same for
    #   a field and for its subfields and the length of splitting on the
    #   first '/', which basically means grouping the field on one side and
    #   all of the subfields on the other. This way, we have the field (for
    #   the xmlid) with length 1, and all of the subfields with the same
    #   base but a length "flag" of 2
    # * if we have a normal field (length 1), just add it to the info
    #   mapping (with its string) as-is
    # * otherwise, recursively call fields_info via graft_subfields.
    #   all graft_subfields does is take the result of fields_info (on the
    #   field's model) and prepend the current base (current field), which
    #   rebuilds the whole sub-tree for the field
    #
    # result: because we're not fetching the fields_get for half the
    # database models, fetching a namelist with a dozen fields (including
    # relational data) falls from ~6s to ~300ms (on the leads model).
    # export lists with no sub-fields (e.g. import_compatible lists with
    # no o2m) are even more efficient (from the same 6s to ~170ms, as
    # there's a single fields_get to execute)
    for (base, length), subfields in itertools.groupby(
            sorted(export_fields),
            lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
        subfields = list(subfields)
        if length == 2:
            # subfields is a seq of $base/*rest, and not loaded yet
            info.update(self.graft_subfields(
                req, fields[base]['relation'], base, fields[base]['string'],
                subfields
            ))
        else:
            info[base] = fields[base]['string']
    return info
def graft_subfields(self, req, model, prefix, prefix_string, fields):
    """Resolve sub-field paths on *model* and re-prefix the results.

    Strips the ``prefix/`` component from each path in *fields*, resolves
    the remainders via :meth:`fields_info` on the relation's model, then
    yields ``(technical_path, label_path)`` pairs with both prefixes
    grafted back on.
    """
    export_fields = [field.split('/', 1)[1] for field in fields]
    return (
        (prefix + '/' + k, prefix_string + '/' + v)
        for k, v in self.fields_info(req, model, export_fields).iteritems())
#noinspection PyPropertyDefinition
@property
def content_type(self):
    """ Provides the format's content type (MIME type sent in the
    Content-Type header of the download response) """
    raise NotImplementedError()
def filename(self, base):
    """ Creates a valid filename for the format (with extension) from the
    provided base name (extension-less)
    """
    raise NotImplementedError()
def from_data(self, fields, rows):
    """ Conversion method from OpenERP's export data to whatever the
    current export class outputs

    :param list fields: a list of fields to export
    :param list rows: a list of records to export
    :returns: the serialized export payload
    :rtype: bytes
    """
    raise NotImplementedError()
@openerpweb.httprequest
def index(self, req, data, token):
    """Stream an export file for the requested records.

    ``data`` is a JSON payload carrying model, fields, ids, domain and
    import_compat; ``token`` is echoed back in a ``fileToken`` cookie so
    the client can detect that the download has started.
    """
    model, fields, ids, domain, import_compat = \
        operator.itemgetter('model', 'fields', 'ids', 'domain',
                            'import_compat')(
            simplejson.loads(data))
    Model = req.session.model(model)
    # explicitly selected ids win over the search domain
    ids = ids or Model.search(domain, 0, False, False, req.context)
    field_names = map(operator.itemgetter('name'), fields)
    import_data = Model.export_data(ids, field_names, req.context).get('datas',[])
    if import_compat:
        # import-compatible exports use technical names as headers
        columns_headers = field_names
    else:
        columns_headers = [val['label'].strip() for val in fields]
    return req.make_response(self.from_data(columns_headers, import_data),
        headers=[('Content-Disposition',
                  content_disposition(self.filename(model), req)),
                 ('Content-Type', self.content_type)],
        cookies={'fileToken': int(token)})
class CSVExport(Export):
    """Export serializer producing CSV output (Python 2: the csv module
    works on byte strings, hence the explicit UTF-8 encoding below)."""

    _cp_path = '/web/export/csv'
    fmt = {'tag': 'csv', 'label': 'CSV'}

    @property
    def content_type(self):
        return 'text/csv;charset=utf8'

    def filename(self, base):
        return base + '.csv'

    def from_data(self, fields, rows):
        """Serialize ``rows`` into a fully-quoted, UTF-8 encoded CSV string."""
        fp = StringIO()
        writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
        writer.writerow([name.encode('utf-8') for name in fields])
        for data in rows:
            row = []
            for d in data:
                if isinstance(d, basestring):
                    # newlines/tabs inside a cell would break naive consumers
                    d = d.replace('\n',' ').replace('\t',' ')
                    try:
                        d = d.encode('utf-8')
                    except UnicodeError:
                        # already a byte string: leave as-is
                        pass
                # OpenERP uses False for "no value"; export as empty cell
                if d is False: d = None
                row.append(d)
            writer.writerow(row)
        fp.seek(0)
        data = fp.read()
        fp.close()
        return data
class ExcelExport(Export):
    """Export serializer producing Excel (.xls) output via xlwt."""

    _cp_path = '/web/export/xls'
    fmt = {
        'tag': 'xls',
        'label': 'Excel',
        # lets the client disable the format when xlwt is not installed
        'error': None if xlwt else "XLWT required"
    }

    @property
    def content_type(self):
        return 'application/vnd.ms-excel'

    def filename(self, base):
        return base + '.xls'

    def from_data(self, fields, rows):
        """Serialize ``rows`` into a binary .xls workbook (single sheet,
        header row followed by the data rows)."""
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')
        for i, fieldname in enumerate(fields):
            worksheet.write(0, i, fieldname)
            worksheet.col(i).width = 8000 # around 220 pixels
        style = xlwt.easyxf('align: wrap yes')
        for row_index, row in enumerate(rows):
            for cell_index, cell_value in enumerate(row):
                if isinstance(cell_value, basestring):
                    # carriage returns render badly in Excel cells
                    cell_value = re.sub("\r", " ", cell_value)
                # OpenERP uses False for "no value"; export as empty cell
                if cell_value is False: cell_value = None
                worksheet.write(row_index + 1, cell_index, cell_value, style)
        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()
        return data
class Reports(View):
    """Controller running reports through the OpenERP ``report`` service
    and streaming the result back to the browser."""

    _cp_path = "/web/report"
    # seconds to wait between report_get polls
    POLLING_DELAY = 0.25
    # report format -> MIME type for the Content-Type header
    TYPES_MAPPING = {
        'doc': 'application/vnd.ms-word',
        'html': 'text/html',
        'odt': 'application/vnd.oasis.opendocument.text',
        'pdf': 'application/pdf',
        'sxw': 'application/vnd.sun.xml.writer',
        'xls': 'application/vnd.ms-excel',
    }

    @openerpweb.httprequest
    def index(self, req, action, token):
        """Run the report described by the JSON ``action`` payload.

        Blocks, polling every POLLING_DELAY seconds, until the report
        service marks the report as finished, then streams it as a file
        download (``token`` is echoed back in a ``fileToken`` cookie).
        """
        action = simplejson.loads(action)
        report_srv = req.session.proxy("report")
        context = dict(req.context)
        context.update(action["context"])
        report_data = {}
        report_ids = context["active_ids"]
        if 'report_type' in action:
            report_data['report_type'] = action['report_type']
        if 'datas' in action:
            # ids embedded in the datas override the context's active_ids
            if 'ids' in action['datas']:
                report_ids = action['datas'].pop('ids')
            report_data.update(action['datas'])
        report_id = report_srv.report(
            req.session._db, req.session._uid, req.session._password,
            action["report_name"], report_ids,
            report_data, context)
        report_struct = None
        # poll until the asynchronous report generation completes
        while True:
            report_struct = report_srv.report_get(
                req.session._db, req.session._uid, req.session._password, report_id)
            if report_struct["state"]:
                break
            time.sleep(self.POLLING_DELAY)
        # result is base64-encoded, possibly zlib-compressed on top
        report = base64.b64decode(report_struct['result'])
        if report_struct.get('code') == 'zlib':
            report = zlib.decompress(report)
        report_mimetype = self.TYPES_MAPPING.get(
            report_struct['format'], 'octet-stream')
        # derive a file name: explicit action name, else the report
        # definition's name, else the technical report name
        file_name = action.get('name', 'report')
        if 'name' not in action:
            reports = req.session.model('ir.actions.report.xml')
            res_id = reports.search([('report_name', '=', action['report_name']),],
                                    0, False, False, context)
            if len(res_id) > 0:
                file_name = reports.read(res_id[0], ['name'], context)['name']
            else:
                file_name = action['report_name']
        file_name = '%s.%s' % (file_name, report_struct['format'])
        return req.make_response(report,
             headers=[
                 ('Content-Disposition', content_disposition(file_name, req)),
                 ('Content-Type', report_mimetype),
                 ('Content-Length', len(report))],
             cookies={'fileToken': int(token)})
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| mrshelly/openerp71313 | openerp/addons/web/controllers/main.py | Python | agpl-3.0 | 66,071 | [
"VisIt"
] | fc5362f8591584fb90b20b1696016d47b5d729f26433f070a5334a1f7ba20e7d |
#!/usr/bin/env python3
# Copyright 2016 (c) Brian McKean
'''
File name: set_host_os.py
Author: Brian McKean
Date created: 10/16/2016
Python Version: 3.5
input: a list of ip addresses, string
input: a keyfile for accessing AWS
output: string modified to instance:OS pair
dependencies:
uses getHosts.py
'''
from gethost import getHost
import subprocess
import sys
import argparse

## parse input for string and keyfile
parser = argparse.ArgumentParser(description="probes servers at ip addresses to get linux distro")
# BUG FIX: argparse's ``type`` must be a callable such as ``str``; the bare
# name ``string`` raised NameError when this module ran.
parser.add_argument('-i', type=str, action='store', dest='keyfile', help="-i keyfile")
parser.add_argument("server_list", type=str,
                    help="list of ip addresses to probe")
# BUG FIX: the arguments were never parsed, and the names ``keyfile`` /
# ``serverlist`` referenced below were undefined.
args = parser.parse_args()

## Take list of servers and try each type of login
# unbuntu is for ubuntu
# ec2-user RHEL5 & Fedora & SUSE
# root for RHEL5 & SUSE
#
# From AWS web site
# Use the ssh command to connect to the instance.
# You'll specify the private key (.pem) file and user_name@public_dns_name.
# For Amazon Linux, the user name is ec2-user.
# For RHEL5, the user name is either root or ec2-user.
# For Ubuntu, the user name is ubuntu.
# For Fedora, the user name is either fedora or ec2-user.
# For SUSE Linux, the user name is either root or ec2-user.
# Otherwise, if ec2-user and root don't work, check with your AMI provider.
print(args.keyfile, args.server_list)

## For now need to specify user id for ssh
## Need to specify command for packacge install
## get list of AWS hosts for account
#instance_list = []
#hosts = getHost()
#hosts = hosts.splitlines()
#for line in hosts:
#    line_elements = line.split()
#    #print(line, line_elements)
#    if '#' in line_elements[0]:
#        continue
#    instance_list.append(line_elements[0])
#
#
#
### from list find instance names matching string
##print(instance_list)
#cmd = package_mgr+"install sysstat"
#for instance in instance_list:
#    ## from this list ssh into machines using IP address
#    ## Install monitor software
#    print("Installing sar on {}".format(instance))
#    cmd_string = "ssh -t -i {} {}@{} {}".format(key_file, login, instance, cmd)
#    cmd_string = ["ssh","-t","-o","StrictHostKeyChecking=no",
#                  "-i",key_file,"ec2user@{}".format(instance),
#                  "sudo","yum","install","sysstat"]
#    #print(cmd_string)
#    ssh = subprocess.Popen(["ssh","-t","-o","StrictHostKeyChecking=no",
#                            "-i",key_file,"ec2-user@{}".format(instance),
#                            "sudo","yum","install","sysstat"],
#                           shell=False,
#                           stdout=subprocess.PIPE,
#                           stderr=subprocess.PIPE)
#    result = ssh.stdout.readlines()
#    if result == []:
#        error = ssh.stderr.readlines()
#        print ("ERROR: %s" % error)
#    else:
#        print (result)
| co-bri/quevedo | v2_scripts/set_host_os.py | Python | mit | 2,899 | [
"Brian"
] | 90d1e200534f7d63c7406b88c53bcbe93e044840047fbb17f92ccdf2dec977f9 |
from castle.cms import registration
from castle.cms.utils import get_email_from_address
from castle.cms.utils import send_email
from castle.cms.utils import verify_recaptcha
from castle.cms.widgets import ReCaptchaFieldWidget
from plone.app.users.browser.register import RegistrationForm as BaseRegistrationForm
from plone.app.users.schema import checkEmailAddress
from plone.autoform.form import AutoExtensibleForm
from plone.registry.interfaces import IRegistry
from plone.z3cform.fieldsets import utils as z3cform_utils
from plone.z3cform.fieldsets.utils import move
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone import PloneMessageFactory as _
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.statusmessages.interfaces import IStatusMessage
from z3c.form import button
from z3c.form import field
from z3c.form import form
from z3c.form import interfaces
from z3c.form.action import ActionErrorOccurred
from z3c.form.interfaces import WidgetActionExecutionError
from zope import schema
from zope.component import getUtility
from zope.component import queryUtility
from zope.event import notify
from zope.interface import Interface
from zope.interface import Invalid
class IEmailConfirmation(Interface):
    """Schema for the pre-registration email-verification form."""

    # address to which the confirmation link is mailed
    email = schema.ASCIILine(
        title=_(u'label_email', default=u'E-mail'),
        description=u'',
        required=True,
        constraint=checkEmailAddress)

    # placeholder rendered as a ReCaptcha widget when a key is configured
    captcha = schema.TextLine(
        title=u"Captcha",
        required=False)
class EmailConfirmation(AutoExtensibleForm, form.Form):
    """Form asking a prospective user to verify their email address.

    On successful submission a confirmation code is generated, stored in
    the RegistrationStorage and mailed to the address; the ``@@register``
    view later checks that code before allowing registration.
    """

    label = u"Confirm your email address"
    description = (u"Before you can begin the registration process, you need to "
                   u"verify your email address.")
    formErrorsMessage = _('There were errors.')
    ignoreContext = True
    schema = IEmailConfirmation
    enableCSRFProtection = True
    template = ViewPageTemplateFile('templates/confirm-email.pt')
    # flipped to True once the verification mail has been sent
    sent = False

    def __init__(self, context, request):
        super(EmailConfirmation, self).__init__(context, request)
        # captcha only applies when a recaptcha private key is configured
        registry = queryUtility(IRegistry)
        self.has_captcha = registry.get('castle.recaptcha_private_key') is not None
        portal_membership = getToolByName(self.context, 'portal_membership')
        self.isAnon = portal_membership.isAnonymousUser()

    def send_mail(self, email, item):
        """Mail the confirmation link (built from ``item['code']``) to *email*."""
        url = '%s/@@register?confirmed_email=%s&confirmed_code=%s' % (
            self.context.absolute_url(), email, item['code'])
        text = """
Copy and paste this url into your web browser to confirm your address: %s
""" % url
        html = """
<p>You have requested registration, please
   <a href="%s">confirm your email address by clicking on this link</a>.
</p>
<p>
If that does not work, copy and paste this urls into your web browser: %s
</p>""" % (url, url)
        send_email(
            [email], "Email Confirmation",
            html=html, text=text)

    def updateFields(self):
        super(EmailConfirmation, self).updateFields()
        if self.has_captcha and self.isAnon:
            self.fields['captcha'].widgetFactory = ReCaptchaFieldWidget
        else:
            # no captcha configured, or user already authenticated: hide it
            self.fields['captcha'].mode = interfaces.HIDDEN_MODE
        # email should be the first field on the form
        move(self, 'email', before='*')

    @button.buttonAndHandler(
        _(u'label_verify', default=u'Verify'), name='verify'
    )
    def action_verify(self, action):
        """Validate the captcha, store a confirmation code and mail it out."""
        data, errors = self.extractData()
        registry = queryUtility(IRegistry)
        has_captcha = registry.get('castle.recaptcha_private_key') is not None
        if has_captcha:
            if not verify_recaptcha(self.request):
                notify(
                    ActionErrorOccurred(
                        action,
                        WidgetActionExecutionError('captcha', Invalid('Invalid Recaptcha'))))
                return
        if not errors:
            storage = registration.RegistrationStorage(self.context)
            item = storage.add(data['email'])
            self.send_mail(data['email'], item)
            self.sent = True
            IStatusMessage(self.request).addStatusMessage(
                'Verification email has been sent to your email.', type='info')
class IHiddenVerifiedEmail(Interface):
    """Hidden fields carrying the verified email and its confirmation code
    through the registration form submission."""

    # email address already confirmed via the EmailConfirmation form
    confirmed_email = schema.TextLine()
    # one-time code proving that confirmation
    confirmed_code = schema.TextLine()
class RegistrationForm(BaseRegistrationForm):
    """Registration form gated on a previously verified email address.

    Visitors are redirected to ``@@register-confirm-email`` unless the
    request carries a confirmed email / code pair matching an entry in the
    RegistrationStorage. Depending on the ``plone.review_registrations``
    registry setting, successful submissions are either processed
    immediately or queued for admin review.
    """

    def get_confirmed_email(self):
        """Return the confirmed email from the request (raw or form-prefixed key)."""
        req = self.request
        return req.form.get('confirmed_email', req.form.get('form.widgets.confirmed_email', ''))

    def get_confirmed_code(self):
        """Return the confirmation code from the request (raw or form-prefixed key)."""
        req = self.request
        return req.form.get(
            'confirmed_code', req.form.get('form.widgets.confirmed_code', ''))

    def verify(self):
        """Check the request's email/code pair against the registration storage."""
        email = self.get_confirmed_email()
        code = self.get_confirmed_code()
        if not email or not code:
            return False
        storage = registration.RegistrationStorage(self.context)
        entry = storage.get(email)
        if entry is None:
            return False
        if entry['code'] == code:
            return True
        return False

    def updateWidgets(self):
        if self.showForm:
            super(RegistrationForm, self).updateWidgets()
        else:
            # only the hidden confirmation fields are rendered
            form.Form.updateWidgets(self)
        self.widgets['confirmed_email'].value = self.get_confirmed_email()
        self.widgets['confirmed_code'].value = self.get_confirmed_code()

    def validate_registration(self, action, data):
        super(RegistrationForm, self).validate_registration(action, data)
        # the entered email must match the one that was verified
        if 'email' in data and data['email'].lower() != self.get_confirmed_email().lower():
            err_str = u'Email address you have entered does not match email used in verification'
            notify(
                ActionErrorOccurred(
                    action, WidgetActionExecutionError('email', Invalid(err_str))
                )
            )
        # keep the hidden verification fields out of the member data
        del data['confirmed_email']
        del data['confirmed_code']

    def handle_join_success(self, data):
        email = self.get_confirmed_email()
        storage = registration.RegistrationStorage(self.context)
        storage.remove(email)
        registry = getUtility(IRegistry)
        try:
            review = registry['plone.review_registrations']
        except KeyError:
            review = False
        if review:
            # park the registration for manual approval instead of joining
            storage = registration.RegistrationReviewStorage(self.context)
            storage.add(email, data)
            self.send_email_to_admin_to_review(email)
            self.request.response.redirect('%s/@@under-review?email=%s' % (
                self.context.absolute_url(), email))
        else:
            return super(RegistrationForm, self).handle_join_success(data)

    def send_email_to_admin_to_review(self, email):
        """Notify the site email address that a registration awaits review."""
        url = '%s/@@review-registration-requests' % (
            self.context.absolute_url())
        text = """
Hi,
A new user with the email %(email)s has signed up.
You can review the request at %(url)s
""" % {
            'url': url,
            'email': email
        }
        # BUG FIX: the anchor previously used a positional "%s" alongside the
        # named "%(email)s" specifier; %-formatting against a mapping requires
        # every specifier to be named, so this raised TypeError at runtime.
        html = """
<p>Hi,</p>
A new user with the email %(email)s has signed up.
Please <a href="%(url)s">review the request</a>
</p>""" % {
            'url': url,
            'email': email
        }
        send_email(
            [get_email_from_address()], "User registration needs review",
            html=html, text=text)

    def updateFields(self):
        super(RegistrationForm, self).updateFields()
        if self.showForm:
            z3cform_utils.add(self, IHiddenVerifiedEmail, prefix="")
        else:
            self.fields = field.Fields(IHiddenVerifiedEmail, prefix="")
        self.fields['confirmed_email'].mode = interfaces.HIDDEN_MODE
        self.fields['confirmed_code'].mode = interfaces.HIDDEN_MODE

    def __call__(self):
        # refuse to render until the email has been verified
        if not self.verify():
            return self.request.response.redirect('%s/@@register-confirm-email' % (
                self.context.absolute_url()))
        return super(RegistrationForm, self).__call__()
class ReviewRequests(BrowserView):
    """Admin view listing pending registrations for approval or denial."""

    def enabled(self):
        """Whether registration review is switched on in the registry."""
        registry = getUtility(IRegistry)
        try:
            return registry['plone.review_registrations']
        except KeyError:
            return False

    def send_approve_mail(self, email, data):
        """Mail the approved user that their account is active.

        NOTE(review): the templates below expect ``fullname``, ``username``
        and ``url`` keys in *data* -- confirm the review-storage entries
        actually carry a ``url`` key, otherwise this raises KeyError.
        """
        data = data.copy()
        text = """
Hello %(fullname)s,
The user with username "%(username)s" has been approved.
You can visit the site at: %(url)s
""" % data
        html = """
<p>Hello %(fullname)s,</p>
<p>The user with username "%(username)s" has been approved.</p>
<p>You can visit the site at: <a href="%(url)s">%(url)s</a>
</p>""" % data
        send_email(
            [email], "User approved",
            html=html, text=text)

    def __call__(self):
        storage = registration.RegistrationReviewStorage(self.context)
        if self.request.REQUEST_METHOD == 'POST':
            email = self.request.form.get('email')
            if self.request.form.get('approve'):
                # replay the stored registration data through the stock form
                data = storage.get(email).copy()
                data.pop('code')
                data.pop('created')
                reg_form = BaseRegistrationForm(self.context, self.request)
                reg_form.updateFields()
                reg_form.updateWidgets()
                reg_form.handle_join_success(data)
                if data.get('password'):
                    # won't get an email so sent them out something about getting approved
                    self.send_approve_mail(email, data)
                storage.remove(email)
            elif self.request.form.get('deny'):
                storage.remove(email)
            elif self.request.form.get('enable'):
                getUtility(IRegistry)['plone.review_registrations'] = True
            elif self.request.form.get('disable'):
                getUtility(IRegistry)['plone.review_registrations'] = False
        self.storage = storage
        self.data = storage._data
        return self.index()
class UnderReview(BrowserView):
    """Landing page shown to a user whose registration awaits admin review."""

    def __call__(self):
        # expose the pending registration entry (if any) to the template
        requested_email = self.request.form.get('email')
        review_storage = registration.RegistrationReviewStorage(self.context)
        self.data = review_storage.get(requested_email)
        return self.index()
| castlecms/castle.cms | castle/cms/browser/registration.py | Python | gpl-2.0 | 10,263 | [
"VisIt"
] | 5daff79f5307d397128448059e8f42ef9f167452f7a3cd82af025ed941ecfc05 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Python 3.6.8
##########################################################################################
# Armoured Commander #
# The World War II Tank Commander Roguelike #
##########################################################################################
##########################################################################################
#
# Copyright 2015-2017 Gregory Adam Scott (sudasana@gmail.com)
#
# This file is part of Armoured Commander.
#
# Armoured Commander is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Armoured Commander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Armoured Commander, in the form of a file named "LICENSE".
# If not, see <http://www.gnu.org/licenses/>.
#
# xp_loader.py is covered under a MIT License (MIT) and is Copyright (c) 2015 Sean Hagar
# see XpLoader_LICENSE.txt for more info.
#
##########################################################################################
import sys, os # for command line functions, for SDL window instruction, other OS-related stuff
if getattr(sys, 'frozen', False): # needed for pyinstaller
os.chdir(sys._MEIPASS)
os.environ['PYSDL2_DLL_PATH'] = os.getcwd() # set sdl2 dll path
##### Libraries #####
from datetime import datetime # for recording date and time in campaign journal
from math import atan2, degrees # more "
from math import pi, floor, ceil, sqrt # math functions
from operator import attrgetter # for list sorting
from textwrap import wrap # for breaking up game messages
import csv # for loading campaign info
import libtcodpy as libtcod # The Doryen Library
import random # for randomly selecting items from a list
import shelve # for saving and loading games
import time # for wait function
import xml.etree.ElementTree as xml # ElementTree library for xml
import xp_loader # for loading image files
import gzip # for loading image files
import zipfile, io # for loading from zip archive
MIXER_ACTIVE = True
try:
import sdl2.sdlmixer as mixer # sound effects
except:
MIXER_ACTIVE = False
from steamworks import STEAMWORKS # main steamworks library
from armcom_defs import * # general definitions
from armcom_vehicle_defs import * # vehicle stat definitions
##### Constants #####
DEBUG = False # enable in-game debug commands
NAME = 'Armoured Commander'
VERSION = '1.0' # determines saved game compatability
SUBVERSION = '9' # descriptive only, no effect on compatability
WEBSITE = 'www.armouredcommander.com'
GITHUB = 'github.com/sudasana/armcom'
COMPATIBLE_VERSIONS = ['Beta 3.0'] # list of older versions for which the savegame
# is compatible with this version
DATAPATH = 'data' + os.sep # path to data files
PI = pi
SCREEN_WIDTH = 149 # width of game window in characters
SCREEN_HEIGHT = 61 # height "
SCREEN_XM = int(SCREEN_WIDTH/2) # horizontal center "
SCREEN_YM = int(SCREEN_HEIGHT/2) # vertical "
TANK_CON_WIDTH = 73 # width of tank info console in characters
TANK_CON_HEIGHT = 37 # height "
MSG_CON_WIDTH = TANK_CON_WIDTH # width of message console in characters
MSG_CON_HEIGHT = 19 # height "
MAP_CON_WIDTH = 73 # width of encounter map console in characters
MAP_CON_HEIGHT = 51 # height "
MAP_CON_X = MSG_CON_WIDTH + 2 # x position of encounter map console
MAP_CON_Y = 2 # y "
MAP_X0 = int(MAP_CON_WIDTH/2) # centre of encounter map console
MAP_Y0 = int(MAP_CON_HEIGHT/2)
MAP_INFO_CON_WIDTH = MAP_CON_WIDTH # width of map info console in characters
MAP_INFO_CON_HEIGHT = 7 # height "
DATE_CON_WIDTH = TANK_CON_WIDTH # date, scenario type, etc. console
DATE_CON_HEIGHT = 1
MENU_CON_WIDTH = 139 # width of in-game menu console
MENU_CON_HEIGHT = 42 # height "
MENU_CON_XM = int(MENU_CON_WIDTH/2) # horizontal center of "
MENU_CON_YM = int(MENU_CON_HEIGHT/2) # vertical center of "
MENU_CON_X = SCREEN_XM - MENU_CON_XM # x and y location to draw
MENU_CON_Y = int(SCREEN_HEIGHT/2) - int(MENU_CON_HEIGHT/2) # menu console on screen
# text console, displays 84 x 50 characters
TEXT_CON_WIDTH = 86 # width of text display console (campaign journal, messages, etc.)
TEXT_CON_HEIGHT = 57 # height "
TEXT_CON_XM = int(TEXT_CON_WIDTH/2) # horizontal center "
TEXT_CON_X = SCREEN_XM - TEXT_CON_XM # x/y location to draw window
TEXT_CON_Y = 2
C_MAP_CON_WIDTH = 90 # width of campaign map console
C_MAP_CON_HEIGHT = 90 # height "
C_MAP_CON_WINDOW_W = 90 # width of how much of the campaign map is displayed on screen
C_MAP_CON_WINDOW_H = 57 # height "
C_MAP_CON_X = SCREEN_WIDTH - C_MAP_CON_WINDOW_W - 1 # x position of campaign map console
C_ACTION_CON_W = SCREEN_WIDTH - C_MAP_CON_WINDOW_W - 3 # width of campaign action console
C_ACTION_CON_H = 30 # height "
C_INFO_CON_W = SCREEN_WIDTH - C_MAP_CON_WINDOW_W - 3 # width of campaign info console
C_INFO_CON_H = SCREEN_HEIGHT - C_ACTION_CON_H - 4 # height "
C_INFO_CON_X = int(C_INFO_CON_W/2)
MAX_HS = 40 # maximum number of highscore entries to save
NAME_MAX_LEN = 17 # maximum length of crew names in characters
NICKNAME_MAX_LEN = 15 # maximum length of crew nicknames in characters
LIMIT_FPS = 50 # maximum screen refreshes per second
# Game definitions
EXTRA_AMMO = 30			# player tank can carry up to this many extra main gun shells
# Difficulty level
# Veteran=1(normal mode), Regular=2, Recruit=3
DIFFICULTY = 1
# Adjust leveling for difficulty level: easier settings lower the
# experience needed per level
BASE_EXP_REQ = int(30 / DIFFICULTY)
LVL_INFLATION = 10 / DIFFICULTY
# Adjust skill efficiency for difficulty level: multiply each skill's
# activation chances by DIFFICULTY (capped at 100), then trim duplicate
# trailing 100% entries so every remaining level is meaningful
for skill in SKILLS:
    for k, v in enumerate(skill.levels):
        skill.levels[k] *= DIFFICULTY
        if skill.levels[k] > 100:
            skill.levels[k] = 100
    num_100 = len([x for x in skill.levels if x == 100])
    if num_100 > 1:
        skill.levels = skill.levels[:-(num_100 - 1)]
STONE_ROAD_MOVE_TIME = 30	# minutes required to move into a new area via an improved road
DIRT_ROAD_MOVE_TIME = 45	# " dirt road
NO_ROAD_MOVE_TIME = 60		# " no road
GROUND_MOVE_TIME_MODIFIER = 15	# additional time required if ground is muddy / rain / snow
# Colour Defintions
KEY_COLOR = libtcod.Color(255, 0, 255) # key color for transparency
# campaign map base colours
MAP_B_COLOR = libtcod.Color(100, 120, 100) # fields
MAP_D_COLOR = libtcod.Color(70, 90, 70) # woods
OPEN_GROUND_COLOR = libtcod.Color(100, 140, 100)
MUD_COLOR = libtcod.Color(80, 50, 30)
HEX_EDGE_COLOR = libtcod.Color(60, 100, 60)
ROAD_COLOR = libtcod.Color(160, 140, 100)
DIRT_COLOR = libtcod.Color(80, 50, 30)
CLEAR_SKY_COLOR = libtcod.Color(16, 180, 240)
OVERCAST_COLOR = libtcod.Color(150, 150, 150)
STONE_ROAD_COLOR = libtcod.darker_grey
FRONTLINE_COLOR = libtcod.red # highlight for hostile map areas
PLAYER_COLOR = libtcod.Color(10, 64, 10)
ENEMY_COLOR = libtcod.Color(80, 80, 80)
ROW_COLOR = libtcod.Color(30, 30, 30) # to highlight a line in a console
ROW_COLOR2 = libtcod.Color(20, 20, 20) # to highlight a line in a console
SELECTED_COLOR = libtcod.blue # selected option background
HIGHLIGHT_COLOR = libtcod.light_blue # to highlight important text
GREYED_COLOR = libtcod.Color(60, 60, 60) # greyed-out option
SKILL_ACTIVATE_COLOR = libtcod.Color(0, 255, 255) # skill activated message
MENU_TITLE_COLOR = libtcod.lighter_blue # title of menu console
KEY_HIGHLIGHT_COLOR = libtcod.Color(0, 200, 255) # highlight for key commands
HIGHLIGHT = (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP) # constant for highlight pair
TITLE_GROUND_COLOR = libtcod.Color(26, 79, 5) # color of ground in main menu
SOUNDS = {} # sound effects
##########################################################################################
# Classes #
##########################################################################################
# Bones Class
# records high scores and other info between play sessions
class Bones:
    """Persistent cross-session record: high scores, fallen crew, and
    which tutorial help windows have already been shown."""

    def __init__(self):
        self.score_list = []
        self.graveyard = []
        # tribute to David Bowie
        self.graveyard.append(['Major', 'Jack Celliers', 'Brixton', 'January 10', ''])
        # flags for having displayed help text windows
        self.tutorial_message_flags = {key: False for key in TUTORIAL_TEXT}
# Saved Game Info Class
# holds basic information about a saved game, only read by main menu and only written to
# by SaveGame, doesn't impact gameplay otherwise
class SavedGameInfo:
    """Descriptive metadata about a saved game.

    Read only by the main menu and written only by SaveGame; it never
    affects gameplay.
    """

    def __init__(self, game_version, campaign_name, commander_name, tank_name, current_date):
        # store every descriptive field verbatim
        for attr_name, value in (('game_version', game_version),
                                 ('campaign_name', campaign_name),
                                 ('commander_name', commander_name),
                                 ('tank_name', tank_name),
                                 ('current_date', current_date)):
            setattr(self, attr_name, value)
# Campaign Day Map Class
# holds information about the campaign map used for in an action day
class CampaignDayMap:
    """State of the campaign map used during one action day."""

    def __init__(self):
        self.seed = 0               # map-painting seed, set during map generation
        self.nodes = []             # every map node
        self.blocked_nodes = set()  # impassible map nodes
        self.char_locations = {}    # character location parent nodes
        self.player_node = None     # node the player currently occupies
# Map Node Class
# holds information about a single location on the campaign map
class MapNode:
    """A single location (area) on the campaign day map."""

    def __init__(self, x, y):
        # centre coordinates of the area
        self.x = x
        self.y = y

        # geometry and connectivity
        self.edges = set()            # edge locations within the area
        self.links = []               # adjacent nodes
        self.node_type = ''           # terrain type of the node
        self.village_radius = 0       # radius of buildings if a village node
        self.dirt_road_links = []     # nodes linked to this one by dirt road
        self.stone_road_links = []    # nodes linked by an improved road
        self.road_end = False         # roads here should run to the map edge
        self.extended = False         # road already extended to the map edge

        # map-edge and special-role flags
        self.top_edge = False
        self.bottom_edge = False
        self.left_edge = False
        self.right_edge = False
        self.start = False            # player's start node
        self.exit = False             # exit node

        # battle / control state
        self.resistance = None        # area resistance level
        self.res_known = False        # resistance level known to the player
        self.friendly_control = False # controlled by player forces
        self.arty_strike = False      # hit by friendly artillery
        self.air_strike = False       # hit by friendly air forces
        self.advancing_fire = False   # player used advancing fire entering

        # A* pathfinding bookkeeping starts cleared
        self.ClearPathInfo()

        # quest state
        self.quest_type = None        # type of active quest for this node
        self.quest_time_limit = None  # time limit to complete the quest
        self.quest_vp_bonus = None    # VP bonus for completing the quest

    def ClearPathInfo(self):
        """Reset the A* pathfinding bookkeeping for this node."""
        self.parent = None
        self.g = self.h = self.f = 0
# Skill Record Class
# holds information about a crewman's skill and its activation level
class SkillRecord:
    """Pairs a crewman's skill name with its activation level."""

    def __init__(self, name, level):
        self.name, self.level = name, level
# Hit Class
# holds information about a hit on an enemy unit with the player's main gun
class MainGunHit:
    """Details of a hit scored on an enemy unit with the player's main gun."""

    def __init__(self, gun_calibre, ammo_type, critical, area_fire):
        # record the firing gun's calibre, the ammunition used, and whether
        # the hit was a critical and/or the result of area fire
        (self.gun_calibre, self.ammo_type,
         self.critical, self.area_fire) = gun_calibre, ammo_type, critical, area_fire
# Weather Class
# holds information about weather conditions
class Weather:
	"""Tracks weather conditions for the campaign day.

	Cloud cover, fog, precipitation and ground state interact: precipitation
	accumulates over time (rain_time / snow_time, in minutes) and can change
	the ground state, while dry spells (dry_time) dry out mud.
	Relies on module globals: Roll2D6, PopUp, campaign, battle, and the
	console-update functions.
	"""
	def __init__(self):
		self.clouds = 'Clear'		# 'Clear' or 'Overcast'
		self.fog = False		# fog present
		self.precip = 'None'		# 'None', 'Rain' or 'Snow'
		self.ground = 'Dry'		# 'Dry', 'Mud', 'Snow' or 'Deep Snow'
		# record of precip accumulation (minutes)
		self.rain_time = 0
		self.snow_time = 0
		self.dry_time = 0
	# generate a totally new set of weather conditions upon moving to a new area
	def GenerateNew(self):
		"""Randomly generate a complete new set of weather conditions.

		Probabilities depend on the current campaign month (summer is
		clearer; winter brings snow).
		"""
		self.clouds = 'Clear'
		self.fog = False
		self.precip = 'None'
		self.ground = 'Dry'
		self.rain_time = 0
		self.snow_time = 0
		self.dry_time = 0
		# cloud cover
		d1, d2, roll = Roll2D6()
		month = campaign.current_date[1]
		if 3 <= month <= 11:
			roll -= 1
		if 5 <= month <= 8:
			roll -= 1
		if roll > 6:
			self.clouds = 'Overcast'
		# precipitation and/or fog
		if self.clouds == 'Overcast':
			d1, d2, roll = Roll2D6()
			if roll <= 4:
				if month <= 2 or month == 12:
					self.precip = 'Snow'
				elif 5 <= month <= 9:
					self.precip = 'Rain'
				else:
					# small chance of snow in march/april, oct/nov
					d1, d2, roll = Roll2D6()
					if roll >= 11:
						self.precip = 'Snow'
					else:
						self.precip = 'Rain'
		# fog
		d1, d2, roll = Roll2D6()
		if self.precip != 'None':
			roll -= 2
		if roll >= 10:
			self.fog = True
		# ground cover
		d1, d2, roll = Roll2D6()
		if self.precip == 'Snow':
			if roll >= 11:
				self.ground = 'Deep Snow'
			elif roll >= 5:
				self.ground = 'Snow'
		elif self.precip == 'Rain':
			if roll >= 8:
				self.ground = 'Mud'
		else:
			# deep winter
			if month <= 2 or month == 12:
				if roll == 12:
					self.ground = 'Deep Snow'
				elif roll >= 8:
					self.ground = 'Snow'
			# warmer months
			elif 5 <= month <= 9:
				if roll >= 10:
					self.ground = 'Mud'
			# spring/autumn
			else:
				if roll == 12:
					self.ground = 'Snow'
				elif roll >= 8:
					self.ground = 'Mud'
	# check to see if weather changes, and apply effects if so
	def CheckChange(self):
		"""Roll for at most one weather change this cycle and announce it."""
		d1, d2, roll = Roll2D6()
		month = campaign.current_date[1]
		# check to see if precip stops; if so, this will be only change
		# this cycle
		if self.precip != 'None':
			if roll <= 3:
				if self.precip == 'Rain':
					PopUp('The rain stops.')
				else:
					PopUp('The snow stops falling.')
				self.precip = 'None'
				return
		# otherwise, if overcast, see if precip starts
		elif self.clouds == 'Overcast':
			if roll <= 3:
				if month <= 2 or month == 12:
					self.precip = 'Snow'
					PopUp('Snow starts falling')
				elif 5 <= month <= 9:
					self.precip = 'Rain'
					PopUp('Rain starts falling')
				else:
					# small chance of snow in march/april, oct/nov
					d1, d2, roll = Roll2D6()
					if roll >= 11:
						self.precip = 'Snow'
						PopUp('Snow starts falling')
					else:
						self.precip = 'Rain'
						PopUp('Rain starts falling')
				return
		# if no precip change, check to see if cloud cover / fog changes
		d1, d2, roll = Roll2D6()
		if self.clouds == 'Clear':
			if roll <= 3:
				self.clouds = 'Overcast'
				PopUp('Clouds roll in and the weather turns overcast')
				return
		else:
			if roll <= 5:
				# if foggy, fog lifts instead
				if self.fog:
					self.fog = False
					PopUp('The fog lifts.')
					return
				# otherwise, the sky clears, stopping any precip
				self.clouds = 'Clear'
				self.precip = 'None'
				PopUp('The sky clears.')
				return
		# chance of fog rolling in
		d1, d2, roll = Roll2D6()
		if roll <= 3 and not self.fog:
			self.fog = True
			PopUp('Fog rolls in.')
	# check for a change in ground cover based on accumulated precip
	# or lack thereof
	def CheckGround(self, minutes_passed):
		"""Accumulate precip/dry time and update the ground state.

		minutes_passed: game minutes elapsed since the last check. Updates
		the campaign/battle consoles if the ground state changed.
		"""
		change = False
		if self.precip == 'Rain':
			if self.ground != 'Mud':
				self.rain_time += minutes_passed
				if self.rain_time >= 120:
					# FIX: corrected message typo 'turned to ground to mud'
					PopUp('The rain has turned the ground to mud.')
					self.ground = 'Mud'
					self.dry_time = 0
					change = True
		elif self.precip == 'Snow':
			if self.ground != 'Deep Snow':
				self.snow_time += minutes_passed
				if self.snow_time >= 120:
					if self.ground == 'Snow':
						PopUp('The snow on the ground has become deep.')
						self.ground = 'Deep Snow'
						change = True
					elif self.ground in ['Dry', 'Mud']:
						PopUp('The ground is covered in snow.')
						self.ground = 'Snow'
						change = True
		else:
			if self.ground == 'Mud':
				self.dry_time += minutes_passed
				if self.dry_time >= 120:
					PopUp('The muddy ground dries out.')
					self.ground = 'Dry'
					self.rain_time = 0
					change = True
		# if there was a change, update consoles
		if change:
			if battle is not None:
				UpdateMapOverlay()
			else:
				campaign.BuildActionList()
				UpdateCActionCon()
				UpdateCInfoCon(mouse.cx, mouse.cy)
# Hex Class
# holds information on a hex location in the battle encounter map
class MapHex:
	"""A single hex location on the battle encounter map."""
	def __init__(self, hx, hy, rng, sector):
		# hex coordinate, range band and map sector
		self.hx = hx
		self.hy = hy
		self.rng = rng
		self.sector = sector
		# no smoke factors present initially
		self.smoke_factors = 0
		# screen drawing position derived from the hex coordinate
		self.x, self.y = Hex2Screen(hx, hy)
# Smoke Factor Class
# holds information about a smoke factor on the battle encounter map
class SmokeFactor:
	"""A smoke factor placed on the battle encounter map.

	Positions are axial hex coordinates relative to the player tank, so
	they must be updated whenever the tank rotates or moves.
	"""
	def __init__(self, hx, hy, num_factors):
		self.hx = hx
		self.hy = hy
		self.num_factors = num_factors
	# change position as a result of tank rotating
	def RotatePosition(self, clockwise):
		"""Rotate this factor's hex 60 degrees around the player tank."""
		# convert axial coordinates to cube coordinates
		cube_x, cube_z = self.hx, self.hy
		cube_y = -cube_x - cube_z
		# a 60-degree cube rotation shifts and negates the coordinates
		if clockwise:
			self.hx, self.hy = -cube_y, -cube_x
		else:
			self.hx, self.hy = -cube_z, -cube_y
	# change position based on player tank moving forward or backward
	def YMove(self, y_change):
		"""Shift the factor up/down the map as the player tank moves."""
		# never end up in the player's own hex (0,0): jump past it instead
		if self.hx == 0 and self.hy + y_change == 0:
			y_change = -2 if y_change == -1 else 2
		self.hy += y_change
# Campaign Class
# holds information on an ongoing campaign
class Campaign:
	def __init__(self):
		"""Create a new campaign object with default settings and empty state."""
		# Info set by the campaign xml file: defines the parameters of the campaign
		# selected by the player
		self.campaign_name = ''		# name of the campaign (eg. Patton's Best)
		self.campaign_file = ''		# filename of the campaign xml file
		self.player_nation = ''		# three-letter code for player's nation
		self.enemy_nation = ''		# " for enemy nation
		self.map_file = ''		# XP file of campaign map
		self.player_veh_list = []	# list of permitted player vehicle types
		self.mission_activations = []	# list of activation chance dictionaries
						# for advance, battle, counterattack
						# missions
		self.activation_modifiers = []	# list of activation modifiers
		self.class_activations = []	# list of unit type and activation chance
						# tuples for each unit class
						# first item is always unit class name
		self.ranks = None		# list of ranks for current nation
		self.decorations = None		# list of decorations "
		self.days = []			# list of calendar days: each one is
						# a dictionary with keys and values
		self.over = False		# flag set when campaign has finished
		(self.fs_res_x, self.fs_res_y) = FS_RES_LIST[0]	# full screen resolution
		self.fullscreen = False		# full screen preference
		self.exiting = False		# flag for exiting out to main menu
		self.mouseover = (-1, -1)	# keeps track of mouse position
		self.color_scheme = None	# campaign map colour scheme, set at
						# map generation
		# campaign options
		self.unlimited_tank_selection = False	# freedom to select any available tank model
		self.casual_commander = False	# can replace commander and continue playing
		self.difficulty = DIFFICULTY	# campaign difficulty level
		self.start_date = 0		# index of date in the calendar to start campaign
		# game settings
		self.animations = True		# in-game animations
		self.sounds = True		# in-game sound effects
		self.pause_labels = True	# wait for enter after displaying a label
		self.tutorial_message = True	# display tutorial message windows
		self.current_date = [0,0,0]	# current year, month, date
		self.day_vp = 0			# vp gained this campaign day
		self.vp = 0			# current total player victory points
		self.action_day = False		# flag if player sees action today
		self.saw_action = False		# flag if player saw action already today
		self.day_in_progress = False	# flag if player is in the campaign map interface
		self.scen_res = ''		# string description of expected resistance for this day
		self.scen_type = ''		# " mission type
		self.tank_on_offer = ''		# new sherman model available during refitting
		self.selected_crew = None	# selected crew member
		self.weather = Weather()	# set up a new weather object
		self.ClearAmmo()		# clear rare ammo supplies
		self.gyro_skill_avail = False	# gyrostabilizer skill is available
		self.stats = {}			# campaign statistics, for display at end of campaign
		self.campaign_journal = []	# list of text descriptions of campaign
		self.record_day_vp = 0		# highest one-day VP score this month
	def ResetForNewDay(self):
		"""Reset per-day state for a new campaign day where the player sees action."""
		# Following are reset for each new campaign day where player sees action
		self.day_map = None		# day map, will be generated later
		self.weather.GenerateNew()	# reset weather for a new day
		self.hour = 0			# current hour in 24-hour format
		self.minute = 0			# current minute
		self.c_map_y = 0		# offset for displaying campaign map on screen
		self.selected_crew = None	# pointer to currently selected crewmember
		self.resupply = False		# currently in resupply mode
		self.input_mode = 'None'	# current input mode in campaign
		self.selected_node = None	# selected node on the day map
		self.adjacent_nodes = []	# list of adjacent nodes for moving, checking
		self.free_check = False		# player gets a free check adjacent area action
		self.messages = []		# list of campaign messages
		self.sunset = False		# flag that the combat day is over
		self.exiting = False		# flag to exit to main menu
		self.arty_chance = 9		# chance of calling in artillery
		self.air_chance = 7		# chance of calling in air strike
		self.time_of_last_event = (0,0)	# hour, minute of last triggered event;
						# 0,0 if no event has occured today
		self.quest_active = False	# campaign quest currently active
		self.action_list = []
		self.BuildActionList()
# check for enemy advances during a counterattack mission day
def DoEnemyAdvance(self):
# build list of candidate nodes
nodes = []
for node in self.day_map.nodes:
if node == campaign.day_map.player_node: continue
if not node.friendly_control: continue
if node in self.day_map.blocked_nodes: continue
if node.top_edge:
nodes.append(node)
continue
# check if adjacent to an enemy-held node
for link_node in node.links:
if not link_node.friendly_control:
nodes.append(node)
break
# if no candidate nodes, return
if len(nodes) == 0:
return
# run through candidate nodes and see if they get taken over
for node in nodes:
chance = 0
for link_node in node.links:
if not link_node.friendly_control:
if link_node.resistance == 'Light' and chance < 3:
chance = 3
elif link_node.resistance == 'Medium' and chance < 5:
chance = 5
elif link_node.resistance == 'Heavy' and chance < 7:
chance = 7
# at beginning of day, top-edge nodes won't have any enemy-held
# nodes adjacent, so we need to generate random chances for
# these ones
if chance == 0:
chance = random.choice([3, 5, 7])
# do advance roll
d1, d2, roll = Roll2D6()
# control is lost
if roll <= chance:
node.friendly_control = False
node.res_known = True
campaign.MoveViewTo(node)
UpdateCOverlay(highlight_node=node)
RenderCampaign()
Wait(500)
PopUp('A map area has been captured by an enemy advance.')
UpdateCOverlay()
RenderCampaign()
# if no possible path to 'exit' node, player is moved to nearest
# friendly node, spends 1-20 HE shells
if not campaign.day_map.player_node.exit:
for node in campaign.day_map.nodes:
if node.exit:
if len(GetPath(campaign.day_map.player_node, node, enemy_blocks=True)) > 0:
return
break
# HE shells expended during move
ammo_expended = Roll1D10() * 2
if tank.general_ammo['HE'] < ammo_expended:
ammo_expended = tank.general_ammo['HE']
tank.general_ammo['HE'] -= ammo_expended
# time required for move
time_req = 60
if campaign.weather.ground != 'Dry' or campaign.weather.precip != 'None' or campaign.weather.fog:
time_req += 15
campaign.SpendTime(0, time_req)
PopUp('You have been cut off from your allies and must reposition. You travel' +
' off-road to the nearest friendly map area, expending ' + str(ammo_expended) +
' HE shells to cover your withdrawl. This takes ' + str(time_req) +
' minutes.')
# player node captured by enemy
campaign.day_map.player_node.friendly_control = False
campaign.day_map.player_node.res_known = True
# find the target node
closest = None
closest_dist = 9000
for node in campaign.day_map.nodes:
if node.friendly_control:
dist = GetDistance(campaign.day_map.player_node.x,
campaign.day_map.player_node.y, node.x,
node.y)
if dist < closest_dist:
closest = node
closest_dist = dist
if closest is None:
print ('ERROR: Could not find a friendly node to move to')
return
# do the move
campaign.day_map.player_node = closest
campaign.MoveViewTo(closest)
UpdateCOverlay()
RenderCampaign()
Wait(500)
# check for sunset
campaign.CheckSunset()
# check to see if a random campaign event is triggered
	def RandomCampaignEvent(self):
		"""Possibly trigger one random campaign event.

		Event chance scales with the time elapsed since the last event; a
		final roll of 50 or less is no event. Events include supply trucks,
		quests, exit-area changes, recon reports, and enemy or friendly
		advances. Never triggers on Counterattack days or after sunset.
		"""
		# highlight and move player view to event node
		def ShowNode(node):
			campaign.MoveViewTo(node)
			UpdateCOverlay(highlight_node=node)
			RenderCampaign()
			Wait(1000)
		# if current day mission is Counterattack, don't trigger any campaign events
		if campaign.scen_type == 'Counterattack': return
		# if sunset has already happened
		if campaign.sunset: return
		roll = Roll1D100()
		# if no event yet today, set current time as 'time of last event' and return
		if self.time_of_last_event == (0,0):
			self.time_of_last_event = (self.hour, self.minute)
			return
		else:
			h1, m1 = self.time_of_last_event
			h, m = GetTimeUntil(h1, m1, self.hour, self.minute)
			if h == 0:
				# No event
				return
			elif h == 1:
				# less time since last event -> larger penalty to the roll
				if m <= 15:
					roll -= 30
				elif m <= 30:
					roll -= 25
				elif m <= 45:
					roll -= 10
		# No event
		if roll <= 50:
			return
		# Ammo supply Discovered
		elif roll <= 55:
			WriteJournal('Friendly supply truck discovered.')
			if PopUp('You have encountered a friendly supply truck. Restock ' +
				'your ammunition? (15 mins.)', confirm=True):
				self.SpendTime(0, 15)
				tank.smoke_grenades = 6
				tank.smoke_bombs = 15
				self.resupply = True
				MainGunAmmoMenu()
				self.resupply = False
				RenderCampaign()
			self.time_of_last_event = (self.hour, self.minute)
			return
		# Quest Triggered
		if roll <= 75:
			# don't trigger another one if there's currently one in progress
			if self.quest_active:
				return
			# determine quest type
			d1, d2, roll = Roll2D6()
			if roll <= 3:
				# like CAPTURE but with a time limit
				quest_type = 'RESCUE'
				vp_bonus = 15
			elif roll <= 7:
				# enter an enemy-held area, automatic battle encounter
				quest_type = 'CAPTURE'
				vp_bonus = 10
			elif roll <= 10:
				# check an enemy-held area for resistance level
				quest_type = 'RECON'
				vp_bonus = 5
			else:
				# enter a friendly-held area, wait for attack
				quest_type = 'DEFEND'
				vp_bonus = 15
			# find quest map node
			player_y = campaign.day_map.player_node.y
			nodes = []
			for node in campaign.day_map.nodes:
				if node in campaign.day_map.blocked_nodes: continue
				# skip marshland nodes; should have been done by
				# previous line but still seems to be getting through
				if node.node_type == 'E': continue
				if node == campaign.day_map.player_node: continue
				if node.y > player_y: continue
				if quest_type != 'DEFEND' and node.friendly_control: continue
				if quest_type == 'DEFEND' and not node.friendly_control: continue
				if quest_type == 'RECON' and node.res_known: continue
				# node must be close to player
				if len(GetPath(campaign.day_map.player_node, node)) > 3: continue
				nodes.append(node)
			if len(nodes) == 0:
				return
			WriteJournal(quest_type + ' quest triggered')
			# set quest active flag
			self.quest_active = True
			# add campaign stat
			campaign.AddStat('Quests Assigned', 1)
			# select quest node
			node = random.choice(nodes)
			# set quest node settings
			node.quest_type = quest_type
			node.quest_vp_bonus = vp_bonus
			if quest_type == 'RESCUE':
				# determine time limit for quest
				h = campaign.hour + libtcod.random_get_int(0, 2, 4)
				m = campaign.minute
				node.quest_time_limit = (h, m)
				text = ('Commander, you are requested to head to the highlighted ' +
					'map location. Allied units are pinned down in the area ' +
					'and require your help. If completed at or before ' +
					str(h) + ':' + str(m).zfill(2) + ', you will receive ' +
					'a bonus of ' + str(vp_bonus) + ' VP.')
			elif quest_type == 'RECON':
				text = ('Commander, you are requested to head to the highlighted ' +
					'map location and check it for estimated enemy resistance. ' +
					'If completed, you will receive a bonus of ' +
					str(vp_bonus) + ' VP.')
			elif quest_type == 'CAPTURE':
				text = ('Commander, you are requested to head to the highlighted ' +
					'map location and capture it from enemy forces. ' +
					'If completed, you will receive a bonus of ' +
					str(vp_bonus) + ' VP.')
			elif quest_type == 'DEFEND':
				text = ('Commander, you are requested to head to the highlighted ' +
					'map location and defend it from an anticipated enemy ' +
					'counterattack. If completed, you will receive a bonus of ' +
					str(vp_bonus) + ' VP.')
			ShowNode(node)
			PopUp(text)
		# Exit Area Changed
		elif roll <= 80:
			# don't move if player within 2 nodes of current exit
			for node in campaign.day_map.nodes:
				if node.exit:
					if len(GetPath(campaign.day_map.player_node, node)) <= 4:
						return
					break
			# build list of top edge nodes that are reachable from current player
			# location, note the pre-existing exit node but don't include it in the list
			nodes = []
			old_exit = None
			for node in campaign.day_map.nodes:
				if node.top_edge:
					if node.friendly_control: continue
					if node.exit:
						old_exit = node
					elif GetPath(campaign.day_map.player_node, node) != []:
						nodes.append(node)
			# no candidates found
			if len(nodes) == 0: return
			# select a random node from the list, make it the exit node, and clear the
			# exit flag of the old exit node
			node = random.choice(nodes)
			node.exit = True
			old_exit.exit = False
			ShowNode(node)
			PopUp('HQ has ordered us to proceed to a different target area.')
		# Reconnaissance Report: Reveals expected resistance level in an adjacent area
		elif roll <= 85:
			# build list of possible nodes
			nodes = []
			for node in campaign.day_map.nodes:
				if node in campaign.day_map.player_node.links and not node.res_known:
					if not node.friendly_control and node.quest_type is None:
						nodes.append(node)
			# no candidates found
			if len(nodes) == 0: return
			# select a random node and reveal its resistance level
			node = random.choice(nodes)
			node.res_known = True
			ShowNode(node)
			PopUp('Reconnaissance teams have reported on a nearby area.')
		# Enemy Reinforcements: Previously known resistance level is increased
		elif roll <= 90:
			# build list of possible nodes
			nodes = []
			for node in campaign.day_map.nodes:
				if node in campaign.day_map.player_node.links and node.res_known:
					if not node.friendly_control and node.resistance != 'Heavy':
						nodes.append(node)
			# no candidates found
			if len(nodes) == 0: return
			# select a random node and increase its resistance level
			node = random.choice(nodes)
			if node.resistance == 'Light':
				node.resistance = 'Medium'
			else:
				node.resistance = 'Heavy'
			ShowNode(node)
			PopUp('We have received reports of enemy reinforcement in a ' +
				'nearby area.')
		# Enemy Advance: Previously captured area is lost
		elif roll <= 95:
			# build list of possible nodes
			nodes = []
			for node in campaign.day_map.nodes:
				if node.friendly_control and node != campaign.day_map.player_node:
					if node.quest_type is None:
						nodes.append(node)
			# no candidates found
			if len(nodes) == 0: return
			# select a random node and revert it to enemy control
			node = random.choice(nodes)
			node.friendly_control = False
			ShowNode(node)
			PopUp('A map area has been recaptured by an enemy advance.')
		# Friendly Advance: Nearby area is captured
		else:
			# build list of possible nodes
			nodes = []
			for node in campaign.day_map.nodes:
				if node in campaign.day_map.player_node.links:
					if node.exit: continue
					if not node.friendly_control and node != campaign.day_map.player_node:
						if node.quest_type is None:
							nodes.append(node)
			# no candidates found
			if len(nodes) == 0: return
			# select a random node and change it to friendly control
			node = random.choice(nodes)
			node.friendly_control = True
			ShowNode(node)
			PopUp('A nearby area has been captured by friendly forces.')
		# return view to player node and continue
		UpdateCOverlay()
		campaign.MoveViewTo(campaign.day_map.player_node)
		RenderCampaign()
		# record that an event occured
		self.time_of_last_event = (self.hour, self.minute)
# return a text description of node terrain
def GetTerrainDesc(self, node):
if node.node_type == 'A':
return 'Farms & Fields'
elif node.node_type == 'B':
return 'Fields'
elif node.node_type == 'C':
return 'Village'
elif node.node_type == 'D':
return 'Woods'
elif node.node_type == 'F':
return 'Bocage'
print ('ERROR: unknown node terrain')
return 'Unknown'
# return the rarity factor for a given vehicle type for the current date
# if not historically available for this date, returns 0
	def GetRF(self, vehicle_type):
		"""Return the rarity factor for a vehicle type on the current date.

		Returns 0 if the type has no rarity entry (not historically
		available). Rarity values are indexed by campaign month.
		"""
		for v in VEHICLE_TYPES:
			if v[0] == vehicle_type:
				# v[1:] is a sequence of (key, value) stat pairs
				for (k, value) in v[1:]:
					if k == 'rarity':
						CAMPAIGN_MONTHS = [(8,1944), (9,1944), (10,1944), (11,1944), (12,1944),
							(1,1945), (2,1945), (3,1945), (4,1945)]
						# new campaign, assume earliest month
						if self.current_date == [0,0,0]:
							n = 0
						else:
							# no rarity yet for first month, use August instead
							month = self.current_date[1]
							year = self.current_date[0]
							if month == 7 and year == 1944:
								n = 0
							else:
								n = CAMPAIGN_MONTHS.index((month, year))
						return value[n]
		return 0
# move the campaign day map y offset to immediately show given node
	def MoveViewTo(self, node):
		"""Scroll the campaign map view (in steps of 10) until node is visible."""
		if node is None: return
		while self.c_map_y > node.y - 10:
			self.c_map_y -= 10
		while self.c_map_y < node.y - C_MAP_CON_WINDOW_H + 10:
			self.c_map_y += 10
		self.CheckYOffset()
		RenderCampaign()
# record a campaign stat, either creating a new entry or increasing one already existing
def AddStat(self, stat_name, value):
if stat_name not in self.stats:
self.stats[stat_name] = value
else:
previous_value = self.stats[stat_name]
self.stats[stat_name] = previous_value + value
# build list of possible campaign day actions and their time cost
	def BuildActionList(self):
		"""Rebuild the list of (description, time cost) campaign day actions.

		Time costs for moves increase in bad weather; air strikes require
		acceptable flying conditions; a badly-damaged tank may return to HQ.
		"""
		self.action_list = []
		# calculate time increase for weather
		if self.weather.ground != 'Dry' or self.weather.precip != 'None' or self.weather.fog:
			ti = GROUND_MOVE_TIME_MODIFIER
		else:
			ti = 0
		if self.scen_type == 'Counterattack':
			self.action_list.append(('[%cA%c]wait Enemy Counterattack'%HIGHLIGHT, None))
			self.action_list.append(('[%cE%c]nter adjacent friendly area, Improved Road'%HIGHLIGHT, STONE_ROAD_MOVE_TIME+ti))
			self.action_list.append(('[%cE%c]nter adjacent friendly area, Dirt Road'%HIGHLIGHT, DIRT_ROAD_MOVE_TIME+ti))
			self.action_list.append(('[%cE%c]nter adjacent friendly area, No Road'%HIGHLIGHT, NO_ROAD_MOVE_TIME+ti))
			self.action_list.append(('Attempt [%cR%c]esupply'%HIGHLIGHT, 15))
		else:
			self.action_list.append(('[%cC%c]heck adjacent area'%HIGHLIGHT, 15))
			self.action_list.append(('[%cE%c]nter adjacent area, Improved Road'%HIGHLIGHT, STONE_ROAD_MOVE_TIME+ti))
			self.action_list.append(('[%cE%c]nter adjacent area, Dirt Road'%HIGHLIGHT, DIRT_ROAD_MOVE_TIME+ti))
			self.action_list.append(('[%cE%c]nter adjacent area, No Road'%HIGHLIGHT, NO_ROAD_MOVE_TIME+ti))
			self.action_list.append(('Call for [%cA%c]rtillery or Air Strike'%HIGHLIGHT, 15))
			# air strike conditions
			if self.weather.clouds != 'Overcast' and not self.weather.fog and self.weather.precip != 'Snow':
				self.action_list.append(('Call for [%cA%c]ir Strike on adjacent area'%HIGHLIGHT, 30))
			self.action_list.append(('Attempt [%cR%c]esupply'%HIGHLIGHT, 60))
		# always have this option
		self.action_list.append(('[%cV%c]iew Tank'%HIGHLIGHT, None))
		# option of ending day
		if [i for i in ENDING_DAMAGES if i in tank.damage_list]:
			self.action_list.append(('Return to [%cH%c]Q'%HIGHLIGHT, None))
# generate amount of limited ammo available during morning briefing
	def GenerateAmmo(self):
		"""Generate the limited rare-ammo supplies available at morning briefing."""
		self.hcbi = libtcod.random_get_int(0, 1, 10)
		self.hvap = libtcod.random_get_int(0, 1, 3)
		# APDS ammo easier to get by start of 1945
		if self.current_date[0] == 1945:
			max_adps = 5
			min_adps = 2
		else:
			max_adps = 3
			min_adps = 1
		self.apds = libtcod.random_get_int(0, min_adps, max_adps)
		# check for scrounger skill
		if GetCrewByPosition('Loader').SkillCheck('Scrounger'):
			self.hcbi += libtcod.random_get_int(0, 1, 3)
			self.hvap += 1
			self.apds += libtcod.random_get_int(0, 1, 2)
# clear available limited ammo supplies
def ClearAmmo(self):
self.hcbi = 0
self.hvap = 0
self.apds = 0
# keeps the campaign map offset within bounds
	def CheckYOffset(self):
		"""Clamp the campaign map view offset so it stays within the map bounds."""
		if self.c_map_y < 0:
			self.c_map_y = 0
		elif self.c_map_y > C_MAP_CON_HEIGHT - C_MAP_CON_WINDOW_H:
			self.c_map_y = C_MAP_CON_HEIGHT - C_MAP_CON_WINDOW_H
# award vp for capturing an area
def AwardCaptureVP(self, node, counterattack=False):
if node.node_type in ['A', 'B']:
vp = 1
elif node.node_type in ['C', 'F']:
vp = 3
elif node.node_type == 'D':
vp = 2
# exit area gives extra VP
if node.exit:
vp += 20
# advance mission gives bonus VP if not already increased b/c exit node
elif self.scen_type == 'Advance':
vp = vp * 2
self.day_vp += vp
text = 'You are awarded ' + str(vp) + ' VP for '
if counterattack:
text += 'defending'
else:
text += 'capturing'
text += ' this area.'
PopUp(text)
# returns a text description of current date and time, or given date in campaign
# calendar
def GetDate(self, lookup_date=None):
MONTHS = ['', 'January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December'
]
if lookup_date is None:
lookup_date = self.current_date
text = MONTHS[lookup_date[1]]
date = str(lookup_date[2])
text += ' ' + date
if date in ['1', '21', '31']:
text += 'st'
elif date in ['2', '22']:
text += 'nd'
elif date in ['3', '23']:
text += 'rd'
else:
text += 'th'
text += ', ' + str(lookup_date[0])
return text
# returns the hour and minute of sunrise for current month
def GetSunrise(self):
SUNRISE = [(0,0), (7,45), (7,15), (6,15), (5,15),
(5,0), (5,0), (5,0), (5,0), (5,30),
(6,30), (7,15), (7,45)
]
return SUNRISE[self.current_date[1]]
# returns the hour and minute of sunset for current month
def GetSunset(self):
SUNSET = [(0,0), (16,30), (17,30), (18,00), (19,00),
(19,15), (19,15), (19,15), (19,15), (18,15),
(17,15), (16,15), (16,00)
]
return SUNSET[self.current_date[1]]
# advances clock by given amount of time
	def SpendTime(self, hours, minutes):
		"""Advance the campaign clock and run time-based weather/quest checks."""
		# add minutes
		self.minute += minutes
		# roll over extra minutes into hours
		while self.minute >= 60:
			self.hour += 1
			self.minute -= 60
		# add hours
		self.hour += hours
		UpdateDateCon()
		# check for rain accumulation for mud, dry weather for dry ground,
		# or snow for snow / deep snow cover
		self.weather.CheckGround(((hours*60) + minutes))
		# check for weather change, 10% per 15 mins
		checks = (hours * 4) + int(ceil(float(minutes) / 15.0))
		for c in range(checks):
			if libtcod.random_get_int(0, 1, 10) == 1:
				self.weather.CheckChange()
				# in case there was a change, update consoles
				if battle is not None:
					PaintMapCon()
				else:
					campaign.BuildActionList()
					UpdateCActionCon()
					UpdateCInfoCon(mouse.cx, mouse.cy)
		# check for quest time limit, eg. RESCUE
		if self.quest_active:
			for node in self.day_map.nodes:
				if node.quest_time_limit is not None:
					(h,m) = node.quest_time_limit
					if self.hour > h or (self.hour == h and self.minute > m):
						# cancel quest
						text = ('Time has run out to complete ' +
							node.quest_type + ' quest.')
						PopUp(text)
						WriteJournal(text)
						node.quest_type = None
						node.quest_vp_bonus = None
						node.quest_time_limit = None
						self.quest_active = False
						if battle is None:
							UpdateCOverlay()
					break
# check to see if it is at or past sunset, and trigger campaign day end if true
	def CheckSunset(self):
		"""Trigger end of the combat day if the current time is at/past sunset."""
		# don't check if already triggered
		if self.sunset: return
		(h, m) = self.GetSunset()
		if self.hour > h or (self.hour == h and self.minute >= m):
			PopUp('The sun has set and this day of combat is over.')
			WriteJournal('The sun set at ' + str(h) + ':' + str(m).zfill(2) + ' and the action day ended')
			campaign.sunset = True
			RenderCampaign()
			self.EndOfDay()
# commander chose to head back to HQ because of a damaged tank
def HeadHome(self):
RenderCampaign()
PopUp('You have chosen to head back to HQ, ending your combat day.')
WriteJournal('The commander chose to return to HQ at ' + str(self.hour) + ':' + str(self.minute) + ' because of tank damage')
campaign.sunset = True
RenderCampaign()
self.EndOfDay()
# do end of campaign day stuff
	def EndOfDay(self):
		"""Handle end-of-day bookkeeping: crew experience, levels, summary menu."""
		# award exp for the day to crew
		for crew in tank.crew:
			d1, d2, roll = Roll2D6()
			crew.AwardExp(roll)
		# check to see if any crew have gone up one or more levels
		self.CheckCrewLevels()
		# show campaign menu for summary
		CampaignMenu()
# check to see if any tank crew have gained one or more levels
	def CheckCrewLevels(self):
		"""Grant levels and skill points to crewmen whose exp crosses thresholds."""
		for crewman in tank.crew:
			# a crewman may gain several levels from one exp award
			while crewman.level < LEVEL_CAP:
				if crewman.exp >= GetExpReq(crewman.level+1):
					crewman.level += 1
					crewman.skill_pts += 1
					text = crewman.name + ' gained a level, is now level ' + str(crewman.level)
					PopUp(text)
					WriteJournal(text)
				else:
					break
# PlayerTank Class
# holds information about a player and their tank; can specify a model of tank during creation
class PlayerTank:
def __init__(self, tank_type):
if tank_type is None:
self.unit_type = 'M4 Turret A' # default type of tank
else:
self.unit_type = tank_type
self.unit_class = 'TANK'
self.crew = [] # list of crewmen in the tank
self.Setup()
# set up a new tank
	def Setup(self):
		"""Initialize / reset all tank state for a brand new tank."""
		self.alive = True		# tank is not destroyed or disabled
		self.swiss_cheese = False	# tank is damaged to the point where it
						# must be abandoned after encounter is over
		self.name = ''			# tank name
		self.general_ammo = {}		# ammo types and numbers in general stores
		self.rr_ammo = {}		# " ready rack
		self.ammo_load = 'HE'		# current shell in main gun
		self.ammo_reload = 'HE'		# type of shell to use for reload
		self.use_rr = True		# use ready rack to reload
		self.fired_main_gun = False	# tank fired its main gun last turn
		self.smoke_grenades = 6		# current number of smoke grenades, max 6
		self.smoke_bombs = 15		# current number of smoke bombs for mortar
						# (includes one loaded in mortar itself)
		self.turret_facing = 4		# sector that turret is facing
		self.old_t_facing = 4		# used when rotating the turret
		self.new_facing = 4		# used for pivoting the tank
		self.bogged_down = False	# tank is bogged down
		self.immobilized = False	# tank has thrown a track
		self.hull_down = False		# tank is hull down
		self.moving = False		# tank is moving
		self.lead_tank = False		# tank is lead of column
		# flags for MGs
		self.coax_mg_can_fire = False
		self.bow_mg_can_fire = False
		self.aa_mg_can_fire = False
		self.active_mg = -1
		self.has_rof = False		# tank currently has maintained RoF
		##### List of Active Minor Damage #####
		self.damage_list = []
# reset the tank for a new encounter turn
def Reset(self):
# record the current turret facing
self.old_t_facing = self.turret_facing
# reset MG flags
self.coax_mg_can_fire = False
self.bow_mg_can_fire = False
self.aa_mg_can_fire = False
self.active_mg = -1
# reset the tank after an encounter
	def ResetAfterEncounter(self):
		"""Reset tank and crew state once a battle encounter is over."""
		self.Reset()
		self.turret_facing = 4
		self.old_t_facing = 4
		self.bogged_down = False
		self.hull_down = False
		self.moving = False
		self.fired_main_gun = False
		# reset crew: orders for all crew, reset bailed out flag
		for crew_member in self.crew:
			crew_member.ResetOrder(reset_all=True)
			crew_member.bailed_out = False
			crew_member.bail_mod = 0
# apply a given randomly-determined type of minor damage, or randomly determine
# what type of damage to apply
	def TakeDamage(self, damage_type=None, light_weapons=False, large_gun=False):
		"""Apply a type of minor damage to the tank, rolling randomly if not given.

		light_weapons / large_gun modify the damage roll when the type is
		rolled rather than supplied. Damage already present (or superseded
		by a worse result) is ignored; a 'Broken' result replaces its
		'Malfunction' counterpart.
		"""
		if damage_type is None:
			# do damage roll
			d1, d2, roll = Roll2D6()
			# apply any modifiers
			if light_weapons:
				roll += 5
			elif large_gun:
				roll -= 2
			# determine type of damage
			d6roll = Roll1D6()
			if roll <= 2:
				if d6roll <= 3:
					damage_type = 'Gun Sight Broken'
				else:
					damage_type = 'Engine Knocked Out'
			elif roll == 3:
				if d6roll == 1:
					damage_type = 'Main Gun Broken'
				else:
					damage_type = 'Main Gun Malfunction'
			elif roll == 4:
				if d6roll <= 2:
					damage_type = 'Turret Traverse Broken'
				else:
					damage_type = 'Turret Traverse Malfunction'
			elif roll == 5:
				if d6roll <= 3:
					damage_type = 'Radio Broken'
				else:
					damage_type = 'Radio Malfunction'
			elif roll == 6:
				if d6roll <= 1:
					damage_type = 'Intercom Broken'
				else:
					damage_type = 'Intercom Malfunction'
			elif roll <= 8:
				# MG damage: only applies if the tank model mounts that MG
				if d6roll <= 2:
					if 'aa_mg' not in tank.stats:
						return
					damage_type = 'AA MG'
				elif d6roll <= 4:
					if 'co_ax_mg' not in tank.stats:
						return
					damage_type = 'Co-ax MG'
				else:
					if 'bow_mg' not in tank.stats:
						return
					damage_type = 'Bow MG'
				if Roll1D6() <= 2:
					damage_type += ' Broken'
				else:
					damage_type += ' Malfunction'
			elif roll <= 10:
				# a crew position's periscope is broken
				if d6roll == 1:
					damage_type = 'Commander'
				elif d6roll == 2:
					damage_type = 'Gunner'
				elif d6roll == 3:
					damage_type = 'Loader'
				elif d6roll == 4:
					damage_type = 'Driver'
				elif d6roll == 5:
					damage_type = 'Asst. Driver'
				else:
					# no damage
					return
				damage_type += ' Periscope Broken'
			else:
				# no damage
				return
		# don't apply if tank already has this type of damage
		if damage_type in self.damage_list:
			return
		# don't apply if the worse result is already present
		damage_result = GetDamageType(damage_type)
		if damage_result is not None:
			if damage_result.break_result in self.damage_list:
				return
		# remove less bad result
		mal_result = damage_type.replace('Broken', 'Malfunction')
		if mal_result in self.damage_list:
			self.damage_list.remove(mal_result)
		self.damage_list.append(damage_type)
		PopUp('Your tank has been damaged: ' + damage_type)
		# apply any additional effects
		if damage_type == 'Engine Knocked Out':
			self.immobilized = True
		# rebuild orders list and reset spotting ability
		for crewman in tank.crew:
			crewman.BuildOrdersList(no_reset=True)
			crewman.SetSpotAbility()
		UpdateTankCon()
# select first mg that can fire
def SelectFirstMG(self):
if self.coax_mg_can_fire:
self.active_mg = 0
elif self.bow_mg_can_fire:
self.active_mg = 1
elif self.aa_mg_can_fire:
self.active_mg = 2
else:
# no MGs can fire
self.active_mg = -1
# set a new name for the player tank
def SetName(self, new_name):
self.name = new_name
# show a menu to change the shell load of the main gun
	def ChangeGunLoadMenu(self):
		"""Show a menu letting the player change the shell loaded in the main gun."""
		# NOTE(review): mixes self with the module-level 'tank' global — works
		# because this is only ever called on the global player tank; confirm.
		# determine valid options: any ammo type that has at least one shell
		choice_list = []
		for ammo_type in AMMO_TYPES:
			if ammo_type in tank.general_ammo:
				if tank.general_ammo[ammo_type] > 0:
					choice_list.append(ammo_type)
				elif tank.rr_ammo[ammo_type] > 0:
					choice_list.append(ammo_type)
		# no shells available
		if len(choice_list) == 0:
			PopUp("No shells remaining, can't change gun load")
			return
		# show the menu and get the selection
		text = 'Select type of shell to load in main gun'
		choice = GetChoice(text, choice_list)
		if choice is None: return
		# replace any current gun shell in general stores
		if self.ammo_load != 'None':
			tank.general_ammo[self.ammo_load] += 1
		# set the new shell type; try from general first, then rr
		if tank.general_ammo[choice] > 0:
			tank.general_ammo[choice] -= 1
		else:
			tank.rr_ammo[choice] -= 1
		self.ammo_load = choice
# cycle the selected ammo type to reload
def CycleReload(self):
type_list = []
for ammo_type in AMMO_TYPES:
if ammo_type in tank.general_ammo:
type_list.append(ammo_type)
n = type_list.index(tank.ammo_reload)
if n >= len(type_list)-1:
tank.ammo_reload = type_list[0]
else:
tank.ammo_reload = type_list[n+1]
# toggle the hatch status of given crew member if possible, and change their spot
# ability accordingly
def ToggleHatch(self, crewman):
# only allow if a crew member is selected
if crewman is None: return
# if crew member has no hatch, return
if crewman.hatch == 'None': return
# otherwise, toggle state
if crewman.hatch == 'Open':
crewman.hatch = 'Shut'
PlaySound('hatch_close')
else:
crewman.hatch = 'Open'
PlaySound('hatch_open')
# update spotting ability
crewman.SetSpotAbility()
# re-build list of possible orders
crewman.BuildOrdersList(no_reset=True)
	# set lead tank status for the day
	def SetLeadTank(self):
		"""Roll 2D6 to decide whether the player tank leads today.

		Fireflies never lead. Jumbos (M4A3E2) lead on 8+, other models on
		11+, and a non-Jumbo that led last combat day automatically stands
		down.
		"""
		# fireflies never lead
		if tank.stats['vehicle_type'] == 'Sherman VC':
			return
		if not 'M4A3E2' in tank.stats['vehicle_type']:
			# if not a jumbo and was lead tank last combat day, automatically
			# not lead tank today
			if self.lead_tank:
				self.lead_tank = False
				return
			# 2D6 score needed to become lead tank
			lead_tank = 11
		else:
			# jumbos are much more likely to be chosen to lead
			lead_tank = 8
		d1, d2, roll = Roll2D6()
		if roll >= lead_tank:
			self.lead_tank = True
			PopUp('Your tank is the Lead Tank for the day')
			WriteJournal('"' + tank.name + '" is assigned to be Lead Tank for the day')
		else:
			self.lead_tank = False
	# set movement status upon deployment into an encounter
	def SetDeployment(self):
		"""Set the tank's starting state for an encounter: Hull Down,
		Stopped, or Moving.

		Counterattacks always start hull down without a roll; otherwise
		2D6 is compared against thresholds that vary by vehicle model,
		terrain, and the driver's Cautious Driver skill.
		"""
		# counterattack scenario is special, no roll required
		if campaign.scen_type == 'Counterattack' or battle.counterattack:
			self.hull_down = True
			return
		# determine chances of starting hull down and/or stopped
		if tank.stats['vehicle_type'] == 'Sherman VC':
			hull_down = 7
			stopped = 9
		elif 'M4A3E2' in tank.stats['vehicle_type']:
			hull_down = 3
			stopped = 5
		else:
			hull_down = 5
			stopped = 7
		# terrain modifier
		# a forest ('F') map node improves the hull down chance
		if campaign.day_map.player_node.node_type == 'F':
			hull_down += 2
		# check for 'Cautious Driver' skill activation
		crew_member = GetCrewByPosition('Driver')
		if crew_member.SkillCheck('Cautious Driver'):
			hull_down += 2
		d1, d2, roll = Roll2D6()
		if roll <= hull_down:
			self.hull_down = True
			Message('Your tank is Hull Down')
		elif roll <= stopped:
			Message('Your tank is Stopped')
		else:
			self.moving = True
			Message('Your tank is Moving')
	# tank has suffered a penetrating hit, determine effects
	def Penetrate(self, hit_location, sector, gun_type, critical=False):
		"""Resolve a penetrating hit on the player tank.

		Rolls 2D6 (twice for critical hits, keeping the lower = worse
		result) and applies modifiers for hit location, gun size, and ammo
		stowage. Lower totals are worse: tank explodes (<=2), knocked out
		(<=7), ricochet (<=8), spalling (<=9), fire (<=10), else minor
		damage only.
		"""
		# determine base modifiers
		mod = 0
		if hit_location == 'Hull':
			mod -= 1
		# only handles special cases of 88s or PF for now
		large_gun = False
		if gun_type in ['88L', '88LL', 'Panzerfaust']:
			mod -= 2
			large_gun = True
		# do initial roll
		d1, d2, roll = Roll2D6()
		# if original to-kill roll was less than half of what was required, or
		# a natural 2, then two results are rolled and the worse of the two is applied
		if critical:
			d1, d2, roll2 = Roll2D6()
			if roll2 < roll:
				roll = roll2
		# apply any secondary modifiers
		# presumably these are the ammunition-sensitive results; stowage
		# quality and carried overage make a bad outcome more likely
		if roll + mod in [3,4,5,11,12]:
			if 'wet_stowage' not in tank.stats:
				mod -= 1
			# extra ammo
			total = 0
			for ammo_type in AMMO_TYPES:
				if ammo_type in tank.general_ammo:
					total += tank.general_ammo[ammo_type]
			total -= tank.stats['main_gun_rounds']
			if total > 0:
				# chance equal to number of rounds carried over capacity
				if libtcod.random_get_int(0, 1, 100) <= total:
					mod -= 1
		# determine final effect
		roll += mod
		# Explodes
		if roll <= 2:
			PlaySound('tank_knocked_out')
			text = 'Your tank explodes, killing the entire crew!'
			PopUp(text)
			WriteJournal(text)
			campaign.AddStat('Tanks Lost', 1)
			self.alive = False
			battle.result = 'Tank Lost'
			for crewman in tank.crew:
				crewman.ResolveKIA()
		# Knocked Out
		elif roll <= 7:
			PlaySound('tank_knocked_out')
			text = 'Your tank has been knocked out, and the crew must bail out.'
			PopUp(text)
			WriteJournal(text)
			campaign.AddStat('Tanks Lost', 1)
			self.alive = False
			battle.result = 'Tank Lost'
			# work out crew casualties and bailing out
			ResolveCrewFate(hit_location, sector, (gun_type == 'Panzerfaust'))
		# Ricochet
		elif roll <= 8:
			text = ('Fragments of shell and armour ricochet throughout the ' +
				'tank, causing multiple wounds and damage.')
			PopUp(text)
			WriteJournal(text)
			tank.swiss_cheese = True
			# generate 3 minor damage results
			for n in range(3):
				self.TakeDamage(large_gun=large_gun)
			# 2 possible wounds per crewman
			for crewman in tank.crew:
				for n in range(2):
					text = crewman.TakeWound(hit_location, sector)
					if text is not None:
						text = crewman.name + ' is wounded! Result: ' + text
						PopUp(text)
		# Spalling
		elif roll <= 9:
			text = ("The shell impact causes spalling in the tank's armour, " +
				"sending metal fragments bursting into the crew " +
				"compartment.")
			PopUp(text)
			WriteJournal(text)
			tank.swiss_cheese = True
			# generate 2 minor damage results
			for n in range(2):
				self.TakeDamage(large_gun=large_gun)
			# 1 possible wound per crewman
			for crewman in tank.crew:
				text = crewman.TakeWound(hit_location, sector)
				if text is not None:
					text = crewman.name + ' is wounded! Result: ' + text
					PopUp(text)
		# Fire
		elif roll <= 10:
			text = ('The shell does no serious damage, but ignites a fire ' +
				'inside the crew compartment which is quickly extinguished.')
			PopUp(text)
			WriteJournal(text)
			# generate 1 minor damage result
			self.TakeDamage(large_gun=large_gun)
			# one possible crew wound
			crewman = random.choice(tank.crew)
			text = crewman.TakeWound(None, None, collateral=True)
			if text is not None:
				text = crewman.name + ' is wounded! Result: ' + text
				PopUp(text)
		# Minor Damage
		else:
			text = ('The shell impacts a well-protected area, causing only ' +
				'minor damage.')
			PopUp(text)
			WriteJournal(text)
			# generate 1 minor damage result
			self.TakeDamage(large_gun=large_gun)
# abandon tank
def AbandonTank(self):
text = 'Your crew abandons your tank and bails out.'
PopUp(text)
WriteJournal(text)
campaign.AddStat('Tanks Lost', 1)
self.alive = False
battle.result = 'Tank Lost'
# crew get a chance to recover from negative status effects
for crewman in tank.crew:
crewman.RecoveryRoll()
ResolveCrewFate(None, None, False, abandoned=True)
	# take a light weapons attack and apply effects
	def LWAttack(self):
		"""Resolve a small-arms attack on the tank: crew with open hatches
		risk a collateral wound, and a 1 on 1D6 inflicts one minor damage
		result on the tank."""
		# check for exposed crew wound
		hit_result = False
		for crewman in tank.crew:
			if crewman.hatch == 'Open':
				text = crewman.TakeWound(None, None, collateral=True)
				if text is not None:
					hit_result = True
					text = crewman.name + ' is hit! Result: ' + text
					PopUp(text)
		if not hit_result:
			ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'No crewmembers affected.')
		# check for minor damage
		if Roll1D6() == 1:
			self.TakeDamage(light_weapons=True)
	# take a minefield attack
	def MinefieldAttack(self):
		"""Resolve moving through a minefield.

		D10: 3+ no effect, exactly 2 loses a friendly tank, 1 or less
		immobilizes the player tank and may wound the driver or assistant
		driver on a follow-up roll.
		"""
		# do D10 roll
		roll = Roll1D10()
		if roll >= 3:
			PopUp('Luckily your tanks do not trigger any mines.')
			return
		elif roll <= 1:
			PopUp('One friendly tank is knocked out by a mine blast.')
			battle.tanks_lost += 1
			return
		# player tank disabled!
		text = 'Your tank triggers a mine which explodes, disabling it!'
		PopUp(text)
		WriteJournal(text)
		tank.moving = False
		tank.immobilized = True
		# rebuild list of orders for driver, reset to Stop
		GetCrewByPosition('Driver').BuildOrdersList()
		UpdateTankCon()
		# roll for effect on crew
		roll = Roll1D10()
		if roll <= 8:
			PopUp('Luckily, none of your crew is injured by the blast.')
			return
		# driver or assistant driver possibly wounded
		if roll == 9:
			crew = GetCrewByPosition('Driver')
		else:
			crew = GetCrewByPosition('Asst. Driver')
		# no crew in this position
		if crew is None:
			result = None
		else:
			result = crew.TakeWound(None, None, minefield=True)
		if result is None:
			PopUp('Luckily, none of your crew is injured by the blast.')
			return
		# otherwise, show the wound result
		text = crew.name + ' is hit by the blast! Result: ' + result
		PopUp(text)
		WriteJournal(text)
	# draw player tank to the map console
	def DrawMe(self):
		"""Draw the player tank (hull plus turret) onto the overlay console.

		Also draws the previous turret facing in grey during turret/firing
		phases, and a preview arrow of the new hull facing during the
		Pivot Tank phase.
		"""
		# return the x, y location and character to use to draw the tank turret
		# based on turret facing
		def GetTurretChar(facing):
			if facing == 0:	# down and right
				x = MAP_X0 + 1
				y = MAP_Y0 + 1
				char = '\\'
			elif facing == 1: # down
				x = MAP_X0
				y = MAP_Y0 + 1
				char = '|'
			elif facing == 2: # down and left
				x = MAP_X0 - 1
				y = MAP_Y0 + 1
				char = '/'
			elif facing == 3: # up and left
				x = MAP_X0 - 1
				y = MAP_Y0 - 1
				char = '\\'
			elif facing == 4: # up
				x = MAP_X0
				y = MAP_Y0 - 1
				char = '|'
			else: # up and right
				x = MAP_X0 + 1
				y = MAP_Y0 - 1
				char = '/'
			return x, y, char
		# hull symbol at the fixed player position
		libtcod.console_set_default_foreground(overlay_con, libtcod.white)
		libtcod.console_put_char(overlay_con, MAP_X0, MAP_Y0, libtcod.CHAR_RADIO_UNSET, flag=libtcod.BKGND_SET)
		libtcod.console_set_char_background(overlay_con, MAP_X0, MAP_Y0, PLAYER_COLOR, flag=libtcod.BKGND_SET)
		# draw turret based on current facing
		x, y, char = GetTurretChar(self.turret_facing)
		col = libtcod.console_get_char_background(map_con, x, y)
		libtcod.console_put_char_ex(overlay_con, x, y, char, libtcod.white, col)
		# if turret has been rotated and we're rotating the turret or
		# firing main gun or MGs, draw old facing too
		if battle.phase in ['Fire Main Gun', 'Fire MGs', 'Rotate Turret'] and self.old_t_facing != self.turret_facing:
			x, y, char = GetTurretChar(self.old_t_facing)
			col = libtcod.console_get_char_background(map_con, x, y)
			libtcod.console_put_char_ex(overlay_con, x, y, char, libtcod.dark_grey, col)
		# if we're pivoting the tank, display what the new facing would be
		# char codes 224/225/30/31 are arrow glyphs in the custom font
		elif battle.phase == 'Pivot Tank':
			if tank.new_facing == 0:
				x = MAP_X0 + 3
				y = MAP_Y0 + 1
				char = 224
				col1 = libtcod.console_get_char_background(map_con, x, y)
				col2 = libtcod.white
			elif tank.new_facing == 1:
				x = MAP_X0
				y = MAP_Y0 + 2
				char = 31
				col1 = libtcod.white
				col2 = libtcod.console_get_char_background(map_con, x, y)
			elif tank.new_facing == 2:
				x = MAP_X0 - 3
				y = MAP_Y0 + 1
				char = 225
				col1 = libtcod.console_get_char_background(map_con, x, y)
				col2 = libtcod.white
			elif tank.new_facing == 3:
				x = MAP_X0 - 3
				y = MAP_Y0 - 1
				char = 224
				col1 = libtcod.white
				col2 = libtcod.console_get_char_background(map_con, x, y)
			elif tank.new_facing == 4:
				x = MAP_X0
				y = MAP_Y0 - 2
				char = 30
				col1 = libtcod.white
				col2 = libtcod.console_get_char_background(map_con, x, y)
			else:
				x = MAP_X0 + 3
				y = MAP_Y0 - 1
				char = 225
				col1 = libtcod.white
				col2 = libtcod.console_get_char_background(map_con, x, y)
			libtcod.console_put_char_ex(overlay_con, x, y, char, col1, col2)
# Crewman Class
# holds information about a single crewman in the tank
class Crewman:
	def __init__(self):
		"""Create a new crewman with empty identity and default state."""
		self.name = ''				# crewman name
		self.nickname = ''			# nickname, set by player
		self.hometown = ''			# crewman's hometown
		self.rank_level = 0			# rank level (private, etc.)
		self.position = ''			# position in the tank normally occupied
		self.order = 'None'			# current order
		self.hatch = 'None'			# hatch status
		self.spot = 'None'			# spot status
		self.spot_sector = 4			# sector spotting in if limited to any one
		self.orders_list = []			# list of possible orders for this crewman
		### Experience Points (EXP) and skills ###
		self.level = 1				# crew experience level
		self.exp = 0				# current experience points
		self.skill_pts = 1			# current skill points
		self.skills = []			# list of skills (skill name, activation chance)
		# decorations
		self.decorations = []			# medals, etc.
		# injury flags
		self.alive = True			# crewman is alive
		self.light_wound = False		# " has received a light wound
		self.serious_wound = False		# " serious wound
		self.v_serious_wound = False		# " very serious wound
		# status flags
		self.stunned = False			# " has been stunned by injury or near miss
		self.unconscious = False		# " knocked unconscious
		self.bailed_out = False			# has bailed out of tank
		# linked-list pointers used to cycle through the crew in order
		self.next = None			# pointer to next crewman in list
		self.prev = None			# pointer to previous "
# resolves effects of crewman being killed
def ResolveKIA(self):
self.no_bail = True
self.alive = False
self.unconscious = False
self.stunned = False
self.light_wound = False
self.serious_wound = False
self.v_serious_wound = False
self.SetSpotAbility()
self.ResetOrder()
self.AddHeadStone()
	# generates a text report of the crewman's status and history, used when crewman is
	# KIA, sent home, or at end of campaign
	def GenerateReport(self):
		"""Return a list of text lines summarizing this crewman: identity,
		fate (KIA / sent home), skills, and decorations."""
		lines = ['', '****', 'Crewman Report']
		lines.append(self.GetRank())
		lines.append(self.name)
		if self.nickname != '':
			lines.append(self.nickname)
		lines.append('  ' + self.hometown)
		lines.append('')
		# fate line only appears for KIA or very seriously wounded crew
		if not self.alive:
			lines.append('KIA, ' + campaign.GetDate())
			lines.append('')
		elif self.v_serious_wound:
			lines.append('Sent Home, ' + campaign.GetDate())
			lines.append('')
		# skills
		lines.append('Skills:')
		if len(self.skills) == 0:
			lines.append('None')
		else:
			for skill_record in self.skills:
				string = ' ' + skill_record.name
				# level 100 skills are always-on, so no percentage shown
				if skill_record.level < 100:
					string += ': ' + str(skill_record.level) + '%'
				lines.append(string)
		lines.append('')
		# decorations
		lines.append('Decorations:')
		if len(self.decorations) == 0:
			lines.append('None')
		else:
			for dec_name in self.decorations:
				lines.append(dec_name)
		return lines
# add an entry to the bones file recording this crewman's demise
def AddHeadStone(self):
try:
# open bones file
save = shelve.open('bones')
bones = save['bones']
save.close()
# add the entry
# get most recent decortation
if len(self.decorations) > 0:
decoration_text = self.decorations[-1]
else:
decoration_text = ''
bones.graveyard.append([self.GetRank(), self.name, self.hometown, campaign.GetDate(), decoration_text])
# save the bones file
save = shelve.open('bones')
save['bones'] = bones
save.close()
except:
print('ERROR: Could not open bones file')
	# award a decoration to this crewman, and display a window with information about
	# the decoration
	def AwardDecoration(self, dec_name):
		"""Add dec_name to the crewman's decorations and show an animated
		award ceremony window; waits for Enter before returning."""
		# blit the menu console and pause briefly for a reveal effect
		def UpdateScreen():
			libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
			libtcod.console_flush()
			Wait(400)
		WriteJournal(self.name + ' awarded new decoration: ' + dec_name)
		# add to crewman's list of decorations
		self.decorations.append(dec_name)
		# darken screen
		libtcod.console_clear(con)
		libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
			0.0, 0.7)
		# clear console
		libtcod.console_set_default_background(menu_con, libtcod.black)
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		libtcod.console_clear(menu_con)
		libtcod.console_set_alignment(menu_con, libtcod.CENTER)
		# display frame and title
		libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH-1, MENU_CON_HEIGHT-1,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print(menu_con, MENU_CON_XM, 2, 'Decoration Award')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		# crewman name
		text = self.GetRank() + ' ' + self.name
		libtcod.console_print(menu_con, MENU_CON_XM, 5, text)
		if self.nickname != '':
			libtcod.console_print(menu_con, MENU_CON_XM, 6, '"' + self.nickname + '"')
		UpdateScreen()
		text = 'has been '
		if not self.alive:
			text += 'posthumously '
		text += 'awarded the'
		libtcod.console_print(menu_con, MENU_CON_XM, 7, text)
		UpdateScreen()
		# name of decoration
		libtcod.console_print(menu_con, MENU_CON_XM, 11, dec_name)
		UpdateScreen()
		# decoration description
		# get description from definitions file
		# NOTE: relies on the for-loop variable 'text' remaining bound
		# after break; falls through with the last entry if no match found
		if dec_name == 'Purple Heart':
			text = 'for wounds received in action'
		else:
			for (name, text, vp_req) in campaign.decorations:
				if name == dec_name:
					break
		libtcod.console_print(menu_con, MENU_CON_XM, 15, text)
		UpdateScreen()
		# date
		text = 'this ' + campaign.GetDate()
		libtcod.console_print(menu_con, MENU_CON_XM, 17, text)
		libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3, '[%cEnter%c] to continue'%HIGHLIGHT)
		UpdateScreen()
		libtcod.console_set_alignment(menu_con, libtcod.LEFT)
		exit_menu = False
		while not exit_menu:
			libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
			# exit right away
			if libtcod.console_is_window_closed():
				sys.exit()
			if key.vk == libtcod.KEY_ENTER:
				exit_menu = True
			# update screen
			libtcod.console_flush()
# returns true if crewman unable to perform actions
def NoActions(self):
return not self.alive or self.stunned or self.unconscious
	# roll to recover from status effects
	def RecoveryRoll(self):
		"""Roll 2D6 (8 or less recovers; True Grit gives -1) to improve one
		step: unconscious -> stunned, or stunned -> recovered."""
		if self.unconscious:
			d1, d2, roll = Roll2D6()
			if self.SkillCheck('True Grit'):
				roll -= 1
			if roll <= 8:
				# waking up still leaves the crewman stunned
				self.unconscious = False
				self.stunned = True
				Message(self.name + ' regains consciousness but remains Stunned.')
		elif self.stunned:
			d1, d2, roll = Roll2D6()
			if self.SkillCheck('True Grit'):
				roll -= 1
			if roll <= 8:
				self.stunned = False
				Message(self.name + ' recovers from being Stunned.')
# immediately set crew's level, granting skill and experience points
def SetLevel(self, new_level):
if new_level == self.level: return
self.skill_pts += new_level - self.level
self.level = new_level
self.exp = GetExpReq(new_level)
# award a given number of exp to the crewman
def AwardExp(self, exp_gain):
# don't award EXP to dead or out of action crew
if not self.alive or self.v_serious_wound:
return
self.exp += exp_gain
# upgrade the given skill to the given level
def UpgradeSkill(self, skill_name, skill_level):
for crew_skill in self.skills:
if crew_skill.name == skill_name:
crew_skill.level = skill_level
return
# display a small box with crew info, used for crew info screen and skill
# additions / upgrades
def DisplayCrewInfo(self, console, x, y, highlight):
if highlight:
libtcod.console_set_default_foreground(console, SELECTED_COLOR)
else:
libtcod.console_set_default_foreground(console, libtcod.light_grey)
libtcod.console_print_frame(menu_con, x, y, 27, 30,
clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_set_default_foreground(console, libtcod.white)
libtcod.console_print(console, x+1, y+1, self.position)
libtcod.console_set_default_foreground(console, libtcod.light_grey)
libtcod.console_print(console, x+1, y+2, self.GetRank())
libtcod.console_set_default_foreground(console, libtcod.white)
libtcod.console_print(console, x+1, y+3, self.name)
text = self.nickname
if text != '':
libtcod.console_set_default_foreground(console, libtcod.light_blue)
text = ' "' + text + '"'
libtcod.console_print(console, x+1, y+4, text)
# hometown
libtcod.console_set_default_foreground(console, libtcod.light_grey)
libtcod.console_print(console, x+1, y+5, self.hometown)
libtcod.console_set_default_foreground(console, libtcod.white)
# status
if self.NoActions():
libtcod.console_set_default_foreground(console, libtcod.light_red)
if not self.alive:
text = 'Dead'
elif self.unconscious:
text = 'Unconscious'
else:
text = 'Stunned'
libtcod.console_print(console, x+2, y+6, text)
# wounds
if self.v_serious_wound:
text = 'Very Serious Wound'
elif self.serious_wound:
text = 'Serious Wound'
elif self.light_wound:
text = 'Light Wound'
else:
text = ''
libtcod.console_set_default_foreground(console, libtcod.red)
libtcod.console_print(console, x+2, y+7, text)
# display decorations
if len(self.decorations) > 0:
# starting x position
x1 = x+1
for dec_name in self.decorations[:20]:
# USA decorations
if dec_name == 'Purple Heart':
libtcod.console_put_char_ex(console, x1, y+9,
31, libtcod.purple, libtcod.black)
libtcod.console_put_char_ex(console, x1, y+10,
libtcod.CHAR_HEART, libtcod.purple, libtcod.black)
elif dec_name == 'Bronze Star':
libtcod.console_put_char_ex(console, x1, y+9,
31, libtcod.red, libtcod.black)
libtcod.console_put_char_ex(console, x1, y+10,
15, libtcod.light_yellow, libtcod.black)
elif dec_name == 'Silver Star':
libtcod.console_put_char_ex(console, x1, y+9,
31, libtcod.blue, libtcod.black)
libtcod.console_put_char_ex(console, x1, y+10,
15, libtcod.light_yellow, libtcod.black)
elif dec_name == 'Distinguished Service Cross':
libtcod.console_put_char_ex(console, x1, y+9,
31, libtcod.blue, libtcod.black)
libtcod.console_put_char_ex(console, x1, y+10,
197, libtcod.dark_yellow, libtcod.black)
elif dec_name == 'Congressional Medal of Honor':
libtcod.console_put_char_ex(console, x1, y+9,
254, libtcod.light_blue, libtcod.black)
libtcod.console_put_char_ex(console, x1, y+10,
42, libtcod.yellow, libtcod.black)
# UKC decorations
elif dec_name == 'Military Medal':
libtcod.console_put_char_ex(console, x1, y+9,
186, libtcod.white, libtcod.blue)
libtcod.console_put_char_ex(console, x1, y+10,
42, libtcod.light_grey, libtcod.black)
elif dec_name == 'Military Cross':
libtcod.console_put_char_ex(console, x1, y+9,
179, libtcod.purple, libtcod.white)
libtcod.console_put_char_ex(console, x1, y+10,
31, libtcod.light_grey, libtcod.black)
elif dec_name == 'Distinguished Service Order':
libtcod.console_put_char_ex(console, x1, y+9,
179, libtcod.red, libtcod.blue)
libtcod.console_put_char_ex(console, x1, y+10,
31, libtcod.white, libtcod.black)
elif dec_name == 'Victoria Cross':
libtcod.console_put_char_ex(console, x1, y+9,
178, libtcod.red, libtcod.black)
libtcod.console_put_char_ex(console, x1, y+10,
31, libtcod.dark_yellow, libtcod.black)
x1 += 1
libtcod.console_set_default_foreground(console, libtcod.light_grey)
libtcod.console_print(console, x+1, y+12, 'Level:')
libtcod.console_print(console, x+1, y+13, 'Exp:')
libtcod.console_print(console, x+1, y+14, 'Skill Pts:')
libtcod.console_set_default_foreground(console, libtcod.white)
libtcod.console_print(console, x+8, y+12, str(self.level))
# display current exp and exp required for next level
libtcod.console_set_default_foreground(console, libtcod.white)
text = str(self.exp) + '/' + str(GetExpReq(self.level+1))
libtcod.console_print(console, x+6, y+13, text)
# display skill points
if self.skill_pts > 0:
libtcod.console_set_default_foreground(console, libtcod.light_blue)
libtcod.console_print(console, x+12, y+14, str(self.skill_pts))
libtcod.console_set_default_foreground(console, libtcod.white)
	# build a list of possible orders based on crewman status, etc.
	def BuildOrdersList(self, no_reset=False):
		"""Rebuild self.orders_list by filtering CREW_ORDERS against this
		crewman's position, hatch state, and the tank's equipment and
		damage.

		If no_reset is False the current order is reset afterwards;
		otherwise the current order is kept if it is still legal.
		"""
		self.orders_list = []
		for order in CREW_ORDERS:
			# skip if this order not allowed to this crew member's position
			if self.position not in order.position_list: continue
			# only show Smoke Mortar order if tank has a smoke mortar
			if order.name == 'Fire Smoke Mortar':
				if 'smoke_mortar' not in tank.stats: continue
			# skip movement orders if tank is immobilized
			if tank.immobilized or tank.bogged_down:
				if 'Driver' in order.position_list and order.name not in ['Stop', 'Attempt Unbog', 'Abandon Tank']:
					continue
			# only allow unbog attempt for driver if bogged
			if order.name == 'Attempt Unbog':
				if not tank.bogged_down: continue
			# only allow throw smoke grenade if has hatch and tank
			# has at least one smoke grenade remaining
			if order.name == 'Throw Smoke Grenade':
				if self.hatch != 'Open': continue
				if tank.smoke_grenades == 0: continue
			# disable reload order if no shell in main gun
			if order.name == 'Reload':
				if tank.ammo_load == 'None': continue
			# AA MG: needs the mount, an open hatch, and a working gun;
			# loaders can only use it through a split hatch
			if order.name in ['Fire AA MG', 'Repair AA MG']:
				if 'aa_mg' not in tank.stats: continue
				if self.position == 'Loader' and tank.stats['loader_hatch'] != 'Split': continue
				if self.hatch != 'Open': continue
				if order.name == 'Fire AA MG':
					if 'AA MG Malfunction' in tank.damage_list or 'AA MG Broken' in tank.damage_list: continue
				if order.name == 'Repair AA MG':
					if 'AA MG Malfunction' not in tank.damage_list: continue
			# Co-ax MG
			if order.name == 'Fire Co-Axial MG':
				if 'co_ax_mg' not in tank.stats: continue
				if 'Co-ax MG Malfunction' in tank.damage_list or 'Co-ax MG Broken' in tank.damage_list: continue
			if order.name == 'Repair Co-ax MG':
				if 'co_ax_mg' not in tank.stats: continue
				if 'Co-ax MG Malfunction' not in tank.damage_list: continue
			# Bow MG: unusable when hull down since it cannot depress enough
			if order.name == 'Fire Bow MG':
				if 'bow_mg' not in tank.stats: continue
				if tank.hull_down: continue
				if 'Bow MG Malfunction' in tank.damage_list or 'Bow MG Broken' in tank.damage_list: continue
			if order.name == 'Repair Bow MG':
				if 'bow_mg' not in tank.stats: continue
				if 'Bow MG Malfunction' not in tank.damage_list: continue
			# Firing Main Gun, Directing Main Gun Fire, or changing gun load
			if order.name in ['Fire Main Gun', 'Direct Main Gun Fire', 'Change Gun Load']:
				if 'Main Gun Malfunction' in tank.damage_list or 'Main Gun Broken' in tank.damage_list: continue
				if 'Gun Sight Broken' in tank.damage_list: continue
			# check turret traverse gear
			if 'Turret Traverse Malfunction' in tank.damage_list or 'Turret Traverse Broken' in tank.damage_list:
				if order.name in ['Rotate Turret']: continue
			# check for tank intercom broken
			if 'Intercom Malfunction' in tank.damage_list or 'Intercom Broken' in tank.damage_list:
				if order.name in ['Direct Movement', 'Direct Bow MG Fire']:
					continue
			# only allow repairs if the system is malfunctioning
			if order.name == 'Repair Main Gun':
				if 'Main Gun Malfunction' not in tank.damage_list: continue
			elif order.name == 'Repair Turret Traverse':
				if 'Turret Traverse Malfunction' not in tank.damage_list: continue
			elif order.name == 'Repair Radio':
				if 'Radio Malfunction' not in tank.damage_list: continue
			elif order.name == 'Repair Intercom':
				if 'Intercom Malfunction' not in tank.damage_list: continue
			# only allow abandoning the tank if one or more crewmen very
			# seriously wounded or worse, or tank is immobile
			if order.name == 'Abandon Tank':
				crew_qualify = False
				if tank.immobilized:
					crew_qualify = True
				elif tank.swiss_cheese:
					crew_qualify = True
				else:
					for crewman in tank.crew:
						if crewman.v_serious_wound or not crewman.alive:
							crew_qualify = True
							break
				if not crew_qualify: continue
			self.orders_list.append(order)
		# set None order for inactive crew, default order for Loader and Driver
		if not no_reset:
			self.ResetOrder()
			return
		# otherwise, we still need to check that our current order is allowed
		# if not, try to set to default
		if not self.CurrentOrderOK():
			self.ResetOrder()
# see if this crew has the specified skill and, if so, roll to activate it
def SkillCheck(self, skill_name):
if not self.alive or self.unconscious or self.stunned:
return False
for skill in self.skills:
if skill.name == skill_name:
roll = Roll1D100()
# check for battle leadership effect if in battle
if battle is not None:
if battle.battle_leadership:
roll -= 5
if roll <= skill.level and roll <= 95:
# only display message if skill is not automatic
if skill.level < 100:
text = self.name + ' activates ' + skill_name + ' skill!'
Message(text, color=SKILL_ACTIVATE_COLOR)
# TODO for debugging
#PopUp(text)
return True
break
return False
# return the full or short form of this crewman's rank
def GetRank(self, short=False):
a, b, c = campaign.ranks[self.rank_level]
if short:
return a
return b
# generate a random name for a crewman
def GenerateName(self):
good_name = False
while not good_name:
name = random.choice(FIRST_NAMES)
name += ' ' + random.choice(LAST_NAMES)
# make sure name isn't too long
if len(name) > NAME_MAX_LEN:
continue
# make sure name isn't the same as an already existing one
for crewman in tank.crew:
if crewman.name == name:
continue
# set name
self.name = name
good_name = True
# check that this crew's hatch status matches what is possible for this tank
# used after switching tanks, since it may or may not have a loader hatch
def CheckHatch(self):
# if this model has a loader hatch
if self.position == 'Loader':
# if no hatch in this tank, crew hatch must be set to none
if tank.stats['loader_hatch'] == 'None':
self.hatch = 'None'
else:
# otherwise, if hatch used to be none, set it to shut now
if self.hatch == 'None':
self.hatch = 'Shut'
# return a list of text strings with info on this crewman
def GetInfo(self):
info_list = []
info_list.append(self.name)
info_list.append(self.position)
# if we're in a battle, return current order
if battle is not None:
info_list.append(self.order)
else:
info_list.append('')
info_list.append(self.hatch)
info_list.append(self.spot)
info_list.append(self.nickname)
return info_list
	# set spotting ability of this crewman based on location, hatch status, and order
	def SetSpotAbility(self):
		"""Set self.spot based on crew position, hatch state, current
		order, periscope damage, and main gun activity."""
		# incapacitated crew cannot spot at all
		if self.NoActions():
			self.spot = 'None'
			return
		# check order spot effects
		# some orders occupy the crewman fully and prevent spotting
		for order in CREW_ORDERS:
			if order.name == self.order:
				if not order.spot:
					self.spot = 'None'
					return
				break
		if self.position == 'Commander':
			if 'vision_cupola' in tank.stats:
				self.spot = 'All'
			elif self.hatch == 'Open':
				self.spot = 'All'
			else:
				if 'Commander Periscope Broken' in tank.damage_list:
					self.spot = 'None'
				else:
					self.spot = 'Any One Sector'
		elif self.position == 'Gunner':
			if 'Gunner Periscope Broken' in tank.damage_list:
				self.spot = 'None'
			else:
				self.spot = 'Turret Front'
		elif self.position == 'Loader':
			# loader is busy serving the gun after it has fired
			if tank.fired_main_gun:
				self.spot = 'None'
			else:
				if tank.stats['loader_hatch'] != 'None':
					if self.hatch == 'Open':
						self.spot = 'All'
					else:
						self.spot = 'Any One Sector'
				else:
					self.spot = 'Any One Sector'
				if self.spot == 'Any One Sector' and 'Loader Periscope Broken' in tank.damage_list:
					self.spot = 'None'
		elif self.position in ['Driver', 'Asst. Driver']:
			if self.hatch == 'Open':
				self.spot = 'All Except Rear'
			else:
				if self.position == 'Driver' and 'Driver Periscope Broken' in tank.damage_list:
					self.spot = 'None'
				elif self.position == 'Asst. Driver' and 'Asst. Driver Periscope Broken' in tank.damage_list:
					self.spot = 'None'
				else:
					self.spot = 'Tank Front'
		else:
			self.spot = 'None'	# should not be used
# check that our current order is allowed
def CurrentOrderOK(self):
for order in self.orders_list:
if order.name == self.order: return True
return False
# sets default order for loader and driver, or set to no order if out of action
# if reset_all, resets order for all crew
def ResetOrder(self, reset_all=False):
if not self.alive or self.unconscious or self.stunned:
self.order = 'None'
elif reset_all or self.position in ['Loader', 'Driver']:
self.order = self.default_order
# check that our default order is possible, otherwise set to none
if not self.CurrentOrderOK():
self.order = 'None'
	# test to see if this crewman is stunned as a result of a hit
	# different events will supply different base scores to beat in order to save
	def StunCheck(self, base_score):
		"""Roll 2D6 against base_score; return True if the crewman becomes
		Stunned, False if they save (or were already out of action)."""
		# can't be stunned if already hurt
		if not self.alive or self.stunned or self.unconscious:
			return False
		d1, d2, roll = Roll2D6()
		# 6,6 always fails
		if roll != 12:
			if self.SkillCheck('True Grit'):
				roll -= 1
			# rolling base_score or less saves against the stun
			if roll <= base_score:
				return False
		self.stunned = True
		return True
# works out the effect of a wound and returns a string description
def TakeWound(self, hit_location, sector, collateral=False, minefield=False):
# can't suffer further wounds if dead
if not self.alive:
return None
# stunned and unconscious crew are not subject to collateral damage
# idea is that they are not exposed enough to suffer injury
if (self.stunned or self.unconscious) and collateral:
return None
d1, d2, roll = Roll2D6()
# unmodified 6,6 is crewman killed
if roll == 12:
if self.SkillCheck('Pocket Bible'):
WriteJournal(self.name + ' was saved by Pocket Bible.')
return 'Saved by Pocket Bible'
if collateral:
if self.SkillCheck('Lightning Reflexes'):
WriteJournal(self.name + ' was saved by Lightning Reflexes.')
return 'Saved by Lightning Reflexes'
text = 'Killed!'
self.ResolveKIA()
UpdateTankCon()
return text
# work out any hit location crew wound modifiers
if hit_location is not None and sector is not None:
d, a, g, l, c = 0, 0, 0, 0, 0
# turret hit
if hit_location == 'turret':
# all directions
d -= 2
a -= 2
# right side
if sector in [5, 0]:
g += 1
l -= 1
# left side
elif sector in [2, 3]:
g -= 1
l += 1
# rear
elif sector == 1:
c += 1
# hull hit
else:
# front right
if sector == 5:
d -= 1
l -= 1
a += 1
g += 1
# back center, right, or left
elif sector in [0, 1, 2]:
d -= 3
a -= 3
g -= 2
l -= 2
c -= 2
# front left
elif sector == 3:
d += 1
l += 1
a -= 1
g -= 1
# apply modifiers based on position in tank
if self.position == 'Commander':
roll += c
elif self.position == 'Gunner':
roll += g
elif self.position == 'Loader':
roll += l
elif self.position == 'Driver':
roll += d
elif self.position == 'Asst. Driver':
roll += a
# already wounded
if self.v_serious_wound:
roll += 3
elif self.serious_wound:
roll += 2
elif self.light_wound:
roll += 1
# minefield damage less likely to be serious
if minefield:
roll -= 2
# collateral damage less likely to be serious
if collateral:
roll -= 3
# if crewman is outside tank, normal damage is much less severe, but
# collateral damage much more dangerous
if self.order == 'Fire AA MG':
if collateral:
roll += 4
elif hit_location is not None and sector is not None:
roll -= 2
if self.SkillCheck('True Grit'):
roll -= 1
##### Check modified roll for result #####
# No Effect
if roll <= 6:
return None
# if collateral, crewman might have chance to ignore
if collateral:
if self.SkillCheck('Lightning Reflexes'):
return 'Saved by Lightning Reflexes'
# Light Wound, chance of being stunned
if roll == 7:
text = 'Light Wound'
self.light_wound = True
if self.StunCheck(10):
text += ', Stunned'
# Light Wound, greater chance of being stunned
elif roll == 8:
text = 'Light Wound'
self.light_wound = True
if self.StunCheck(7):
text += ', Stunned'
# Serious Wound, chance of being stunned
elif roll == 9:
text = 'Serious Wound'
self.serious_wound = True
if self.StunCheck(5):
text += ', Stunned'
# Serious Wound, automatically stunned
elif roll == 10:
text = 'Serious Wound, Stunned'
self.serious_wound = True
self.stunned = True
# Very Serious Wound, Unconscious
elif roll == 11:
text = 'Very Serious Wound, Unconscious'
self.v_serious_wound = True
self.unconscious = True
# overrides any lesser effects
self.stunned = False
# Dead
else:
if self.SkillCheck('Pocket Bible'):
WriteJournal(self.name + ' was saved by Pocket Bible.')
return 'Saved by Pocket Bible'
text = 'Killed!'
self.ResolveKIA()
# update spot ability
if self.NoActions():
self.BuildOrdersList()
self.SetSpotAbility()
self.ResetOrder()
UpdateTankCon()
WriteJournal(self.name + ' was wounded, result: ' + text)
return text
# attempt to bail out of tank, return string description of outcome
def BailOut(self):
    """Attempt to bail out of the tank; return a string describing the outcome."""
    # an unconscious crewman cannot act at all
    if self.unconscious:
        return 'Cannot bail out'
    # a crewman already outside on the AA MG escapes automatically
    if self.order == 'Fire AA MG':
        self.bailed_out = True
        return 'Passed'
    (d1, d2, total) = Roll2D6()
    # build the roll modifier: closed hatch and stun hinder, Gymnast helps
    modifier = 0
    if self.hatch == 'None':
        modifier += 1
    if self.stunned:
        modifier += 1
    if self.SkillCheck('Gymnast'):
        modifier -= 2
    # modified 2D6 roll of 10 or less escapes the tank
    if total + modifier <= 10:
        self.bailed_out = True
        return 'Passed'
    return 'Failed'
# Battle Class
# holds information relating to an encounter on the battle board
# can make this a counterattack battle and/or change the resistance level of the
# battle if variables are passed on init
class Battle:
    """Holds all state for one encounter on the battle board.

    counterattack -- play this encounter as if the day were a counterattack mission
    res_level -- optional resistance-level override passed in by the caller
    """
    def __init__(self, counterattack=False, res_level=None):
        # generate the map hexes from the global HEXES layout
        self.maphexes = [MapHex(hx, hy, rng, sector) for (hx, hy, rng, sector) in HEXES]
        self.smoke_factors = []        # active smoke factors: (hx, hy, smoke level)
        self.messages = []             # game message log
        self.enemy_units = []          # active enemy units
        self.vp_total = 0              # player VP earned this encounter
        self.mouseover = (-1, -1)      # last known mouse position
        # first spawned unit type of each class, so later spawns can match it
        self.tank_type = None
        self.spg_type = None
        self.at_gun_type = None
        # special flag: play this encounter as a counterattack mission
        self.counterattack = counterattack
        # friendly ambush flag, for counterattack mission
        self.friendly_ambush = False
        # turn / input state
        self.phase = 'None'            # current phase in encounter turn
        self.selected_crew = None      # currently selected crewmember
        self.orders = []               # possible orders for the selected crewman
        self.selected_order = None     # index of selected order (Issue Order mode)
        self.area_fire = False         # area fire mode flag
        self.target = None             # currently targeted enemy unit
        self.trigger_phase = False     # end player input, move to next (sub-)phase
        self.battle_leadership = False # battle leadership skill active this turn
        ##### Battle Record Stats #####
        # kills by player tank / by friendly units / forced off board, indexed by:
        # LW and MG, Truck, APCs and ACs, SPG, PzKw IV H, PzKw V G, PzKw VI, AT Gun
        self.tank_ko_record = [0] * 8
        self.friendly_ko_record = [0] * 8
        self.left_behind = [0] * 8
        self.tanks_lost = 0            # friendly tanks lost
        self.inf_lost = 0              # friendly infantry squads lost
        self.enemy_reinforcements = 0  # enemy reinforcements arrived so far
        self.rounds_passed = 1         # game rounds elapsed in this encounter
        self.result = 'Undetermined'   # current encounter result
# Enemy Units
class EnemyUnit:
def __init__(self):
    """Set the default state of a newly spawned enemy unit."""
    self.alive = True            # cleared when the unit is destroyed
    self.map_hex = None          # hex location on the battle map
    self.x = 0                   # draw position in the map console
    self.y = 0
    # movement / animation state
    self.animating = False       # movement animation running: skip terrain drawing
    self.facing = ''             # 'Front', 'Side' or 'Rear'; unused for infantry
    self.moving = False          # movement status
    self.terrain = ''            # terrain occupied
    self.immobile = False        # immobilized by a track hit
    # armament
    self.pf = False              # armed with a panzerfaust
    # spotting / identification state
    self.spotted = False         # spotted by the player tank
    self.identified = False      # identified; only needed for Tanks, SPGs, AT Guns
    self.hidden = False          # unit is hidden
    self.spotted_lr = False      # spotted last round (easier to identify)
    self.spotted_tr = False      # temp flag, becomes spotted_lr next round
    # combat state
    self.shot_at = False         # fired at by player earlier this turn
    self.fired = False           # fired last turn; cleared after spotting phase
    self.acquired = 0            # player's acquired-target level on this unit
    self.acquired_player = 0     # this unit's acquired level on the player tank
    self.full_apc = False        # APC currently carrying infantry
    self.unit_class = ''         # unit class
    self.unit_type = ''          # unit type
    self.morale = self.SetMorale()  # rolled once at spawn
    self.pinned = False          # morale status flags
    self.stunned = False
    self.hit_record = []         # unresolved hits against this unit
# check to see if we do a panzerfaust attack, returning True if attack happened
def PFAttack(self):
    """Possibly fire a Panzerfaust at the player tank.

    Returns True if an attack took place, False if the preconditions or the
    activation roll failed.  (One inner path returns None -- see note below.)
    """
    # only PF-armed infantry at close range, outside heavy cover, can fire
    if not self.pf or self.map_hex.rng != 0 or self.terrain in ['Building', 'Fortification']:
        return False
    if self.hidden or self.pinned:
        return False
    # see if attack occurs
    roll = Roll1D6()
    year = campaign.current_date[0]
    month = campaign.current_date[1]
    # panzerfausts are more readily used from December 1944 onward
    if year >= 1945 or (year == 1944 and month == 12):
        roll -= 1
    if tank.moving: roll -= 1
    if tank.lead_tank: roll -= 1
    # NOTE(review): sectors 0-2 appear to be the arc facing the player -- confirm
    if self.map_hex.sector in [0,1,2]: roll -= 1
    # activation target depends on the day's scenario type
    if campaign.scen_type == 'Advance':
        target_roll = 2
    elif campaign.scen_type == 'Battle':
        target_roll = 3
    else:
        target_roll = 1
    if roll > target_roll: return False
    # firing a pf means that unit is revealed
    self.spotted = True
    # play sound effect
    PlaySound('panzerfaust_firing')
    text = 'An infantry squad fires a Panzerfaust at you!'
    WriteJournal(text)
    ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, text)
    # create roll action to hold details about the action
    roll_action = RollAction()
    # input details
    roll_action.attacker_unit_type = 'Light Weapons Infantry'
    roll_action.attacker = 'Infantry Squad'
    roll_action.attack_type = 'Panzerfaust'
    roll_action.target_unit_type = tank.unit_type
    roll_action.target = tank.stats['vehicle_type'] + ' "' + tank.name + '"'
    roll_action.rng = 'Close'
    roll_action.score_req = 6
    # calculate DRM (dice roll modifiers)
    drm = []
    if tank.moving:
        drm.append(('Target moving', 2))
    smoke_factors = GetSmokeFactors(0, 0, self.map_hex.hx, self.map_hex.hy)
    if smoke_factors > 0:
        drm.append(('Smoke Factors', smoke_factors*2))
    roll_action.drm = drm
    roll_action.CalculateTotalDRM()
    roll_action.roll_req = roll_action.score_req - roll_action.total_drm
    ##### To-hit Roll #####
    d1, d2, roll = Roll2D6()
    roll_action.d1 = d1
    roll_action.d2 = d2
    roll_action.roll = roll
    hit = False
    # critical miss: the weapon destroys the firing squad itself
    if roll == 12:
        roll_action.result = 'Panzerfaust explodes, squad scattered!'
        self.RecordKO()
        self.alive = False
    elif roll > roll_action.roll_req:
        roll_action.result = 'Attack missed!'
    else:
        # determine hit location
        hit_location = GetHitLocation(tank.hull_down)
        if hit_location == 'Miss':
            roll_action.result = 'The shot misses because your tank is hull down.'
        elif hit_location == 'Track':
            roll_action.result = 'Your tank is hit in the track and is immobilized.'
            tank.moving = False
            tank.immobilized = True
        else:
            # hit in turret or hull
            hit = True
            roll_action.result = 'The panzerfaust hits your tank in the ' + hit_location + '!'
    WriteJournal(roll_action.result)
    # display to-hit result to player
    DisplayRoll(roll_action)
    UpdateMapOverlay()
    RenderEncounter()
    if hit:
        ##### Resolve Hit on Player #####
        # turret hits use the turret facing to determine the armour aspect hit
        if hit_location == 'Turret':
            if tank.turret_facing == self.map_hex.sector:
                facing = 'Front'
            elif GetSectorDistance(self.map_hex.sector, tank.turret_facing) == 3:
                facing = 'Rear'
            else:
                facing = 'Side'
        else:
            # hull aspect depends only on the attacker's sector
            if self.map_hex.sector == 4:
                facing = 'Front'
            elif self.map_hex.sector == 1:
                facing = 'Rear'
            else:
                facing = 'Side'
        # get To Kill number and update roll action
        (base_tk, roll_req, drm) = CalcTK(self, tank, facing, 'PF', False, False, hit_location)
        # if no chance to knock out, display that instead
        if roll_req < 2:
            ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'No chance to destroy.')
            del roll_action
            # NOTE(review): returns None although an attack did happen -- other
            # paths return True; confirm callers only truth-test the result
            return
        roll_action.hit_location = hit_location
        roll_action.score_req = base_tk
        roll_action.drm = drm
        roll_action.CalculateTotalDRM()
        roll_action.roll_req = roll_req
        ##### To kill Roll #####
        d1, d2, roll = Roll2D6()
        roll_action.d1 = d1
        roll_action.d2 = d2
        roll_action.roll = roll
        # rolling UNDER the requirement penetrates
        if roll < roll_req:
            roll_action.result = "Your tank's armour is penetrated by the hit!"
        else:
            roll_action.result = 'Your tank is unharmed.'
        # display to-kill result to player
        WriteJournal(roll_action.result)
        DisplayRoll(roll_action, tk_roll=True)
        RenderEncounter()
        # play armour saved sound if appropriate
        if roll >= roll_req:
            PlaySound('armour_save')
        else:
            # determine whether it was a critical penetration (natural 2 or
            # under half the required roll)
            crit = False
            if roll == 2 or roll < int(roll_req / 2):
                crit = True
            tank.Penetrate(hit_location, self.map_hex.sector, 'Panzerfaust', critical=crit)
        UpdateTankCon()
        RenderEncounter()
    del roll_action
    return True
# do a pin test or automatically pin
def PinTest(self, auto=False, modifier=0):
    """Pin this unit unless it passes a morale test (skipped when auto is True)."""
    # a passed morale test negates a non-automatic pin
    if self.MoraleTest(modifier=modifier) and not auto:
        return
    if self.pinned:
        # pinned a second time: pass a break test or be destroyed
        if not self.MoraleTest(break_test=True):
            self.RecordKO()
            self.alive = False
    self.pinned = True
    self.moving = False
# do a morale test
def MoraleTest(self, modifier=0, break_test=False):
    """Roll 2D6 against this unit's morale; return True on a pass.

    break_test applies terrain bonuses (cover makes the unit harder to break);
    a modified roll of 12 always fails.
    """
    (d1, d2, total) = Roll2D6()
    if break_test:
        # terrain bonuses: guns in the open are easiest to break free of
        if self.unit_class == 'AT_GUN' and self.terrain != 'Fortification':
            total -= 2
        elif self.terrain == 'Woods':
            total -= 1
        elif self.terrain == 'Building':
            total -= 2
        elif self.terrain == 'Fortification':
            total -= 3
    # a (modified) 12 is always a fail
    return total != 12 and total <= self.morale + modifier
# set morale level, only done at spawn
def SetMorale(self):
    """Roll this unit's morale level (7-10); done once at spawn."""
    (d1, d2, total) = Roll2D6()
    # lower rolls give better morale
    for (ceiling, morale) in ((3, 10), (5, 9), (8, 8)):
        if total <= ceiling:
            return morale
    return 7
# reset unit for a new turn
def Reset(self):
self.shot_at = False
# draw this unit on the map overlay
def DrawMe(self):
    """Draw this unit's map character, plus a terrain marker, on the overlay console."""
    # skip if inactive
    if not self.alive: return
    # set console colours based on spotting status
    # hidden
    if self.hidden:
        libtcod.console_set_default_background(overlay_con, libtcod.dark_grey)
        libtcod.console_set_default_foreground(overlay_con, libtcod.black)
    # unknown contact, not yet spotted
    elif not self.spotted:
        libtcod.console_set_default_background(overlay_con, ENEMY_COLOR)
        libtcod.console_set_default_foreground(overlay_con, libtcod.darker_grey)
    # spotted but unidentified
    elif self.unit_class in ['TANK', 'SPG', 'APC', 'AC'] and not self.identified:
        libtcod.console_set_default_background(overlay_con, ENEMY_COLOR)
        libtcod.console_set_default_foreground(overlay_con, libtcod.lighter_grey)
    # spotted (and identified where that matters)
    else:
        libtcod.console_set_default_background(overlay_con, ENEMY_COLOR)
        libtcod.console_set_default_foreground(overlay_con, libtcod.white)
    # if selected as target during a firing phase, highlight
    if battle.target == self and battle.phase in ['Fire Main Gun', 'Fire MGs']:
        libtcod.console_set_default_background(overlay_con, SELECTED_COLOR)
    # select the display character for the unit class
    if self.unit_class == 'TANK':
        char = libtcod.CHAR_RADIO_UNSET
    elif self.unit_class == 'SPG':
        char = '#'
    elif self.unit_class == 'APC':
        char = libtcod.CHAR_BULLET_INV
    elif self.unit_class == 'AC':
        char = libtcod.CHAR_RADIO_SET
    elif self.unit_class == 'AT_GUN':
        char = 'X'
    elif self.unit_class == 'MG':
        char = 'x'
    elif self.unit_class == 'LW':
        char = libtcod.CHAR_BLOCK1
    else:
        char = libtcod.CHAR_BULLET_SQUARE # TRUCK
    # print the character
    libtcod.console_put_char(overlay_con, self.x, self.y, char, flag=libtcod.BKGND_SET)
    # no terrain marker for hidden/unspotted units
    if self.hidden or not self.spotted: return
    # skip drawing terrain if animating
    if self.animating: return
    # add terrain indicator if any
    if self.terrain not in ['Hull Down', 'Woods', 'Building']: return
    if self.terrain == 'Hull Down':
        char = libtcod.CHAR_ARROW2_N
    elif self.terrain == 'Woods':
        char = libtcod.CHAR_SPADE
    elif self.terrain == 'Building':
        char = libtcod.CHAR_DVLINE
    # tint the marker with the map cell's background colour; draw on both sides
    bc = libtcod.console_get_char_background(map_con, self.x, self.y)
    fc = bc * libtcod.light_grey
    for x in [-1, 1]:
        libtcod.console_put_char_ex(overlay_con, self.x+x, self.y, char, fc, bc)
# rotate this unit's hex position around the player, used when player tank pivots
def RotatePosition(self, clockwise):
    """Rotate this unit's hex one sector around the player; used when the tank pivots."""
    # convert present coordinate from axial to cube
    cube_x = self.map_hex.hx
    cube_z = self.map_hex.hy
    cube_y = -cube_x - cube_z
    # apply the 60-degree cube rotation
    if clockwise:
        (new_x, new_z) = (-cube_y, -cube_x)
    else:
        (new_x, new_z) = (-cube_z, -cube_y)
    # find the map hex at the rotated location and move there
    for candidate in battle.maphexes:
        if (candidate.hx, candidate.hy) == (new_x, new_z):
            self.map_hex = candidate
            (self.x, self.y) = self.GetCharLocation()
            return
    print ('ERROR: could not find hex ' + str(new_x) + ',' + str(new_z))
# record this unit's destruction in the battle record
def RecordKO(self, friendly=False, left_behind=False, advance_fire=False):
    """Record this unit's destruction in the battle record and campaign stats.

    friendly -- destroyed by friendly (non-player) action
    left_behind -- forced off the board by player movement, not destroyed
    advance_fire -- destroyed by advancing fire (crew still earns exp)
    """
    if not left_behind and friendly:
        text = self.GetDesc() + ' was destroyed by friendly action'
        WriteJournal(text)
    # determine index number for this unit in the lists of units destroyed:
    # LW/MG, Truck, APC/AC, SPG, PzKw IV H, PzKw V G, PzKw VI E/B, AT Gun
    index = -1
    if self.unit_class in ['LW', 'MG']:
        index = 0
    elif self.unit_class == 'TRUCK':
        index = 1
    elif self.unit_class in ['APC', 'AC']:
        index = 2
    elif self.unit_class == 'SPG':
        index = 3
    elif self.unit_type == 'PzKw IV H':
        index = 4
    elif self.unit_type == 'PzKw V G':
        index = 5
    elif self.unit_type in ['PzKw VI E', 'PzKw VI B']:
        index = 6
    elif self.unit_class == 'AT_GUN':
        index = 7
    if index < 0:
        print ('RecordKO() error: could not find unit type')
        return
    # credit the kill to the right record list
    if friendly:
        battle.friendly_ko_record[index] += 1
    elif left_behind:
        battle.left_behind[index] += 1
    else:
        battle.tank_ko_record[index] += 1
    # award exp if destroyed by player tank or by advancing fire
    if (not friendly and not left_behind) or advance_fire:
        for crew in tank.crew:
            crew.AwardExp(1)
    # add to campaign stats (units merely left behind are not counted)
    if not left_behind:
        if index == 0:
            if not friendly:
                campaign.AddStat('Infantry Destroyed by Player', 1)
            else:
                campaign.AddStat('Infantry Destroyed by Allies', 1)
        elif index == 7:
            if not friendly:
                campaign.AddStat('AT Guns Destroyed by Player', 1)
            else:
                campaign.AddStat('AT Guns Destroyed by Allies', 1)
        elif index > 2 and index < 7:
            # SPGs and all tank types
            if not friendly:
                campaign.AddStat('Tanks & SPGs Destroyed by Player', 1)
            else:
                campaign.AddStat('Tanks & SPGs Destroyed by Allies', 1)
        else:
            # trucks, APCs and ACs
            if not friendly:
                campaign.AddStat('Other Vehicles Destroyed by Player', 1)
            else:
                campaign.AddStat('Other Vehicles Destroyed by Allies', 1)
# set or redetermine the facing for this unit
# if facing has changed, return True
def SetFacing(self):
    """Roll a new facing for this unit; return True if the facing changed."""
    result = Roll1D10()
    # (highest roll for Front, highest roll for Side) per unit class;
    # anything above the second value is Rear
    if self.unit_class in ['SPG', 'AT_GUN']:
        (front_max, side_max) = (6, 9)
    elif self.unit_class == 'TANK':
        (front_max, side_max) = (5, 9)
    elif self.unit_class in ['TRUCK', 'APC', 'AC']:
        (front_max, side_max) = (3, 7)
    else:
        # infantry classes have no facing
        return False
    if result <= front_max:
        new_facing = 'Front'
    elif result <= side_max:
        new_facing = 'Side'
    else:
        new_facing = 'Rear'
    # report back whether the facing actually changed
    if new_facing == self.facing:
        return False
    self.facing = new_facing
    return True
# set or redetermine the terrain for this unit
def SetTerrain(self):
    """Roll and set the terrain type this unit occupies; may also set it moving."""
    # list of terrain types; table values below are 1D10 ceilings per type
    TERRAIN_TYPE = [
        'Hull Down', 'Woods', 'Building', 'Open'
    ]
    # infantry unit chart
    INFANTRY_TERRAIN = [
        [0, 2, 8, 10], # Area A: farm buildings and fields
        [0, 3, 5, 10], # Area B: fields
        [0, 1, 6, 10], # Area C: village
        [0, 6, 7, 10], # Area D: Woods
        [0, 4, 5, 10], # Area F: Bocage
    ]
    # vehicle unit chart
    VEHICLE_TERRAIN = [
        [4, 6, 0, 10], # Area A
        [2, 3, 0, 10], # Area B
        [5, 6, 0, 10], # Area C
        [2, 7, 0, 10], # Area D
        [7, 8, 0, 10]  # Area F
    ]
    # determine table row to use from the current map area type
    AREA_TYPES = ['A', 'B', 'C', 'D', 'F']
    table_row = AREA_TYPES.index(campaign.day_map.player_node.node_type)
    # do roll
    result = Roll1D10()
    # modifier for counterattack missions (pushes toward open terrain)
    if campaign.scen_type == 'Counterattack' or battle.counterattack:
        result += 2
        if result > 10:
            result = 10
    # infantry units (AT guns use the infantry chart)
    if self.unit_class in ['LW', 'MG', 'AT_GUN']:
        n = 0
        for value in INFANTRY_TERRAIN[table_row]:
            if result <= value:
                terrain = TERRAIN_TYPE[n]
                break
            n += 1
        # fortification case: a Building result in a Battle in area B or D
        # NOTE(review): the elif below pairs with this outer if in this
        # reconstruction (original indentation was lost) -- confirm nesting
        if campaign.scen_type == 'Battle' and campaign.day_map.player_node.node_type in ['B', 'D']:
            if n == 2:
                terrain = 'Fortification'
        # special for LW: a 10 (Open) sets the squad moving
        elif self.unit_class == 'LW' and result == 10:
            self.moving = True
    # vehicle units
    else:
        n = 0
        for value in VEHICLE_TERRAIN[table_row]:
            if result <= value:
                terrain = TERRAIN_TYPE[n]
                break
            n += 1
        if 9 <= result <= 10:
            self.moving = True # moving if not already
    # set unit terrain type
    self.terrain = terrain
# returns a short string of text to describe this unit
# if just spawned, return a simpler description
def GetDesc(self, new_spawn=False):
# return a descriptive string for this unit's class, or type if does
# not need to be identified
def GetClassDesc():
# LW and MG are simple
if self.unit_class in ['LW', 'MG']:
return self.unit_type
# AT Guns are a little more complicated
elif self.unit_class == 'AT_GUN':
if not self.identified:
return 'Anti-Tank Gun'
if self.unit_type == '50L':
text = 'PaK 38'
elif self.unit_type == '75L':
text = 'PaK 40'
elif self.unit_type == '88LL':
text = 'PaK 43'
return text + ' Anti-Tank Gun'
# Tanks
elif self.unit_class == 'TANK':
if not self.identified:
return 'Tank'
return self.unit_type + ' Tank'
# SPGs
elif self.unit_class == 'SPG':
if not self.identified:
return 'Self-propelled Gun'
return self.unit_type + ' Self-propelled Gun'
# Trucks
elif self.unit_class == 'TRUCK':
return self.unit_type + ' Truck'
# APCs
elif self.unit_class == 'APC':
return self.unit_type + ' Armoured Personel Carrier'
# ACs
elif self.unit_class == 'AC':
return self.unit_type + ' Armoured Car'
# should never get as far as this point, but just in case
return ''
# just appeared, return a simple description
if new_spawn:
return GetClassDesc()
# unit is hidden
if self.hidden:
return 'Hidden ' + GetClassDesc()
# unit is spotted but needs to be identified
if self.spotted and self.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not self.identified:
return 'Unidentified ' + GetClassDesc()
# unit is not spotted but had been previously identified
if not self.spotted:
return 'Unspotted ' + GetClassDesc()
# unit is spotted, and is either identified or doesn't need to be
return GetClassDesc()
# the player tank has moved forward or backward, so shift this enemy unit accordingly
def YMove(self, y_change):
    """Shift this unit up or down the map after the player tank moves forward/back."""
    # special case: never land on the player hex (0,0) -- jump one further
    if self.map_hex.hx == 0 and self.map_hex.hy + y_change == 0:
        y_change = -2 if y_change == -1 else 2
    target_hy = self.map_hex.hy + y_change
    for candidate in battle.maphexes:
        if candidate.hx == self.map_hex.hx and candidate.hy == target_hy:
            # destination exists on the board; move there
            self.map_hex = candidate
            self.moving = True
            # re-determine draw location
            (self.x, self.y) = self.GetCharLocation()
            # movement breaks hidden status
            if self.hidden:
                self.hidden = False
            # redraw the screen to reflect new position
            UpdateMapOverlay()
            RenderEncounter()
            return
    # no destination hex: the unit has been pushed off the board
    Message(self.GetDesc() + ' is no longer in the area')
    self.alive = False
    self.RecordKO(left_behind=True)
    UpdateMapOverlay()
    RenderEncounter()
    return
# unit moves closer to or further away from the player tank
def DistMove(self, dist):
    """Move this unit one range band closer to (-1) or further from (+1) the player.

    Returns True if the unit moved (or left the map), False otherwise.
    """
    if self.immobile:
        return False
    # mud or deep snow: 50% chance the move is abandoned (action re-rolled)
    if campaign.weather.ground in ['Mud', 'Deep Snow']:
        if Roll1D6() <= 3:
            return False
    # moving beyond range band 2 means leaving the map entirely
    if self.map_hex.rng + dist == 3:
        # 50% chance this action is re-rolled instead
        if Roll1D6() <= 3:
            return False
        self.alive = False
        Message(self.GetDesc() + ' has left the area.')
        UpdateMapOverlay()
        RenderEncounter()
        return True
    # otherwise collect candidate destination hexes
    if self.map_hex.rng == 0 and dist == -1:
        # closing from band 0: any other non-adjacent band-0 hex,
        # which skips over the player hex
        candidates = [h for h in battle.maphexes
            if h.rng == self.map_hex.rng and not IsAdjacent(self.map_hex, h) and h != self.map_hex]
    else:
        candidates = [h for h in battle.maphexes
            if h.rng == self.map_hex.rng + dist and IsAdjacent(self.map_hex, h)]
    # couldn't find a good hex to move to
    if not candidates:
        return False
    # do the move, animate it, then apply movement side effects
    (old_x, old_y) = (self.x, self.y)
    self.map_hex = random.choice(candidates)
    self.MoveAnimation(old_x, old_y)
    self.MoveEffects()
    return True
# unit moves laterally around the player, clockwise or counter clockwise
def LateralMove(self):
    """Move this unit one hex sideways around the player, staying in its range band.

    Returns True if the unit moved, False otherwise.
    """
    if self.immobile:
        return False
    # mud or deep snow: 50% chance the move is abandoned (action re-rolled)
    if campaign.weather.ground in ['Mud', 'Deep Snow']:
        if Roll1D6() <= 3:
            return False
    # candidate hexes: adjacent and in the same range band
    candidates = [h for h in battle.maphexes
        if h.rng == self.map_hex.rng and IsAdjacent(self.map_hex, h)]
    if not candidates:
        return False
    # do the move, animate it, then apply movement side effects
    (old_x, old_y) = (self.x, self.y)
    self.map_hex = random.choice(candidates)
    self.MoveAnimation(old_x, old_y)
    self.MoveEffects()
    return True
# show an animation of the unit moving, also triggers calculation of new character location
def MoveAnimation(self, old_x, old_y):
    """Animate the unit moving from (old_x, old_y) to its new map hex.

    Also recalculates the unit's character draw location.
    """
    (new_x, new_y) = self.GetCharLocation()
    # infantry and vehicles get different movement sounds; AT guns are silent
    if self.unit_class in ['LW', 'MG']:
        PlaySound('infantry_moving')
    elif self.unit_class not in ['AT_GUN', 'MG', 'LW']:
        PlaySound('engine_noise')
    # with animations off, jump straight to the destination
    if not campaign.animations:
        (self.x, self.y) = (new_x, new_y)
        UpdateMapOverlay()
        RenderEncounter()
        return
    # step along the line between old and new positions, redrawing each step
    self.animating = True
    for (step_x, step_y) in GetLine(old_x, old_y, new_x, new_y):
        (self.x, self.y) = (step_x, step_y)
        UpdateMapOverlay()
        RenderEncounter()
        Wait(50)
    self.animating = False
    UpdateMapOverlay()
    RenderEncounter()
# apply effects of unit movement
def MoveEffects(self):
    """Apply the side effects of movement: lose spotting and acquisition, re-roll facing and terrain."""
    self.moving = True
    self.spotted = False
    self.hidden = False
    self.acquired = 0
    self.acquired_player = 0
    # armoured cars lose their player-spotting status when they move
    if self.unit_class == 'AC':
        self.spotting_player = False
    self.SetFacing()
    self.SetTerrain()
# do an attack against friendly infantry
def AttackInfantry(self):
    """Resolve this unit's attack against friendly (non-player) infantry.

    Returns True if the unit acted (fired or turned to face the player),
    False if it could not act.  (The automatic-kill path returns None.)
    """
    x, y = self.x+MAP_CON_X, self.y+MAP_CON_Y
    # if unit is Hidden, can't attack
    if self.hidden: return False
    # if unit is at medium or long range and weather is foggy or falling snow,
    # can't attack
    if self.map_hex.rng > 0 and (campaign.weather.fog or campaign.weather.precip == 'Snow'):
        return False
    # vehicle not facing the player turns to face instead of firing
    if self.unit_class not in ['LW', 'MG'] and self.facing != 'Front':
        if self.immobile: return False
        ShowLabel(x, y, self.GetDesc() + ' turns to face you.')
        self.facing = 'Front'
        return True
    ShowLabel(x, y, self.GetDesc() + ' fires at friendly infantry.')
    self.moving = False
    # firing sound depends on unit class
    if self.unit_class == 'LW':
        PlaySound('german_rifle_fire')
    elif self.unit_class in ['TANK', 'SPG', 'MG', 'APC', 'AC']:
        PlaySound('german_mg_fire')
    # display firing animation
    MGAnimation(self.x+MAP_CON_X, self.y+MAP_CON_Y,
        MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y)
    # reset acquired player as target
    self.acquired_player = 0
    # set flag for spotting
    self.fired = True
    # do roll
    result = Roll1D100()
    # automatic kill on a 3 or less
    if result <= 3:
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' destroys a friendly infantry squad.')
        battle.inf_lost += 1
        # NOTE(review): returns None here while other acting paths return True
        return
    # otherwise, determine percentage to-kill number by class and range band
    if self.unit_class in ['TANK', 'SPG']:
        if self.map_hex.rng == 0:
            tk_num = 65 # close range
        elif self.map_hex.rng == 1:
            tk_num = 40 # medium range
        else:
            tk_num = 10 # long range
    elif self.unit_class in ['AC', 'LW']:
        if self.map_hex.rng == 0:
            tk_num = 30
        elif self.map_hex.rng == 1:
            tk_num = 20
        else:
            tk_num = 3
    else:
        # MG / APC
        if self.map_hex.rng == 0:
            tk_num = 55
        elif self.map_hex.rng == 1:
            tk_num = 30
        else:
            tk_num = 3
    # apply smoke modifier: each smoke factor halves the kill chance
    smoke_factors = GetSmokeFactors(0, 0, self.map_hex.hx, self.map_hex.hy)
    if smoke_factors > 0:
        tk_num = int(ceil(float(tk_num) * float(0.5**smoke_factors)))
    # check roll against tk number
    if result <= tk_num:
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' destroys a friendly infantry squad.')
        battle.inf_lost += 1
    else:
        ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'No effect.')
    return True
# do an attack against a friendly tank
def AttackTank(self):
    """Resolve this unit's attack against a friendly (non-player) tank.

    Returns True if the unit acted (fired or turned), False if it could not
    act, and None on an unrecognized gun type.
    """
    x, y = self.x+MAP_CON_X, self.y+MAP_CON_Y
    # if unit is Hidden, can't attack
    if self.hidden: return False
    # if unit is at medium or long range and weather is foggy or falling snow,
    # can't attack
    if self.map_hex.rng > 0 and (campaign.weather.fog or campaign.weather.precip == 'Snow'):
        return False
    # only tanks, SPGs, and AT Guns will attack friendly tanks
    if self.unit_class not in ['TANK', 'SPG', 'AT_GUN']:
        return False
    # unit not facing the player turns to face instead of firing
    if self.facing != 'Front':
        if self.immobile: return False
        ShowLabel(x, y, self.GetDesc() + ' turns to face you.')
        self.facing = 'Front'
        return True
    ShowLabel(x, y, self.GetDesc() + ' fires at a friendly tank.')
    self.moving = False
    # get the main gun type and play firing sound
    gun_type = self.stats['main_gun']
    soundfile = GetFiringSound(gun_type)
    if soundfile is not None:
        PlaySound(soundfile)
    # do firing animation
    MainGunAnimation(self.x + MAP_CON_X, self.y + MAP_CON_Y,
        MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y)
    # reset acquired player as target
    self.acquired_player = 0
    # set flag for spotting
    self.fired = True
    # determine percentage to-kill number by gun type and range band
    if gun_type == '50L':
        if self.map_hex.rng == 0:
            tk_num = 15
        elif self.map_hex.rng == 1:
            tk_num = 5
        else:
            tk_num = 1
    elif gun_type == '75L':
        if self.map_hex.rng == 0:
            tk_num = 52
        elif self.map_hex.rng == 1:
            tk_num = 40
        else:
            tk_num = 22
    elif gun_type == '75LL':
        if self.map_hex.rng == 0:
            tk_num = 68
        elif self.map_hex.rng == 1:
            tk_num = 66
        else:
            tk_num = 61
    elif gun_type == '88L':
        if self.map_hex.rng == 0:
            tk_num = 68
        elif self.map_hex.rng == 1:
            tk_num = 63
        else:
            tk_num = 43
    elif gun_type == '88LL':
        if self.map_hex.rng == 0:
            tk_num = 68
        elif self.map_hex.rng == 1:
            tk_num = 66
        else:
            tk_num = 61
    else:
        Message('ERROR: Unrecognized gun type in AttackTank()')
        return
    # apply AT gun rotation modifier
    # NOTE(review): facing is always 'Front' by this point (checked above),
    # so these penalties appear unreachable -- confirm intent
    if self.unit_class == 'AT_GUN':
        if self.facing == 'Side':
            if self.unit_type == '88LL':
                tk_num -= 10
            else:
                tk_num -= 20
        elif self.facing == 'Rear':
            if self.unit_type == '88LL':
                tk_num -= 20
            else:
                tk_num -= 30
        self.facing = 'Front'
    # apply smoke modifier: each smoke factor halves the kill chance
    smoke_factors = GetSmokeFactors(0, 0, self.map_hex.hx, self.map_hex.hy)
    if smoke_factors > 0:
        tk_num = int(ceil(float(tk_num) * float(0.5**smoke_factors)))
    # do roll and check against tk number
    result = Roll1D100()
    if result <= tk_num:
        PlaySound('tank_knocked_out')
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' destroys a friendly tank.')
        battle.tanks_lost += 1
    else:
        ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'No effect.')
    # NEW: if unit is unidentified, a crewmember indicates the calibre of gun heard
    if self.hidden or not self.identified:
        CrewTalk('That sounded like a ' + gun_type[:2] + 'mm gun!')
    return True
# attack the player tank
def AttackPlayer(self):
    """Resolve this unit's attack against the player tank.

    Small-arms classes (LW, MG, APC, AC) do light-weapons damage via
    tank.LWAttack(); gunnery classes (TANK, SPG, AT_GUN) make a full
    to-hit / to-kill attack.  Returns True if the unit acted (fired or
    turned to face), False if it could not act.  The PF sub-attack path
    returns None (see note below).

    Fix: the overheard-calibre report used self.stats['main_gun'][:1],
    yielding e.g. "7mm" for a '75L' gun; it now takes the first two
    characters ('50'/'75'/'88'), matching the same report in AttackTank().
    """
    x, y = self.x+MAP_CON_X, self.y+MAP_CON_Y
    # if unit is Hidden, can't attack
    if self.hidden: return False
    # if unit is at medium or long range and weather is foggy or falling snow,
    # can't attack
    if self.map_hex.rng > 0 and (campaign.weather.fog or campaign.weather.precip == 'Snow'):
        return False
    # a vehicle or gun not facing the player turns to face instead of firing
    if self.unit_class not in ['LW', 'MG'] and self.facing != 'Front':
        if self.immobile: return False
        ShowLabel(x, y, self.GetDesc() + ' turns to face you.')
        self.facing = 'Front'
        return True
    # if LW armed with PF, may do a PF attack instead
    if self.unit_class == 'LW':
        # NOTE(review): returns None when the PF attack happens; other acting
        # paths return True -- confirm callers only truth-test the result
        if self.PFAttack(): return
    ShowLabel(x, y, self.GetDesc() + ' fires at you!')
    self.moving = False
    # flags for the type of crew response at the end
    shot_missed = False
    armour_saved = False
    # play firing sound and show firing animation by unit class
    if self.unit_class in ['TANK', 'SPG', 'AT_GUN']:
        soundfile = GetFiringSound(self.stats['main_gun'])
        if soundfile is not None:
            PlaySound(soundfile)
        MainGunAnimation(self.x + MAP_CON_X, self.y + MAP_CON_Y,
            MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y)
    elif self.unit_class == 'LW':
        PlaySound('german_rifle_fire')
        MGAnimation(self.x+MAP_CON_X, self.y+MAP_CON_Y,
            MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y)
    elif self.unit_class in ['MG', 'APC', 'AC']:
        PlaySound('german_mg_fire')
        MGAnimation(self.x+MAP_CON_X, self.y+MAP_CON_Y,
            MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y)
    # set flag for spotting
    self.fired = True
    # if unit is APC, AC, MG, or LW, apply light weapons damage instead
    if self.unit_class not in ['TANK', 'SPG', 'AT_GUN']:
        tank.LWAttack()
        return True
    # calculate modifiers and final dice roll required to hit
    (base_th, roll_req, drm) = CalcTH(self, tank, False, 'AP')
    # if attacker is AT Gun, it is now rotated to face the player
    if self.unit_class == 'AT_GUN':
        self.facing = 'Front'
    # if hit not possible
    if roll_req < 2:
        ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'The attack cannot hit you.')
        return True
    # create roll action to hold details about the action
    roll_action = RollAction()
    roll_action.attacker_unit_type = self.unit_type
    # mark if attacker is unspotted or still needs to be identified
    if not self.spotted or (self.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not self.identified):
        roll_action.attacker_unidentified = True
    roll_action.attacker = self.GetDesc()
    roll_action.attack_type = self.stats['main_gun'].replace('L', '') + 'mm AP'
    roll_action.target_unit_type = tank.unit_type
    roll_action.target = tank.stats['vehicle_type'] + ' "' + tank.name + '"'
    if self.map_hex.rng == 0:
        roll_action.rng = 'Close'
    elif self.map_hex.rng == 1:
        roll_action.rng = 'Medium'
    else:
        roll_action.rng = 'Long'
    roll_action.score_req = base_th
    roll_action.drm = drm
    roll_action.CalculateTotalDRM()
    roll_action.roll_req = roll_req
    ##### To-hit Roll #####
    d1, d2, roll = Roll2D6()
    roll_action.d1 = d1
    roll_action.d2 = d2
    roll_action.roll = roll
    if roll <= roll_req:
        # determine hit location
        hit_location = GetHitLocation(tank.hull_down)
        if hit_location == 'Miss':
            roll_action.result = 'The shot misses because your tank is hull down.'
        elif hit_location == 'Track':
            roll_action.result = 'Your tank is hit in the track and is immobilized.'
            tank.moving = False
            tank.immobilized = True
            # re-build orders list for Driver to disable movement orders
            GetCrewByPosition('Driver').BuildOrdersList()
        else:
            # hit in turret or hull
            roll_action.result = 'Your tank is hit in the ' + hit_location
            # display to player
            DisplayRoll(roll_action)
            UpdateTankCon()
            RenderEncounter()
            ##### Resolve Hit on Player #####
            # determine the armour aspect hit: turret hits depend on turret
            # facing, hull hits depend only on the attacker's sector
            if hit_location == 'Turret':
                if tank.turret_facing == self.map_hex.sector:
                    facing = 'Front'
                elif GetSectorDistance(self.map_hex.sector, tank.turret_facing) == 3:
                    facing = 'Rear'
                else:
                    facing = 'Side'
            else:
                if self.map_hex.sector == 4:
                    facing = 'Front'
                elif self.map_hex.sector == 1:
                    facing = 'Rear'
                else:
                    facing = 'Side'
            # get To Kill number and update roll action
            (base_tk, roll_req, drm) = CalcTK(self, tank, facing, 'AP', False, False, hit_location)
            # if no chance to knock out, display that instead
            if roll_req < 2:
                ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'No chance to destroy.')
                del roll_action
                return
            roll_action.hit_location = hit_location
            roll_action.score_req = base_tk
            roll_action.drm = drm
            roll_action.CalculateTotalDRM()
            roll_action.roll_req = roll_req
            ##### To kill Roll #####
            d1, d2, roll = Roll2D6()
            roll_action.d1 = d1
            roll_action.d2 = d2
            roll_action.roll = roll
            # rolling UNDER the requirement penetrates
            if roll < roll_req:
                roll_action.result = "Your tank's armour is penetrated by the hit!"
            else:
                roll_action.result = 'Your tank is unharmed.'
                # NOTE(review): this path returns below before the crew-talk
                # check at the bottom, so armour_saved never triggers it
                armour_saved = True
            DisplayRoll(roll_action, tk_roll=True)
            # play armour saved or ap hit sound
            if roll >= roll_req:
                PlaySound('armour_save')
            else:
                PlaySound('ap_hit')
            UpdateTankCon()
            RenderEncounter()
            if roll < roll_req:
                # determine whether it was a critical penetration (natural 2
                # or under half the required roll)
                crit = False
                if roll == 2 or roll < int(roll_req / 2):
                    crit = True
                tank.Penetrate(hit_location, self.map_hex.sector, self.stats['main_gun'], critical=crit)
            else:
                # save vs. stun for crew
                for crewman in tank.crew:
                    if crewman.StunCheck(10):
                        PopUp(crewman.name + ' is Stunned from the impact!')
            del roll_action
            return True
    else:
        roll_action.result = 'Shot misses!'
        shot_missed = True
    # display to player then delete roll action object
    DisplayRoll(roll_action)
    del roll_action
    UpdateTankCon()
    RenderEncounter()
    if shot_missed:
        PlaySound('main_gun_miss')
    # NEW: if unit is unidentified, a crewmember indicates the calibre of gun heard
    if self.hidden or not self.identified:
        text = self.stats['main_gun']
        # FIX: take the first two characters ('50'/'75'/'88'), not one
        CrewTalk('That sounded like a ' + text[:2] + 'mm gun!')
    # possible crew talk
    roll = Roll1D10()
    if armour_saved and roll <= 4:
        CrewTalk(random.choice(CREW_TALK_ARMOUR_SAVED))
    elif shot_missed and roll <= 2:
        CrewTalk(random.choice(CREW_TALK_SHOT_MISSED))
    return True
# resolve outstanding hits against this unit
def ResolveHits(self):
    """Resolve every hit recorded in self.hit_record against this unit.

    Infantry-class targets (LW, MG, AT_GUN) are resolved on the IFT;
    vehicle targets get a hit location and an armour To-Kill roll.
    Side effects: may destroy, pin, stun, or immobilize the unit, dismount
    passengers from APCs, write journal entries, play sounds, and redraw
    the encounter screen. Clears self.hit_record when done.
    """
    # set flag for infantry/vehicle unit
    if self.unit_class in ['LW', 'MG', 'AT_GUN']:
        vehicle = False
    else:
        vehicle = True
    for gun_hit in self.hit_record:
        # if a previous hit has destroyed us, skip all remaining hit resolution
        if not self.alive: continue
        # WP hits have no effect on vehicles
        if gun_hit.ammo_type == 'WP' and vehicle:
            continue
        text = 'Resolving '
        if gun_hit.critical:
            text += 'critical '
        text += gun_hit.ammo_type + ' hit against ' + self.GetDesc()
        Message(text)
        # resolve WP hit on infantry: a pin test only, never a kill roll
        if gun_hit.ammo_type == 'WP':
            self.PinTest()
            if not self.alive:
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' surrenders!')
            elif self.pinned:
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' is pinned!')
            else:
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, 'WP has no additional effect.')
            continue
        if not vehicle:
            # if ammo was AP type, no effect
            if gun_hit.ammo_type in ['AP', 'HVAP', 'APDS']:
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, 'AP has no effect against infantry targets.')
                continue
            # get TK roll info from IFT
            (base_tk, roll_req, drm) = CalcIFT(tank, self, gun_hit.gun_calibre, gun_hit.critical, gun_hit.area_fire)
        # unit is vehicle
        else:
            # determine hit location and check for hull down / immobilized
            hit_location = GetHitLocation((self.terrain == 'Hull Down'))
            if hit_location == 'Miss':
                PopUp(self.GetDesc() + ' is hulldown and was unharmed by hit.')
                continue
            elif hit_location == 'Track':
                PopUp(self.GetDesc() + ' is immobilized.')
                WriteJournal(self.GetDesc() + ' was immobilized by a ' + gun_hit.ammo_type + ' hit from ' + tank.name)
                self.immobile = True
                self.moving = False
                # passengers bail out of an immobilized APC
                if self.unit_class == 'APC':
                    if self.full_apc:
                        self.DismountInfantry(under_fire=True)
                continue
            # get TK roll info
            (base_tk, roll_req, drm) = CalcTK(tank, self, self.facing, gun_hit.ammo_type, gun_hit.critical, gun_hit.area_fire, hit_location)
        # create roll action to hold details about the action
        roll_action = RollAction()
        # input details to roll action object
        roll_action.attacker_unit_type = tank.unit_type
        roll_action.attacker = tank.stats['vehicle_type'] + ' "' + tank.name + '"'
        roll_action.attack_type = gun_hit.gun_calibre + 'mm ' + gun_hit.ammo_type
        # mark if self as target is unspotted or needs to be identified
        if not self.spotted or (self.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not self.identified):
            roll_action.target_unidentified = True
        roll_action.target_unit_type = self.unit_type
        roll_action.target = self.GetDesc()
        if self.map_hex.rng == 0:
            roll_action.rng = 'Close'
        elif self.map_hex.rng == 1:
            roll_action.rng = 'Medium'
        else:
            roll_action.rng = 'Long'
        if vehicle:
            roll_action.hit_location = hit_location
        roll_action.score_req = base_tk
        roll_action.drm = drm
        roll_action.CalculateTotalDRM()
        roll_action.roll_req = roll_req
        # if KO is impossible or auto, skip the dice roll entirely
        if roll_req <= 2 or roll_req > 12:
            if roll_req <= 2:
                roll_action.nc = True
            else:
                roll_action.auto_ko = True
            DisplayRoll(roll_action, tk_roll=True)
            del roll_action
            if roll_req > 12:
                WriteJournal(self.GetDesc() + ' was destroyed by a ' + gun_hit.ammo_type + ' hit from ' + tank.name)
                self.RecordKO()
                self.alive = False
                if self.unit_class == 'APC':
                    if self.full_apc:
                        self.DismountInfantry(under_fire=True)
                UpdateMapOverlay()
            else:
                # play armour save sound if appropriate
                if gun_hit.ammo_type in ['AP', 'HVAP', 'APDS'] and vehicle:
                    PlaySound('armour_save')
            RenderEncounter()
            continue
        ##### To-Kill Roll #####
        d1, d2, roll = Roll2D6()
        roll_action.d1 = d1
        roll_action.d2 = d2
        roll_action.roll = roll
        # Destroyed
        if roll < roll_req:
            roll_action.result = self.GetDesc() + ' is destroyed!'
            WriteJournal(self.GetDesc() + ' was destroyed by a ' + gun_hit.ammo_type + ' hit from ' + tank.name)
            self.RecordKO()
            self.alive = False
            if self.unit_class == 'APC':
                if self.full_apc:
                    self.DismountInfantry(under_fire=True)
        # Pinned / Stunned
        # Infantry is easier to pin if shot at with MG
        elif roll == roll_req or (not vehicle and roll == roll_req + 1):
            # automatic pin for infantry, automatic stun for vehicles
            if not vehicle:
                self.PinTest(auto=True)
                if not self.alive:
                    roll_action.result = self.GetDesc() + ' is Broken and destroyed!'
                    WriteJournal(self.GetDesc() + ' was broken and destroyed by a ' + gun_hit.ammo_type + ' hit from ' + tank.name)
                else:
                    roll_action.result = self.GetDesc() + ' is Pinned.'
            else:
                # double stun: a second stun forces a break test or the crew abandons
                if self.stunned:
                    if not self.MoraleTest(break_test=True):
                        roll_action.result = self.GetDesc() + ' is Stunned again and abandoned!'
                        WriteJournal(self.GetDesc() + ' was Stunned by a ' + gun_hit.ammo_type + ' hit from ' + tank.name + ' and abandoned')
                        self.RecordKO()
                        self.alive = False
                    else:
                        roll_action.result = self.GetDesc() + ' remains Stunned.'
                else:
                    roll_action.result = self.GetDesc() + ' is Stunned!'
                    self.stunned = True
                    self.moving = False
                    # ACs stop spotting the player tank
                    if self.unit_class == 'AC':
                        if self.spotting_player:
                            self.spotting_player = False
        # did not penetrate; infantry must test to avoid pinning
        else:
            roll_action.result = self.GetDesc() + ' is unharmed'
            if not vehicle:
                # apply difference in scores as modifier
                self.PinTest(modifier = roll - roll_req)
                if self.pinned:
                    roll_action.result += ' but pinned'
            roll_action.result += '.'
        # display results to player then delete attack object
        DisplayRoll(roll_action, tk_roll=True)
        del roll_action
        # play tank knocked out sound if appropriate
        if vehicle and not self.alive:
            PlaySound('tank_knocked_out')
        # play armour save sound if appropriate
        elif vehicle and self.alive and gun_hit.ammo_type in ['AP', 'HVAP', 'APDS']:
            PlaySound('armour_save')
        UpdateMapOverlay()
        RenderEncounter()
    # all hits processed: clear hit record
    self.hit_record = []
# work out friendly action against this unit
def FriendlyAction(self, flanking_fire=False, artillery=False, air_strike=False, advance_fire=False):
    """Resolve supporting friendly-force fire against this enemy unit.

    The keyword flags select the kind of attack (flanking fire from the
    Random Events table, artillery, air strike, or advancing fire) and
    its modifiers. A lower modified roll is more deadly. Returns True if
    the unit was destroyed, pinned, or hit with smoke; False on no
    effect; None (after printing an error) for an unrecognized unit type.
    """
    # do base dice roll
    d1, d2, roll = Roll2D6()
    # natural 2 always destroys
    if roll == 2:
        if self.unit_class in ['TANK', 'SPG']:
            PlaySound('tank_knocked_out')
        text = self.GetDesc() + ' is destroyed by friendly action!'
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, text)
        self.RecordKO(friendly=True, advance_fire=advance_fire)
        self.alive = False
        UpdateMapOverlay()
        RenderEncounter()
        return True
    # apply modifiers
    mod_roll = roll
    # flanking fire attack from Random Events table
    if flanking_fire:
        mod_roll -= 1
    # artillery attack against infantry unit in woods (air bursts)
    if artillery and self.unit_class in ['LW', 'MG', 'AT_GUN']:
        if self.terrain == 'Woods':
            mod_roll -= 1
    # attack vs. LW or MG if friendly infantry squad have been lost
    if self.unit_class in ['LW', 'MG']:
        if 2 < battle.inf_lost < 4:
            mod_roll += 1
        elif battle.inf_lost >= 4:
            mod_roll += 2
    # attack vs. vehicle if friendly tanks have been lost
    # or air strike modifier
    if self.unit_class in ['TANK', 'SPG', 'APC', 'AC', 'TRUCK']:
        if not air_strike:
            if 1 < battle.tanks_lost < 3:
                mod_roll += 1
            elif battle.tanks_lost >= 3:
                mod_roll += 2
        else:
            mod_roll -= 1
    # artillery fire or advancing fire against vehicles, except trucks
    if (artillery or advance_fire) and self.unit_class in ['TANK', 'SPG', 'APC', 'AC']:
        mod_roll += 2
    # advancing fire is less effective, especially at longer ranges
    if advance_fire:
        if self.map_hex.rng == 0:
            mod_roll += 1
        elif self.map_hex.rng == 1:
            mod_roll += 2
        else:
            mod_roll += 4
    # smoke factors in target's zone unless air strike or artillery
    if not air_strike and not artillery:
        total_smoke = 0
        for map_hex in battle.maphexes:
            if self.map_hex.rng == map_hex.rng and self.map_hex.sector == map_hex.sector:
                total_smoke += map_hex.smoke_factors
        if total_smoke > 0:
            mod_roll += total_smoke
    # air strikes less effective than targeted fire
    if air_strike:
        mod_roll += 2
    # turret radio broken
    if 'Radio Malfunction' in tank.damage_list or 'Radio Broken' in tank.damage_list:
        mod_roll += 2
    # British and Commonwealth forces get a bonus to arty attacks
    if artillery and campaign.player_nation in ['CAN', 'UK']:
        mod_roll -= 2
    # calculate TK number required (modified roll must be <= this to destroy)
    if self.unit_class == 'TRUCK':
        tk_num = 7
    elif self.unit_class == 'APC':
        tk_num = 6
    elif self.unit_class in ['AC', 'LW', 'MG']:
        tk_num = 5
    elif self.unit_class == 'AT_GUN':
        tk_num = 4
    elif self.unit_type in ['Marder II', 'Marder III H']:
        tk_num = 6
    elif self.unit_type in ['PzKw IV H', 'STuG III G']:
        tk_num = 5
    elif self.unit_type in ['PzKw V G', 'JgdPzKw IV', 'JgdPz 38(t)']:
        tk_num = 4
    elif self.unit_type in ['PzKw VI E', 'PzKw VI B']:
        tk_num = 3
    else:
        print ('FriendlyAction() Error: Unrecognized unit type')
        return
    # check modified roll against tk number
    if mod_roll <= tk_num:
        if self.unit_class in ['TANK', 'SPG']:
            PlaySound('tank_knocked_out')
        text = self.GetDesc() + ' is destroyed by friendly action!'
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, text)
        self.RecordKO(friendly=True, advance_fire=advance_fire)
        self.alive = False
        UpdateMapOverlay()
        RenderEncounter()
        return True
    # check original roll with new modifiers for smoke placement
    mod_roll = roll
    if self.unit_class in ['TANK', 'SPG', 'AT_GUN']:
        mod_roll += 1
    if artillery:
        mod_roll += 1
    # air strikes and advance fire pin instead of smoke
    if air_strike or advance_fire:
        if mod_roll >= 11 and self.unit_class in ['LW', 'MG', 'AT_GUN']:
            text = self.GetDesc() + ' is pinned.'
            ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, text)
            self.pinned = True
            RenderEncounter()
            return True
    # place smoke if not already there
    else:
        # movable infantry units less likely to be smoked
        if self.unit_class in ['LW', 'MG']:
            mod_roll -= 2
        if mod_roll >= 11 and self.map_hex.smoke_factors == 0:
            text = self.GetDesc() + ' is hit with smoke from friendly units.'
            ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, text)
            PlaceSmoke(self.map_hex, 1)
            RenderEncounter()
            return True
    # no effect
    return False
# unload an APC's passengers as a new infantry unit in its hex
def DismountInfantry(self, under_fire=False):
    """Spawn this APC's passengers as an infantry unit in the same hex.

    The new unit is a light weapons squad on a D6 roll of 1-4, a machine
    gun squad on 5-6. If under_fire is True, the fresh unit immediately
    takes an automatic pin test.
    """
    self.full_apc = False
    ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' dismounts infantry!')
    # 1-4: light weapons squad; 5-6: machine gun squad
    squad_class = 'LW' if Roll1D6() <= 4 else 'MG'
    spawned = SpawnEnemy(squad_class, self.map_hex)
    if under_fire:
        # pin the new infantry unit
        spawned.PinTest(auto=True)
# find a suitable character location relative to hex centre to draw unit
def GetCharLocation(self):
    """Pick a display cell for this unit near its map hex centre.

    Tries up to 100 random offsets in a rough ellipse around the hex
    centre, rejecting any spot on the same row as, and within 2 columns
    of, another enemy unit. Returns (x, y) console coordinates, (-1, -1)
    for dead units, or the hex centre if no free spot was found.
    """
    # don't bother with inactive units
    if not self.alive: return (-1, -1)
    # try to find a location within the hex that is not occupied by another enemy unit
    for _ in range(100):
        y_mod = libtcod.random_get_int(0, -2, 2)
        # rows further from the centre line are narrower
        if abs(y_mod) == 2:
            x_limit = 2
        elif abs(y_mod) == 1:
            x_limit = 3
        else:
            x_limit = 4
        x_mod = libtcod.random_get_int(0, -x_limit, x_limit)
        x = self.map_hex.x + x_mod
        y = self.map_hex.y + y_mod
        matched = False
        for unit in battle.enemy_units:
            # our own (stale) position should never block placement
            if unit is self:
                continue
            # BUGFIX: was abs(unit.x + x), which sums two absolute screen
            # coordinates and so effectively never flagged an overlap; the
            # occupancy test needs the horizontal distance between units
            if unit.y == y and (abs(unit.x - x) <= 2):
                matched = True
                break
        # if this spot already occupied, continue with next try
        if matched:
            continue
        return (x, y)
    # FIX: every try collided - fall back to the hex centre instead of
    # implicitly returning None (which would break the caller's unpacking)
    return (self.map_hex.x, self.map_hex.y)
# do an action for this unit
def DoAction(self, ambush=False):
    """Choose and carry out this enemy unit's action for the turn.

    Pinned or stunned units may only attempt to recover. Otherwise an
    action is picked from a per-class, per-scenario odds table; the roll
    is retried (up to 300 times) until an action that is actually
    possible for this unit succeeds.
    """
    # if pinned or stunned, unit can only recover or do nothing this turn
    if self.pinned:
        if self.MoraleTest():
            text = ' recovers from being Pinned.'
            self.pinned = False
            # adjust morale rating after a successful recovery
            if self.morale > 2:
                self.morale -= 1
        else:
            text = ' remains Pinned and does nothing.'
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + text)
        return
    elif self.stunned:
        if self.MoraleTest():
            text = ' recovers from being Stunned.'
            self.stunned = False
            if self.morale > 2:
                self.morale -= 1
        else:
            text = ' remains Stunned and does nothing.'
        ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + text)
        return
    # build odds table for this unit:
    # do nothing, move closer, move laterally, move away, attack infantry,
    # attack friendly tank (player if shot at), attack player tank, attack lead tank
    if self.unit_class in ['TANK', 'SPG']:
        if campaign.scen_type == 'Advance':
            ranges = [10,30,40,60,65,80,85]
        elif campaign.scen_type == 'Battle':
            ranges = [10,20,25,35,40,85,90]
        else:
            ranges = [10,50,60,70,75,95,100]
    # APCs will not normally choose to attack armoured battlegroups. Their main
    # mission is to drop their crew if possible and then retreat to a safe distance
    elif self.unit_class == 'APC':
        # chance to dismount infantry
        if self.full_apc and self.map_hex.rng <= 1:
            if Roll1D6() <= 4:
                self.moving = False
                self.DismountInfantry()
                return
        if campaign.scen_type == 'Advance':
            ranges = [10,20,30,40,60,65,80]
        elif campaign.scen_type == 'Battle':
            ranges = [10,15,20,25,35,40,85]
        else:
            ranges = [10,40,50,60,70,75,80]
    # Trucks move around a lot but don't do very much
    elif self.unit_class == 'TRUCK':
        ranges = [30,40,75,110,0,0,0]
    # Armoured Cars can attack friendly infantry, and can spot the player
    # tank to help other enemy attacks
    elif self.unit_class == 'AC':
        # if AC is already spotting the player, less chance of acting
        if self.spotting_player:
            if Roll1D6() > 1:
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' continues spotting your tank.')
                self.moving = False
                return
            ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' lost sight of your tank!')
            self.spotting_player = False
            self.moving = False
            return
        ranges = [15,25,50,60,75,85,100]
    # Anti-tank Gun
    elif self.unit_class == 'AT_GUN':
        # NOTE(review): no 'ranges' is set for scenario types other than
        # Advance/Battle - confirm AT guns cannot appear in other scenarios
        if campaign.scen_type == 'Advance':
            ranges = [30,0,0,0,0,65,70]
        elif campaign.scen_type == 'Battle':
            ranges = [30,0,0,0,0,80,90]
    # Light Weapons or Machine Gun Infantry Squad
    elif self.unit_class in ['LW', 'MG']:
        if campaign.scen_type in ['Advance', 'Battle']:
            ranges = [10,20,40,60,95,0,0]
        else:
            ranges = [10,40,60,70,95,0,100]
    ###################################################################
    #   try to roll an action result that is possible for the unit   #
    ###################################################################
    for i in range(300):
        # do action roll and apply ambush modifier if any
        result = Roll1D100()
        if ambush:
            result += 10
            # infantry and guns ambush more effectively
            # NOTE(review): reconstructed nesting - confirm this extra +10
            # applies only during an ambush
            if self.unit_class in ['LW', 'MG', 'AT_GUN']: result += 10
        # check final result against odds table
        # do nothing
        if result <= ranges[0]:
            ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + " doesn't appear to do anything.")
            self.moving = False
            return
        # move closer
        elif result <= ranges[1]:
            if self.DistMove(-1):
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' moves towards you.')
                return
        # move laterally
        elif result <= ranges[2]:
            if self.LateralMove():
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' shifts its position.')
                return
        # move further away
        elif result <= ranges[3]:
            if self.DistMove(1):
                ShowLabel(self.x+MAP_CON_X, self.y+MAP_CON_Y, self.GetDesc() + ' moves further away.')
                return
        # attack friendly infantry
        elif result <= ranges[4]:
            if self.AttackInfantry(): return
        # fire at friendly tank, perhaps fire at player or move
        elif result <= ranges[5]:
            # if unit was fired upon last turn, might act differently
            if self.shot_at:
                if self.facing == 'Front':
                    if self.AttackPlayer(): return
                else:
                    if self.DistMove(1): return
            # attack friendly tank
            if self.AttackTank(): return
        # fire - your tank
        elif result <= ranges[6]:
            if self.AttackPlayer(): return
        # fire - lead tank or perhaps player tank
        else:
            # special - some lead tank shots can be redirected to a firefly tank instead
            if tank.stats['vehicle_type'] == 'Sherman VC':
                if Roll1D6() <= 2:
                    if self.AttackPlayer(): return
            if tank.lead_tank:
                if self.AttackPlayer(): return
            else:
                if self.AttackTank(): return
# records the inputs, modifiers, and outcome of one modified dice roll
class RollAction():
    """Container for every detail of a single attack / to-kill roll,
    consumed by the roll display routines."""

    def __init__(self):
        # attacker and attack description
        self.attacker_unit_type = ''        # unit type of attacker
        self.attacker = ''                  # description of attacking unit
        self.attack_type = ''               # description of type of attack (HE, AP, etc.)
        self.attacker_unidentified = False  # attacking unit is unknown or unidentified
        # target description
        self.target_unit_type = ''          # unit type of target
        self.target = ''                    # description of target unit
        self.target_unidentified = False    # target unit is unidentified
        self.rng = ''                       # range to target
        self.hit_location = ''              # hit location if any
        # roll requirement and modifiers
        self.score_req = 0                  # base score required on 2D6
        self.auto_ko = False                # target is automatically knocked out
        self.nc = False                     # target has no chance of being knocked out
        self.drm = []                       # list of (description, modifier) pairs
        self.total_drm = 0                  # sum of all drm
        self.roll_req = 0                   # final roll required
        # roll outcome
        self.d1 = self.d2 = self.roll = 0   # individual dice and total result
        self.result = ''                    # description of roll result
        self.rof_result = ''                # rate of fire result if any

    # add up all dice roll modifiers
    def CalculateTotalDRM(self):
        self.total_drm = sum(mod for (desc, mod) in self.drm)
##########################################################################################
# General Functions #
##########################################################################################
# return hours and minutes between two given times
def GetTimeUntil(h1, m1, h2, m2):
    """Return (hours, minutes) elapsed from time h1:m1 until time h2:m2."""
    hours = h2 - h1
    minutes = m2 - m1
    # borrow an hour when the minute hand has wrapped backwards
    if minutes < 0:
        hours -= 1
        minutes += 60
    return (hours, minutes)
# add a line to the campaign journal
def WriteJournal(text):
    """Append one line of text to the in-memory campaign journal.

    Lines accumulate in memory and are written to disk by RecordJournal().
    """
    campaign.campaign_journal.append(text)
# output the completed campaign journal to a text file
def RecordJournal():
    """Add final crew reports to the journal, then dump the whole journal
    to a timestamped text file in the current directory."""
    # append each crewman's end-of-campaign report lines first
    for crewman in tank.crew:
        for report_line in crewman.GenerateReport():
            WriteJournal(report_line)
    # file is named by the current time and date
    stamp = datetime.now().strftime("%H_%M_%d-%m-%Y")
    out_name = 'Armoured_Commander_Journal_' + stamp + '.txt'
    with open(out_name, 'a') as journal_file:
        journal_file.writelines(entry + '\n' for entry in campaign.campaign_journal)
# returns the crew member in the given tank position
def GetCrewByPosition(position):
    """Return the crewman occupying the named position, or None if not found."""
    return next((crewman for crewman in tank.crew if crewman.position == position), None)
# get the required number of exp to be at this level
def GetExpReq(level):
    """Return the total experience points required to hold the given level."""
    required = (level - 1) * BASE_EXP_REQ
    # each level past 2 adds an escalating inflation surcharge
    required += sum(LVL_INFLATION * (lvl - 2) for lvl in range(3, level + 1))
    return int(required)
# display a window of help text. if help text is disabled in campaign settings, or we have
# already shown this text before, skip it
def TutorialMessage(key):
    """Display the tutorial text for the given key, at most once ever.

    Skipped when tutorial messages are disabled in campaign settings, or
    when the persistent 'bones' file records the key as already shown.
    The key is marked as shown in the bones file before display.
    """
    if campaign is not None:
        if not campaign.tutorial_message: return
    try:
        save = shelve.open('bones')
        bones = save['bones']
        # check to see if bones file already has key
        if key in bones.tutorial_message_flags:
            # key is already set to true: shown in a previous session
            if bones.tutorial_message_flags[key]:
                save.close()
                return
        # mark that this text has been displayed in the bones file
        # will also add the key if bones file did not already have it
        bones.tutorial_message_flags[key] = True
        save['bones'] = bones
        # BUGFIX: was 'save.close' (attribute access, not a call), so the
        # shelve file was never actually closed on this path
        save.close()
        # animate an expanding window, then display the text
        for w in range(3, MENU_CON_WIDTH, 6):
            h = int(w/3) - 3
            if h < 3: h = 3
            libtcod.console_rect(0, SCREEN_XM-int(w/2), SCREEN_YM-int(h/2), w, h, False,
                flag=libtcod.BKGND_SET)
            libtcod.console_print_frame(0, SCREEN_XM-int(w/2), SCREEN_YM-int(h/2), w, h,
                clear=True, flag=libtcod.BKGND_DEFAULT, fmt=0)
            libtcod.console_flush()
        libtcod.console_clear(menu_con)
        libtcod.console_set_alignment(menu_con, libtcod.LEFT)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 1,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Tutorial Message')
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        lines = wrap(TUTORIAL_TEXT[key], 60, subsequent_indent = ' ')
        y = int(MENU_CON_HEIGHT / 2) - int(len(lines)/2)
        for line in lines:
            libtcod.console_print(menu_con, 40, y, line)
            y += 1
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2, libtcod.BKGND_NONE,
            libtcod.CENTER, '[%cEnter%c] to Continue'%HIGHLIGHT)
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        libtcod.console_flush()
        WaitForEnter()
        # re-blit original display console to screen
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
        libtcod.console_flush()
    # NOTE(review): this broad except also swallows display errors, not just
    # bones-file failures; kept for compatibility, but consider narrowing it
    except:
        print('ERROR: Could not open bones file')
# display help interface
def ShowHelp():
    """Interactive help browser: topic list on the left, topic text on the right.

    Up/Down scroll the topic list, typing a letter jumps to the first topic
    starting with that letter, and ESC exits (restoring the game screen if
    a campaign day is in progress).
    """
    # select the first help dictionary item that starts with this character
    # don't change selection if no entry with this character
    def GetEntry(key_code):
        for (topic, text) in help_list:
            # if lower or upper case match
            if topic[0] == chr(key_code) or topic[0] == chr(key_code - 32):
                return (topic, text)
        return None
    # sort the list of help terms alphabetically by keyword
    help_list = sorted(HELP_TEXT, key=lambda tup: tup[0])
    # select first entry by default
    selected = help_list[0]
    # column of the divider between topic list and topic text
    x = 30
    # darken screen
    libtcod.console_clear(con)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
        0.0, 0.7)
    exit_view = False
    while not exit_view:
        # generate and display menu
        libtcod.console_clear(menu_con)
        libtcod.console_set_alignment(menu_con, libtcod.LEFT)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 1,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Help Topics')
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 2, libtcod.BKGND_NONE, libtcod.CENTER, 'Type a letter to jump to its entry, arrow keys to scroll')
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cESC%c] Return'%HIGHLIGHT)
        libtcod.console_hline(menu_con, 1, 3, MENU_CON_WIDTH-2, flag=libtcod.BKGND_DEFAULT)
        libtcod.console_hline(menu_con, 1, MENU_CON_HEIGHT-3, MENU_CON_WIDTH-2, flag=libtcod.BKGND_DEFAULT)
        libtcod.console_vline(menu_con, x, 4, MENU_CON_HEIGHT-7, flag=libtcod.BKGND_DEFAULT)
        # display list of help topics, with selected one at yc
        n = 0
        s = help_list.index(selected)
        yc = int(MENU_CON_HEIGHT/2)
        for (topic, text) in help_list:
            y = yc - (s-n)
            n += 1
            # skip entries that scroll outside the visible window
            if y < 6 or y > MENU_CON_HEIGHT-9:
                continue
            if (topic, text) == selected:
                libtcod.console_set_default_background(menu_con, SELECTED_COLOR)
                libtcod.console_rect(menu_con, 2, y, x-4, 1, False, flag=libtcod.BKGND_SET)
                libtcod.console_set_default_background(menu_con, libtcod.black)
            libtcod.console_print(menu_con, 2, y, topic)
        # display explanational text of selected item, double spaced
        (topic, text) = selected
        lines = wrap(text, MENU_CON_WIDTH-50-x, subsequent_indent = ' ')
        y = yc - int(len(lines)/2)
        for line in lines:
            libtcod.console_print(menu_con, x+4, y, line)
            y += 1
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        libtcod.console_flush()
        refresh = False
        while not refresh:
            # get input from user
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # exit right away
            if libtcod.console_is_window_closed(): sys.exit()
            if key.vk == libtcod.KEY_ESCAPE:
                exit_view = True
                break
            elif key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_DOWN:
                n = help_list.index(selected)
                if key.vk == libtcod.KEY_UP:
                    if n > 0:
                        selected = help_list[n-1]
                        refresh = True
                else:
                    if n < len(help_list)-1:
                        selected = help_list[n+1]
                        refresh = True
            # lowercase alphabetical character
            # BUGFIX: lower bound was 92, which also let '\', ']', '^', '_'
            # and '`' (codes 92-96) through; 'a' is code 97
            if 97 <= key.c <= 122:
                # pass code to function
                result = GetEntry(key.c)
                if result is not None:
                    selected = result
                    refresh = True
            libtcod.console_flush()
    # re-draw screen if needed
    if campaign.day_in_progress:
        if battle is None:
            RenderCampaign()
        else:
            RenderEncounter()
# display player tank info; if select_tank is True, we can scroll through all available
# tank models and choose one
def ShowTankInfo(select_tank = False):
    """Show the player tank info screen.

    With select_tank=True, lets the player scroll through the tank models
    currently available to the campaign and returns the chosen model name;
    otherwise the screen is view-only and the function returns None.
    """
    # check list of permitted player vehicle types, and only select those that are
    # available in the current calendar month
    if select_tank:
        tank_list = []
        for vehicle_name in campaign.player_veh_list:
            if campaign.GetRF(vehicle_name) > 0:
                tank_list.append(vehicle_name)
        # NOTE(review): assumes at least one model is available - an empty
        # tank_list would raise IndexError here; confirm callers guarantee this
        selected_tank = tank_list[0]
    # darken screen
    libtcod.console_clear(con)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
        0.0, 0.7)
    exit_menu = False
    while not exit_menu:
        # generate and display menu
        libtcod.console_clear(menu_con)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        if select_tank:
            text = 'Select a Tank Model'
        else:
            text = 'Player Tank Info'
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 2,
            libtcod.BKGND_NONE, libtcod.CENTER, text)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        # display tank info
        if select_tank:
            show_tank = selected_tank
        else:
            show_tank = tank.unit_type
        ShowVehicleTypeInfo(show_tank, menu_con, 34, 8)
        # display possible actions
        if select_tank:
            text = '[%cA/D/Left/Right%c] Scroll '%HIGHLIGHT
            text += '[%cEnter%c] Select Tank'%HIGHLIGHT
        else:
            text = '[%cESC%c] Return'%HIGHLIGHT
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
            libtcod.BKGND_NONE, libtcod.CENTER, text)
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        libtcod.console_flush()
        refresh = False
        while not refresh:
            # get input from user
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            if key.vk == libtcod.KEY_ESCAPE and not select_tank:
                exit_menu = True
                break
            elif key.vk == libtcod.KEY_ENTER and select_tank:
                exit_menu = True
                break
            # exit right away
            if libtcod.console_is_window_closed():
                sys.exit()
            # input for selecting a tank model
            if select_tank:
                # get pressed key
                key_char = chr(key.c)
                # scroll left through the model list (wraps around)
                if key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT:
                    i = tank_list.index(selected_tank)
                    if i > 0:
                        selected_tank = tank_list[i-1]
                    else:
                        selected_tank = tank_list[-1]
                    refresh = True
                # scroll right through the model list (wraps around)
                elif key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT:
                    i = tank_list.index(selected_tank)
                    if i < len(tank_list)-1:
                        selected_tank = tank_list[i+1]
                    else:
                        selected_tank = tank_list[0]
                    refresh = True
            libtcod.console_flush()
    # re-draw screen if needed
    if campaign.day_in_progress:
        if battle is None:
            RenderCampaign()
        else:
            RenderEncounter()
    # return tank selection if we're doing that
    if select_tank:
        return selected_tank
# display crew info and allow player to select crew to view/add/upgrade skills
def ShowCrewInfo():
    """Crew roster screen.

    Shows one info column per crew member; A/D or arrow keys change the
    selection, Enter opens the skills screen, N renames, K sets a nickname,
    ESC exits (restoring the game screen if a day is in progress).
    """
    # darken screen
    libtcod.console_clear(con)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
        0.0, 0.7)
    # select first crew member by default
    selected_crew = tank.crew[0]
    exit_menu = False
    while not exit_menu:
        # clear console
        libtcod.console_set_default_background(menu_con, libtcod.black)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        libtcod.console_clear(menu_con)
        # display frame, title, and commands
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH-1, MENU_CON_HEIGHT-1,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 2,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Tank Crew Info')
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-7,
            libtcod.BKGND_NONE, libtcod.CENTER,
            '[%cA/D/Left/Right%c] Change Crew Selection'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-6,
            libtcod.BKGND_NONE, libtcod.CENTER,
            '[%cEnter%c] View/Add/Upgrade Skills'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-5,
            libtcod.BKGND_NONE, libtcod.CENTER,
            'Change [%cN%c]ame'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-4,
            libtcod.BKGND_NONE, libtcod.CENTER,
            '[%cK%c] Set/Reset Nickname'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3,
            libtcod.BKGND_NONE, libtcod.CENTER,
            '[%cESC%c] Return'%HIGHLIGHT)
        # display crew info, one column per crew member
        libtcod.console_set_alignment(menu_con, libtcod.LEFT)
        x = 2
        for crew_member in tank.crew:
            # highlight the currently selected crewman's column
            highlight=False
            if selected_crew.position == crew_member.position:
                highlight=True
            crew_member.DisplayCrewInfo(menu_con, x, 4, highlight)
            x += 27
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        libtcod.console_flush()
        refresh = False
        while not refresh:
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # exit right away
            if libtcod.console_is_window_closed():
                sys.exit()
            # exit view
            if key.vk == libtcod.KEY_ESCAPE:
                exit_menu = True
                break
            elif key.vk == libtcod.KEY_ENTER:
                ShowSkills(selected_crew)
                refresh = True
            key_char = chr(key.c)
            # .next / .prev cycle the selection through the crew
            if key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT:
                selected_crew = selected_crew.next
                refresh = True
            elif key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT:
                selected_crew = selected_crew.prev
                refresh = True
            elif key_char in ['k', 'K']:
                libtcod.console_clear(menu_con)
                libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
                    clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
                libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, con, MENU_CON_X, MENU_CON_Y)
                text = 'Enter new nickname for ' + selected_crew.name
                selected_crew.nickname = GetInput(0, text, 25, NICKNAME_MAX_LEN)
                refresh = True
            elif key_char in ['n', 'N']:
                libtcod.console_clear(menu_con)
                libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
                    clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
                libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, con, MENU_CON_X, MENU_CON_Y)
                text = 'Enter new name for ' + selected_crew.name
                selected_crew.name = GetInput(0, text, 25, NAME_MAX_LEN, get_name=True)
                refresh = True
            libtcod.console_flush()
    # re-draw screen if needed
    if campaign.day_in_progress:
        if battle is None:
            RenderCampaign()
        else:
            RenderEncounter()
# allow player to view skills and spend skill points for a crew member
def ShowSkills(crew_member):
	"""Display the skill menu for crew_member and let the player spend
	skill points to add or upgrade skills.

	Draws onto the shared menu_con console and loops until ESC is
	pressed; does nothing for dead crew members.
	"""
	if not crew_member.alive: return
	# select first skill as default
	selected_skill = 0
	exit_menu = False
	while not exit_menu:
		# build a list of possible skills for this crewman
		# if crewman already has a level in this skill, record it, otherwise record as 0
		skill_list = []
		for skill in SKILLS:
			# skip skills restricted to other crew positions
			if len(skill.restrictions) > 0:
				if crew_member.position not in skill.restrictions:
					continue
			# only allow stabilizer skill to US forces
			if skill.name == 'Gyrostabilizer' and campaign.player_nation != 'USA':
				continue
			level = 0
			for crew_skill in crew_member.skills:
				if crew_skill.name == skill.name:
					level = crew_skill.level
					break
			skill_list.append((skill, level))
		# clear console
		libtcod.console_set_default_background(menu_con, libtcod.black)
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		libtcod.console_clear(menu_con)
		# display frame, title, and commands
		libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH-1, MENU_CON_HEIGHT-1,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, 2,
			libtcod.BKGND_NONE, libtcod.CENTER, 'Crew Skills')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-5,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cW/S/Up/Down%c] Change Skill Selection'%HIGHLIGHT)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-4,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cEnter%c] Add/Upgrade Skill'%HIGHLIGHT)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cESC%c] Return/Continue'%HIGHLIGHT)
		# display crew info
		crew_member.DisplayCrewInfo(menu_con, 29, 4, False)
		# draw selected skill info box
		libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
		libtcod.console_print_frame(menu_con, 83, 4, 40, 30,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_print(menu_con, 86, 9, 'Description')
		libtcod.console_print(menu_con, 86, 25, 'Activation Levels')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		# now display list of all possible skills, highlighting ones the crew
		# member already has at least one level of
		# also display detailed info about selected skill
		libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
		libtcod.console_print_frame(menu_con, 56, 4, 27, 30,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_print(menu_con, 59, 5, 'Skills')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		y = 7
		n = 0
		for (skill, level) in skill_list:
			if n == selected_skill:
				# highlight the row of the currently selected skill
				libtcod.console_set_default_background(menu_con, libtcod.darker_grey)
				libtcod.console_rect(menu_con, 57, y, 25, 1, False, flag=libtcod.BKGND_SET)
				libtcod.console_set_default_background(menu_con, libtcod.black)
				# description of selected skill
				libtcod.console_set_default_foreground(menu_con, libtcod.white)
				lines = wrap(skill.desc, 30, subsequent_indent = ' ')
				y2 = 11
				for line in lines:
					libtcod.console_print(menu_con, 86, y2, line)
					y2 += 1
				# activation levels
				if skill.levels[0] == 100:
					libtcod.console_print(menu_con, 86, 27, 'Always active')
				else:
					x2 = 86
					for skill_level in skill.levels:
						# crew has this activation level or greater
						if skill_level <= level:
							libtcod.console_set_default_foreground(menu_con, libtcod.light_blue)
						else:
							libtcod.console_set_default_foreground(menu_con, libtcod.white)
						libtcod.console_print(menu_con, x2, 27, str(skill_level)+'%')
						x2 += len(str(skill_level)) + 2
			# gyro skill not available yet
			if skill.name == 'Gyrostabilizer' and not campaign.gyro_skill_avail:
				libtcod.console_set_default_foreground(menu_con, GREYED_COLOR)
			elif level > 0:
				libtcod.console_set_default_foreground(menu_con, libtcod.light_blue)
			else:
				libtcod.console_set_default_foreground(menu_con, libtcod.white)
			libtcod.console_print(menu_con, 58, y, skill.name)
			y += 1
			n += 1
		libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
		libtcod.console_flush()
		refresh = False
		while not refresh:
			libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
			# exit right away
			if libtcod.console_is_window_closed(): sys.exit()
			# exit view
			if key.vk == libtcod.KEY_ESCAPE:
				exit_menu = True
				break
			key_char = chr(key.c)
			# skill selection up
			if key.vk == libtcod.KEY_UP or key_char in ['w', 'W']:
				if selected_skill == 0:
					selected_skill = len(skill_list)-1
				else:
					selected_skill -= 1
				refresh = True
			# skill selection down
			elif key.vk == libtcod.KEY_DOWN or key_char in ['s', 'S']:
				if selected_skill == len(skill_list)-1:
					selected_skill = 0
				else:
					selected_skill += 1
				refresh = True
			# add / upgrade skill
			elif key.vk == libtcod.KEY_ENTER:
				# needs to be alive, active, and have at least one skill point
				if not crew_member.NoActions() and crew_member.skill_pts > 0:
					n = 0
					for (skill, level) in skill_list:
						if n == selected_skill:
							# gyro skill not available yet
							if skill.name == 'Gyrostabilizer' and not campaign.gyro_skill_avail:
								break
							# check to see if we can add / upgrade this skill
							# add new skill
							if level == 0:
								text = 'Spend a skill point to add skill "' + skill.name + '"'
								if PopUp(text, confirm=True):
									crew_member.skill_pts -= 1
									crew_member.skills.append(SkillRecord(skill.name, skill.levels[0]))
									WriteJournal(crew_member.name + ' added a new skill: ' + skill.name + ', now at '+ str(skill.levels[0]) + '% activation')
									PlaySound('new_skill')
							# upgrade skill
							else:
								for skill_level in skill.levels:
									# we're not at this level yet, so we can upgrade
									if level < skill_level:
										text = 'Spend a skill point to upgrade skill "' + skill.name + '"'
										if PopUp(text, confirm=True):
											crew_member.skill_pts -= 1
											crew_member.UpgradeSkill(skill.name, skill_level)
											WriteJournal(crew_member.name + ' upgraded a skill: ' + skill.name + ', now at ' + str(skill_level) + '% activation')
											PlaySound('new_skill')
										# we found the right skill level
										break
							# we found the selected skill, so we can break out of for loop
							break
						n += 1
					refresh = True
			libtcod.console_flush()
# display campaign stats, can be accessed during the campaign also shown at the end of a campaign
def ShowCampaignStats():
	"""Display the campaign stats menu; ESC returns, J opens the journal.

	Draws onto the shared menu_con console, blocks until dismissed, then
	re-draws the underlying campaign or encounter screen.
	"""
	# darken screen
	libtcod.console_clear(con)
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
		0.0, 0.7)
	exit_menu = False
	while not exit_menu:
		# generate and display menu
		libtcod.console_clear(menu_con)
		libtcod.console_set_alignment(menu_con, libtcod.LEFT)
		libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, 2,
			libtcod.BKGND_NONE, libtcod.CENTER, 'Campaign Stats')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		# display current day of campaign calendar as a 40-cell progress bar
		libtcod.console_set_default_background(menu_con, ROW_COLOR)
		libtcod.console_rect(menu_con, MENU_CON_XM-20, 5, 40, 1, False, flag=libtcod.BKGND_SET)
		# NOTE(review): d1 is the string '0' in the fallback branch but
		# presumably an int otherwise; both work with str() and float() below
		if 'Days of Combat' not in campaign.stats:
			d1 = '0'
		else:
			d1 = campaign.stats['Days of Combat']
		d2 = len(campaign.days) - campaign.start_date
		text = 'Campaign Day ' + str(d1) + '/' + str(d2)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, 5, libtcod.BKGND_NONE,
			libtcod.CENTER, text)
		# filled width of the progress bar
		w = int(ceil(float(d1) / float(d2) * 40.0))
		libtcod.console_set_default_background(menu_con, HIGHLIGHT_COLOR)
		libtcod.console_rect(menu_con, MENU_CON_XM-20, 5, w, 1, False, flag=libtcod.BKGND_SET)
		libtcod.console_set_default_background(menu_con, libtcod.black)
		# display current VP total
		text = 'Total Victory Points: ' + str(campaign.vp + campaign.day_vp)
		libtcod.console_print(menu_con, 53, 7, text)
		# display rest of campaign stats
		y = 9
		for stat_name in C_STATS:
			text = stat_name + ': '
			if stat_name not in campaign.stats:
				text += '0'
			else:
				text += str(campaign.stats[stat_name])
			libtcod.console_print(menu_con, 53, y, text)
			y += 1
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-4,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'Display Campaign [%cJ%c]ournal'%HIGHLIGHT)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cESC%c] Return'%HIGHLIGHT)
		libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
		libtcod.console_flush()
		refresh = False
		while not refresh:
			# get input from user
			libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
			if key.vk == libtcod.KEY_ESCAPE:
				exit_menu = True
				break
			key_char = chr(key.c)
			if key_char in ['j', 'J']:
				ShowTextWindow('Campaign Journal', campaign.campaign_journal)
			# exit right away
			if libtcod.console_is_window_closed():
				sys.exit()
			libtcod.console_flush()
	# re-draw screen: campaign map if no battle, otherwise the encounter
	if battle is None:
		if campaign.day_in_progress:
			RenderCampaign()
	else:
		RenderEncounter()
# display a window of text lines, allow player to scroll up and down
def ShowTextWindow(title, text_lines):
	"""Display a scrollable window of text lines; ESC closes it.

	title: window title string
	text_lines: iterable of strings; lines longer than 84 chars are wrapped
	The screen under the window is saved to con and restored on exit.
	"""
	# copy existing screen to con
	libtcod.console_blit(0, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, con, 0, 0)
	# generate list of lines to display
	lines = []
	for line in text_lines:
		if len(line) <= 84:
			lines.append(line)
			continue
		# split original line
		split_lines = wrap(line, 84, subsequent_indent = ' ')
		lines.extend(split_lines)
	# starting y scroll position: bottom of list
	y2 = len(lines) - 1
	exit_menu = False
	while not exit_menu:
		# generate and display window
		libtcod.console_clear(text_con)
		libtcod.console_print_frame(text_con, 0, 0, TEXT_CON_WIDTH, TEXT_CON_HEIGHT,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		# window title
		libtcod.console_set_default_foreground(text_con, MENU_TITLE_COLOR)
		libtcod.console_print_ex(text_con, TEXT_CON_XM, 1,
			libtcod.BKGND_NONE, libtcod.CENTER,
			title)
		libtcod.console_set_default_foreground(text_con, libtcod.white)
		# commands
		libtcod.console_print_ex(text_con, TEXT_CON_XM, TEXT_CON_HEIGHT-3,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cUp/Down/PgUp/PgDn/Home/End%c] Scroll'%HIGHLIGHT)
		libtcod.console_print_ex(text_con, TEXT_CON_XM, TEXT_CON_HEIGHT-2,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cESC%c] Return'%HIGHLIGHT)
		# text content: show the 48-line window ending at scroll position y2
		if len(lines) <= 48:
			y1 = 0
		else:
			y1 = y2 - 48
		y = 3
		for n in range(y1, y2+1):
			if n > len(lines) - 1: break
			libtcod.console_print(text_con, 2, y, lines[n])
			y += 1
		libtcod.console_blit(text_con, 0, 0, TEXT_CON_WIDTH, TEXT_CON_HEIGHT, 0, TEXT_CON_X, TEXT_CON_Y)
		libtcod.console_flush()
		refresh = False
		while not refresh:
			# get input from user
			libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
			if key.vk == libtcod.KEY_ESCAPE:
				exit_menu = True
				break
			elif key.vk == libtcod.KEY_UP:
				if y2 > 48:
					y2 -= 1
					refresh = True
			elif key.vk == libtcod.KEY_DOWN:
				if y2 < len(lines) - 1:
					y2 += 1
					refresh = True
			elif key.vk == libtcod.KEY_HOME:
				y2 = 48
				refresh = True
			elif key.vk == libtcod.KEY_END:
				y2 = len(lines) - 1
				refresh = True
			elif key.vk == libtcod.KEY_PAGEUP:
				y2 -= 10
				if y2 < 48: y2 = 48
				refresh = True
			elif key.vk == libtcod.KEY_PAGEDOWN:
				y2 += 10
				if y2 > len(lines) - 1: y2 = len(lines) - 1
				refresh = True
			# exit right away
			if libtcod.console_is_window_closed():
				sys.exit()
			# NOTE(review): key_char appears unused in this loop — confirm
			key_char = chr(key.c)
			libtcod.console_flush()
	# copy con back to screen
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
	libtcod.console_flush()
# display a window allowing the player to change game settings, which are saved in the campaign
# object
def ShowSettings():
	"""Display the game settings menu and apply any changes the player
	makes; settings are stored on the campaign object.

	Fullscreen / resolution options are only offered when the renderer
	supports them (sys_get_renderer() in [3, 4]).
	"""
	# darken screen
	libtcod.console_clear(con)
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
		0.0, 0.7)
	exit_menu = False
	while not exit_menu:
		# generate and display menu
		libtcod.console_clear(menu_con)
		libtcod.console_set_alignment(menu_con, libtcod.LEFT)
		libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, 2,
			libtcod.BKGND_NONE, libtcod.CENTER, 'Game Settings Menu')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, 3,
			libtcod.BKGND_NONE, libtcod.CENTER, VERSION + SUBVERSION)
		#libtcod.console_print_ex(menu_con, MENU_CON_XM, 4,
		#	libtcod.BKGND_NONE, libtcod.CENTER, "test")
		# Campaign Settings
		libtcod.console_print_frame(menu_con, 50, 5, 40, 6, clear=False,
			flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print(menu_con, 52, 6, 'Campaign Settings')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		text = 'Tank Selection: '
		if campaign.unlimited_tank_selection:
			text += 'Unlimited'
		else:
			text += 'Strict'
		libtcod.console_print(menu_con, 52, 8, text)
		text = 'Commander Replacement: '
		if campaign.casual_commander:
			text += 'Casual'
		else:
			text += 'Realistic'
		libtcod.console_print(menu_con, 52, 9, text)
		# Display Settings
		libtcod.console_print_frame(menu_con, 50, 12, 58, 13, clear=False,
			flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print(menu_con, 52, 13, 'Display Settings')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		text = '[%cA%c]nimations: '%HIGHLIGHT
		if campaign.animations:
			text += 'On'
		else:
			text += 'Off'
		libtcod.console_print(menu_con, 52, 15, text)
		text = '[%cS%c]ounds: '%HIGHLIGHT
		if campaign.sounds:
			text += 'On'
		else:
			text += 'Off'
		libtcod.console_print(menu_con, 52, 16, text)
		text = '[%cD%c]isplay Tutorial Messages: '%HIGHLIGHT
		if campaign.tutorial_message:
			text += 'On'
		else:
			text += 'Off'
		libtcod.console_print(menu_con, 52, 17, text)
		text = '[%cW%c]ait for Enter before clearing on-screen labels: '%HIGHLIGHT
		if campaign.pause_labels:
			text += 'On'
		else:
			text += 'Off'
		libtcod.console_print(menu_con, 52, 18, text)
		# NEW: only display option if allowed by renderer
		if libtcod.sys_get_renderer() in [3, 4]:
			text = '[%cF%c]ull Screen: '%HIGHLIGHT
			# display based on actual fullscreen status, not campaign setting
			if libtcod.console_is_fullscreen():
				text += 'On'
			else:
				text += 'Off'
			libtcod.console_print(menu_con, 52, 20, text)
			text = '[%cR%c]esolution for Full Screen: '%HIGHLIGHT
			text += str(campaign.fs_res_x) + ' x ' + str(campaign.fs_res_y)
			libtcod.console_print(menu_con, 52, 21, text)
			libtcod.console_set_default_foreground(menu_con, libtcod.lighter_blue)
			libtcod.console_print(menu_con, 53, 22, 'Changing either of these two settings may')
			libtcod.console_print(menu_con, 53, 23, 'pause your computer for a few seconds')
			libtcod.console_set_default_foreground(menu_con, libtcod.white)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3,
			libtcod.BKGND_NONE, libtcod.CENTER, 'Press highlighted letter to change setting')
		libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
			libtcod.BKGND_NONE, libtcod.CENTER,
			'[%cESC%c] Return'%HIGHLIGHT)
		libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
		libtcod.console_flush()
		refresh = False
		while not refresh:
			# get input from user
			libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
			if key.vk == libtcod.KEY_ESCAPE:
				exit_menu = True
				break
			# exit right away
			if libtcod.console_is_window_closed():
				sys.exit()
			key_char = chr(key.c)
			if key_char in ['a', 'A']:
				campaign.animations = not campaign.animations
				refresh = True
			elif key_char in ['s', 'S']:
				campaign.sounds = not campaign.sounds
				refresh = True
			elif key_char in ['w', 'W']:
				campaign.pause_labels = not campaign.pause_labels
				refresh = True
			elif key_char in ['d', 'D']:
				campaign.tutorial_message = not campaign.tutorial_message
				refresh = True
			# fullscreen options only exist when the renderer supports them
			if libtcod.sys_get_renderer() not in [3, 4]: continue
			if key_char in ['f', 'F']:
				# switch FS mode and update campaign setting if required
				libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
				campaign.fullscreen = libtcod.console_is_fullscreen()
				refresh = True
			elif key_char in ['r', 'R']:
				# cycle to the next entry in FS_RES_LIST, wrapping at the end
				n = 0
				for (x, y) in FS_RES_LIST:
					if x == campaign.fs_res_x and y == campaign.fs_res_y:
						if n == len(FS_RES_LIST) - 1:
							(campaign.fs_res_x, campaign.fs_res_y) = FS_RES_LIST[0]
						else:
							(campaign.fs_res_x, campaign.fs_res_y) = FS_RES_LIST[n+1]
						break
					n += 1
				libtcod.sys_force_fullscreen_resolution(campaign.fs_res_x, campaign.fs_res_y)
				# toggle FS to force an update into the new resolution
				if libtcod.console_is_fullscreen():
					libtcod.console_set_fullscreen(False)
					libtcod.console_set_fullscreen(True)
				refresh = True
			libtcod.console_flush()
	# re-draw screen: campaign map if no battle, otherwise the encounter
	if battle is None:
		if campaign.day_in_progress:
			RenderCampaign()
	else:
		RenderEncounter()
# display information on the campaign or encounter map; no input possible while it's
# being displayed
# x, y is highlighted object or location; label appears centered under this
# if crewman is not none, label is being spoken by that crewman
def ShowLabel(x, y, original_text, crewman=None):
	"""Display an informational label on the campaign or encounter map;
	no input is possible while it is shown.

	x, y: highlighted object or location; the label appears centered
		under this point (clamped to the screen / map console)
	original_text: text to display
	crewman: if not None, the label is spoken by that crewman and his
		position is prefixed to the text
	"""
	libtcod.console_set_default_background(0, GREYED_COLOR)
	# build text string
	text = ''
	# add crewman position to front of string
	if crewman is not None:
		text += crewman.position + ': '
	text += original_text
	# if wait for enter is on in campaign settings, add to text to display
	if campaign.pause_labels:
		text += ' [Enter to continue]'
	# divide text to be shown into lines
	lines = wrap(text, 28)
	n = 1
	for line in lines:
		# don't try to draw outside the screen
		if y+n >= SCREEN_HEIGHT:
			break
		if x + int((len(line)+1)/2) >= SCREEN_WIDTH:
			x = SCREEN_WIDTH - int((len(line)+1)/2)
		# make sure label falls within map console
		if battle is not None:
			if x - int((len(line)+1)/2) <= MAP_CON_X:
				x = MAP_CON_X + int((len(line)+1)/2)
		else:
			if x - int((len(line)+1)/2) <= C_MAP_CON_X:
				x = C_MAP_CON_X + int((len(line)+1)/2)
		# if animations are off, display labels all at once
		if not campaign.animations:
			libtcod.console_print_ex(0, x, y+n, libtcod.BKGND_SET, libtcod.CENTER, line)
			libtcod.console_flush()
		# otherwise, reveal label two characters at a time
		else:
			for i in range(0, len(line)+1, 2):
				libtcod.console_print_ex(0, x, y+n, libtcod.BKGND_SET, libtcod.CENTER, line[:i])
				libtcod.console_flush()
				Wait(1)
			# for/else: this always runs here since the loop never breaks
			else:
				# if there's an odd character left
				if len(line)+1 > i:
					libtcod.console_print_ex(0, x, y+n, libtcod.BKGND_SET, libtcod.CENTER, line)
					libtcod.console_flush()
					Wait(1)
		n += 1
	if campaign.pause_labels:
		WaitForEnter()
	else:
		Wait(1100)
	libtcod.console_set_default_background(0, libtcod.black)
	# if in an encounter, add the label to the message queue, and re-render the screen
	if battle is not None:
		Message(original_text, color=libtcod.light_grey)
		RenderEncounter()
# wait for a specified amount of miliseconds, refreshing the screen in the meantime
def Wait(wait_time):
	"""Pause for wait_time milliseconds, draining the event queue first."""
	# added this to avoid the spinning wheel of death in Windows
	libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
	libtcod.sys_sleep_milli(wait_time)
# returns true if number is odd
def IsOdd(num):
	"""Return True if num is odd, False otherwise.

	The previous `num & 1 and True or False` used the error-prone
	and-or ternary idiom; bool() of the low bit is equivalent and clear.
	"""
	return bool(num & 1)
# returns center of given hex
# hex 0, 0 always center of map console
def Hex2Screen(hx, hy):
	"""Return the screen (x, y) of the centre of hex (hx, hy).

	Hex 0,0 is always the centre of the map console.
	"""
	centre_x = int((MAP_CON_WIDTH/2) + (hx*9))
	centre_y = int((MAP_CON_HEIGHT/2) + (hy*6) + (hx*3))
	return centre_x, centre_y
# returns hex at given coordinate
# can return hex 0,0
def Screen2Hex(x, y):
	"""Return the battle map hex whose centre lies within distance 3 of
	screen position (x, y), or None if no hex is close enough.

	Can return hex 0,0.
	"""
	return next((map_hex for map_hex in battle.maphexes
		if GetDistance(x, y, map_hex.x, map_hex.y) <= 3), None)
# draws a single ascii hex
def DrawHex(console, x, y):
	"""Draw a single ASCII hex centred on (x, y) of the given console."""
	# NOTE(review): the interior spacing of these row strings looks
	# collapsed (extraction artifact?) — verify against the original file
	libtcod.console_print_ex(console, x-3, y-3, libtcod.BKGND_SET, libtcod.LEFT, '|-----|')
	libtcod.console_print_ex(console, x-4, y-2, libtcod.BKGND_SET, libtcod.LEFT, '/ \\')
	libtcod.console_print_ex(console, x-5, y-1, libtcod.BKGND_SET, libtcod.LEFT, '/ \\')
	libtcod.console_print_ex(console, x-6, y, libtcod.BKGND_SET, libtcod.LEFT, '| |')
	libtcod.console_print_ex(console, x-5, y+1, libtcod.BKGND_SET, libtcod.LEFT, '\\ /')
	libtcod.console_print_ex(console, x-4, y+2, libtcod.BKGND_SET, libtcod.LEFT, '\\ /')
	libtcod.console_print_ex(console, x-3, y+3, libtcod.BKGND_SET, libtcod.LEFT, '|-----|')
# returns true if two given hexes are adjacent
def IsAdjacent(hex1, hex2):
	"""Return True if hex1 and hex2 are neighbouring hexes.

	A hex is never considered adjacent to itself.
	"""
	if hex1 == hex2:
		return False
	# axial-coordinate offsets of the six neighbouring hexes
	neighbour_offsets = [(1,0), (1,-1), (0,-1), (-1,0), (-1,1), (0,1)]
	return any(hex1.hx + dx == hex2.hx and hex1.hy + dy == hex2.hy
		for (dx, dy) in neighbour_offsets)
# returns the rounded distance between two points
def GetDistance(x1, y1, x2, y2):
	"""Return the straight-line distance between two points, truncated
	to an integer."""
	dx = x1 - x2
	dy = y1 - y2
	return int(sqrt(dx*dx + dy*dy))
# Bresenham's Line Algorithm
# returns a series of x, y points along a line
# based on an implementation on the roguebasin wiki
def GetLine(x1, y1, x2, y2):
	"""Bresenham's line algorithm: return the list of (x, y) points from
	(x1, y1) to (x2, y2) inclusive, ordered from start to end.

	Based on an implementation from the roguebasin wiki.
	"""
	# work in a coordinate system where the line is shallow
	steep = abs(y2-y1) > abs(x2-x1)
	if steep:
		x1, y1 = y1, x1
		x2, y2 = y2, x2
	# always step left to right; remember whether to reverse at the end
	reversed_order = x1 > x2
	if reversed_order:
		x1, x2 = x2, x1
		y1, y2 = y2, y1
	delta_x = x2 - x1
	delta_y = abs(y2-y1)
	error = int(delta_x / 2)
	ystep = 1 if y1 < y2 else -1
	y = y1
	points = []
	for x in range(x1, x2 + 1):
		# un-swap coordinates when the line was steep
		points.append((y, x) if steep else (x, y))
		error -= delta_y
		if error < 0:
			y += ystep
			error += delta_x
	# restore original start-to-end ordering
	if reversed_order:
		points.reverse()
	return points
# get this difference in sectors between two facings / directions
def GetSectorDistance(new_f, old_f):
	"""Return the number of 60-degree sectors between two facings /
	directions (0-5), accounting for wrap-around."""
	raw_diff = abs(new_f - old_f)
	# raw differences of 4 and 5 wrap around to 2 and 1 respectively
	return {4: 2, 5: 1}.get(raw_diff, raw_diff)
# return just the total result of a percentile 2D10 roll
def Roll1D100():
	"""Return a uniform random integer 1-100 (percentile roll)."""
	return libtcod.random_get_int(0, 1, 100)
# return the result of a 1d10 roll
def Roll1D10():
	"""Return the result of a 1D10 roll (1-10)."""
	return libtcod.random_get_int(0, 1, 10)
# return the result of a 1D6 roll
def Roll1D6():
	"""Return the result of a 1D6 roll (1-6)."""
	return libtcod.random_get_int(0, 1, 6)
# return the result of a 2D6 roll
def Roll2D6():
	"""Roll two six-sided dice; return (die1, die2, total)."""
	first = libtcod.random_get_int(0, 1, 6)
	second = libtcod.random_get_int(0, 1, 6)
	return first, second, first + second
# returns true if all enemy units in an encounter are dead
def AllEnemiesDead():
	"""Return True if no enemy unit in the current encounter is still
	alive."""
	return not any(unit.alive for unit in battle.enemy_units)
# add a new message to the encounter or campaign message queue and delete oldest one
# if required
def Message(new_msg, color=libtcod.white):
	"""Add a message to the encounter message queue and redraw.

	The message is word-wrapped to the message console width; when the
	queue is full, the oldest line is dropped. Does nothing outside of
	an encounter.
	"""
	if battle is None:
		return
	for wrapped_line in wrap(new_msg, MSG_CON_WIDTH-4, subsequent_indent = ' '):
		# drop the oldest line when the buffer is full
		if len(battle.messages) == MSG_CON_HEIGHT:
			del battle.messages[0]
		# each entry is a (text, color) tuple
		battle.messages.append((wrapped_line, color))
	# update the message console and re-render the encounter screen
	UpdateMsgCon()
	RenderEncounter()
# roll on the activation table and spawn enemy units on the battle map
def ActivateEnemies():
	"""Roll on the activation table and spawn the encounter's initial
	enemy units on the battle map."""
	# counterattacks use the day's resistance level; otherwise use the
	# resistance of the player's current map node
	if campaign.scen_type == 'Counterattack' or battle.counterattack:
		res_level = campaign.scen_res
	else:
		res_level = campaign.day_map.player_node.resistance
	# initial number of enemy units by resistance level
	unit_count = {'Light': 2, 'Medium': 3}.get(res_level, 4)
	for _ in range(unit_count):
		SpawnEnemy()
		UpdateMapOverlay()
		RenderEncounter()
		# brief pause so spawns appear one at a time
		Wait(300)
# spawn an enemy unit, can specify a unit class and spawn hex
def SpawnEnemy(unit_class=None, map_hex=None):
	"""Spawn one enemy unit in the current encounter and return it.

	unit_class: optional class name ('TANK', 'SPG', 'AT_GUN', 'MG', 'LW',
		'APC', 'AC', 'TRUCK'); if None, rolled on the mission activation table
	map_hex: optional spawn hex; if None, sector and range are rolled
	Returns None (after printing an error) if no unit class or unit type
	could be generated.
	"""
	# check to see if a date modifier applies, i.e. the campaign has
	# reached the modifier's start date
	def CheckModifer(mod):
		# FIX: compare the full date as one tuple; the original compared
		# year, month and day independently, so a modifier starting e.g.
		# Nov 1944 would never apply on 15 Jan 1945 (11 > 1)
		return (mod['year'], mod['month'], mod['date']) <= tuple(campaign.current_date)
	new_unit = EnemyUnit()
	if unit_class is None:
		# roll for unit class based on mission type
		# counterattack battle overrides day mission
		if battle.counterattack:
			column = 2
		else:
			if campaign.scen_type == 'Advance':
				column = 0
			elif campaign.scen_type == 'Battle':
				column = 1
			else:
				column = 2
		# activation list is a list of tuples: unit class followed by spawn chance
		# make a copy so we can apply date modifiers if any
		activation_list = list(campaign.mission_activations[column])
		# apply date modifiers if any
		for mod in campaign.activation_modifiers:
			if not CheckModifer(mod):
				continue
			# FIX: the original did "v += mod['mod']" on the loop variable,
			# which never changed the list entry; replace the tuple instead
			for i, (k, v) in enumerate(activation_list):
				if k == mod['class_name']:
					activation_list[i] = (k, max(v + mod['mod'], 0))
					break
		# get sum of activation chances, might be modified beyond total of 100
		total = sum(v for (k, v) in activation_list)
		random.shuffle(activation_list)
		unit_class = ''
		roll = libtcod.random_get_int(0, 0, total)
		for (k, v) in activation_list:
			if v == 0: continue	# skip if no chance to spawn
			if roll <= v:
				unit_class = k
				break
			roll -= v
		if unit_class == '':
			print ('ERROR: Could not generate a random unit class')
			return
	# determine unit type
	unit_type = ''
	# if unit is TANK, SPG, or AT Gun, check to see if there's already a unit
	# type set for this encounter
	if unit_class == 'TANK' and battle.tank_type is not None:
		unit_type = battle.tank_type
	elif unit_class == 'SPG' and battle.spg_type is not None:
		unit_type = battle.spg_type
	elif unit_class == 'AT_GUN' and battle.at_gun_type is not None:
		unit_type = battle.at_gun_type
	# MG and LW only have one unit type each
	if unit_class == 'MG':
		unit_type = 'MG Team'
	elif unit_class == 'LW':
		unit_type = 'Light Weapons Infantry'
	# if unit type not set, generate it now
	if unit_type == '':
		result = libtcod.random_get_int(0, 1, 1000)
		for class_list in campaign.class_activations:
			if class_list[0] == unit_class:
				# copy list of possible unit types (skipping first element)
				type_list = class_list[1:]
				# shuffle list and run through, finding result
				random.shuffle(type_list)
				for (k, v) in type_list:
					if v == 0: continue	# skip if no chance to spawn
					if result <= v:
						unit_type = k
						break
					result -= v
				break
		if unit_type == '':
			print ('ERROR: Could not generate a random unit type for class: ' + unit_class)
			return
	# apply morale modifier for certain unit classes / types
	if unit_type in ['PzKw V G', 'PzKw VI E', 'PzKw VI B']:
		if new_unit.morale < 10: new_unit.morale += 1
	# set AT Gun main gun info
	if unit_class == 'AT_GUN':
		# set up unit gun type
		new_unit.stats = {}
		new_unit.stats['main_gun'] = unit_type
	# set encounter vehicle type if required
	if unit_class == 'TANK' and battle.tank_type is None:
		battle.tank_type = unit_type
	elif unit_class == 'SPG' and battle.spg_type is None:
		battle.spg_type = unit_type
	elif unit_class == 'AT_GUN' and battle.at_gun_type is None:
		battle.at_gun_type = unit_type
	# set up the stats of the new unit
	new_unit.unit_type = unit_type
	new_unit.unit_class = unit_class
	# set vehicle stats for vehicles
	if new_unit.unit_class in ['TANK', 'SPG', 'APC', 'AC', 'TRUCK']:
		SetVehicleStats(new_unit)
	# if unit is LW, check to see if armed with a panzerfaust
	if new_unit.unit_class == 'LW':
		roll = Roll1D6()
		if campaign.current_date[0] == 1945:
			roll -= 1
		if roll <= 3:
			new_unit.pf = True
	# if unit is an APC, see if it is carrying infantry
	elif new_unit.unit_class == 'APC':
		if Roll1D6() <= 4:
			new_unit.full_apc = True
	# if unit is an AC, set its spot flag
	elif new_unit.unit_class == 'AC':
		new_unit.spotting_player = False
	# determine spawn hex if not specified
	if map_hex is None:
		# roll for spawn sector
		d1, d2, roll = Roll2D6()
		if campaign.scen_type == 'Counterattack':
			roll += 1
		if roll <= 6:
			sector = 4
		elif roll <= 9:
			sector = random.choice([3,5])
		elif roll <= 11:
			sector = random.choice([2,0])
		else:
			sector = 1
		# now that we have the sector, determine the spawn range
		result = Roll1D10()
		# apply area type drm
		if campaign.day_map.player_node.node_type == 'C':
			result -= 3
		elif campaign.day_map.player_node.node_type == 'D':
			result -= 2
		elif campaign.day_map.player_node.node_type == 'F':
			result -= 5
		# apply firefly drm
		if tank.stats['vehicle_type'] == 'Sherman VC':
			result += 3
		if unit_class == 'LW':
			if result <= 6:
				rng = 0	# close
			else:
				rng = 1	# medium
		elif unit_class == 'MG':
			if result <= 3:
				rng = 0	# close
			elif result <= 8:
				rng = 1	# medium
			else:
				rng = 2	# long
		# FIX: was "elif unit_class == 'AT':", which never matched since
		# the class name used everywhere else is 'AT_GUN'; AT guns were
		# silently falling into the generic else branch below
		elif unit_class == 'AT_GUN':
			if result <= 2:
				rng = 0	# close
			elif result <= 7:
				rng = 1	# medium
			else:
				rng = 2	# long
		elif unit_class == 'SPG':
			if result <= 2:
				rng = 0	# close
			elif result <= 6:
				rng = 1	# medium
			else:
				rng = 2	# long
		else:
			if result <= 3:
				rng = 0	# close
			elif result <= 7:
				rng = 1	# medium
			else:
				rng = 2	# long
		# we have the sector and range, now determine the hex location
		# build list of possible spawn hexes
		spawn_hexes = []
		for candidate_hex in battle.maphexes:
			if rng == candidate_hex.rng and sector == candidate_hex.sector:
				spawn_hexes.append(candidate_hex)
		# NOTE(review): assumes every sector/range band contains at least
		# one map hex; random.choice would raise on an empty list
		new_unit.map_hex = random.choice(spawn_hexes)
	else:
		new_unit.map_hex = map_hex
	# determine draw location within hex
	(new_unit.x, new_unit.y) = new_unit.GetCharLocation()
	# determine unit facing
	new_unit.SetFacing()
	# determine initial unit terrain and movement status
	new_unit.SetTerrain()
	battle.enemy_units.append(new_unit)
	# report message: clock direction from sector, then range band
	text = new_unit.GetDesc(new_spawn=True) + ' reported at '
	if new_unit.map_hex.sector == 0:
		text += "four o'clock"
	elif new_unit.map_hex.sector == 1:
		text += "six o'clock"
	elif new_unit.map_hex.sector == 2:
		text += "eight o'clock"
	elif new_unit.map_hex.sector == 3:
		text += "ten o'clock"
	elif new_unit.map_hex.sector == 4:
		text += "twelve o'clock"
	elif new_unit.map_hex.sector == 5:
		text += "two o'clock"
	text += ', '
	if new_unit.map_hex.rng == 0:
		text += 'close range!'
	elif new_unit.map_hex.rng == 1:
		text += 'medium range.'
	else:
		text += 'long range.'
	# play sound effect
	PlaySound('radio')
	# show information as a flash label on the map
	ShowLabel(new_unit.x+MAP_CON_X, new_unit.y+MAP_CON_Y, text)
	return new_unit
# create a new crewmember for the player tank
def SpawnCrewMember(name, position, rank_level, replacement=False, old_member=None):
	"""Create a new Crewman for the player tank and return him.

	name: crewman name, or None to generate a random one
	position: crew position string ('Commander', 'Gunner', 'Loader',
		'Driver', 'Asst. Driver')
	rank_level: starting rank level
	replacement: if True, skip wiring into the tank.crew circular list
	old_member: NOTE(review): unused in this function — presumably kept
		for callers' bookkeeping; confirm before removing
	Returns None if the tank model has no assistant driver position.
	"""
	# if tank model does not have an asst driver, skip
	if position == 'Asst. Driver' and 'no_asst_driver' in tank.stats:
		return None
	new_crew = Crewman()
	if name is None:
		new_crew.GenerateName()
	else:
		new_crew.name = name
	new_crew.position = position
	new_crew.rank_level = rank_level
	# choose a random hometown
	if campaign.player_nation == 'USA':
		new_crew.hometown = random.choice(USA_HOMETOWNS)
	elif campaign.player_nation == 'CAN':
		new_crew.hometown = random.choice(CAN_HOMETOWNS)
	# removed transcoding; may be able to add a better solution in the future
	#new_crew.hometown = new_crew.hometown.decode('utf8').encode('cp850')
	# set default order and initial hatch state
	if position == 'Commander':
		new_crew.default_order = 'None'
		new_crew.hatch = 'Open'
	elif position == 'Gunner':
		new_crew.default_order = 'None'
		new_crew.hatch = 'None'
	elif position == 'Loader':
		new_crew.default_order = 'Reload'
		# set hatch based on tank
		if tank.stats['loader_hatch'] != 'None':
			new_crew.hatch = 'Open'
		else:
			new_crew.hatch = 'None'
	elif position == 'Driver':
		new_crew.default_order = 'Stop'
		new_crew.hatch = 'Open'
	elif position == 'Asst. Driver':
		new_crew.default_order = 'None'
		new_crew.hatch = 'Open'
	# set current order to default
	new_crew.order = new_crew.default_order
	# set initial spot ability; this will change later on
	new_crew.SetSpotAbility()
	# set up next and previous pointers unless this is a replacement crew member
	if not replacement:
		# if there's already at least one crew in tank list, set prev and next pointers
		if len(tank.crew) > 0:
			tank.crew[-1].next = new_crew
			new_crew.prev = tank.crew[-1]
			new_crew.next = tank.crew[0]
			tank.crew[0].prev = new_crew
		# otherwise, just set own next pointer to self
		else:
			new_crew.next = new_crew
			new_crew.prev = new_crew
		tank.crew.append(new_crew)
	# record to journal
	WriteJournal(new_crew.position + ' assigned as: ' + new_crew.GetRank(short=True) + ' ' + new_crew.name)
	return new_crew
# set up stats for a unit based on its vehicle type
def SetVehicleStats(obj):
	"""Populate obj.stats from the VEHICLE_TYPES table entry matching
	obj.unit_type.

	Each table entry is a list whose first item is the type name and the
	remaining items are (key, value) pairs; an empty-string value marks a
	boolean flag stat. 'HVSS' is special-cased: it is only granted
	probabilistically from Nov. 1944 onward. If obj is the global player
	'tank', its ammo-type dictionaries are also initialized according to
	the main gun. Prints an error and returns early for unknown types.
	"""
	# get the right vehicle type entry
	for vehicle_type in VEHICLE_TYPES:
		if vehicle_type[0] == obj.unit_type:
			break
	else:
		print ('ERROR: Vehicle type not found: ' + obj.unit_type)
		return
	obj.stats = {}
	# go through keys and values, skipping first item in list
	for (k, value) in vehicle_type[1:]:
		if k == 'HVSS':
			# random chance of actually having HVSS if on or after Nov. '44
			# if we're started a new campaign, date has not been set, so
			# assume earliest date in calendar
			if campaign.current_date == [0,0,0]:
				date = campaign.days[0]
				year = int(date['year'])
				month = int(date['month'])
			else:
				year = campaign.current_date[0]
				month = campaign.current_date[1]
			if year >= 1945 or (year == 1944 and month >= 11):
				# value holds the chance in 10 of having HVSS
				if libtcod.random_get_int(0, 1, 10) <= value:
					obj.stats['HVSS'] = True
		elif value == '':
			# empty value marks a simple boolean flag
			obj.stats[k] = True
		else:
			obj.stats[k] = value
	# if object is player tank, set up the ammo types as well
	if obj == tank:
		obj.general_ammo['HE'] = 0
		obj.general_ammo['AP'] = 0
		obj.rr_ammo['HE'] = 0
		obj.rr_ammo['AP'] = 0
		# 75mm gun also carries smoke rounds
		if obj.stats['main_gun'] == '75':
			obj.general_ammo['WP'] = 0
			obj.general_ammo['HCBI'] = 0
			obj.rr_ammo['WP'] = 0
			obj.rr_ammo['HCBI'] = 0
		# 76mm guns get special armour-piercing rounds
		elif obj.stats['main_gun'] == '76L':
			obj.general_ammo['HVAP'] = 0
			obj.rr_ammo['HVAP'] = 0
		elif obj.stats['main_gun'] == '76LL':
			obj.general_ammo['APDS'] = 0
			obj.rr_ammo['APDS'] = 0
# determine hit location on vehicles
def GetHitLocation(hull_down):
	"""Roll 1D10 and return the location struck on a vehicle target.

	A hull down target can only be hit in the turret (1-5); any higher
	roll is a miss. Otherwise 1-4 hits the turret, 5-9 the hull, and 10
	the track.
	"""
	roll = Roll1D10()
	if hull_down:
		return 'Turret' if roll <= 5 else 'Miss'
	if roll <= 4:
		return 'Turret'
	if roll <= 9:
		return 'Hull'
	return 'Track'
# calculate base to-hit number, drm, and final roll required for an ordinance to-hit attack
def CalcTH(attacker, target, area_fire, ammo_type):
	"""Calculate the 2D6 to-hit requirement for an ordnance (gun) attack.

	attacker, target: unit objects; either may be the global player 'tank'.
	area_fire: True if this is an area fire attack rather than direct fire.
	ammo_type: ammo being fired, e.g. 'HE', 'AP', 'WP', 'HCBI'.

	Returns (base_th, roll_req, drm): the unmodified to-hit score, the
	final score required after modifiers (roll <= roll_req hits), and the
	list of (description, modifier) dice roll modifiers applied.

	Side effect: increments the relevant acquired-target counter
	(target.acquired, or attacker.acquired_player) for follow-up shots.
	"""
	# determine range of attack
	# different calculation depending on whether player is attacker or target
	if attacker == tank:
		rng = target.map_hex.rng
	else:
		rng = attacker.map_hex.rng
	##### Determine base to-hit score required #####
	# direct fire
	if not area_fire:
		# infantry targets
		if target.unit_class in ['AT_GUN', 'MG', 'LW']:
			if rng == 0:
				base_th = 8
			elif rng == 1:
				base_th = 5
			else:
				base_th = 2
		# vehicle targets
		else:
			if rng == 0:
				base_th = 10
			elif rng == 1:
				base_th = 7
			else:
				base_th = 5
	# area fire
	else:
		if rng == 0:
			base_th = 7
		elif rng == 1:
			base_th = 8
		else:
			base_th = 6
	# to-hit score modifiers
	# long-range guns
	if 'LL' in attacker.stats['main_gun']:
		if rng == 1:
			base_th += 1
		elif rng == 2:
			base_th += 2
	elif 'L' in attacker.stats['main_gun']:
		if rng > 0:
			base_th += 1
	else:
		# short-barreled guns are worse beyond close range
		if rng > 0:
			base_th -= 1
	# firing smoke at close range
	if ammo_type in ['WP', 'HCBI'] and rng == 0:
		base_th += 2
	# smaller caliber guns
	if attacker.stats['main_gun'] == '20L':
		if rng == 1:
			base_th -= 1
		elif rng == 2:
			base_th -= 3
	##### Dice Roll Modifiers #####
	drm = []
	# if turret has been rotated; doesn't apply to RoF shots
	# only applies to player tank
	if attacker == tank:
		if not tank.has_rof:
			diff = GetSectorDistance(tank.turret_facing, tank.old_t_facing)
			if diff != 0:
				# apply penalty, +1 per sector
				drm.append(('Turret has been rotated', diff))
		# commander buttoned up
		crew_member = GetCrewByPosition('Commander')
		if crew_member.hatch == 'Shut' and 'vision_cupola' not in tank.stats:
			drm.append(('Commander buttoned up', 1))
		# tank moving, firing with gyrostabilizer
		if tank.moving:
			if not GetCrewByPosition('Gunner').SkillCheck('Gyrostabilizer'):
				drm.append(('Firing on the move - Gyrostabilizer skill failed', 4))
			else:
				drm.append(('Firing on the move', 2))
		# acquired target
		if target.acquired == 1:
			drm.append(('Target acquired 1', -1))
		elif target.acquired == 2:
			drm.append(('Target acquired 2', -2))
		# increase acquired target number for next shot
		if target.acquired < 2:
			target.acquired += 1
	# some different modifiers used for enemy units
	else:
		# take advantage of AC spotting player
		spotter = False
		if attacker.acquired_player == 0:
			for unit in battle.enemy_units:
				if unit == attacker: continue
				if unit.unit_class == 'AC':
					if unit.spotting_player:
						spotter = True
						break
		if spotter:
			drm.append(('Target acquired 1 via Spotter', -1))
		else:
			if attacker.acquired_player == 1:
				drm.append(('Target acquired 1', -1))
			elif attacker.acquired_player == 2:
				drm.append(('Target acquired 2', -2))
		# increase acquired target level for next shot
		if attacker.acquired_player < 2:
			attacker.acquired_player += 1
		# AT Guns rotating to fire
		if attacker.unit_class == 'AT_GUN':
			if attacker.facing == 'Side':
				if attacker.unit_type == '88LL':
					drm.append(('Rotated Facing - 360' + chr(248) + ' mount', 1))
				else:
					drm.append(('Rotated Facing', 2))
			elif attacker.facing == 'Rear':
				if attacker.unit_type == '88LL':
					drm.append(('Rotated Facing - 360' + chr(248) + ' mount', 2))
				else:
					drm.append(('Rotated Facing', 3))
	# vehicle target is moving
	# NOTE(review): this consults the player gunner's 'Target Tracking' skill
	# regardless of who is attacking - confirm intended for enemy shots
	if target.unit_class not in ['LW', 'MG', 'AT_GUN']:
		if target.moving:
			if not GetCrewByPosition('Gunner').SkillCheck('Target Tracking'):
				drm.append(('Vehicle target is moving', 2))
	# vehicle target size
	if target.unit_class not in ['LW', 'MG', 'AT_GUN']:
		target_size = target.stats['target_size']
		if target_size == 'Small':
			drm.append(('Small vehicle target', 1))
		elif target_size == 'Large':
			drm.append(('Large vehicle target', -1))
		elif target_size == 'Very Large':
			drm.append(('Very Large vehicle target', -2))
	# target terrain, direct fire only
	if attacker == tank and not area_fire:
		# all AT guns are assumed to be emplaced, no moving around
		if target.unit_class == 'AT_GUN' and target.terrain != 'Fortification':
			drm.append(('Emplaced gun target', 2))
		else:
			if target.terrain == 'Woods':
				drm.append(('Target in Woods', 1))
			elif target.terrain == 'Building':
				drm.append(('Target in Building', 2))
			elif target.terrain == 'Fortification':
				drm.append(('Target in Fortification', 3))
	# LoS hinderance (smoke)
	if attacker == tank:
		smoke_factors = GetSmokeFactors(0, 0, target.map_hex.hx, target.map_hex.hy)
	else:
		smoke_factors = GetSmokeFactors(0, 0, attacker.map_hex.hx, attacker.map_hex.hy)
	if smoke_factors > 0:
		drm.append(('Smoke Factors', smoke_factors*2))
	# firing through fog or falling snow
	if not area_fire and (campaign.weather.fog or campaign.weather.precip == 'Snow'):
		drm.append(('Fog or Falling Snow', 2))
	# commander directing fire
	if attacker == tank:
		crew_member = GetCrewByPosition('Commander')
		if crew_member.order == 'Direct Main Gun Fire':
			if crew_member.hatch == 'Open':
				if crew_member.SkillCheck('Fire Direction'):
					mod = -3
				else:
					mod = -2
				drm.append(('Commander Directing Fire', mod))
			elif 'vision_cupola' in tank.stats:
				# buttoned up behind a vision cupola: smaller bonus
				if crew_member.SkillCheck('Fire Direction'):
					mod = -2
				else:
					mod = -1
				drm.append(('Commander Directing Fire', mod))
	# sum the modifiers and determine the final roll required
	total_drm = 0
	for (text, mod) in drm:
		total_drm += mod
	roll_req = base_th - total_drm
	return (base_th, roll_req, drm)
# return an armour value modified to be x steps higher/lower
def GetArmourStep(base_armour, modifier):
	"""Shift base_armour by modifier steps along the armour value ladder.

	If the shifted position would fall off either end of the ladder, the
	original value is returned unchanged.
	"""
	ladder = [0,1,2,3,4,6,8,11,14,18,26]
	shifted = ladder.index(base_armour) + modifier
	if 0 <= shifted <= 10:
		return ladder[shifted]
	return base_armour
# calculate base to-kill number, drm, and final roll required for a hit on a vehicle
def CalcTK(attacker, target, target_facing, ammo_type, critical, area_fire, hit_location):
	"""Calculate the 2D6 to-kill requirement for a hit on a vehicle.

	attacker, target: unit objects; either may be the global player 'tank'.
	target_facing: facing of the target relative to the shot
		('Front', 'Side', or 'Rear').
	ammo_type: 'PF', 'HE', 'HVAP', 'APDS', or 'AP'.
	critical: True for a critical hit (doubles some base to-kill numbers).
	area_fire: True for area fire (affects the HE woods modifier only).
	hit_location: 'Hull' or 'Turret' (see GetHitLocation()).

	Returns (base_tk, roll_req, drm): base to-kill number, final number
	required, and the (description, modifier) list applied. Returns
	(2, 2, []) if the gun type is unknown.
	"""
	# determine range of attack
	# different calculation depending on whether player is attacker or target
	if attacker == tank:
		rng = target.map_hex.rng
	else:
		rng = attacker.map_hex.rng
	if rng == 0:
		rng_text = 'Close'
	elif rng == 1:
		rng_text = 'Medium'
	else:
		rng_text = 'Long'
	rng_text += ' Range'
	# get armour modifier, or set unarmoured target location flag
	unarmoured = False
	if hit_location == 'Hull':
		if target_facing in ['Rear', 'Side']:
			if 'hull_side_armour' in target.stats:
				armour_text = 'Hull Side'
				armour_mod = target.stats['hull_side_armour']
			else:
				unarmoured = True
		else:
			if 'hull_front_armour' in target.stats:
				armour_text = 'Hull Front'
				armour_mod = target.stats['hull_front_armour']
			else:
				unarmoured = True
	# turret hit
	else:
		if target_facing in ['Rear', 'Side']:
			if 'turret_side_armour' in target.stats:
				armour_text = 'Turret Side'
				armour_mod = target.stats['turret_side_armour']
			else:
				unarmoured = True
		else:
			if 'turret_front_armour' in target.stats:
				armour_text = 'Turret Front'
				armour_mod = target.stats['turret_front_armour']
			else:
				unarmoured = True
	# rear armour is always one step lower
	if not unarmoured and target_facing == 'Rear':
		if hit_location == 'Hull':
			armour_text = 'Hull Rear'
		else:
			armour_text = 'Turret Rear'
		armour_mod = GetArmourStep(armour_mod, -1)
	if not unarmoured:
		armour_text += ' Armour'
	drm = []
	##### Panzerfaust #####
	# NOTE(review): armour_text/armour_mod would be unbound for an
	# unarmoured location - confirm PF hits are only resolved against
	# armoured vehicles
	if ammo_type == 'PF':
		base_tk = 31
		drm.append((armour_text, -armour_mod))
	##### HE ammo #####
	elif ammo_type == 'HE':
		if attacker.stats['main_gun'] == '20L':
			if unarmoured:
				base_tk = 6
			else:
				base_tk = 3
				drm.append((armour_text, -armour_mod))
		elif attacker.stats['main_gun'] in ['88L', '88LL']:
			if unarmoured:
				base_tk = 14
			else:
				base_tk = 8
				drm.append((armour_text, -armour_mod))
		else:
			if unarmoured:
				base_tk = 12
			else:
				base_tk = 7
				drm.append((armour_text, -armour_mod))
		if unarmoured and critical:
			base_tk = base_tk * 2
		if not critical and area_fire:
			if target.terrain == 'Woods':
				drm.append(('Target in Woods', 1))
	##### HVAP / APDS ammo #####
	# unarmoured targets use AP procedure instead
	elif ammo_type in ['HVAP', 'APDS'] and not unarmoured:
		# NOTE(review): base_tk/range_mods are only set for 76L/76LL guns -
		# confirm no other gun can fire HVAP/APDS
		if attacker.stats['main_gun'] == '76L':
			base_tk = 20
			if campaign.player_nation == 'USA':
				base_tk += 2
			range_mods = [2,-2,-5]
		elif attacker.stats['main_gun'] == '76LL':
			base_tk = 25
			range_mods = [0,0,-2]
		# apply range modifier
		drm.append((rng_text, range_mods[rng]))
		# apply armour modifier
		drm.append((armour_text, -armour_mod))
	##### AP ammo #####
	elif ammo_type == 'AP':
		# hit location is unarmoured
		if unarmoured:
			if attacker.stats['main_gun'] == '20L':
				base_tk = 7
			elif attacker.stats['main_gun'] in ['88L', '88LL']:
				base_tk = 10
			else:
				base_tk = 9
			if critical:
				base_tk = base_tk * 2
		else:
			# start with gun type to get base TK number, also set range modifiers
			if attacker.stats['main_gun'] == '20L':
				base_tk = 6
			elif attacker.stats['main_gun'] == '50L':
				base_tk = 13
			elif attacker.stats['main_gun'] == '75':
				base_tk = 14
			elif attacker.stats['main_gun'] in ['75L', '76L']:
				base_tk = 17
			elif attacker.stats['main_gun'] == '88L':
				base_tk = 20
			elif attacker.stats['main_gun'] in ['75LL', '76LL']:
				base_tk = 23
			elif attacker.stats['main_gun'] == '88LL':
				base_tk = 27
			else:
				print ('ERROR: Gun Type not found!')
				return (2, 2, [])
			# double if critical
			if critical:
				base_tk = base_tk * 2
			# apply range modifier
			if attacker.stats['main_gun'] == '20L':
				range_mods = [1,-1,-3]
			else:
				range_mods = [0,-1,-2]
			# apply range modifier
			drm.append((rng_text, range_mods[rng]))
			# apply armour modifier
			drm.append((armour_text, -armour_mod))
	# calculate roll required
	total_drm = 0
	for (text, mod) in drm:
		total_drm += mod
	roll_req = base_tk + total_drm
	return (base_tk, roll_req, drm)
# calculate base to-kill number, drm, and final tk number for a player attack on the IFT
def CalcIFT(attacker, target, attack_weapon, critical, area_fire, fp=0, rng=0):
	"""Calculate the to-kill requirement for an attack resolved on the
	Infantry Fire Table (MG fire, or gun fire against soft targets).

	attacker, target: unit objects; attacker may be the player 'tank'.
	attack_weapon: 'MG' or a main gun type string ('20L', '88L', ...).
	critical: True for a critical hit.
	area_fire: True for area fire; terrain then protects the target.
	fp: firepower of an MG attack; expected to be 1, 2 or 4 -
		NOTE(review): other values (including fp halved to 0 for a bow MG
		at medium range) leave base_tk unbound - confirm callers.
	rng: for MG attacks identifies the weapon: 8 = bow MG, 12 = co-ax MG.

	Returns (base_tk, roll_req, drm) where roll <= roll_req is a kill.
	"""
	# determine base roll to get a kill result
	if attack_weapon == 'MG':
		# bow MG - penalty for medium range
		if rng == 8 and target.map_hex.rng == 1:
			fp = int(fp/2)
		# infantry targets
		if target.unit_class in ['AT_GUN', 'MG', 'LW']:
			if fp == 1:
				base_tk = 4
			elif fp == 2:
				base_tk = 5
			elif fp == 4:
				base_tk = 6
		# unarmoured truck
		else:
			if fp == 1:
				base_tk = 3
			elif fp == 2:
				base_tk = 4
			elif fp == 4:
				base_tk = 5
	elif attack_weapon == '20L':
		if critical:
			base_tk = 5
		else:
			base_tk = 4
	elif attack_weapon == '88L':
		if critical:
			base_tk = 13
		else:
			base_tk = 9
	else:
		if critical:
			base_tk = 12
		else:
			base_tk = 8
	# calculate DRM
	drm = []
	# if critical, subtract TEM as DRM
	if critical:
		# special: AT Guns automatically destroyed with a critical hit
		# DRM are not displayed so don't bother listing them
		if target.unit_class == 'AT_GUN':
			return (base_tk, 13, drm)
		if target.terrain == 'Woods':
			drm.append(('Target in Woods', -1))
		elif target.terrain == 'Building':
			drm.append(('Target in Building', -2))
		elif target.terrain == 'Fortification':
			drm.append(('Target in Fortification', -3))
	# if non-critical area fire hit, add TEM as positive modifier instead
	elif area_fire:
		# all AT guns are assumed to be emplaced, no moving around
		if target.unit_class == 'AT_GUN' and target.terrain != 'Fortification':
			drm.append(('Emplaced gun target', 2))
		else:
			if target.terrain == 'Woods':
				drm.append(('Target in Woods', 1))
			elif target.terrain == 'Building':
				drm.append(('Target in Building', 2))
			elif target.terrain == 'Fortification':
				drm.append(('Target in Fortification', 3))
	# target moving in open
	if target.terrain == 'Open' and target.moving:
		drm.append(('Target moving in open', -1))
	# HE in mud or deep snow
	if campaign.weather.ground in ['Mud', 'Deep Snow'] and attack_weapon != 'MG':
		drm.append(('HE in Mud or Deep Snow', 1))
	# MG attack modifiers
	if attack_weapon == 'MG':
		if attacker.moving:
			drm.append(('Attacker Moving or Pivoting', 1))
		# coax fired and turret was rotated
		elif rng == 12 and tank.turret_facing != tank.old_t_facing:
			drm.append(('Turret has been rotated', 1))
		# target in fortification
		if target.terrain == 'Fortification':
			drm.append(('Target in fortification', 3))
		# target in woods
		elif target.terrain == 'Woods':
			drm.append(('Target in woods', 1))
		# target in building
		elif target.terrain == 'Building':
			drm.append(('Target in building', 2))
		# attack vs. emplaced gun
		elif target.unit_class == 'AT_GUN':
			drm.append(('Emplaced gun target', 2))
		# commander directing MG fire
		crew_member = GetCrewByPosition('Commander')
		if (rng == 12 and crew_member.order == 'Direct Co-ax MG Fire') or (rng == 8 and crew_member.order == 'Direct Bow MG Fire'):
			if crew_member.SkillCheck('Fire Direction'):
				mod = -2
			else:
				mod = -1
			drm.append(('Commander Directing Fire', mod))
		# skill check for asst driver
		if rng == 8:
			crew_member = GetCrewByPosition('Asst. Driver')
			if crew_member.order == 'Fire Bow MG':
				if crew_member.SkillCheck('Apprentice Gunner'):
					drm.append(('Asst. Driver Skill', -1))
	# LoS hinderance (smoke)
	if attacker == tank:
		smoke_factors = GetSmokeFactors(0, 0, target.map_hex.hx, target.map_hex.hy)
	else:
		smoke_factors = GetSmokeFactors(0, 0, attacker.map_hex.hx, attacker.map_hex.hy)
	if smoke_factors > 0:
		drm.append(('Smoke Factors', smoke_factors*2))
	# calculate roll required
	total_drm = 0
	for (text, mod) in drm:
		total_drm += mod
	roll_req = base_tk - total_drm
	return (base_tk, roll_req, drm)
##########################################################################################
# Encounter Windows and Animations #
##########################################################################################
# display information about a vehicle
def ShowVehicleTypeInfo(unit_type, console, x, y, no_image=False):
	"""Draw an information card for the given vehicle type onto a console.

	unit_type: vehicle type name to look up in VEHICLE_TYPES.
	console: libtcod console to draw on.
	x, y: top-left position of the info display on the console.
	no_image: if True, skip drawing the overhead image of the vehicle.

	Prints an error and returns early for an unknown type.

	Bug fix: the main gun label previously called str.replace() and
	discarded the result (strings are immutable), so the 'L' suffix was
	never stripped and 'mm' was never appended; the formatted string is
	now assigned back to text.
	"""
	def PrintInfo(px, py, text):
		# helper: left-aligned print onto the target console
		libtcod.console_print_ex(console, px, py, libtcod.BKGND_NONE, libtcod.LEFT, text)
	# get the right vehicle type entry
	for vt in VEHICLE_TYPES:
		if vt[0] == unit_type:
			break
	else:
		print ('ERROR: Vehicle type not found: ' + unit_type)
		return
	# build a stats dictionary from the (key, value) pairs
	stats = {}
	for (k, value) in vt[1:]:
		stats[k] = value
	# display the info
	text = stats['vehicle_type']
	if 'sub_type' in stats:
		text += ' (' + stats['sub_type'] + ')'
	if 'nickname' in stats:
		text += ' "' + stats['nickname'] + '"'
	PrintInfo(x, y, text)
	PrintInfo(x, y+1, stats['vehicle_class'])
	libtcod.console_set_default_foreground(console, libtcod.light_grey)
	PrintInfo(x, y+3, 'Main Gun:')
	libtcod.console_set_default_foreground(console, libtcod.white)
	if 'main_gun' in stats:
		text = stats['main_gun']
		if text != 'MG':
			# strip the barrel-length suffix and append the calibre unit
			# (bug fix: result was previously computed but discarded)
			text = text.replace('L', '') + 'mm'
	else:
		text = 'None'
	PrintInfo(x+10, y+3, text)
	libtcod.console_set_default_foreground(console, libtcod.light_grey)
	PrintInfo(x, y+5, 'Armour: Front Side')
	PrintInfo(x, y+6, 'Turret')
	PrintInfo(x, y+7, 'Hull')
	libtcod.console_set_default_foreground(console, libtcod.white)
	# armour values, '-' for locations without armour stats
	if 'turret_front_armour' in stats:
		text = str(stats['turret_front_armour'])
	else:
		text = '-'
	PrintInfo(x+11, y+6, text)
	if 'turret_side_armour' in stats:
		text = str(stats['turret_side_armour'])
	else:
		text = '-'
	PrintInfo(x+17, y+6, text)
	if 'hull_front_armour' in stats:
		text = str(stats['hull_front_armour'])
	else:
		text = '-'
	PrintInfo(x+11, y+7, text)
	if 'hull_side_armour' in stats:
		text = str(stats['hull_side_armour'])
	else:
		text = '-'
	PrintInfo(x+17, y+7, text)
	if 'loader_hatch' in stats:
		libtcod.console_set_default_foreground(console, libtcod.light_grey)
		PrintInfo(x, y+9, 'Loader Hatch:')
		libtcod.console_set_default_foreground(console, libtcod.white)
		PrintInfo(x+14, y+9, stats['loader_hatch'])
	# list special equipment below the hatch line
	ys = 1
	if 'vision_cupola' in stats:
		PrintInfo(x, y+9+ys, 'Vision Cupola')
		ys += 1
	if 'smoke_mortar' in stats:
		PrintInfo(x, y+9+ys, 'Smoke Mortar')
		ys += 1
	if 'wet_stowage' in stats:
		PrintInfo(x, y+9+ys, 'Wet Stowage')
		ys += 1
	if 'HVSS' in stats:
		PrintInfo(x, y+9+ys, 'Possible HVSS')
		ys += 1
	# show vehicle info text if any
	if 'info_text' in stats:
		ys += 2
		lines = wrap(stats['info_text'], 34, subsequent_indent = ' ')
		for line in lines:
			PrintInfo(x, y+9+ys, line)
			ys += 1
	# skip drawing overhead image of tank
	if no_image: return
	# show vehicle overhead image if any
	if 'overhead_view' in stats:
		temp_console = LoadXP(stats['overhead_view'])
		libtcod.console_blit(temp_console, 0, 0, 0, 0, console, x+46, y)
# show info about an enemy unit
def UnitInfo(mx, my):
	"""Display a pop-up window with info on the enemy unit under the
	mouse cursor, if any.

	mx, my: mouse position in screen coordinates.

	Does nothing if the cursor is outside the map window, no living unit
	is at that cell, or the unit is an unidentified tank/SPG/AT gun.
	Blocks until ESC is pressed when a window is shown.
	"""
	# make sure mouse cursor is over map window
	if mx < MAP_CON_X or my > MAP_CON_HEIGHT: return
	# adjust for offset
	mx -= MAP_CON_X
	my -= 2
	# see if there is a unit here
	for unit in battle.enemy_units:
		if not unit.alive: continue
		if unit.x == mx and unit.y == my:
			# if unit needs to be unidentified and isn't, don't display info for it
			if unit.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not unit.identified:
				return
			# darken screen
			libtcod.console_clear(con)
			libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
				0.0, 0.7)
			# generate display
			libtcod.console_clear(menu_con)
			libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
				clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
			libtcod.console_set_alignment(menu_con, libtcod.LEFT)
			libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
			libtcod.console_print_ex(menu_con, MENU_CON_XM, 2,
				libtcod.BKGND_NONE, libtcod.CENTER, 'Unit Info')
			libtcod.console_set_default_foreground(menu_con, libtcod.white)
			libtcod.console_print_ex(menu_con, MENU_CON_XM, 4,
				libtcod.BKGND_NONE, libtcod.CENTER, unit.GetDesc())
			# grab description from UNIT_INFO if not vehicle
			if unit.unit_class in ['LW', 'MG', 'AT_GUN']:
				text = UNIT_INFO[unit.unit_type]
				lines = wrap(text, 56, subsequent_indent = ' ')
				y = 6
				for line in lines:
					libtcod.console_print(menu_con, MENU_CON_XM-28, y, line)
					y += 1
			else:
				# show vehicle info
				ShowVehicleTypeInfo(unit.unit_type, menu_con, 58, 7)
			libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-4,
				libtcod.BKGND_NONE, libtcod.CENTER, 'Press ESC to exit')
			libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
			libtcod.console_flush()
			WaitForEscape()
			return
# show a pop-up window describing an attack or a to kill roll and its results
def DisplayRoll(roll_action, tk_roll=False):
	"""Display an animated pop-up describing a to-hit or to-kill roll.

	roll_action: object carrying the roll data (attacker / target /
		attack_type descriptions, score_req, roll_req, drm list,
		total_drm, d1/d2/roll dice results, result text, auto_ko and nc
		flags, rof_result, hit_location, rng).
	tk_roll: True to present it as a To Kill roll, False for To Hit.

	Blocks until Enter is pressed. Bug fix: the hit_location and
	rof_result checks used 'is not' against '', which compares object
	identity rather than equality; they now use '!='.
	"""
	# display the menu as it is being drawn, pausing for animation effect
	def UpdateMenu(wait_time):
		if not campaign.animations: return
		libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
		libtcod.console_flush()
		Wait(wait_time)
	# darken screen
	libtcod.console_clear(con)
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
		0.0, 0.7)
	libtcod.console_clear(menu_con)
	libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
		clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
	libtcod.console_set_alignment(menu_con, libtcod.CENTER)
	# window title
	libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
	if tk_roll:
		text = 'To Kill Roll'
	else:
		text = 'To Hit Roll'
	libtcod.console_print(menu_con, MENU_CON_XM, 2, text)
	libtcod.console_set_default_foreground(menu_con, libtcod.white)
	# attack/hit description
	if tk_roll:
		text = roll_action.target + ' hit by ' + roll_action.attack_type + ' fired from ' + roll_action.attacker
	else:
		text = roll_action.attacker + ' firing ' + roll_action.attack_type + ' at ' + roll_action.target
	# text might be long, so split up into lines
	lines = wrap(text, 60)
	libtcod.console_print(menu_con, MENU_CON_XM, 4, lines[0])
	if len(lines) > 1:
		libtcod.console_print(menu_con, MENU_CON_XM, 5, lines[1])
	# hit location if any (bug fix: was 'is not', an identity test)
	if roll_action.hit_location != '':
		libtcod.console_print(menu_con, MENU_CON_XM, 6, 'Target hit in ' + roll_action.hit_location)
	# display roll required
	if tk_roll:
		# check auto_ko
		if roll_action.auto_ko:
			text = 'Target automatically destroyed!'
		# check no chance
		elif roll_action.nc:
			text = 'No chance to kill target with this attack'
		else:
			text = 'Base to kill number: ' + str(roll_action.score_req)
	else:
		text = roll_action.rng + ' range, base to hit number: ' + str(roll_action.score_req)
	libtcod.console_print(menu_con, MENU_CON_XM, 7, text)
	# display DRM and required roll to hit if any
	if not roll_action.auto_ko and not roll_action.nc:
		libtcod.console_print_frame(menu_con, 38, 9, 63, 16,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_print(menu_con, MENU_CON_XM, 10, 'Dice Roll Modifiers')
		y = 12
		if len(roll_action.drm) == 0:
			libtcod.console_print(menu_con, MENU_CON_XM, y, 'None')
		else:
			for (text, num) in roll_action.drm:
				libtcod.console_set_alignment(menu_con, libtcod.LEFT)
				libtcod.console_print(menu_con, 40, y, text)
				libtcod.console_set_alignment(menu_con, libtcod.RIGHT)
				text = str(num)
				if num > 0:
					text = '+' + text
				libtcod.console_print(menu_con, 94, y, text)
				UpdateMenu(200)
				y += 1
			# display total modifier
			libtcod.console_set_alignment(menu_con, libtcod.LEFT)
			libtcod.console_print(menu_con, 40, 23, 'Total:')
			libtcod.console_set_alignment(menu_con, libtcod.RIGHT)
			text = str(roll_action.total_drm)
			if roll_action.total_drm > 0:
				text = '+' + text
			libtcod.console_print(menu_con, 94, 23, text)
			UpdateMenu(400)
		libtcod.console_set_alignment(menu_con, libtcod.CENTER)
		if tk_roll:
			libtcod.console_print(menu_con, MENU_CON_XM, 26, 'Required to kill: Less than ' + str(roll_action.roll_req))
			libtcod.console_print(menu_con, MENU_CON_XM, 28, 'To Kill roll (2D6):')
		else:
			libtcod.console_print(menu_con, MENU_CON_XM, 26, 'Required to hit: ' + str(roll_action.roll_req) + ' or less')
			libtcod.console_print(menu_con, MENU_CON_XM, 28, 'To Hit roll (2D6):')
		UpdateMenu(900)
	# dice roll animation and sound
	skip_roll = False
	# skip roll if auto kill or no chance to kill, unless weapon was MG and roll
	# was a malfunction
	if roll_action.auto_ko or roll_action.nc:
		skip_roll = True
		if 'MG' in roll_action.attack_type and roll_action.roll == 12:
			skip_roll = False
	if not skip_roll:
		for n in range(20):
			libtcod.console_print(menu_con, MENU_CON_XM, 29, ' ')
			text = str(libtcod.random_get_int(0, 1, 6)) + '+' + str(libtcod.random_get_int(0, 1, 6))
			libtcod.console_print(menu_con, MENU_CON_XM, 29, text)
			PlaySound('dice_roll')
			UpdateMenu(45)
		# display real to hit roll result
		libtcod.console_print(menu_con, MENU_CON_XM, 29, ' + ')
		text = str(roll_action.d1) + '+' + str(roll_action.d2)
		libtcod.console_print(menu_con, MENU_CON_XM, 29, text)
		libtcod.console_print(menu_con, MENU_CON_XM, 30, 'Total: ' + str(roll_action.roll))
	libtcod.console_print(menu_con, MENU_CON_XM, 32, roll_action.result)
	# display RoF result if any (bug fix: was 'is not', an identity test)
	if roll_action.rof_result != '':
		libtcod.console_print(menu_con, MENU_CON_XM, 34, roll_action.rof_result)
	libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2, 'Press Enter to continue')
	libtcod.console_set_alignment(menu_con, libtcod.LEFT)
	libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
	libtcod.console_flush()
	WaitForEnter()
# work out the fate of a crew of a destroyed sherman that didn't explode
def ResolveCrewFate(hit_location, sector, pf, abandoned=False):
	"""Resolve crew wounds, bail-out attempts, and the brew-up roll after
	the player tank is knocked out (or abandoned).

	hit_location, sector: where the killing hit struck; passed through to
		Crewman.TakeWound() for the initial wound rolls.
	pf: True if the tank was killed by a Panzerfaust (raises burn chance).
	abandoned: True if the crew abandoned the tank; skips the initial
		wound rolls and the brew-up roll.

	Displays an interactive results screen (blocks on Enter twice) and
	sets campaign.exiting if the commander is gone afterwards.
	"""
	# darken screen
	libtcod.console_clear(con)
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
		0.0, 0.7)
	# use the menu console
	libtcod.console_clear(menu_con)
	libtcod.console_set_alignment(menu_con, libtcod.CENTER)
	libtcod.console_set_default_foreground(menu_con, libtcod.white)
	libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
		clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
	libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
	libtcod.console_print(menu_con, MENU_CON_XM, 1, 'Tank Knocked Out')
	libtcod.console_print(menu_con, MENU_CON_XM, 2, 'Crew Wounds and Bail-Out Results')
	libtcod.console_set_default_foreground(menu_con, libtcod.white)
	libtcod.console_set_alignment(menu_con, libtcod.LEFT)
	# column titles
	libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
	libtcod.console_print(menu_con, 26, 11, 'Crewman')
	libtcod.console_print(menu_con, 52, 11, 'Initial Wound')
	libtcod.console_print(menu_con, 74, 11, 'Bail Out')
	libtcod.console_print(menu_con, 92, 11, 'Final Wound')
	libtcod.console_hline(menu_con, 25, 12, 97, flag=libtcod.BKGND_DEFAULT)
	libtcod.console_set_default_foreground(menu_con, libtcod.white)
	# list crew, highlighting every other row
	y = 13
	for crewman in tank.crew:
		text = crewman.GetRank(short=True) + ' ' + crewman.name
		libtcod.console_print(menu_con, 26, y, text)
		if crewman.nickname != '':
			libtcod.console_print(menu_con, 27, y+1, '"' + crewman.nickname + '"')
		# status
		if crewman.NoActions():
			libtcod.console_set_default_foreground(menu_con, libtcod.light_red)
			if not crewman.alive:
				text = 'Dead'
			elif crewman.unconscious:
				text = 'Unconscious'
			else:
				text = 'Stunned'
			libtcod.console_print(menu_con, 27, y+2, text)
		# wounds
		if crewman.v_serious_wound:
			text = 'Very Serious Wound'
		elif crewman.serious_wound:
			text = 'Serious Wound'
		elif crewman.light_wound:
			text = 'Light Wound'
		else:
			text = ''
		libtcod.console_set_default_foreground(menu_con, libtcod.red)
		libtcod.console_print(menu_con, 27, y+3, text)
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		if IsOdd(y):
			libtcod.console_set_default_background(menu_con, ROW_COLOR)
			libtcod.console_rect(menu_con, 25, y, 97, 3, False, flag=libtcod.BKGND_SET)
			libtcod.console_set_default_background(menu_con, libtcod.black)
		y += 3
	libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
		libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] to proceed'%HIGHLIGHT)
	libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
	libtcod.console_flush()
	WaitForEnter()
	# clear press enter display
	libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
		libtcod.BKGND_NONE, libtcod.CENTER, ' ')
	# Initial wound rolls
	# only possibility of an initial wound if tank was not abandoned
	if not abandoned:
		y = 13
		for crewman in tank.crew:
			if crewman.alive:
				text = crewman.TakeWound(hit_location, sector)
				if text is not None:
					libtcod.console_set_default_foreground(menu_con, libtcod.red)
					# split long strings
					lines = wrap(text, 18)
					n = 0
					for line in lines:
						libtcod.console_print(menu_con, 52, y+n, line)
						n+=1
					libtcod.console_set_default_foreground(menu_con, libtcod.white)
				else:
					libtcod.console_print(menu_con, 52, y, 'Not wounded.')
				libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
				libtcod.console_flush()
				Wait(300)
			y += 3
		Wait(300)
	# Surviving and conscious crewmen roll to bail out
	y = 13
	for crewman in tank.crew:
		if crewman.alive:
			text = crewman.BailOut()
			if text == 'Passed':
				WriteJournal(crewman.name + ' bailed out successfully.')
			else:
				WriteJournal(crewman.name + ' failed to bail out!')
			if text != 'Passed':
				libtcod.console_set_default_foreground(menu_con, libtcod.red)
			libtcod.console_print(menu_con, 74, y, text)
			libtcod.console_set_default_foreground(menu_con, libtcod.white)
			libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
			libtcod.console_flush()
			Wait(300)
		y += 3
	# Crewmen who bailed out roll again for wound
	crewman_inside = False
	y = 13
	for crewman in tank.crew:
		if crewman.bailed_out:
			text = crewman.TakeWound(None, None)
			if text is not None:
				libtcod.console_set_default_foreground(menu_con, libtcod.red)
				# split long strings
				lines = wrap(text, 24)
				n = 0
				for line in lines:
					libtcod.console_print(menu_con, 92, y+n, line)
					n+=1
				libtcod.console_set_default_foreground(menu_con, libtcod.white)
			else:
				libtcod.console_print(menu_con, 92, y, 'Not wounded.')
			libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
			libtcod.console_flush()
			Wait(300)
		elif crewman.alive:
			# set flag - might be killed if tank burns up
			crewman_inside = True
		y += 3
	# brew up roll
	if not abandoned:
		roll = Roll1D100()
		# chance of burning depends on hull model / wet stowage
		if 'wet_stowage' in tank.stats:
			target_score = 15
		elif 'M4A1' in tank.stats['vehicle_type']:
			target_score = 75
		elif 'M4A3' in tank.stats['vehicle_type']:
			target_score = 70
		else:
			target_score = 80
		# Panzerfaust kills are more likely to burn
		if pf: target_score += 5
		if roll <= target_score and not crewman_inside:
			libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-7,
				libtcod.BKGND_NONE, libtcod.CENTER, 'Your tank burns up, but ' +
				'luckily everyone managed to bail out.')
		else:
			if roll <= target_score:
				text = 'Your tank burns up, killing anyone trapped inside.'
				result_text = 'Burns to death'
			else:
				text = 'Luckily your tank does not burn up. Any surviving crewmen still inside are rescued.'
				result_text = 'Rescued'
			libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-7,
				libtcod.BKGND_NONE, libtcod.CENTER, text)
			y = 13
			for crewman in tank.crew:
				if not crewman.bailed_out and crewman.alive:
					# apply result
					if roll <= target_score:
						crewman.alive = False
						libtcod.console_set_default_foreground(menu_con, libtcod.red)
					else:
						crewman.bailed_out = True
					# display text description of result
					libtcod.console_print(menu_con, 92, y, result_text)
					libtcod.console_set_default_foreground(menu_con, libtcod.white)
					libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
					libtcod.console_flush()
					Wait(300)
				y += 3
	libtcod.console_set_alignment(menu_con, libtcod.CENTER)
	libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-5, 'Your combat is over')
	libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3, '[%cEnter%c] to continue'%HIGHLIGHT)
	libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
	libtcod.console_flush()
	WaitForEnter()
	libtcod.console_set_alignment(menu_con, libtcod.LEFT)
	# check to see if we're ending the campaign because commander is dead or sent home
	if CheckCommander():
		campaign.exiting = True
##########################################################################################
# Encounter Actions and Phases #
##########################################################################################
# deplete smoke factors on map
def DepleteSmoke():
	"""Reduce all smoke factors on the battle map by one turn's worth.

	Rain depletes smoke faster (0.75 per turn instead of 0.5). Factors
	that drop to zero or below are removed from battle.smoke_factors.

	Bug fix: items were previously removed from the list while iterating
	over the same list, which skips the element following each removal;
	rebuilding the list removes consecutive depleted factors correctly.
	"""
	# rain depletes smoke faster
	if campaign.weather.precip == 'Rain':
		depletion = 0.75
	else:
		depletion = 0.5
	for smoke_factor in battle.smoke_factors:
		smoke_factor.num_factors -= depletion
	# rebuild the list, dropping fully depleted factors
	battle.smoke_factors = [sf for sf in battle.smoke_factors if sf.num_factors > 0.0]
# do spotting and identification checks for each crewmember that is able to do so
def DoSpotting():
	"""Run spotting and identification checks for every crew member able to spot.

	Each eligible crewman rolls 2D6 against every live enemy unit in his
	spotting arc; depending on the modified roll a unit may become hidden
	(high roll), spotted and identified (very low roll), or just spotted.
	Afterwards the per-round spotting flags are cycled and the map redrawn.
	"""
	# flag if anything results from spotting attempts
	spot_result = False
	for crewman in tank.crew:
		# skip if this crew member cannot spot
		if crewman.spot == 'None': continue
		sectors = []
		# determine sectors in which crew member can spot
		if crewman.spot == 'All':
			sectors = [0, 1, 2, 3, 4, 5]
		elif crewman.spot == 'Any One Sector':
			sectors = [crewman.spot_sector]
		elif crewman.spot == 'Turret Front':
			# set according to turret facing
			sectors = [tank.turret_facing]
		elif crewman.spot == 'All Except Rear':
			sectors = [0, 2, 3, 4, 5]
		elif crewman.spot == 'Tank Front':
			sectors = [4]
		# test to spot and/or identify each enemy unit
		for unit in battle.enemy_units:
			# skip if unit not alive
			if not unit.alive: continue
			# skip if unit is outside of spotting area for this crew member
			if unit.map_hex.sector not in sectors: continue
			# skip if unit is already hidden
			if unit.hidden: continue
			# skip if unit is spotted and identified
			if unit.spotted and unit.identified: continue
			# skip if unit is spotted and doesn't need to be identified
			if unit.spotted and unit.unit_class not in ['TANK', 'SPG', 'AT_GUN']: continue
			# skip if foggy or falling snow and target is at medium and long range
			if unit.map_hex.rng > 0 and (campaign.weather.fog or campaign.weather.precip == 'Snow'):
				continue
			# do roll
			d1, d2, roll = Roll2D6()
			mod_roll = roll
			# apply modifiers; a lower modified roll favours the spotter
			if crewman.hatch in ['Shut', 'None']:
				mod_roll += 2
			else:
				# Eagle Eyed Skill Test
				if crewman.SkillCheck('Eagle Eyed'):
					mod_roll -= 2
			if tank.moving:
				mod_roll += 2
			if unit.terrain in ['Woods', 'Building', 'Fortification', 'Hull Down']:
				mod_roll += 2
			if unit.unit_class not in ['LW', 'MG', 'AT_GUN']:
				if unit.stats['target_size'] == 'Small':
					mod_roll += 1
				elif unit.stats['target_size'] == 'Large':
					mod_roll -= 1
				elif unit.stats['target_size'] == 'Very Large':
					mod_roll -= 2
			if unit.map_hex.rng == 1:
				mod_roll -= 1
			elif unit.map_hex.rng == 0:
				mod_roll -= 2
			if unit.fired:
				mod_roll -= 2
			if unit.moving:
				mod_roll -= 2
			if unit.spotted_lr:
				mod_roll -= 1
			# intervening smoke between player (0,0) and the unit hinders spotting
			mod_roll += (GetSmokeFactors(0, 0, unit.map_hex.hx, unit.map_hex.hy) * 2)
			# natural 12, or modified roll is 12 or more
			if roll == 12 or mod_roll >= 12:
				# target becomes hidden if not already spotted
				if unit.spotted or unit.hidden: continue
				text = unit.GetDesc() + ' is now hidden.'
				unit.hidden = True
				ShowLabel(unit.x+MAP_CON_X, unit.y+MAP_CON_Y, text)
				spot_result = True
			# natural 2, or modified roll is equal to or less than 4
			elif roll == 2 or mod_roll <= 4:
				# target is spotted and, if required, identified
				unit.spotted = True
				if unit.unit_class in ['TANK', 'SPG', 'AT_GUN']:
					unit.identified = True
				text = unit.GetDesc() + ' spotted!'
				ShowLabel(unit.x+MAP_CON_X, unit.y+MAP_CON_Y, text, crewman=crewman)
				spot_result = True
			# modified result equal to or less than score required
			elif mod_roll <= 8:
				# mark that unit was spotted this round
				unit.spotted_tr = True
				# if already spotted, no additional benefit
				if unit.spotted: continue
				unit.spotted = True
				text = unit.GetDesc() + ' spotted!'
				ShowLabel(unit.x+MAP_CON_X, unit.y+MAP_CON_Y, text, crewman=crewman)
				spot_result = True
	UpdateMapOverlay()
	RenderEncounter()
	if not spot_result:
		Message('No results from spotting attempts.')
	# clear old flags and set new flags: "spotted this round" becomes
	# "spotted last round" for next round's modifiers
	for unit in battle.enemy_units:
		unit.spotted_lr = False
		unit.fired = False
		if unit.spotted_tr:
			unit.spotted_tr = False
			unit.spotted_lr = True
	tank.fired_main_gun = False
	# redraw units on map to reflect new spotting status
	UpdateMapOverlay()
# try to set up firing MG(s)
def SetupFireMGs():
	"""Flag which of the tank's MGs may fire based on crew orders, activate
	the first available one, and select an initial target.

	Returns True if at least one MG can fire, False otherwise.
	"""
	# AA MG: either the commander or the loader may man it
	if any(GetCrewByPosition(pos).order == 'Fire AA MG'
			for pos in ['Commander', 'Loader']):
		tank.aa_mg_can_fire = True
	# co-axial MG is operated by the gunner
	if GetCrewByPosition('Gunner').order == 'Fire Co-Axial MG':
		tank.coax_mg_can_fire = True
	# bow MG needs an assistant driver and cannot fire while hull down
	if 'no_asst_driver' not in tank.stats:
		asst_driver = GetCrewByPosition('Asst. Driver')
		if asst_driver.order == 'Fire Bow MG' and not tank.hull_down:
			tank.bow_mg_can_fire = True
	# activate the first MG that is cleared to fire
	tank.SelectFirstMG()
	if tank.active_mg == -1:
		return False
	# set the phase first so SelectNextTarget builds an MG target list
	NewPhase('Fire MGs')
	# get first target
	SelectNextTarget()
	return True
# fire an MG
def FireMG():
	"""Fire the currently active MG at the selected target and resolve the
	to-kill roll, including malfunctions, pins, and kills.  Afterwards the
	next available MG/target is selected and the display refreshed.
	"""
	if battle.target is None: return
	# set MG type used and play sound effect
	if tank.active_mg == 0:
		choice = 'Co-ax'
		PlaySound('coax_mg_firing')
	elif tank.active_mg == 1:
		choice = 'Bow'
		PlaySound('bow_mg_firing')
	elif tank.active_mg == 2:
		choice = 'AA'
		PlaySound('aa_mg_firing')
	else:
		# no valid MG is active
		return
	# display firing animation
	MGAnimation(MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y, battle.target.x + MAP_CON_X,
		battle.target.y + MAP_CON_Y)
	# now there's only one MG selected, so fire it at the target!
	# mark that this MG has fired, also record mg firepower and normal range
	if choice == 'Co-ax':
		tank.coax_mg_can_fire = False
		mg_fp = tank.stats['co_ax_mg']
		mg_rng = 12
	elif choice == 'Bow':
		tank.bow_mg_can_fire = False
		mg_fp = tank.stats['bow_mg']
		mg_rng = 8
	else:
		tank.aa_mg_can_fire = False
		mg_fp = tank.stats['aa_mg']
		mg_rng = 8
	# select the next MG that can fire
	tank.SelectFirstMG()
	# calculate modifiers and final DR required
	(base_tk, roll_req, drm) = CalcIFT(tank, battle.target, 'MG', False, False, fp=mg_fp, rng=mg_rng)
	# create roll action object to hold details about the action
	roll_action = RollAction()
	# input details
	roll_action.attacker_unit_type = tank.unit_type
	roll_action.attacker = tank.stats['vehicle_type'] + ' "' + tank.name + '"'
	roll_action.attack_type = choice + ' MG'
	# mark if target is unspotted or needs to be identified
	if not battle.target.spotted or (battle.target.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not battle.target.identified):
		roll_action.target_unidentified = True
	roll_action.target_unit_type = battle.target.unit_type
	roll_action.target = battle.target.GetDesc()
	if battle.target.map_hex.rng == 0:
		roll_action.rng = 'Close'
	elif battle.target.map_hex.rng == 1:
		roll_action.rng = 'Medium'
	else:
		roll_action.rng = 'Long'
	roll_action.score_req = base_tk
	roll_action.drm = drm
	roll_action.CalculateTotalDRM()
	roll_action.roll_req = roll_req
	# record if KO is impossible
	if roll_req <= 2: roll_action.nc = True
	##### To-Kill Roll #####
	d1, d2, roll = Roll2D6()
	roll_action.d1 = d1
	roll_action.d2 = d2
	roll_action.roll = roll
	# malfunction on double sixes
	if d1 == 6 and d2 == 6:
		roll_action.result = 'The MG has malfunctioned!'
	# Target Destroyed
	elif roll < roll_req:
		roll_action.result = battle.target.GetDesc() + ' is destroyed!'
		WriteJournal(battle.target.GetDesc() + ' was destroyed by MG fire from ' + tank.name)
		battle.target.RecordKO()
		battle.target.alive = False
	# Infantry are automatically Pinned
	elif roll == roll_req or roll == roll_req + 1:
		if battle.target.unit_class in ['LW', 'MG', 'AT_GUN']:
			battle.target.PinTest(auto=True)
			# an automatic pin test can still break/destroy the unit
			if not battle.target.alive:
				roll_action.result = battle.target.GetDesc() + ' is Broken and destroyed!'
				WriteJournal(battle.target.GetDesc() + ' was broken and destroyed by MG fire from ' + tank.name)
			else:
				roll_action.result = battle.target.GetDesc() + ' is Pinned.'
		# no effect on vehicles
		else:
			roll_action.result = battle.target.GetDesc() + ' is unharmed.'
	# No effect
	else:
		roll_action.result = battle.target.GetDesc() + ' is unharmed.'
	# display results to player then delete roll object
	DisplayRoll(roll_action, tk_roll=True)
	del roll_action
	# apply effects of MG malfunction if any
	if d1 == 6 and d2 == 6:
		if choice == 'Co-ax':
			tank.TakeDamage(damage_type='Co-ax MG Malfunction')
		elif choice == 'Bow':
			tank.TakeDamage(damage_type='Bow MG Malfunction')
		else:
			tank.TakeDamage(damage_type='AA MG Malfunction')
	# try to select another target
	SelectNextTarget()
	UpdateMapOverlay()
	RenderEncounter()
# attempt to repair a tank malfunction
# if post encounter, has different requirements and possible effects
def AttemptRepairs(post_encounter=False):
	"""Attempt to repair each repairable entry in the tank's damage list.

	When post_encounter is True, periscopes are replaced automatically,
	auto-repair damage succeeds without a crewman on the repair order,
	and a -1 bonus is applied to every repair roll.  A failed roll that
	hits the break threshold replaces the damage with its break result.
	"""
	# skip if tank was destroyed or severely damaged
	if not tank.alive or tank.swiss_cheese: return
	# step through in reverse so we can pop out old entries and add new ones
	for damage in reversed(tank.damage_list):
		# if broken periscope, replace if after combat, otherwise can't fix
		if 'Periscope Broken' in damage:
			if post_encounter:
				PopUp('The ' + damage + ' has been replaced!')
				tank.damage_list.remove(damage)
			continue
		damage_type = GetDamageType(damage)
		# not found; can't be repaired
		if damage_type is None: continue
		# no repair allowed
		if damage_type.repair_score == 0: continue
		# see if we can attempt this repair now
		active_crewman = None
		repair_ok = False
		if (damage_type.auto_repair and post_encounter):
			repair_ok = True
		else:
			for crewman in tank.crew:
				if crewman.order == damage_type.order:
					repair_ok = True
					active_crewman = crewman
					break
		if not repair_ok: continue
		# do repair roll
		d1, d2, roll = Roll2D6()
		# check for skill activation
		if not post_encounter:
			if active_crewman.SkillCheck('Mechanic'):
				roll -= 1
		# bonus automatically applied if repair attempt is after an encounter
		if not post_encounter:
			if GetCrewByPosition('Gunner').order == 'Help Repair':
				roll -= 1
		else:
			roll -= 1
		# NOTE(review): 'd1 == 6 and d2 == 2' looks asymmetric next to the
		# 'd1 == 6 and d2 == 6' malfunction checks elsewhere - confirm the
		# intended trigger roll.  Also, when break_score == 0 the second
		# clause 'roll >= damage_type.break_score' is always true, forcing
		# a break - confirm break_score is never 0 for repairable damage.
		if (d1 == 6 and d2 == 2 and damage_type.break_score != 0) or roll >= damage_type.break_score:
			PopUp('The repair attempt failed! ' + damage_type.break_result + '.')
			tank.damage_list.remove(damage)
			tank.damage_list.append(damage_type.break_result)
		elif roll > damage_type.repair_score and not (damage_type.auto_repair and post_encounter):
			PopUp('The ' + damage_type.name + ' repair was not successful.')
		else:
			PopUp('The ' + damage_type.name + ' has been repaired!')
			tank.damage_list.remove(damage)
	# refresh the tank console if we're in an encounter
	if battle is not None: UpdateTankCon()
# try to set up main gun to fire
# returns True if target selection can proceed, False if not
def SetupMainGun():
	"""Check whether the main gun can fire this phase.

	Returns True if target selection can proceed, False if not.
	"""
	# a damaged gun or broken gun sight prevents firing
	for fault in ['Main Gun Malfunction', 'Main Gun Broken', 'Gun Sight Broken']:
		if fault in tank.damage_list:
			return False
	# gunner must be alive, active, and ordered to fire
	if GetCrewByPosition('Gunner').order != 'Fire Main Gun':
		return False
	# firing on the move requires gyrostabilizer training
	if tank.moving:
		has_gyro = any(skill.name == 'Gyrostabilizer'
			for skill in GetCrewByPosition('Gunner').skills)
		if not has_gyro:
			PopUp('Tank is moving and cannot fire main gun.')
			return False
	# a shell must be loaded
	if tank.ammo_load == 'None':
		PopUp('No shell loaded - order Loader to Change Gun Load.')
		return False
	# smoke rounds force area fire; AP rounds force direct fire
	if tank.ammo_load in ['WP', 'HCBI']:
		battle.area_fire = True
	elif tank.ammo_load in ['AP', 'HVAP', 'APDS']:
		battle.area_fire = False
	# set the phase first so SelectNextTarget builds a main gun target list
	NewPhase('Fire Main Gun')
	# grab first possible target
	SelectNextTarget()
	return True
# selects the next valid target for active weapon
def SelectNextTarget():
	"""Select the next valid target for the active weapon.

	Sets battle.target to the next targetable enemy unit (wrapping past the
	end of the list), or to None if nothing can currently be targeted.
	"""
	# gather every unit the active weapon could engage right now
	valid_targets = []
	for unit in battle.enemy_units:
		if not unit.alive: continue
		if unit.hidden or not unit.spotted: continue
		# fog or falling snow hides everything beyond close range
		if unit.map_hex.rng > 0:
			if campaign.weather.fog or campaign.weather.precip == 'Snow':
				continue
		if battle.phase == 'Fire Main Gun':
			# main gun can only engage the sector the turret is facing
			if tank.turret_facing != unit.map_hex.sector:
				continue
			valid_targets.append(unit)
		elif battle.phase == 'Fire MGs':
			# MGs: no long-range shots, soft targets only
			if unit.map_hex.rng == 2: continue
			if unit.unit_class not in ['AT_GUN', 'MG', 'LW', 'TRUCK']: continue
			if tank.active_mg == 0:
				# co-ax MG follows the turret facing
				if tank.turret_facing != unit.map_hex.sector: continue
				valid_targets.append(unit)
			elif tank.active_mg == 1:
				# bow MG covers the front sector only, never while hull down
				if unit.map_hex.sector != 4: continue
				if tank.hull_down: continue
				valid_targets.append(unit)
			elif tank.active_mg == 2:
				# AA MG has all-around coverage
				valid_targets.append(unit)
	# nothing targetable
	if not valid_targets:
		battle.target = None
		return
	# no current target: take the first valid one
	if battle.target is None:
		battle.target = valid_targets[0]
		return
	# otherwise advance to the entry after the current target, wrapping
	try:
		current_index = valid_targets.index(battle.target)
	except ValueError:
		# current target no longer valid: wrap around to the first entry
		current_index = len(valid_targets) - 1
	battle.target = valid_targets[(current_index + 1) % len(valid_targets)]
# fire the player tank's main gun at the selected target
def FireMainGun():
	"""Fire the player tank's main gun at the selected target.

	Resolves the to-hit roll (smoke rounds and direct fire are handled
	separately), records hits for later resolution, attempts an automatic
	reload, and determines whether Rate of Fire is maintained so the gun
	may fire again this phase.
	"""
	if battle.target is None: return
	# random callout
	callout_roll = Roll1D10()
	if callout_roll == 1:
		ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'Firing!',
			GetCrewByPosition('Gunner'))
	if callout_roll == 2:
		ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y, 'On the way!',
			GetCrewByPosition('Gunner'))
	# play firing sound
	soundfile = GetFiringSound(tank.stats['main_gun'])
	if soundfile is not None:
		PlaySound(soundfile)
	# do firing animation
	MainGunAnimation(MAP_X0 + MAP_CON_X, MAP_Y0 + MAP_CON_Y, battle.target.x + MAP_CON_X,
		battle.target.y + MAP_CON_Y)
	# calculate modifiers and final DR required
	(base_th, roll_req, drm) = CalcTH(tank, battle.target, battle.area_fire, tank.ammo_load)
	# set the tank fired main gun flag
	tank.fired_main_gun = True
	# lose any other acquired target
	for unit in battle.enemy_units:
		if unit == battle.target: continue
		unit.acquired = 0
	# create roll action to hold details about the action
	roll_action = RollAction()
	# input details
	roll_action.attacker_unit_type = tank.unit_type
	roll_action.attacker = tank.stats['vehicle_type'] + ' "' + tank.name + '"'
	# record description of main gun and ammo used
	roll_action.attack_type = tank.stats['main_gun'].replace('L', '') + 'mm ' + tank.ammo_load
	# mark if target is unspotted or needs to be identified
	if not battle.target.spotted or (battle.target.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not battle.target.identified):
		roll_action.target_unidentified = True
	roll_action.target_unit_type = battle.target.unit_type
	roll_action.target = battle.target.GetDesc()
	if battle.target.map_hex.rng == 0:
		roll_action.rng = 'Close'
	elif battle.target.map_hex.rng == 1:
		roll_action.rng = 'Medium'
	else:
		roll_action.rng = 'Long'
	roll_action.score_req = base_th
	roll_action.drm = drm
	roll_action.CalculateTotalDRM()
	roll_action.roll_req = roll_req
	##### To-hit Roll #####
	d1, d2, roll = Roll2D6()
	roll_action.d1 = d1
	roll_action.d2 = d2
	roll_action.roll = roll
	# flag to record what kind of animation / effect to show later
	hit_result = None
	stop_firing = False
	# handle smoke attacks differently
	if tank.ammo_load in ['WP', 'HCBI']:
		# double sixes: main gun malfunction
		if roll == 12:
			roll_action.result = 'Main Gun Malfunction!'
			tank.damage_list.append('Main Gun Malfunction')
		elif roll <= roll_req:
			roll_action.result = 'Shot hit target area!'
			hit_result = 'smoke_hit'
			# add smoke factors: WP places 1.0, HCBI places 2.0
			if tank.ammo_load == 'WP':
				n = 1.0
			else:
				n = 2.0
			PlaceSmoke(battle.target.map_hex, n)
			PaintMapCon()
			# if WP, record hit on target for pin check
			if tank.ammo_load == 'WP':
				battle.target.hit_record.append(MainGunHit(tank.stats['main_gun'],
					tank.ammo_load, False, battle.area_fire))
		else:
			roll_action.result = 'Shot missed target area!'
			hit_result = 'miss'
	else:
		# tell target it's been fired at
		battle.target.shot_at = True
		# record hits to apply after final shot
		# check for Knows Weak Spots skill activation
		weak_spot = False
		if roll == 3:
			crew_member = GetCrewByPosition('Gunner')
			if crew_member.SkillCheck('Knows Weak Spots'):
				weak_spot = True
		# critical hit, automatically hits
		# if original to-hit roll was 2+
		if roll_action.roll_req >= 2 and (roll == 2 or weak_spot):
			roll_action.result = 'Critical Hit!'
			stop_firing = True
			battle.target.hit_record.append(MainGunHit(tank.stats['main_gun'],
				tank.ammo_load, True, battle.area_fire))
		# double sixes: main gun malfunction
		elif roll == 12:
			roll_action.result = 'Automatic Miss, Main Gun Malfunction!'
			tank.damage_list.append('Main Gun Malfunction')
		elif roll <= roll_req:
			roll_action.result = 'Shot hit!'
			battle.target.hit_record.append(MainGunHit(tank.stats['main_gun'],
				tank.ammo_load, False, battle.area_fire))
			if tank.ammo_load == 'HE':
				hit_result = 'he_hit'
			else:
				hit_result = 'ap_hit'
		else:
			roll_action.result = 'Shot missed.'
			hit_result = 'miss'
	# clear the fired shell, record last shell type
	old_load = tank.ammo_load
	tank.ammo_load = 'None'
	# if main gun malfunctioned, cannot reload or maintain RoF
	if 'Main Gun Malfunction' in tank.damage_list:
		roll_action.rof_result = 'Cannot reload or maintain RoF'
		UpdateTankCon()
		# go to next phase
		battle.trigger_phase = True
	else:
		# try to load a new shell into the main gun if loader is on correct order
		crew_member = GetCrewByPosition('Loader')
		if crew_member.order in ['Reload', 'Change Gun Load']:
			# check for possibility of Shell Juggler skill activation first
			skill_used = False
			if tank.use_rr and tank.general_ammo[tank.ammo_reload] > 0:
				if crew_member.SkillCheck('Shell Juggler'):
					tank.ammo_load = tank.ammo_reload
					tank.general_ammo[tank.ammo_reload] -= 1
					UpdateTankCon()
					skill_used = True
			if not skill_used:
				if tank.use_rr:
					if tank.rr_ammo[tank.ammo_reload] > 0:
						tank.ammo_load = tank.ammo_reload
						tank.rr_ammo[tank.ammo_reload] -= 1
						UpdateTankCon()
					else:
						# wasn't able to use rr
						tank.use_rr = False
				if not tank.use_rr:
					if tank.general_ammo[tank.ammo_reload] > 0:
						tank.ammo_load = tank.ammo_reload
						tank.general_ammo[tank.ammo_reload] -= 1
						UpdateTankCon()
		# if changed gun load, cannot maintain RoF
		if GetCrewByPosition('Loader').order == 'Change Gun Load':
			roll_action.rof_result = 'Loader changed gun load this round, cannot maintain RoF'
			battle.trigger_phase = True
		# if no shell is loaded, cannot maintain RoF
		elif tank.ammo_load == 'None':
			if crew_member.order != 'Reload':
				roll_action.rof_result = 'No shell in main gun, cannot maintain RoF'
			else:
				roll_action.rof_result = 'No shells of reload type available, cannot reload!'
			battle.trigger_phase = True
		else:
			# determine if RoF is maintained
			# switch fire mode if required for reloaded shell type
			if battle.area_fire and tank.ammo_load in ['AP', 'HVAP', 'APDS']:
				battle.area_fire = False
				UpdateTankCon()
			elif not battle.area_fire and tank.ammo_load in ['WP', 'HCBI']:
				battle.area_fire = True
				UpdateTankCon()
			# see if RoF is maintained: use unmodified to-hit roll plus new modifiers
			# only best modifier will apply in case of skill mods
			skill_mod = 0
			# Ready Rack use or Asst. Driver passing ammo
			if tank.use_rr:
				roll -= 2
			else:
				if 'no_asst_driver' not in tank.stats:
					crew_member = GetCrewByPosition('Asst. Driver')
					if crew_member.order == 'Pass Ammo':
						mod = -1
						# Shell Tosser replaces the flat -1 with a -2 skill mod
						if crew_member.SkillCheck('Shell Tosser'):
							skill_mod = -2
							mod = 0
						roll += mod
			crew_member = GetCrewByPosition('Gunner')
			if crew_member.SkillCheck('Quick Trigger'):
				if skill_mod > -1: skill_mod = -1
			crew_member = GetCrewByPosition('Loader')
			if crew_member.SkillCheck('Fast Hands'):
				if skill_mod > -1: skill_mod = -1
			roll += skill_mod
			# critical hits (stop_firing) always end main gun fire
			if roll <= tank.stats['rof_num'] and not stop_firing:
				roll_action.rof_result = 'RoF maintained!'
				tank.has_rof = True
			else:
				roll_action.rof_result = "RoF wasn't maintained, end of main gun fire."
				# go to hit resolution
				battle.trigger_phase = True
	# show all this to the player!
	DisplayRoll(roll_action)
	# reset turret facing since can't rotate again
	tank.old_t_facing = tank.turret_facing
	# show shot result animation and/or sound
	RenderEncounter()
	if hit_result is not None:
		HitAnimation(hit_result)
	# redraw the map overlay
	UpdateMapOverlay()
	# delete the roll action object
	del roll_action
	# call out new ammo load if any
	if tank.ammo_load != 'None' and old_load != tank.ammo_load:
		ShowLabel(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y,
			tank.ammo_load + ' up!', GetCrewByPosition('Loader'))
		RenderEncounter()
##########################################################################################
# Encounter Animations #
##########################################################################################
# display an animation of a main gun firing from x1,y1 to x2,y2
# displayed on root console
def MainGunAnimation(x1, y1, x2, y2):
	"""Animate a main gun round travelling from (x1,y1) to (x2,y2).

	Drawn directly on the root console; a no-op if animations are off or
	the path is too short.
	"""
	if not campaign.animations: return
	# trace the path of the round
	path = GetLine(x1, y1, x2, y2)
	# don't animate if too short
	if len(path) < 3: return
	for (px, py) in path[2:]:
		# remember what the cell looked like so it can be restored
		old_col = libtcod.console_get_char_foreground(0, px, py)
		old_char = libtcod.console_get_char(0, px, py)
		# draw the round as a white bullet character
		libtcod.console_set_char_foreground(0, px, py, libtcod.white)
		libtcod.console_set_char(0, px, py, libtcod.CHAR_BULLET)
		# refresh screen and wait
		libtcod.console_flush()
		Wait(70)
		# restore the original cell contents
		libtcod.console_set_char_foreground(0, px, py, old_col)
		libtcod.console_set_char(0, px, py, old_char)
	libtcod.console_flush()
# display an animation of MG fire
def MGAnimation(x1, y1, x2, y2):
	"""Display an animation of MG fire from (x1,y1) to (x2,y2) on the root
	console: 20 tracer flashes at random points along the line.
	"""
	if not campaign.animations: return
	# get the line to display the animation
	line = GetLine(x1, y1, x2, y2)
	# BUGFIX: tracers are sampled from line[2:-1], which is empty when the
	# line has exactly 3 points; the old guard (len < 3) allowed that case
	# and random.choice() would raise IndexError on the empty slice
	if len(line) < 4: return
	# erase the los line and render the screen
	UpdateMapOverlay(skip_los=True)
	RenderEncounter()
	# hoist the candidate tracer cells out of the loop
	tracer_cells = line[2:-1]
	for n in range(20):
		# pick a random point along the line
		(x, y) = random.choice(tracer_cells)
		# record the original foreground color and character of the cell
		col = libtcod.console_get_char_foreground(0, x, y)
		char = libtcod.console_get_char(0, x, y)
		# pick random display color
		c = libtcod.random_get_int(0, 0, 30)
		libtcod.console_set_char_foreground(0, x, y, libtcod.Color(220, 145+c, 30))
		# set character to mg bullet
		libtcod.console_set_char(0, x, y, 249)
		# refresh screen and wait
		libtcod.console_flush()
		Wait(70)
		# reset character
		libtcod.console_set_char_foreground(0, x, y, col)
		libtcod.console_set_char(0, x, y, char)
	# reset los display and re-render screen
	UpdateMapOverlay()
	RenderEncounter()
# display an animation of an artilery or air strike on an area centered on x,y
# displayed on root console
def ArtyStrikeAnimation(x, y):
	"""Animate an artillery or air strike on an area centred on (x, y),
	drawn on the root console."""
	if not campaign.animations: return
	# wait for latter part of sound effect
	if campaign.sounds:
		Wait(400)
	for strike in range(10):
		# scatter the impact point around the centre
		sx = x + libtcod.random_get_int(0, -7, 7)
		sy = y + libtcod.random_get_int(0, -4, 4)
		# skip impact points that fall off the visible map
		if sx < C_MAP_CON_X or sx >= SCREEN_WIDTH - 1 or sy < 4 or sy >= SCREEN_HEIGHT:
			continue
		# explosion frames in red, ending with grey smoke
		libtcod.console_set_char_foreground(0, sx, sy, libtcod.red)
		for frame_char in [249, libtcod.CHAR_BULLET, libtcod.CHAR_RADIO_UNSET]:
			libtcod.console_set_char(0, sx, sy, frame_char)
			libtcod.console_flush()
			Wait(40)
		libtcod.console_set_char_foreground(0, sx, sy, libtcod.light_grey)
		libtcod.console_set_char(0, sx, sy, libtcod.CHAR_BLOCK1)
		libtcod.console_flush()
		Wait(40)
	# blit display console to screen to clear animation and update screen
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
	libtcod.console_flush()
# animation of a hit from a main gun
def HitAnimation(hit_result):
	"""Play the sound and, for AP/HE hits, the animation for a main gun
	shot result at the current target's map position."""
	# misses and smoke hits get a sound effect only
	if hit_result == 'miss':
		PlaySound('main_gun_miss')
		return
	if hit_result == 'smoke_hit':
		PlaySound('smoke_hit')
		return
	# AP or HE hit
	PlaySound(hit_result)
	if not campaign.animations: return
	frames = HE_HIT_ANIMATION if hit_result == 'he_hit' else AP_HIT_ANIMATION
	x = MAP_CON_X + battle.target.x
	y = MAP_CON_Y + battle.target.y
	# copy the map cell's background colour so frames blend with the terrain
	bg_col = libtcod.console_get_char_background(map_con, battle.target.x, battle.target.y)
	libtcod.console_set_char_background(0, x, y, bg_col)
	for (char, color, pause) in frames:
		libtcod.console_set_char(0, x, y, char)
		libtcod.console_set_char_foreground(0, x, y, color)
		libtcod.console_flush()
		Wait(pause)
	libtcod.console_flush()
# add one or more smoke factors into a map hex
def PlaceSmoke(target_hex, num_factors):
	"""Add a smoke marker of num_factors strength into target_hex and
	recalculate per-hex smoke totals."""
	new_factor = SmokeFactor(target_hex.hx, target_hex.hy, num_factors)
	battle.smoke_factors.append(new_factor)
	CalculateSmokeFactors()
# calculate active smokes factors for each hex
def CalculateSmokeFactors():
	"""Recompute the total smoke factors present in every map hex, then
	redraw the battle map."""
	# total up smoke per (hx, hy) location; each marker contributes the
	# ceiling of its (possibly fractional) strength
	totals = {}
	for smoke_factor in battle.smoke_factors:
		key = (smoke_factor.hx, smoke_factor.hy)
		totals[key] = totals.get(key, 0) + int(ceil(smoke_factor.num_factors))
	# write the totals back onto the hexes, zeroing hexes with no smoke
	for map_hex in battle.maphexes:
		map_hex.smoke_factors = totals.get((map_hex.hx, map_hex.hy), 0)
	# redraw battle map
	PaintMapCon()
# handles a crew member in the turret throwing a smoke grenade out of an open hatch
def HandleSmokeGrenades():
	"""Resolve any 'Throw Smoke Grenade' orders: a crewman with an open
	hatch spends one grenade to place smoke in the player's own hex."""
	for crew_member in tank.crew:
		if crew_member.order != 'Throw Smoke Grenade':
			continue
		if crew_member.hatch != 'Open':
			PopUp(crew_member.name + " must have an open hatch to throw a smoke grenade.")
		elif tank.smoke_grenades == 0:
			PopUp('No smoke grenades remaining!')
		else:
			# throw the grenade!
			tank.smoke_grenades -= 1
			UpdateTankCon()
			Message(crew_member.name + ' throws a smoke grenade!')
			# the smoke lands in the player's own hex (0, 0)
			for map_hex in battle.maphexes:
				if map_hex.hx == 0 and map_hex.hy == 0:
					PlaceSmoke(map_hex, 0.5)
					break
# handles firing the smoke mortar
def HandleSmokeMortar():
	"""Resolve any 'Fire Smoke Mortar' orders: spend one smoke bomb to
	place smoke at close range in the sector the turret is facing."""
	for crew_member in tank.crew:
		if crew_member.order != 'Fire Smoke Mortar':
			continue
		if tank.smoke_bombs == 0:
			PopUp('No ammo remaining for smoke mortar!')
			continue
		tank.smoke_bombs -= 1
		UpdateTankCon()
		Message(crew_member.name + ' fires smoke from the mortar!')
		# place smoke in the close-range hex of the turret's sector
		for map_hex in battle.maphexes:
			if map_hex.rng == 0 and tank.turret_facing == map_hex.sector:
				PlaceSmoke(map_hex, 1)
				break
# returns total number of smoke factors between hexes
def GetSmokeFactors(hx1, hy1, hx2, hy2):
	"""Return the total number of smoke factors in the hexes crossed by
	the line from hex (hx1, hy1) to hex (hx2, hy2)."""
	# trace a screen-space line between the two hex centres
	sx1, sy1 = Hex2Screen(hx1, hy1)
	sx2, sy2 = Hex2Screen(hx2, hy2)
	# collect each distinct hex the line passes through
	crossed_hexes = []
	for (x, y) in GetLine(sx1, sy1, sx2, sy2):
		map_hex = Screen2Hex(x, y)
		if map_hex is not None and map_hex not in crossed_hexes:
			crossed_hexes.append(map_hex)
	# sum the smoke present in each crossed hex
	return sum(map_hex.smoke_factors for map_hex in crossed_hexes)
# set up restock ready rack interface
def SetupReadyRack():
	"""Return True if the ready rack restock interface can be opened:
	the loader must be on the 'Restock Ready Rack' order."""
	return GetCrewByPosition('Loader').order == 'Restock Ready Rack'
# attempt to set up tank to pivot
def SetupPivot():
	"""Check whether the tank can pivot this turn.

	Returns True if the pivot interface can proceed (and resets the
	planned facing to straight ahead), False otherwise."""
	# driver must be on the pivot order
	if GetCrewByPosition('Driver').order != 'Pivot Tank':
		return False
	# an immobile tank cannot pivot
	if tank.bogged_down or tank.immobilized:
		PopUp('Tank is immobile and unable to pivot')
		return False
	# pivot can proceed - reset the planned new facing
	tank.new_facing = 4
	return True
# commits a pivot of the player tank, and rotates enemies and sector control around the
# player tank
def PivotTank():
	"""Commit a pivot of the player tank.

	Rolls for a thrown track and for keeping any Hull Down position, then
	rotates enemy units and smoke markers around the player tank to
	represent the new facing.
	"""
	# apply effects of pivot
	# can still throw a track, but only on natural 12 followed by 5+ on D6
	# check for maintaining HD
	d1, d2, roll = Roll2D6()
	if roll == 12:
		if libtcod.random_get_int(0, 1, 6) >= 5:
			# thrown track
			PopUp('Your tank has thrown a track and is immobilized!')
			tank.moving = False
			tank.new_facing = 4
			tank.immobilized = True
			UpdateTankCon()
			RenderEncounter()
			return
	if tank.hull_down:
		# chance to maintain HD
		if roll <= 6:
			PopUp('Your tank pivots and maintains a Hull Down position.')
		else:
			PopUp('Your tank pivots but has lost its Hull Down position.')
			tank.hull_down = False
			tank.moving = True
	# determine number of sectors pivoted and direction to pivot
	# not strictly the most efficent system here: always assumes a clockwise rotation
	if tank.new_facing == 5:
		sector_distance = 1
		clockwise = True
	elif tank.new_facing == 0:
		sector_distance = 2
		clockwise = True
	elif tank.new_facing == 1:
		sector_distance = 3
		clockwise = True
	elif tank.new_facing == 2:
		sector_distance = 2
		clockwise = False
	else:
		sector_distance = 1
		clockwise = False
	# rotate the world around the player one sector at a time
	for n in range(sector_distance):
		# rotate enemy units
		for unit in battle.enemy_units:
			unit.RotatePosition(clockwise)
		# rotate smoke factors if any on map
		if len(battle.smoke_factors) > 0:
			for obj in battle.smoke_factors:
				obj.RotatePosition(clockwise)
	# recalculate hex smoke factors
	CalculateSmokeFactors()
	# reset tank variable
	tank.new_facing = 4
	# redraw consoles and update screen
	UpdateTankCon()
	UpdateMapOverlay()
	RenderEncounter()
# resolve a movement order for the player tank
def MoveTank():
	"""Resolve a movement order for the player tank.

	Handles stop orders, unbog attempts, the movement roll (thrown track /
	bog down risk), hull down attempts, and finally rolls for the effect
	of the move on enemy unit positions and facings.
	"""
	crew = GetCrewByPosition('Driver')
	# if already pivoted, return
	if crew.order == 'Pivot Tank':
		return
	# if driver is dead, stunned, or unconscious, can't move tank
	# if tank is already moving, it stops
	if crew.NoActions():
		if tank.moving:
			PopUp('Your driver is incapacitated, and your tank rolls to a stop')
			tank.moving = False
			UpdateTankCon()
			RenderEncounter()
		return
	# if stopped and driver is on stop orders, don't check anything
	if crew.order == 'Stop' and not tank.moving: return
	# stop order is simple
	if crew.order == 'Stop':
		if tank.moving:
			PopUp('Your tank comes to a stop')
			tank.moving = False
			UpdateTankCon()
			RenderEncounter()
		return
	# unbog attempt
	if crew.order == 'Attempt Unbog' and tank.bogged_down:
		d1, d2, roll = Roll2D6()
		mod_roll = roll
		# skill check
		if crew.hatch == 'Open':
			if crew.SkillCheck('Tough Mudder'):
				mod_roll -= 2
		# driver buttoned up
		else:
			mod_roll += 2
		# HVSS
		if 'HVSS' in tank.stats:
			mod_roll -= 1
		# Commander Directing Movement from Open Hatch: -2
		# Commander Directing Movement from Vision Cupola: -1
		crew_member = GetCrewByPosition('Commander')
		if crew_member.order == 'Direct Movement':
			if crew_member.hatch == 'Open':
				if crew_member.SkillCheck('Driver Direction'):
					mod = -3
				else:
					mod = -2
				mod_roll += mod
			elif 'vision_cupola' in tank.stats:
				if crew_member.SkillCheck('Driver Direction'):
					mod = -2
				else:
					mod = -1
				mod_roll += mod
		# natural 12 or very high modified roll: the unbog attempt backfires
		if roll == 12 or mod_roll >= 11:
			PopUp('You have thrown a track trying to unbog your tank!')
			tank.bogged_down = False
			tank.immobilized = True
			UpdateTankCon()
			CrewTalk(random.choice(CREW_TALK_THROWN_TRACK), position_list='Driver')
		elif mod_roll <= 4:
			PopUp('Your tank is no longer bogged down')
			tank.bogged_down = False
			UpdateTankCon()
		else:
			PopUp('Driver is unable to unbog tank.')
		return
	# make sure tank can move
	# this shouldn't happen, since driver has to be on Stop or Attempt Unbog
	# order in these two cases
	if tank.bogged_down or tank.immobilized:
		Message('Tank is unable to move.')
		return
	# play sound effect
	PlaySound('sherman_movement')
	# check for move from stop
	move_from_stop = False
	if not tank.moving:
		move_from_stop = True
	# tank gets moving status for this turn at least
	# and loses hull down if it had it
	tank.moving = True
	tank.hull_down = False
	UpdateTankCon()
	RenderEncounter()
	# lose all acquired targets, ACs stop spotting player
	for unit in battle.enemy_units:
		unit.acquired = 0
		unit.acquired_player = 0
		if unit.unit_class == 'AC':
			unit.spotting_player = False
	# do movement roll
	d1, d2, roll = Roll2D6()
	# determine modifiers to roll
	mod_roll = roll
	# Commander Directing Movement from Open Hatch: -2
	# Commander Directing Movement from Vision Cupola: -1
	crew_member = GetCrewByPosition('Commander')
	if crew_member.order == 'Direct Movement':
		if crew_member.hatch == 'Open':
			if crew_member.SkillCheck('Driver Direction'):
				mod = -3
			else:
				mod = -2
			mod_roll += mod
		elif 'vision_cupola' in tank.stats:
			if crew_member.SkillCheck('Driver Direction'):
				mod = -2
			else:
				mod = -1
			mod_roll += mod
	# HVSS: -1
	if 'HVSS' in tank.stats:
		mod_roll -= 1
	# Driver Buttoned up: +2
	crew_member = GetCrewByPosition('Driver')
	if crew_member.hatch != 'Open':
		mod_roll += 2
	# check Drag Racer skill
	if move_from_stop:
		if crew_member.SkillCheck('Drag Racer'):
			mod_roll -= 1
	# weather effects
	if campaign.weather.ground == 'Snow':
		mod_roll += 1
	elif campaign.weather.ground in ['Mud', 'Deep Snow']:
		mod_roll += 2
	# if unmodified roll is 12, or if modified roll is 12 or more, tank has
	# possibility of throwing a track or bogging down
	if roll == 12 or mod_roll >= 12:
		d6_roll = libtcod.random_get_int(0, 1, 6)
		if d6_roll >= 5:
			# thrown track
			PopUp('Your tank has thrown a track and is immobilized!')
			tank.moving = False
			tank.immobilized = True
			UpdateTankCon()
			RenderEncounter()
			return
		elif d6_roll >= 3:
			# bogged down
			PopUp('Your tank becomes bogged down!')
			tank.moving = False
			tank.bogged_down = True
			UpdateTankCon()
			RenderEncounter()
			return
	# Check modified roll for Hull Down:
	if crew_member.order == 'Forward to Hull Down':
		# skill check
		if crew_member.SkillCheck('Eye for Cover'):
			mod_roll -= 2
		# easier to find hull down positions in 'F' (forest) map nodes
		if campaign.day_map.player_node.node_type == 'F':
			mod_roll -= 2
		if mod_roll <= 4:
			PopUp('Your tank moves forward into a hull down position.')
			tank.hull_down = True
			UpdateTankCon()
			RenderEncounter()
		else:
			PopUp('Your tank moves forward but is unable to move into a hull down position.')
	elif crew_member.order == 'Reverse to Hull Down':
		# skill check
		if crew_member.SkillCheck('Eye for Cover'):
			mod_roll -= 1
		if campaign.day_map.player_node.node_type == 'F':
			mod_roll -= 2
		if mod_roll <= 3:
			PopUp('Your tank moves backward into a hull down position.')
			tank.hull_down = True
			UpdateTankCon()
			RenderEncounter()
		else:
			PopUp('Your tank moves backward but is unable to move into a hull down position.')
	# Effect on enemy units: Do a new 2D6 roll plus new modifiers
	d1, d2, roll = Roll2D6()
	# high-powered engine or light tank
	if 'M4A3' in tank.stats['vehicle_type']:
		roll -= 2
	elif tank.stats['vehicle_class'] == 'Light Tank':
		roll -= 2
	# duckbills
	if 'duckbills' in tank.stats:
		roll += 2
	# weather effects
	if campaign.weather.ground == 'Mud':
		roll += 4
	elif campaign.weather.ground in ['Snow', 'Deep Snow']:
		roll += 2
	# if equal to target number, enemy facings are recalculated, less than, enemy units and
	# smoke are moved
	if crew_member.order == 'Reverse to Hull Down':
		target_score = 5
		move_dist = -1
	elif crew_member.order == 'Forward to Hull Down':
		target_score = 6
		move_dist = 1
	elif crew_member.order == 'Reverse':
		target_score = 7
		move_dist = -1
	else:
		# forward
		target_score = 8
		move_dist = 1
	if roll < target_score:
		PopUp('Your tank has moved far enough that enemies are in new positions.')
		for unit in battle.enemy_units:
			if not unit.alive: continue
			unit.YMove(move_dist)
			# reset spotting, hidden, and target acquired
			unit.spotted = False
			unit.hidden = False
			unit.acquired = 0
			unit.acquired_player = 0
			if unit.unit_class == 'AC':
				unit.spotting_player = False
		for obj in battle.smoke_factors:
			obj.YMove(move_dist)
		CalculateSmokeFactors()
		# re-draw enemy units and smoke in new positions
		UpdateMapOverlay()
		# check for changed facings
		for unit in battle.enemy_units:
			if not unit.alive: continue
			if unit.SetFacing():
				text = unit.GetDesc() + ' now on ' + unit.facing + ' facing'
				ShowLabel(unit.x+MAP_CON_X, unit.y+MAP_CON_Y, text)
	elif roll == target_score:
		# tank moved just enough to change relative facings only
		for unit in battle.enemy_units:
			if not unit.alive: continue
			if unit.SetFacing():
				text = unit.GetDesc() + ' now on ' + unit.facing + ' facing'
				ShowLabel(unit.x+MAP_CON_X, unit.y+MAP_CON_Y, text)
	else:
		PopUp('Your tank does not move far enough to affect enemy positions.')
# see if we are just rotating the tank's turret (not firing main gun)
def SetupRotateTurret():
    """Return True if the gunner's order is 'Rotate Turret', recording the
    turret's current facing in tank.old_t_facing; otherwise return False."""
    gunner = GetCrewByPosition('Gunner')
    if gunner.order == 'Rotate Turret':
        # remember where the turret started so the rotation can be compared later
        tank.old_t_facing = tank.turret_facing
        return True
    return False
# rotate the main gun turret one sector
def RotateTurret(clockwise):
    """Rotate the turret one sector in the given direction, re-select the
    target if the current one falls outside the new arc, and redraw.

    clockwise: True to rotate clockwise, False for counter-clockwise."""
    # a damaged traverse gear prevents any rotation
    if 'Turret Traverse Malfunction' in tank.damage_list: return
    if 'Turret Traverse Broken' in tank.damage_list: return
    # facings run 0-5; modular arithmetic handles the wrap-around
    step = 1 if clockwise else -1
    tank.turret_facing = (tank.turret_facing + step) % 6
    if battle.target is not None:
        # if current target is no longer valid, choose a new one
        if battle.phase == 'Fire Main Gun':
            if tank.turret_facing != battle.target.map_hex.sector:
                SelectNextTarget()
        elif battle.phase == 'Fire MGs' and tank.active_mg == 0:
            SelectNextTarget()
    else:
        # no current target: try to get a new one
        SelectNextTarget()
    UpdateMapOverlay()
    RenderEncounter()
# display menu in tank console to set order for a crew member
def DisplayCrewOrders():
    """Draw the 'Set Crew Order' menu on tank_con for battle.selected_crew:
    the crewman's current info row, the list of possible orders with the
    selected one highlighted, that order's description and spot effect,
    and the input instructions from ORDER_INFO.

    Reads and may initialize battle.selected_order (index into the
    crewman's orders_list)."""
    # get selected crew member
    crewman = battle.selected_crew
    # if no order selected yet, select current order by default
    # (idiom fix: compare to None with 'is', use enumerate over a manual counter)
    if battle.selected_order is None:
        for n, order in enumerate(crewman.orders_list):
            if order.name == crewman.order:
                battle.selected_order = n
                break
        # can't find current order, select first one instead
        if battle.selected_order is None:
            battle.selected_order = 0
    # display menu
    libtcod.console_print(tank_con, 1, 1, 'Set Crew Order for:')
    # display current crew member info
    libtcod.console_set_default_background(tank_con, ROW_COLOR)
    libtcod.console_rect(tank_con, 1, 3, TANK_CON_WIDTH-2, 2, False, flag=libtcod.BKGND_SET)
    libtcod.console_set_default_background(tank_con, libtcod.black)
    libtcod.console_print(tank_con, 1, 2, 'Crewman Position Order Hatch Spot')
    info_list = crewman.GetInfo()
    libtcod.console_print(tank_con, 1, 3, info_list[0])
    libtcod.console_print(tank_con, 21, 3, info_list[1])
    # order might be long, so split it up
    lines = wrap(info_list[2], 11)
    libtcod.console_print(tank_con, 34, 3, lines[0])
    if len(lines) > 1:
        libtcod.console_print(tank_con, 34, 4, lines[1])
    libtcod.console_print(tank_con, 47, 3, info_list[3])
    # spot might be long too
    lines = wrap(info_list[4], 11)
    libtcod.console_print(tank_con, 54, 3, lines[0])
    if len(lines) > 1:
        libtcod.console_print(tank_con, 54, 4, lines[1])
    # nickname
    if info_list[5] != '':
        libtcod.console_print(tank_con, 2, 4, '"' + info_list[5] + '"')
    # display list of order names
    y = 6       # row of the next order name
    dy = 15     # row where the selected order's description starts
    for n, order in enumerate(crewman.orders_list):
        # if order is selected then highlight it
        if battle.selected_order == n:
            libtcod.console_set_default_background(tank_con, SELECTED_COLOR)
        libtcod.console_print_ex(tank_con, 1, y, libtcod.BKGND_SET, libtcod.LEFT, order.name)
        libtcod.console_set_default_background(tank_con, libtcod.black)
        # if order is selected then display order description
        # and spot effects
        if battle.selected_order == n:
            # split up the description string
            lines = wrap(order.desc, TANK_CON_WIDTH-23, subsequent_indent = ' ')
            for line in lines:
                libtcod.console_print(tank_con, 1, dy, line)
                dy += 1
            if order.spot:
                text = 'May spot'
            else:
                text = 'May not spot'
            text += ' next turn'
            libtcod.console_print(tank_con, 1, dy, text)
        y += 1
    # display instructions for selecting and issuing orders
    dy += 2
    for line in ORDER_INFO:
        libtcod.console_print(tank_con, 1, dy, line)
        dy += 1
################################################################################
# Console Drawing and Updating #
################################################################################
# display weather conditions to given console
def DisplayWeather(console, x, y):
    """Paint the weather summary box (cloud cover, precipitation, fog,
    ground cover) onto console with its top-left corner at (x, y)."""
    # cloud cover: sky-coloured box with the cloud state centred at the top
    if campaign.weather.clouds == 'Clear':
        sky_bg, sky_fg = CLEAR_SKY_COLOR, libtcod.white
    else:
        sky_bg, sky_fg = OVERCAST_COLOR, libtcod.dark_grey
    libtcod.console_set_default_background(console, sky_bg)
    libtcod.console_set_default_foreground(console, sky_fg)
    libtcod.console_rect(console, x, y, 10, 3, False, flag=libtcod.BKGND_SET)
    libtcod.console_print_ex(console, x+5, y, libtcod.BKGND_SET, libtcod.CENTER, campaign.weather.clouds)
    # precipitation line, if any falling
    if campaign.weather.precip in ['Rain', 'Snow']:
        libtcod.console_set_default_foreground(console, libtcod.white)
        libtcod.console_print_ex(console, x+5, y+1, libtcod.BKGND_SET, libtcod.CENTER, campaign.weather.precip)
    # fog banner overrides the third row
    if campaign.weather.fog:
        libtcod.console_set_default_background(console, libtcod.light_grey)
        libtcod.console_set_default_foreground(console, libtcod.white)
        libtcod.console_rect(console, x, y+2, 10, 1, False, flag=libtcod.BKGND_SET)
        libtcod.console_print_ex(console, x+5, y+2, libtcod.BKGND_SET, libtcod.CENTER, 'Fog')
    # ground cover strip along the bottom
    libtcod.console_set_default_foreground(console, libtcod.black)
    if campaign.weather.ground == 'Dry':
        ground_bg = OPEN_GROUND_COLOR
    elif campaign.weather.ground == 'Mud':
        ground_bg = MUD_COLOR
    else:
        ground_bg = libtcod.white
    libtcod.console_set_default_background(console, ground_bg)
    libtcod.console_rect(console, x, y+3, 10, 1, False, flag=libtcod.BKGND_SET)
    libtcod.console_print_ex(console, x+5, y+3, libtcod.BKGND_SET, libtcod.CENTER, campaign.weather.ground)
# paint the encounter map console
def PaintMapCon():
    """Draw the static encounter map background: the three range rings of
    hexes, grey-out shading when fog/snow blocks sight beyond close range,
    smoke hexes, and the six sector boundary lines."""
    libtcod.console_set_default_background(map_con, libtcod.black)
    libtcod.console_clear(map_con)
    # draw map hexes, including player hex 0,0
    libtcod.console_set_default_foreground(map_con, HEX_EDGE_COLOR)
    for map_hex in battle.maphexes:
        # farther range rings are painted slightly darker green
        if map_hex.rng == 2:
            libtcod.console_set_default_background(map_con, libtcod.Color(80, 120, 80))
        elif map_hex.rng == 1:
            libtcod.console_set_default_background(map_con, libtcod.Color(90, 130, 90))
        else:
            libtcod.console_set_default_background(map_con, libtcod.Color(100, 140, 100))
        # the player's own hex gets a black edge
        if map_hex.hx == 0 and map_hex.hy == 0:
            libtcod.console_set_default_foreground(map_con, libtcod.black)
        DrawHex(map_con, map_hex.x, map_hex.y)
    libtcod.console_set_default_background(map_con, libtcod.black)
    # if fog or falling snow, display lack of sight for medium and long range hexes
    if campaign.weather.fog or campaign.weather.precip == 'Snow':
        for map_hex in battle.maphexes:
            if map_hex.rng > 0:
                libtcod.console_set_default_background(map_con, libtcod.Color(180, 180, 180))
                libtcod.console_set_default_foreground(map_con, HEX_EDGE_COLOR)
                DrawHex(map_con, map_hex.x, map_hex.y)
        libtcod.console_set_default_background(map_con, libtcod.black)
    # draw smoke hexes overtop
    for map_hex in battle.maphexes:
        # skip if not visible anyway
        if campaign.weather.fog or campaign.weather.precip == 'Snow':
            if map_hex.rng > 0: continue
        if map_hex.smoke_factors > 0:
            # calculate background colour to use: denser smoke = lighter grey
            c = 120 + (map_hex.smoke_factors*20)
            # limit to light grey!
            if c > 220:
                c = 220
            col = libtcod.Color(c, c, c)
            libtcod.console_set_default_background(map_con, col)
            if map_hex.hx == 0 and map_hex.hy == 0:
                libtcod.console_set_default_foreground(map_con, libtcod.black)
            else:
                libtcod.console_set_default_foreground(map_con, HEX_EDGE_COLOR)
            # paint smoke hex
            DrawHex(map_con, map_hex.x, map_hex.y)
    libtcod.console_set_default_background(map_con, libtcod.black)
    libtcod.console_set_default_foreground(map_con, libtcod.black)
    # draw zone boundaries
    def DrawBoundaryLine(x1, y1, x2, y2):
        # choose the drawing character from the segment's slope:
        # '-' for horizontal, '\' for the two down-right/up-left diagonals,
        # '/' otherwise; the final cell is always drawn as '|'
        char = '/'
        if y1 == y2:
            char = '-'
        elif y1 < y2:
            if x1 < x2:
                char = '\\'
        else:
            if x1 > x2:
                char = '\\'
        line = GetLine(x1, y1, x2, y2)
        # skip first and last location
        for (x, y) in line[1:-1]:
            libtcod.console_put_char(map_con, x, y, char, flag=libtcod.BKGND_DEFAULT)
        (x,y) = line[-1]
        libtcod.console_put_char(map_con, x, y, '|', flag=libtcod.BKGND_DEFAULT)
    # each boundary is a chain of short segments from the inner ring outward
    # sectors 5 and 0
    DrawBoundaryLine(MAP_X0+6, MAP_Y0, MAP_X0+12, MAP_Y0)
    DrawBoundaryLine(MAP_X0+12, MAP_Y0, MAP_X0+15, MAP_Y0+3)
    DrawBoundaryLine(MAP_X0+15, MAP_Y0+3, MAP_X0+21, MAP_Y0+3)
    DrawBoundaryLine(MAP_X0+21, MAP_Y0+3, MAP_X0+24, MAP_Y0)
    DrawBoundaryLine(MAP_X0+24, MAP_Y0, MAP_X0+30, MAP_Y0)
    # sectors 0 and 1
    DrawBoundaryLine(MAP_X0+3, MAP_Y0+3, MAP_X0+6, MAP_Y0+6)
    DrawBoundaryLine(MAP_X0+6, MAP_Y0+6, MAP_X0+3, MAP_Y0+9)
    DrawBoundaryLine(MAP_X0+3, MAP_Y0+9, MAP_X0+6, MAP_Y0+12)
    DrawBoundaryLine(MAP_X0+6, MAP_Y0+12, MAP_X0+12, MAP_Y0+12)
    DrawBoundaryLine(MAP_X0+12, MAP_Y0+12, MAP_X0+15, MAP_Y0+15)
    # sectors 1 and 2
    DrawBoundaryLine(MAP_X0-3, MAP_Y0+3, MAP_X0-6, MAP_Y0+6)
    DrawBoundaryLine(MAP_X0-6, MAP_Y0+6, MAP_X0-3, MAP_Y0+9)
    DrawBoundaryLine(MAP_X0-3, MAP_Y0+9, MAP_X0-6, MAP_Y0+12)
    DrawBoundaryLine(MAP_X0-6, MAP_Y0+12, MAP_X0-12, MAP_Y0+12)
    DrawBoundaryLine(MAP_X0-12, MAP_Y0+12, MAP_X0-15, MAP_Y0+15)
    # sectors 2 and 3
    DrawBoundaryLine(MAP_X0-6, MAP_Y0, MAP_X0-12, MAP_Y0)
    DrawBoundaryLine(MAP_X0-12, MAP_Y0, MAP_X0-15, MAP_Y0+3)
    DrawBoundaryLine(MAP_X0-15, MAP_Y0+3, MAP_X0-21, MAP_Y0+3)
    DrawBoundaryLine(MAP_X0-21, MAP_Y0+3, MAP_X0-24, MAP_Y0)
    DrawBoundaryLine(MAP_X0-24, MAP_Y0, MAP_X0-30, MAP_Y0)
    # sectors 3 and 4
    DrawBoundaryLine(MAP_X0-3, MAP_Y0-3, MAP_X0-6, MAP_Y0-6)
    DrawBoundaryLine(MAP_X0-6, MAP_Y0-6, MAP_X0-12, MAP_Y0-6)
    DrawBoundaryLine(MAP_X0-12, MAP_Y0-6, MAP_X0-15, MAP_Y0-9)
    DrawBoundaryLine(MAP_X0-15, MAP_Y0-9, MAP_X0-12, MAP_Y0-12)
    DrawBoundaryLine(MAP_X0-12, MAP_Y0-12, MAP_X0-15, MAP_Y0-15)
    # sectors 4 and 5
    DrawBoundaryLine(MAP_X0+3, MAP_Y0-3, MAP_X0+6, MAP_Y0-6)
    DrawBoundaryLine(MAP_X0+6, MAP_Y0-6, MAP_X0+12, MAP_Y0-6)
    DrawBoundaryLine(MAP_X0+12, MAP_Y0-6, MAP_X0+15, MAP_Y0-9)
    DrawBoundaryLine(MAP_X0+15, MAP_Y0-9, MAP_X0+12, MAP_Y0-12)
    DrawBoundaryLine(MAP_X0+12, MAP_Y0-12, MAP_X0+15, MAP_Y0-15)
# draw the encounter map overlay console
def UpdateMapOverlay(skip_los=False):
    """Redraw the encounter map overlay: player tank, enemy units, the
    spot-sector highlight, the LoS line to the current target, the weather
    box, the symbol legends, round/phase text, and MG selection labels.

    skip_los: if True, do not draw the LoS line to the current target."""
    # reset console colors and clear
    libtcod.console_set_default_foreground(overlay_con, libtcod.black)
    libtcod.console_set_default_background(overlay_con, KEY_COLOR)
    libtcod.console_clear(overlay_con)
    # draw player tank
    tank.DrawMe()
    # draw enemy units
    for unit in battle.enemy_units:
        unit.DrawMe()
    libtcod.console_set_default_foreground(overlay_con, libtcod.white)
    libtcod.console_set_default_background(overlay_con, libtcod.black)
    # highlight spotting sector if in set spot sector phase
    if battle.phase == 'Set Spot Sectors' and battle.selected_crew is not None:
        # endpoints of the highlight line for each of the six spot sectors
        # (previously a six-way elif chain of identical loops)
        SECTOR_LINES = {
            0: (MAP_X0+2, MAP_Y0+1, 67, 35),
            1: (MAP_X0, MAP_Y0+2, 36, 46),
            2: (MAP_X0-2, MAP_Y0+1, 5, 35),
            3: (MAP_X0-2, MAP_Y0-1, 5, 15),
            4: (MAP_X0, MAP_Y0-2, 36, 4),
            5: (MAP_X0+2, MAP_Y0-1, 67, 15)
        }
        sector = battle.selected_crew.spot_sector
        if sector in SECTOR_LINES:
            (x1, y1, x2, y2) = SECTOR_LINES[sector]
            for (x, y) in GetLine(x1, y1, x2, y2):
                col = libtcod.console_get_char_background(map_con, x, y)
                libtcod.console_put_char_ex(overlay_con, x, y, 250, libtcod.white, col)
    # draw LoS if in Fire Main Gun or Fire MGs phase
    if battle.phase in ['Fire Main Gun', 'Fire MGs'] and battle.target is not None and not skip_los:
        line = GetLine(MAP_X0, MAP_Y0, battle.target.x, battle.target.y)
        # skip the first two cells (over the player tank) and the target cell
        for (x,y) in line[2:-1]:
            col = libtcod.console_get_char_background(map_con, x, y)
            libtcod.console_put_char_ex(overlay_con, x, y, 250, libtcod.white, col)
    # draw weather conditions display in top right corner
    DisplayWeather(overlay_con, MAP_CON_WIDTH-10, 1)
    libtcod.console_set_default_foreground(overlay_con, libtcod.white)
    libtcod.console_set_default_background(overlay_con, libtcod.black)
    # display symbol legend on map: unit types
    # (fixed typo in displayed string: 'Personel' -> 'Personnel')
    libtcod.console_print_ex(overlay_con, 1, 41, libtcod.BKGND_SET, libtcod.LEFT, 'Unit type:')
    libtcod.console_print_ex(overlay_con, 1, 42, libtcod.BKGND_SET, libtcod.LEFT, '----------')
    UNIT_LEGEND = [
        (libtcod.CHAR_RADIO_UNSET, 'Tank'),
        ('X', 'Anti Tank Gun'),
        ('#', 'Self Propelled Gun'),
        (libtcod.CHAR_BULLET_INV, 'Armoured Personnel Carrier'),
        (libtcod.CHAR_RADIO_SET, 'Armoured Car'),
        (libtcod.CHAR_BLOCK1, 'Light Infantry'),
        ('x', 'Machine Gun'),
        (libtcod.CHAR_BULLET_SQUARE, 'Truck')
    ]
    y = 43
    for (char, label) in UNIT_LEGEND:
        libtcod.console_put_char(overlay_con, 1, y, char, flag=libtcod.BKGND_SET)
        libtcod.console_print_ex(overlay_con, 4, y, libtcod.BKGND_SET, libtcod.LEFT, label)
        y += 1
    # tactical situation legend: symbol drawn twice with a gap, then label
    libtcod.console_print_ex(overlay_con, 60, 41, libtcod.BKGND_SET, libtcod.LEFT, 'Situation:')
    libtcod.console_print_ex(overlay_con, 60, 42, libtcod.BKGND_SET, libtcod.LEFT, '----------')
    SITUATION_LEGEND = [
        (libtcod.CHAR_ARROW2_N, 'Hull Down'),
        (libtcod.CHAR_DVLINE, 'Building'),
        (libtcod.CHAR_SPADE, 'Woods')
    ]
    y = 43
    for (char, label) in SITUATION_LEGEND:
        libtcod.console_put_char(overlay_con, 60, y, char, flag=libtcod.BKGND_SET)
        libtcod.console_put_char(overlay_con, 61, y, ' ', flag=libtcod.BKGND_SET)
        libtcod.console_put_char(overlay_con, 62, y, char, flag=libtcod.BKGND_SET)
        libtcod.console_print_ex(overlay_con, 64, y, libtcod.BKGND_SET, libtcod.LEFT, label)
        y += 1
    # display current round number
    libtcod.console_print_ex(overlay_con, int(MAP_CON_WIDTH/2), 0, libtcod.BKGND_SET,
        libtcod.CENTER, 'Round ' + str(battle.rounds_passed))
    # display current turn phase if any
    if battle.phase != 'None':
        libtcod.console_print_ex(overlay_con, int(MAP_CON_WIDTH/2), 1, libtcod.BKGND_SET, libtcod.CENTER, battle.phase + ' Phase')
    # display MG selections if firing MGs: greyed if it cannot fire,
    # highlighted if it is the active MG, white otherwise
    if battle.phase == 'Fire MGs':
        MG_LABELS = [
            (tank.coax_mg_can_fire, 0, 1, 'Co-ax MG'),
            (tank.bow_mg_can_fire, 1, 2, 'Bow MG'),
            (tank.aa_mg_can_fire, 2, 3, 'AA MG')
        ]
        for (can_fire, mg_num, y, label) in MG_LABELS:
            libtcod.console_set_default_foreground(overlay_con, GREYED_COLOR)
            if can_fire:
                if tank.active_mg == mg_num:
                    libtcod.console_set_default_foreground(overlay_con, SELECTED_COLOR)
                else:
                    libtcod.console_set_default_foreground(overlay_con, libtcod.white)
            libtcod.console_print_ex(overlay_con, 2, y, libtcod.BKGND_SET, libtcod.LEFT, label)
    # display reminder if spotting/combat restricted
    if campaign.weather.fog or campaign.weather.precip == 'Snow':
        libtcod.console_print_ex(overlay_con, int(MAP_CON_WIDTH/2), MAP_CON_HEIGHT-1, libtcod.BKGND_SET, libtcod.CENTER, 'Fog/Snow: Spotting/Combat at close range only')
# draw or update the map info console
def UpdateMapInfoCon(mx, my):
    """Update the map info console for the mouse position (mx, my): show
    info on the enemy unit under the cursor, the player tank if hovered,
    or a generic mouseover prompt otherwise."""
    libtcod.console_clear(map_info_con)
    # make sure mouse cursor is over map window
    # NOTE(review): only the left and bottom edges are tested here;
    # presumably the caller guarantees the other edges - confirm
    if mx < MAP_CON_X or my > MAP_CON_HEIGHT:
        libtcod.console_print_ex(map_info_con, int(MAP_INFO_CON_WIDTH/2), 1,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Mouseover a unit for information')
        return
    # adjust for offset of the map console on screen
    mx -= MAP_CON_X
    my -= 2
    # if we're over ourself
    if mx == MAP_X0 and my == MAP_Y0:
        text = 'M4 Sherman "' + tank.name + '"'
        libtcod.console_print_ex(map_info_con, int(MAP_INFO_CON_WIDTH/2), 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
        return
    # check for enemy unit under cursor
    for unit in battle.enemy_units:
        if not unit.alive: continue
        if unit.x == mx and unit.y == my:
            # print info on this unit
            x = int(MAP_INFO_CON_WIDTH/2)
            y = 0
            # basic description
            text = unit.GetDesc()
            libtcod.console_print(map_info_con, x, y, text)
            y += 1
            # morale status (blank line if neither)
            text = ''
            if unit.pinned:
                text = 'Pinned'
            elif unit.stunned:
                text = 'Stunned'
            libtcod.console_set_default_foreground(map_info_con, libtcod.light_red)
            libtcod.console_print(map_info_con, x, y, text)
            libtcod.console_set_default_foreground(map_info_con, libtcod.white)
            y += 1
            # range and sector
            rng = unit.map_hex.rng
            if rng == 2:
                text = 'Long'
            elif rng == 1:
                text = 'Medium'
            else:
                text = 'Short'
            text += ' Range, '
            # BUG FIX: the sector branches used 'text =', which silently
            # discarded the range text just built; append instead so the
            # line reads e.g. 'Short Range, Front Sector'
            if unit.map_hex.sector == 0:
                text += 'Rear Right '
            elif unit.map_hex.sector == 1:
                text += 'Rear '
            elif unit.map_hex.sector == 2:
                text += 'Rear Left '
            elif unit.map_hex.sector == 3:
                text += 'Front Left '
            elif unit.map_hex.sector == 4:
                text += 'Front '
            elif unit.map_hex.sector == 5:
                text += 'Front Right '
            text += 'Sector'
            libtcod.console_print(map_info_con, x, y, text)
            y += 1
            # if hidden or not spotted, no more info displayed
            if unit.hidden or not unit.spotted:
                return
            # emplaced, movement, or immobile status
            if unit.unit_class == 'AT_GUN':
                text = 'Emplaced'
            elif unit.moving:
                text = 'Moving'
            else:
                if unit.immobile:
                    text = 'Immobile'
                else:
                    text = 'Stationary'
            # terrain
            text += ' in ' + unit.terrain
            libtcod.console_print(map_info_con, x, y, text)
            y += 1
            # facing if applicable
            if unit.facing != '':
                text = unit.facing + ' Facing'
                libtcod.console_print(map_info_con, x, y, text)
                y += 1
            # acquired target / acquired player target
            text = ''
            if unit.acquired > 0:
                text += 'Acquired Target: ' + str(unit.acquired)
            if unit.acquired > 0 and unit.acquired_player > 0:
                text += ' ; '
            if unit.acquired_player > 0:
                text += 'Acquired Player: ' + str(unit.acquired_player)
            if unit.unit_class == 'AC':
                if unit.spotting_player:
                    text += 'Spotting Player'
            if text != '':
                libtcod.console_print(map_info_con, x, y, text)
                y += 1
            # display unit info reminder if doesn't need to be identified first
            if not (unit.unit_class in ['TANK', 'SPG', 'AT_GUN'] and not unit.identified):
                libtcod.console_print(map_info_con, x, MAP_INFO_CON_HEIGHT-1, 'Right-click for more info')
            # unit info displayed, so return
            return
    # no unit found, display instruction text
    libtcod.console_print_ex(map_info_con, int(MAP_INFO_CON_WIDTH/2), 1,
        libtcod.BKGND_NONE, libtcod.CENTER, 'Mouseover a unit for information')
# write current messages to message console
def UpdateMsgCon():
    """Redraw the message console from the battle message log, one
    (text, color) entry per row starting at the top."""
    libtcod.console_clear(msg_con)
    for row, (line, color) in enumerate(battle.messages):
        libtcod.console_set_default_foreground(msg_con, color)
        libtcod.console_print(msg_con, 0, row, line)
# draw tank info to tank info console
# used in encounters as well as in the campaign day view
def UpdateTankCon():
    """Redraw the tank info console: tank name and model, battle status
    line, ammo table, main gun state, misc stores, damage list, crew
    roster, and context-sensitive command instructions."""
    libtcod.console_clear(tank_con)
    # if we're currently in issue orders input mode, show selected crew member
    # and possible orders instead
    if battle is not None:
        if battle.phase == 'Issue Order':
            DisplayCrewOrders()
            return
    ##### Tank Name #####
    libtcod.console_set_default_foreground(tank_con, HIGHLIGHT_COLOR)
    libtcod.console_print(tank_con, 1, 1, tank.name)
    ##### Tank Model Name, and Nickname if any #####
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    text = tank.stats['vehicle_type']
    if 'nickname' in tank.stats:
        text += ' "' + tank.stats['nickname'] + '"'
    # note if current tank has HVSS
    if 'HVSS' in tank.stats:
        text += ' (HVSS)'
    libtcod.console_print(tank_con, 1, 2, text)
    ##### Tank Status if in Battle Encounter #####
    if battle is not None:
        libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
        text = ''
        if tank.lead_tank:
            text += 'Lead Tank, '
        if tank.moving:
            text += 'Moving'
        else:
            text += 'Stopped'
        # special statuses recolour the whole line as they are appended
        if tank.hull_down:
            libtcod.console_set_default_foreground(tank_con, libtcod.light_green)
            text += ', Hull Down'
        if tank.bogged_down:
            libtcod.console_set_default_foreground(tank_con, libtcod.red)
            text += ', Bogged Down'
        if tank.immobilized:
            libtcod.console_set_default_foreground(tank_con, libtcod.red)
            text += ', Immobilized'
        libtcod.console_print(tank_con, 1, 3, text)
        libtcod.console_set_default_foreground(tank_con, libtcod.white)
    ##### Ammo Load Info - Displayed at Top Right of Console #####
    libtcod.console_set_alignment(tank_con, libtcod.RIGHT)
    x = TANK_CON_WIDTH - 10     # right-aligned column cursor, walks leftward
    total_g = 0                 # running total of general stores rounds
    total_rr = 0                # running total of ready rack rounds
    for ammo_type in reversed(AMMO_TYPES):
        if ammo_type in tank.general_ammo:
            libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
            libtcod.console_print(tank_con, x, 1, ammo_type)
            libtcod.console_set_default_foreground(tank_con, libtcod.white)
            text = str(tank.general_ammo[ammo_type])
            libtcod.console_print(tank_con, x, 2, text)
            total_g += tank.general_ammo[ammo_type]
            text = str(tank.rr_ammo[ammo_type])
            libtcod.console_print(tank_con, x, 3, text)
            total_rr += tank.rr_ammo[ammo_type]
            # check for rare ammo supplies
            if campaign.resupply:
                text = ''
                if ammo_type == 'HCBI' and campaign.hcbi > 0:
                    text = str(campaign.hcbi)
                elif ammo_type == 'HVAP' and campaign.hvap > 0:
                    text = str(campaign.hvap)
                elif ammo_type == 'APDS' and campaign.apds > 0:
                    text = str(campaign.apds)
                if text != '':
                    text = '(' + text + ')'
                    libtcod.console_print(tank_con, x+1, 0, text)
            # advance the column left for the next displayed ammo type
            x -= (len(ammo_type)+2)
    # row labels for the ammo table
    libtcod.console_set_default_background(tank_con, ROW_COLOR)
    libtcod.console_rect(tank_con, x-4, 1, TANK_CON_WIDTH-x+3, 1, False, flag=libtcod.BKGND_SET)
    libtcod.console_set_default_background(tank_con, libtcod.black)
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_print(tank_con, x, 1, 'Ammo')
    libtcod.console_print(tank_con, x, 2, 'General')
    libtcod.console_print(tank_con, x, 3, 'Ready Rack')
    if campaign.resupply and battle is None:
        libtcod.console_print(tank_con, x-5, 1, 'Resupplying')
    # display current total and maximum ammo load
    x = TANK_CON_WIDTH-2
    libtcod.console_print(tank_con, x, 1, 'Max')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    text = str(total_g) + '/' + str(tank.stats['main_gun_rounds'])
    libtcod.console_print(tank_con, x, 2, text)
    text = str(total_rr) + '/' + str(tank.stats['rr_size'])
    libtcod.console_print(tank_con, x, 3, text)
    libtcod.console_set_alignment(tank_con, libtcod.LEFT)
    ##### Main Gun Info #####
    libtcod.console_set_default_background(tank_con, ROW_COLOR)
    libtcod.console_rect(tank_con, 1, 5, TANK_CON_WIDTH-2, 2, False, flag=libtcod.BKGND_SET)
    libtcod.console_set_default_background(tank_con, libtcod.black)
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_print(tank_con, 1, 5, 'Main Gun:')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    # generate display text for gun (strip the 'L' length marker, add 'mm')
    text = tank.stats['main_gun'].replace('L', '') + 'mm'
    libtcod.console_print(tank_con, 11, 5, text)
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_print(tank_con, 18, 5, 'Gun Load:')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    # an empty gun load is flagged in red
    if tank.ammo_load == 'None':
        libtcod.console_set_default_foreground(tank_con, libtcod.red)
    libtcod.console_print(tank_con, 28, 5, tank.ammo_load)
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_print(tank_con, 35, 5, 'Ammo Reload:')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    libtcod.console_print(tank_con, 48, 5, tank.ammo_reload)
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_print(tank_con, 55, 5, 'From:')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    if tank.use_rr:
        text = 'Ready Rack'
    else:
        text = 'General'
    libtcod.console_print(tank_con, 61, 5, text)
    ##### Misc Stats #####
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_print(tank_con, 1, 6, 'Smoke Grenades:')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    libtcod.console_print(tank_con, 17, 6, str(tank.smoke_grenades))
    if 'smoke_mortar' in tank.stats:
        libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
        libtcod.console_print(tank_con, 20, 6, 'Smoke Bombs:')
        libtcod.console_set_default_foreground(tank_con, libtcod.white)
        libtcod.console_print(tank_con, 33, 6, str(tank.smoke_bombs))
    ##### Damage Info #####
    text = ''
    # display if took a penetrating hit
    if tank.swiss_cheese:
        text += 'Suffered Penetrating Hit! '
    # join all recorded damage entries with semicolons
    for d in tank.damage_list:
        if text != '':
            text += '; '
        text += d
    if text == '':
        libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
        libtcod.console_print(tank_con, 1, 8, 'No damage')
    else:
        libtcod.console_set_default_foreground(tank_con, libtcod.red)
        lines = wrap(text, TANK_CON_WIDTH-2)
        y = 7
        for line in lines:
            libtcod.console_print(tank_con, 1, y, line)
            y += 1
            # only four rows available for damage text
            if y == 11: break
    ##### Crew Info #####
    y = 12      # top row of the crew roster; each crewman takes 3 rows
    libtcod.console_set_default_foreground(tank_con, libtcod.light_grey)
    libtcod.console_set_default_background(tank_con, ROW_COLOR)
    libtcod.console_rect(tank_con, 1, y-1, TANK_CON_WIDTH-2, 1, False, flag=libtcod.BKGND_SET)
    libtcod.console_print(tank_con, 1, y-1, 'Crewman Position Order Hatch Spot')
    libtcod.console_set_default_foreground(tank_con, libtcod.white)
    libtcod.console_set_default_background(tank_con, libtcod.black)
    n = 0       # crew slot index
    for crew_member in tank.crew:
        # determine basic foreground color for display
        fc = libtcod.white
        # if crewmember is having trouble, grey out their info
        if crew_member.NoActions():
            fc = GREYED_COLOR
        # highlight if selected; in battle the selection only shows during
        # the Set Spot Sectors and Orders phases
        selected = False
        if battle is not None:
            if battle.selected_crew is not None:
                if crew_member.position == battle.selected_crew.position:
                    selected = True
            if battle.phase not in ['Set Spot Sectors', 'Orders']:
                selected = False
        else:
            if campaign.selected_crew is not None:
                if crew_member.position == campaign.selected_crew.position:
                    selected = True
        if selected:
            fc = HIGHLIGHT_COLOR
        # set foreground color
        libtcod.console_set_default_foreground(tank_con, fc)
        # shade every other slot
        if IsOdd(n):
            libtcod.console_set_default_background(tank_con, ROW_COLOR)
        else:
            libtcod.console_set_default_background(tank_con, ROW_COLOR2)
        libtcod.console_rect(tank_con, 1, (n*3)+y, TANK_CON_WIDTH-2, 3, False, flag=libtcod.BKGND_SET)
        libtcod.console_set_default_background(tank_con, libtcod.black)
        # go through list of info text and display
        info_list = crew_member.GetInfo()
        # short rank and name
        text = crew_member.GetRank(short=True) + ' ' + info_list[0]
        libtcod.console_print(tank_con, 1, (n*3)+y, text)
        # position
        libtcod.console_print(tank_con, 25, (n*3)+y, info_list[1])
        # order
        # if not in battle, don't display anything
        if info_list[2] != '':
            lines = wrap(info_list[2], 11)
            libtcod.console_print(tank_con, 39, (n*3)+y, lines[0])
            if len(lines) > 1:
                libtcod.console_print(tank_con, 39, (n*3)+y+1, lines[1])
        # hatch
        libtcod.console_print(tank_con, 53, (n*3)+y, info_list[3])
        # spot
        lines = wrap(info_list[4], 11)
        libtcod.console_print(tank_con, 61, (n*3)+y, lines[0])
        if len(lines) > 1:
            libtcod.console_print(tank_con, 61, (n*3)+y+1, lines[1])
        # nickname
        if info_list[5] != '':
            libtcod.console_print(tank_con, 2, (n*3)+y+1, '"' + info_list[5] + '"')
        # wounds and/or status, always printed in red on the third row
        libtcod.console_set_default_foreground(tank_con, libtcod.red)
        if not crew_member.alive:
            text = 'Dead'
        else:
            if crew_member.v_serious_wound:
                text = 'Very Serious Wound'
            elif crew_member.serious_wound:
                text = 'Serious Wound'
            elif crew_member.light_wound:
                text = 'Light Wound'
            else:
                text = ''
            if crew_member.unconscious:
                if text != '':
                    text += ', '
                text += 'Unconscious'
            elif crew_member.stunned:
                if text != '':
                    text += ', '
                text += 'Stunned'
        libtcod.console_print(tank_con, 2, (n*3)+y+2, text)
        libtcod.console_set_default_foreground(tank_con, libtcod.white)
        n += 1
    # divider under the crew roster
    libtcod.console_hline(tank_con, 1, 27, TANK_CON_WIDTH-2, flag=libtcod.BKGND_DEFAULT)
    # display target mode if firing main gun
    if battle is not None:
        if battle.phase == 'Fire Main Gun':
            libtcod.console_set_default_foreground(tank_con, HIGHLIGHT_COLOR)
            if battle.area_fire:
                text = 'Area Fire Mode'
            else:
                text = 'Direct Fire Mode'
            libtcod.console_print(tank_con, 1, 28, text)
            libtcod.console_set_default_foreground(tank_con, libtcod.white)
        # display instructions based on current input mode
        lines = []
        if battle.phase == 'Set Spot Sectors':
            lines = SPOT_SECTOR_INFO
        elif battle.phase == 'Orders':
            lines = ORDERS_PHASE_INFO
        elif battle.phase == 'Pivot Tank':
            lines = PIVOT_INFO
        elif battle.phase == 'Rotate Turret':
            lines = ROTATE_INFO
        elif battle.phase == 'Fire Main Gun':
            lines = FIRE_GUN_INFO
        elif battle.phase == 'Fire MGs':
            lines = FIRE_MGS_INFO
        y = 29
        for line in lines:
            libtcod.console_print(tank_con, 1, y, line)
            y += 1
    else:
        # list possible actions in campaign view
        libtcod.console_print(tank_con, 1, TANK_CON_HEIGHT-8,
            'Change [%cG%c]un load, '%HIGHLIGHT +
            'Ammo [%cR%c]eload, '%HIGHLIGHT +
            '[%cT%c]oggle Ready Rack use'%HIGHLIGHT)
        libtcod.console_print(tank_con, 1, TANK_CON_HEIGHT-7,
            '[%cW/S%c] '%HIGHLIGHT +
            'or [%cUp/Down%c]: move crew selection, '%HIGHLIGHT +
            '[%cH%c] toggle their hatch'%HIGHLIGHT)
        libtcod.console_print(tank_con, 1, TANK_CON_HEIGHT-6,
            'Open [%cM%c]ain Gun Ammunition Menu'%HIGHLIGHT)
        libtcod.console_print(tank_con, 1, TANK_CON_HEIGHT-2,
            '[%cENTER%c] to confirm this loadout'%HIGHLIGHT)
# date, time, etc. console
def UpdateDateCon():
    """Redraw the date/time strip: date, clock, today's mission, and either
    the day's expected resistance (campaign view) or local terrain (battle)."""
    libtcod.console_clear(date_con)
    libtcod.console_print(date_con, 0, 0, campaign.GetDate())
    clock_text = str(campaign.hour) + ':' + str(campaign.minute).zfill(2)
    libtcod.console_print(date_con, 23, 0, clock_text)
    # mission for the day; a counterattack battle overrides it
    mission_text = campaign.scen_type
    if battle is not None:
        if battle.counterattack:
            mission_text = 'Counterattack'
    libtcod.console_print(date_con, 31, 0, mission_text)
    if battle is None:
        # if we're not in a battle, display expected resistance level for the day
        libtcod.console_print(date_con, 50, 0, 'Day Resistance: ' + campaign.scen_res)
    else:
        # otherwise, display area terrain
        terrain_text = campaign.GetTerrainDesc(campaign.day_map.player_node)
        libtcod.console_print(date_con, 50, 0, terrain_text)
# request input from the player, displayed on con
# can supply a list of random strings
# if get_name is true, select random string from FIRST_NAMES and LAST_NAMES instead
def GetInput(console, prompt_text, y, max_length, random_list=None, get_name=False):
    """Prompt the player for a text string and return it.

    console: console to draw the prompt onto
    prompt_text: text displayed above the input area
    y: screen row of the input area
    max_length: maximum length of the returned string
    random_list: optional list of strings; Ctrl+R picks one at random
    get_name: if True, Ctrl+R instead builds a random name from
        FIRST_NAMES and LAST_NAMES
    """
    # BUG FIX: original used a mutable default argument (random_list=[]),
    # a shared-across-calls pitfall; use None sentinel instead
    if random_list is None:
        random_list = []
    input_text = ''
    exit_prompt = False
    x = SCREEN_XM - int(max_length/2)
    W = 84
    libtcod.console_print_frame(console, SCREEN_XM - int(W/2), y-5, W, 13,
        clear=True, flag=libtcod.BKGND_DEFAULT, fmt=0)
    while not exit_prompt:
        # display prompt text
        libtcod.console_print_ex(console, SCREEN_XM, y-3, libtcod.BKGND_NONE, libtcod.CENTER, prompt_text)
        # display input area
        # NOTE(review): this sets the background on the global 'con' rather
        # than the 'console' parameter (as does the reset at the bottom) -
        # presumably callers always pass con; confirm before changing
        libtcod.console_set_default_background(con, PLAYER_COLOR)
        libtcod.console_rect(console, x, y, max_length, 1, False, flag=libtcod.BKGND_SET)
        # clear any old string, then display current string
        libtcod.console_rect(console, x, y, max_length, 1, True, flag=libtcod.BKGND_SET)
        libtcod.console_print_ex(console, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, input_text)
        # if list of random possible strings is provided, add instruction
        if len(random_list) > 0 or get_name:
            text = 'Press Ctrl+R to randomly select an entry, replacing anything already inputted'
            libtcod.console_print_ex(console, SCREEN_XM, y+4, libtcod.BKGND_NONE, libtcod.CENTER, text)
        # display instructions
        text = '[%cEnter%c] to continue'%HIGHLIGHT
        libtcod.console_print_ex(console, SCREEN_XM, y+5, libtcod.BKGND_NONE, libtcod.CENTER, text)
        libtcod.console_blit(console, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
        refresh = False
        while not refresh:
            # exit right away
            if libtcod.console_is_window_closed():
                sys.exit()
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            if key.vk == libtcod.KEY_ENTER:
                refresh = True
                exit_prompt = True
                PlaySound('menu_select')
            elif key.vk == libtcod.KEY_BACKSPACE:
                if len(input_text) > 0:
                    input_text = input_text[:-1]
                    refresh = True
            # if any valid character is entered
            if 32 <= key.c <= 126:
                # if control-r, choose a random string
                if key.c == 114 and (key.lctrl or key.rctrl):
                    # if selecting a name
                    if get_name:
                        # retry until a combination fits within max_length
                        for n in range(99):
                            input_text = random.choice(FIRST_NAMES) + ' ' + random.choice(LAST_NAMES)
                            if len(input_text) <= max_length:
                                break
                        refresh = True
                    elif len(random_list) > 0:
                        # keep doing this many times until a result is found that
                        # is different than the current one
                        for n in range(99):
                            random_string = random.choice(random_list)
                            if len(random_string) > max_length:
                                random_string = random_string[:max_length]
                            if random_string != input_text:
                                input_text = random_string
                                break
                        refresh = True
                # otherwise, try to add it to the string
                else:
                    if len(input_text) < max_length:
                        new_text = chr(key.c)
                        if key.shift: new_text = new_text.upper()
                        input_text = input_text + new_text
                        refresh = True
            libtcod.console_flush()
    # reset console color
    libtcod.console_set_default_background(con, libtcod.black)
    return input_text
# ask the player to choose between several options
# only used for changing gun load at the moment
# can use ESC to cancel and not choose any option
def GetChoice(prompt_text, choice_list):
    """Display a vertical menu of choices and return the selected entry.

    Returns the chosen string from choice_list, or None if the player
    cancelled with ESC. Currently only used for changing gun load.
    """
    # darken screen if in battle
    if battle is not None:
        libtcod.console_clear(con)
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
            0.0, 0.7)
    exit_prompt = False
    libtcod.console_set_alignment(menu_con, libtcod.CENTER)
    # select first choice by default
    selected = choice_list[0]
    while not exit_prompt:
        # display menu of choices
        libtcod.console_clear(menu_con)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_print(menu_con, MENU_CON_XM, 3, prompt_text)
        y = 8
        for choice in choice_list:
            # highlight the currently selected entry with a colored bar
            if choice == selected:
                libtcod.console_set_default_background(menu_con, SELECTED_COLOR)
                w = len(choice) + 2
                libtcod.console_rect(menu_con, MENU_CON_XM-int(w/2), y, w, 1, False, flag=libtcod.BKGND_SET)
                libtcod.console_set_default_background(menu_con, libtcod.black)
            libtcod.console_print(menu_con, MENU_CON_XM, y, choice)
            y += 2
        libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-4, '[%cW/S/Up/Down%c] to move selection'%HIGHLIGHT)
        libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3, '[Enter] to choose, [ESC] to cancel')
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        libtcod.console_flush()
        refresh = False
        # inner loop: poll events until something requires a redraw
        while not refresh:
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            if key.vk == libtcod.KEY_ENTER:
                refresh = True
                exit_prompt = True
            elif key.vk == libtcod.KEY_ESCAPE:
                # cancel: no option chosen
                selected = None
                refresh = True
                exit_prompt = True
            key_char = chr(key.c)
            # move selection up, wrapping to the bottom
            if key_char in ['w', 'W'] or key.vk == libtcod.KEY_UP:
                if choice_list.index(selected) == 0:
                    selected = choice_list[-1]
                else:
                    selected = choice_list[choice_list.index(selected)-1]
                refresh = True
            # move selection down, wrapping to the top
            elif key_char in ['s', 'S'] or key.vk == libtcod.KEY_DOWN:
                if choice_list.index(selected) == len(choice_list) - 1:
                    selected = choice_list[0]
                else:
                    selected = choice_list[choice_list.index(selected)+1]
                refresh = True
            libtcod.console_flush()
    libtcod.console_set_alignment(menu_con, libtcod.CENTER)
    return selected
# handle random encounter event
def RandomEvent():
    """Roll for and resolve one random event during an encounter round.

    Events: Flanking Fire, Friendly Artillery, Time Passes, Harassing
    Fire, Enemy Artillery, Enemy Reinforcements, Mines. Which 2D6 result
    maps to which event depends on the day's mission type.
    """
    # chance of no event this round
    if Roll1D6() <= 2:
        Message('No Random Event this round.')
        return
    # determine roll result ranges
    # Flanking Fire, Friendly Arty, Time Passes, Harassing Fire, Enemy Arty, Enemy Reinforcement
    # Mines
    if campaign.scen_type == 'Advance':
        RANGES = [3,4,5,7,8,9,12]
    elif campaign.scen_type == 'Battle':
        RANGES = [3,4,5,7,9,10,12]
    else:
        # counterattack: last entry 0 means mines never occur
        RANGES = [3,5,6,7,9,12,0]
    # individual die results are not used, only the 2D6 total
    d1, d2, roll = Roll2D6()
    # flanking fire
    if roll <= RANGES[0]:
        # skip if no enemies
        if AllEnemiesDead():
            Message('No Random Event this round.')
            return
        PopUp('Friendly forces conduct Flanking Fire against the enemy.')
        RenderEncounter()
        result = False
        # every living enemy unit is attacked by friendly forces
        for unit in battle.enemy_units:
            if not unit.alive: continue
            if unit.FriendlyAction(flanking_fire=True):
                result = True
        UpdateMapOverlay()
        RenderEncounter()
        if not result:
            PopUp('No results from Flanking Fire.')
    # friendly artillery
    elif roll <= RANGES[1]:
        # skip if no enemies
        if AllEnemiesDead():
            Message('No Random Event this round.')
            return
        PopUp('Friendly forces conduct artillery fire against the enemy.')
        PlaySound('arty_firing')
        result = False
        for unit in battle.enemy_units:
            if not unit.alive: continue
            if unit.FriendlyAction(artillery=True):
                result = True
        UpdateMapOverlay()
        RenderEncounter()
        if not result:
            PopUp('No results from Friendly Artillery.')
    # time passes
    elif roll <= RANGES[2]:
        PopUp('15 minutes of time has passed.')
        campaign.SpendTime(0, 15)
        WriteJournal('Time is now ' + str(campaign.hour) + ':' + str(campaign.minute).zfill(2))
        RenderEncounter()
    # harassing fire: threatens exposed (un-buttoned) crew
    elif roll <= RANGES[3]:
        PlaySound('german_rifle_fire')
        PopUp('Small-arms fire peppers your tank, threatening any crew member not buttoned up.')
        tank.LWAttack()
    # enemy artillery
    elif roll <= RANGES[4]:
        PopUp('Enemy artillery fire rains down on your position.')
        PlaySound('arty_firing')
        ArtyStrikeAnimation(MAP_X0+MAP_CON_X, MAP_Y0+MAP_CON_Y)
        # 1-6: one squad lost, 7-9: two, 10: three
        result = Roll1D10()
        if result <= 6:
            num_ko = 1
        elif result <= 9:
            num_ko = 2
        else:
            num_ko = 3
        PopUp(str(num_ko) + ' friendly infantry squads are destroyed.')
        battle.inf_lost += num_ko
        # artillery also threatens exposed crew
        tank.LWAttack()
    # enemy reinforcement
    elif roll <= RANGES[5]:
        # check for reinforcement roll: the more reinforcements already
        # arrived, the harder it is for more to show up
        if battle.enemy_reinforcements > 0:
            roll = Roll1D6()
            if roll != 1 and roll + battle.enemy_reinforcements >= 7:
                Message('No Random Event this round.')
                return
        battle.enemy_reinforcements += 1
        Message('Enemy reinforcements have arrived.')
        RenderEncounter()
        SpawnEnemy()
        UpdateMapOverlay()
        RenderEncounter()
    # mines: only possible while the battle group is moving
    elif roll <= RANGES[6]:
        if not tank.moving:
            Message('No Random Event this round.')
        else:
            PopUp('Your battle group has moved into a minefield!')
            tank.MinefieldAttack()
# display the menu bar on the main console
def DisplayMenuBar():
    """Draw the top menu bar onto the main console."""
    libtcod.console_set_default_foreground(con, libtcod.light_grey)
    libtcod.console_set_alignment(con, libtcod.LEFT)
    # the first menu group is only shown while a campaign day is underway
    if campaign.day_in_progress:
        libtcod.console_print(con, 1, 0, MENU_BAR1)
    libtcod.console_print(con, 14, 0, MENU_BAR2)
    # restore the default foreground color
    libtcod.console_set_default_foreground(con, libtcod.white)
# screen rendering function for encounters, draws everything to the main console
# if zoom_in is True, game will display an animation effect zooming in on the encounter
# location
def RenderEncounter(no_flush=False, zoom_in=False):
    """Draw the full encounter screen to the main console.

    no_flush: if True, draw to the root console but don't refresh the
        screen (caller will flush, e.g. before showing a pop-up)
    zoom_in: if True and animations are enabled, play a zoom-in
        animation centered on the player's campaign map node
    """
    # clear the display console
    libtcod.console_clear(con)
    # display menu bar
    DisplayMenuBar()
    # blit consoles to display console
    libtcod.console_blit(date_con, 0, 0, DATE_CON_WIDTH, DATE_CON_HEIGHT, con, 1, 2)
    libtcod.console_blit(tank_con, 0, 0, TANK_CON_WIDTH, TANK_CON_HEIGHT, con, 1, 4)
    libtcod.console_blit(msg_con, 0, 0, MSG_CON_WIDTH, MSG_CON_HEIGHT, con, 1, TANK_CON_HEIGHT+5)
    libtcod.console_blit(map_con, 0, 0, MAP_CON_WIDTH, MAP_CON_HEIGHT, con, MAP_CON_X, MAP_CON_Y)
    # blit map overlay (drawn on top of the base map)
    libtcod.console_blit(overlay_con, 0, 0, MAP_CON_WIDTH, MAP_CON_HEIGHT, con,
        MAP_CON_X, MAP_CON_Y)
    libtcod.console_blit(map_info_con, 0, 0, MAP_INFO_CON_WIDTH, MAP_INFO_CON_HEIGHT,
        con, MAP_CON_X, SCREEN_HEIGHT-MAP_INFO_CON_HEIGHT)
    # lines between console displays
    libtcod.console_hline(con, 1, 1, SCREEN_WIDTH-2, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_hline(con, 1, 3, TANK_CON_WIDTH, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_hline(con, 1, TANK_CON_HEIGHT+4, TANK_CON_WIDTH, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_hline(con, MAP_CON_X, MAP_CON_HEIGHT+2, MAP_CON_WIDTH, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_vline(con, MAP_CON_X-1, 2, SCREEN_HEIGHT-2, flag=libtcod.BKGND_DEFAULT)
    # zoom in effect: blit successively larger framed windows centered
    # on the player's node until the frame covers the whole screen
    if zoom_in and campaign.animations:
        x = campaign.day_map.player_node.x+C_MAP_CON_X
        y = campaign.day_map.player_node.y+4-campaign.c_map_y
        for w in range(3, SCREEN_WIDTH, 2):
            libtcod.console_blit(con, x-w, y-w, w*2, w*2, 0, x-w, y-w)
            libtcod.console_print_frame(0, x-w, y-w, w*2, w*2,
                clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
            libtcod.console_flush()
            if x-w < 0 and x+w >= SCREEN_WIDTH: break
    # blit full display console to screen and update screen
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
    if not no_flush:
        libtcod.console_flush()
# wait for player to press enter before continuing
def WaitForEnter():
    """Block until the player presses Enter.

    While waiting, still honors window close, the screenshot key, and
    the sound toggle key. Waits for Enter to be released before
    returning so the key-press isn't consumed by the next input loop.
    """
    done = False
    while not done:
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        if libtcod.console_is_window_closed():
            sys.exit()
        elif key.vk == libtcod.KEY_ENTER:
            done = True
        elif key.vk in (libtcod.KEY_F6, libtcod.KEY_6):
            SaveScreenshot()
        elif key.vk in (libtcod.KEY_F7, libtcod.KEY_7):
            campaign.sounds = not campaign.sounds
            PopUp("Sound turned on" if campaign.sounds else "Sound turned off")
        libtcod.console_flush()
    # swallow events until the key is released
    while libtcod.console_is_key_pressed(libtcod.KEY_ENTER):
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        libtcod.console_flush()
# wait for player to press space before continuing
def WaitForSpace():
    """Block until the player presses Space.

    While waiting, still honors window close, the screenshot key, and
    the sound toggle key. Waits for Space to be released before
    returning so the key-press isn't consumed by the next input loop.
    """
    done = False
    while not done:
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        if libtcod.console_is_window_closed():
            sys.exit()
        elif key.vk == libtcod.KEY_SPACE:
            done = True
        elif key.vk in (libtcod.KEY_F6, libtcod.KEY_6):
            SaveScreenshot()
        elif key.vk in (libtcod.KEY_F7, libtcod.KEY_7):
            campaign.sounds = not campaign.sounds
            PopUp("Sound turned on" if campaign.sounds else "Sound turned off")
        libtcod.console_flush()
    # swallow events until the key is released
    while libtcod.console_is_key_pressed(libtcod.KEY_SPACE):
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        libtcod.console_flush()
# wait for player to press escape before continuing
def WaitForEscape():
    """Block until the player presses Escape.

    While waiting, still honors window close, the screenshot key, and
    the sound toggle key. Waits for Escape to be released before
    returning so the key-press isn't consumed by the next input loop.
    """
    done = False
    while not done:
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        if libtcod.console_is_window_closed():
            sys.exit()
        elif key.vk == libtcod.KEY_ESCAPE:
            done = True
        elif key.vk in (libtcod.KEY_F6, libtcod.KEY_6):
            SaveScreenshot()
        elif key.vk in (libtcod.KEY_F7, libtcod.KEY_7):
            campaign.sounds = not campaign.sounds
            PopUp("Sound turned on" if campaign.sounds else "Sound turned off")
        libtcod.console_flush()
    # swallow events until the key is released
    while libtcod.console_is_key_pressed(libtcod.KEY_ESCAPE):
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        libtcod.console_flush()
# save the game in progress
def SaveGame():
    """Save the current campaign, tank, and battle state to 'savegame'.

    Does nothing if the campaign is already over.
    """
    # don't save if campaign is over
    if campaign.over:
        return
    # record the commander's name for the saved-game info header
    name = ''
    for crewman in tank.crew:
        if crewman.position == 'Commander':
            name = crewman.name
            break
    info = SavedGameInfo(VERSION, campaign.campaign_name, name, tank.name, campaign.GetDate())
    # 'n' flag: always create a fresh save, overwriting any old one
    save = shelve.open('savegame', 'n')
    try:
        save['info'] = info
        save['campaign'] = campaign
        save['tank'] = tank
        save['battle'] = battle
    finally:
        # BUG FIX: original had 'save.close' without parentheses, so the
        # shelf was never explicitly closed; wrap in finally so the file
        # is closed even if pickling fails
        save.close()
# load a saved game
def LoadGame():
    """Restore campaign, tank, and battle state from the save file."""
    global campaign, tank, battle
    save = shelve.open('savegame')
    campaign, tank, battle = save['campaign'], save['tank'], save['battle']
    save.close()
    # calendar / activation data is not pickled with the campaign;
    # re-read it from the campaign xml file
    LoadCampaignInfo()
# load campaign info from xml file
def LoadCampaignInfo():
    """Load campaign metadata from the campaign's xml file into the
    global campaign object: names, nations, vehicle list, unit
    activation tables, activation modifiers, and the calendar of days.
    """
    root = xml.parse(DATAPATH + campaign.campaign_file)
    campaign.campaign_name = root.find('name').text
    campaign.player_nation = root.find('player_nation').text
    campaign.enemy_nation = root.find('enemy_nation').text
    # set campaign variables for ranks and awards based on player nation
    if campaign.player_nation == 'USA':
        campaign.ranks = USA_RANKS
        campaign.decorations = USA_DECORATIONS
    elif campaign.player_nation == 'CAN':
        campaign.ranks = UKC_RANKS
        campaign.decorations = UKC_DECORATIONS
    # load campaign map file info from campaign file (optional tag)
    if root.find('campaign_map_file') is not None:
        campaign.map_file = root.find('campaign_map_file').text
    # build list of permitted player vehicle types
    campaign.player_veh_list = []
    veh_list = root.find('player_tanks').findall('player_tank_type')
    for item in veh_list:
        campaign.player_veh_list.append(item.text)
    # load unit class activation chance info for each type of mission;
    # one list of (class_name, value) tuples per mission type, in the
    # order advance / battle / counterattack
    campaign.mission_activations = []
    CLASSES = ['TANK','SPG','AT_GUN','LW','MG','TRUCK','APC','AC']
    item = root.find('activation_table')
    for tag_name in ['advance', 'battle', 'counterattack']:
        item2 = item.find(tag_name)
        tuple_list = []
        for class_name in CLASSES:
            value = int(item2.find(class_name).text)
            tuple_list.append((class_name, value))
        campaign.mission_activations.append(tuple_list)
    # load activation modifiers as list of dictionaries (optional tag)
    item = root.find('activation_modifiers')
    if item is not None:
        for child in item.findall('modifier'):
            dictionary = {}
            dictionary['year'] = int(child.find('year').text)
            dictionary['month'] = int(child.find('month').text)
            dictionary['date'] = int(child.find('date').text)
            dictionary['class_name'] = child.find('class_name').text
            dictionary['mod'] = int(child.find('mod').text)
            campaign.activation_modifiers.append(dictionary)
    # load activation chance info for each unit class (out of 1000);
    # each entry is [class_name, (unit_type, chance), ...]
    campaign.class_activations = []
    item = root.find('unit_class_activations')
    for tag_name in CLASSES:
        unit_list = [tag_name]
        item_list = item.findall(tag_name)
        for unit_type in item_list:
            # tag text is a 'name; chance' pair, parsed with csv
            reader = csv.reader([unit_type.text], delimiter=';', skipinitialspace=True, strict=True)
            for row in reader:
                unit_list.append((row[0], int(row[1])))
        campaign.class_activations.append(unit_list)
    # load calendar day info into campaign object
    REQUIRED_KEYS = ['month', 'date', 'year', 'comment']
    OPTIONAL_KEYS = ['resistance_level', 'mission', 'description', 'terrain',
        'map_x', 'map_y']
    campaign.days = []
    item_list = root.find('calendar').findall('day')
    for item in item_list:
        day = {}
        # go through required keys and get their values
        # (note: loop variable shadows the module-level 'key' locally)
        for key in REQUIRED_KEYS:
            value = item.find(key).text
            # some need to be cast to integer values
            if key in ['month', 'date', 'year']:
                value = int(value)
            # add the key and value
            day[key] = value
        # do the same for optional key/value pairs
        for key in OPTIONAL_KEYS:
            if item.find(key) is not None:
                value = item.find(key).text
                day[key] = value
        # add the completed day entry to the campaign calendar
        campaign.days.append(day)
    # delete the parsed xml data; we've saved everything we need
    del root
# load a console image from an .xp file
def LoadXP(filename):
    """Load a gzip-compressed REXPaint .xp image from the data path and
    return it as a new offscreen console."""
    with gzip.open(DATAPATH + filename) as xp_file:
        xp_data = xp_loader.load_xp_string(xp_file.read())
    new_con = libtcod.console_new(xp_data['width'], xp_data['height'])
    # only the first layer of the image is drawn
    xp_loader.load_layer_to_console(new_con, xp_data['layer_data'][0])
    return new_con
# open the highscores file and try to add this campaign's outcome
def AddHighScore():
    """Record this campaign's outcome in the 'bones' highscore file.

    Best-effort: any failure is reported to stdout but never interrupts
    the game.
    """
    try:
        # load the existing highscores object from the bones file
        save = shelve.open('bones')
        bones = save['bones']
        save.close()
        # find the commander
        # NOTE(review): if no crewman has the 'Commander' position this
        # falls through to the last crew member - presumed impossible
        for crew_member in tank.crew:
            if crew_member.position == 'Commander':
                break
        # compose the outcome text to be added
        if not crew_member.alive:
            outcome = 'KIA on ' + campaign.GetDate()
        elif crew_member.v_serious_wound:
            outcome = 'Sent Home on ' + campaign.GetDate()
        else:
            outcome = 'Survived'
        # add the new entry to the list of highscores
        vp = campaign.vp + campaign.day_vp
        bones.score_list.append((tank.name, crew_member.name, vp, outcome,
            campaign.unlimited_tank_selection,
            campaign.casual_commander,
            campaign.campaign_name))
        # sort descending by VP score
        bones.score_list.sort(key=lambda tup: tup[2], reverse=True)
        # limit to max length
        if len(bones.score_list) > MAX_HS:
            del bones.score_list[-1]
        # save the new bones file
        save = shelve.open('bones')
        save['bones'] = bones
        save.close()
    except Exception as e:
        # BUG FIX: original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt, and hid the actual cause of the failure
        print('ERROR: Could not add to bones file: ' + str(e))
# save a screenshot of the current main console
def SaveScreenshot():
    """Capture the root console to a timestamped .bmp file and notify
    the player."""
    filename = 'screenshot_' + time.strftime("%Y_%m_%d_%H_%M_%S") + '.bmp'
    libtcod.image_save(libtcod.image_from_console(0), filename)
    PlaySound('screenshot')
    PopUp("Screenshot saved as: " + filename)
# display a crew speech box, either on the encounter or the campaign day map
def CrewTalk(message, position_list=None):
    """Display a speech bubble from a random eligible crewman, on the
    encounter map or the campaign day map as appropriate.

    position_list: if given, only crewmen in one of these positions may
    speak. The commander never speaks.
    """
    def can_speak(c):
        # the commander never speaks, nor do incapacitated crew
        if c.position == 'Commander':
            return False
        if not c.alive or c.stunned or c.unconscious:
            return False
        return position_list is None or c.position in position_list
    candidates = [c for c in tank.crew if can_speak(c)]
    if not candidates:
        return
    # select the crewman to speak
    speaker = random.choice(candidates)
    # anchor the label on the campaign map node or on the encounter map
    if battle is None:
        x = campaign.day_map.player_node.x + C_MAP_CON_X
        y = campaign.day_map.player_node.y + 3 - campaign.c_map_y
    else:
        x = MAP_X0 + MAP_CON_X
        y = MAP_Y0 + MAP_CON_Y
    ShowLabel(x, y, message, crewman=speaker)
    # re-draw original console to screen
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
# display a pop-up info window
# if confirm, we want a confirmation from the player
def PopUp(message, confirm=False, skip_update=False):
    """Display a pop-up message window over a darkened screen.

    confirm: if True, ask for a y/N confirmation and return the choice
        (Enter / ESC / N all decline); otherwise wait for Enter.
    skip_update: if True, don't redraw the battle/campaign screen after
        the pop-up closes (useful when another pop-up follows).
    Returns True only when confirm was requested and the player chose y.
    """
    # darken screen
    libtcod.console_clear(con)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
        0.0, 0.7)
    # draw message window to screen
    lines = wrap(message, 30)
    y = 25
    # draw background box, sized to the wrapped message
    libtcod.console_print_frame(0, SCREEN_XM-17, y-2, 34, len(lines)+6,
        clear=True, flag=libtcod.BKGND_SET, fmt=0)
    for line in lines:
        libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
        y += 1
    if confirm:
        text = '[%cy%c] or [%cN%c]'%(libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
    else:
        text = '[%cEnter%c] to continue'%HIGHLIGHT
    libtcod.console_print_ex(0, SCREEN_XM, y+1, libtcod.BKGND_NONE, libtcod.CENTER, text)
    # wait for input
    choice = False
    exit_menu = False
    while not exit_menu:
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        key_char = chr(key.c)
        # exit right away
        if libtcod.console_is_window_closed():
            sys.exit()
        if confirm:
            # Enter, ESC, and N all count as declining
            if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
                exit_menu = True
            elif key_char in ['y', 'Y']:
                choice = True
                exit_menu = True
        else:
            if key.vk == libtcod.KEY_ENTER:
                exit_menu = True
        # update screen
        libtcod.console_flush()
    # play menu sound
    #PlaySound('menu_select')
    # if we don't want to redraw the battle or campaign screen
    if skip_update: return choice
    # redraw the console but don't refresh screen, in case we will show another pop-up
    if battle is not None:
        RenderEncounter(no_flush=True)
    elif campaign.day_in_progress:
        RenderCampaign(no_flush=True)
    return choice
# checks if we need to set spot sectors for one or more crewmen
def CheckSpotSectors():
    """Return True if at least one crewman still needs to choose a spot
    sector and there is at least one enemy unit that could actually be
    spotted or identified this round."""
    def spottable(unit):
        # dead or hidden units can't be spotted
        if not unit.alive or unit.hidden:
            return False
        # already fully spotted and identified: nothing left to learn
        if unit.spotted and unit.identified:
            return False
        # spotted units outside these classes don't need identification
        if unit.spotted and unit.unit_class not in ['TANK', 'SPG', 'AT_GUN']:
            return False
        # fog or snow blocks spotting beyond close range
        if unit.map_hex.rng > 0 and (campaign.weather.fog or campaign.weather.precip == 'Snow'):
            return False
        return True
    # if nothing could be spotted, no sectors need setting
    if not any(spottable(unit) for unit in battle.enemy_units):
        return False
    # spotting could happen; check whether any crewman must set a sector
    return any(crew_member.spot == 'Any One Sector' for crew_member in tank.crew)
################################################################################
# Encounter Handling #
################################################################################
# display encounter menu
def EncounterMenu():
    """Display the encounter overview menu: exit options, a VP tally of
    enemy units destroyed / forced off and friendly losses, and the
    current encounter result. Also records battle.vp_total in case the
    player is leaving the encounter.
    """
    # darken screen
    libtcod.console_clear(con)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
        0.0, 0.7)
    # generate and display menu
    libtcod.console_clear(menu_con)
    libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
        clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
    libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
    libtcod.console_print_ex(menu_con, MENU_CON_XM, 1,
        libtcod.BKGND_NONE, libtcod.CENTER, 'Encounter Menu')
    libtcod.console_set_default_foreground(menu_con, libtcod.white)
    # campaign is over
    if campaign.exiting:
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 3,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Return to Main Menu'%HIGHLIGHT)
    # scenario is resolved
    elif battle.result != 'Undetermined':
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 3,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Return to Campaign Map'%HIGHLIGHT)
    # scenario continues
    else:
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 3,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Return to Game'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 4,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cQ%c] Save Game and Quit'%HIGHLIGHT)
    # display enemy units destroyed
    x = 36
    libtcod.console_set_alignment(menu_con, libtcod.LEFT)
    libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
    libtcod.console_print(menu_con, x, 7, ' Destroyed by Forced off')
    libtcod.console_print(menu_con, x, 8, 'Enemy Forces VP Value Player Friendly Forces Encounter Map')
    libtcod.console_print(menu_con, x, 9, '-----------------------------------------------------------------------------------')
    libtcod.console_set_default_foreground(menu_con, libtcod.white)
    libtcod.console_print(menu_con, x, 10, 'LW and MG Squads 1')
    libtcod.console_print(menu_con, x, 11, 'Trucks 1')
    libtcod.console_print(menu_con, x, 12, 'APC or Armoured Car 2')
    libtcod.console_print(menu_con, x, 13, 'Self-Propelled Guns 6')
    libtcod.console_print(menu_con, x, 14, 'Panzer IV 7')
    libtcod.console_print(menu_con, x, 15, 'Panzer V (Panther) 9')
    libtcod.console_print(menu_con, x, 16, 'Panzer VI (Tiger) 12')
    libtcod.console_print(menu_con, x, 17, 'Anti-Tank Gun 4')
    libtcod.console_set_alignment(menu_con, libtcod.RIGHT)
    # VP value per table row above, in the same display order
    VP_SCORES = [1, 1, 2, 6, 7, 9, 12, 4]
    # destroyed by player: full VP value per kill
    y = 10
    player_vp = 0
    n = 0
    for num in battle.tank_ko_record:
        libtcod.console_print(menu_con, x+40, y, str(num))
        player_vp += (num * VP_SCORES[n])
        y += 1
        n += 1
    # destroyed by friendly forces: full VP value per kill
    y = 10
    friendly_vp = 0
    n = 0
    for num in battle.friendly_ko_record:
        libtcod.console_print(menu_con, x+54, y, str(num))
        friendly_vp += (num * VP_SCORES[n])
        y += 1
        n += 1
    # left behind by player movement: half VP value, subtracted
    y = 10
    left_behind_vp = 0
    n = 0
    for num in battle.left_behind:
        libtcod.console_print(menu_con, x+72, y, str(num))
        left_behind_vp -= int(num * VP_SCORES[n] / 2)
        y += 1
        n += 1
    # Victory Point Totals
    libtcod.console_print(menu_con, x+40, 19, str(player_vp))
    libtcod.console_print(menu_con, x+54, 19, str(friendly_vp))
    libtcod.console_print(menu_con, x+72, 19, str(left_behind_vp))
    libtcod.console_set_alignment(menu_con, libtcod.LEFT)
    libtcod.console_print(menu_con, x, 19, 'Totals:')
    # display friendly forces lost
    libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
    libtcod.console_print(menu_con, x, 22, 'Friendly Forces VP Value Lost')
    libtcod.console_print(menu_con, x, 23, '------------------------------------------')
    libtcod.console_set_default_foreground(menu_con, libtcod.white)
    libtcod.console_print(menu_con, x, 24, 'Tanks -5')
    libtcod.console_print(menu_con, x, 25, 'Infantry Squads -3')
    libtcod.console_set_alignment(menu_con, libtcod.RIGHT)
    libtcod.console_print(menu_con, x+40, 24, str(battle.tanks_lost))
    libtcod.console_print(menu_con, x+40, 25, str(battle.inf_lost))
    libtcod.console_set_alignment(menu_con, libtcod.CENTER)
    # display present VP total: kills minus friendly losses
    vp_total = player_vp + friendly_vp + left_behind_vp - (battle.tanks_lost * 5) - (battle.inf_lost * 3)
    libtcod.console_print(menu_con, MENU_CON_XM, 28, 'Encounter VP Total: ' + str(vp_total))
    # also display campaign day and overall VP
    text = 'Campaign Day VP: ' + str(campaign.day_vp)
    libtcod.console_print(menu_con, MENU_CON_XM, 30, text)
    text = 'Total Campaign VP: ' + str(campaign.vp)
    libtcod.console_print(menu_con, MENU_CON_XM, 31, text)
    # record in case we are leaving encounter
    battle.vp_total = vp_total
    # Encounter Result: Undetermined / Victory / Tank Lost
    libtcod.console_print(menu_con, MENU_CON_XM, 35, 'Encounter Result: ' + battle.result)
    libtcod.console_set_alignment(menu_con, libtcod.LEFT)
    libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
    libtcod.console_flush()
    exit_menu = False
    while not exit_menu:
        # get input from user
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        if key.vk == libtcod.KEY_ENTER: break
        # get pressed key
        key_char = chr(key.c)
        # save-and-quit only available while the encounter is undecided
        if battle.result == 'Undetermined' and key_char in ['q', 'Q']:
            SaveGame()
            campaign.exiting = True
            libtcod.console_clear(con)
            return
        # exit right away
        if libtcod.console_is_window_closed():
            sys.exit()
        libtcod.console_flush()
    # re-draw screen
    RenderEncounter()
# set a new phase for the encounter, update phase display title on encounter map,
# and update tank console to reflect new phase
def NewPhase(new_phase):
    """Switch the encounter to new_phase, then refresh every
    phase-dependent display: map overlay, tank console, and the full
    encounter screen."""
    battle.phase = new_phase
    for refresh in (UpdateMapOverlay, UpdateTankCon, RenderEncounter):
        refresh()
# main loop for battle encounter
def DoEncounter():
    """Main loop for a battle encounter.

    Runs until the encounter is resolved or the player exits the
    campaign, then awards/subtracts the encounter's VP total and clears
    the global battle object.
    """
    global battle, key, mouse
    # get input and perform events
    exit_encounter = False
    while not exit_encounter:
        # trigger encounter end
        if battle.result != 'Undetermined':
            # display scenario menu for battle overview if campaign is not over
            if not campaign.over:
                EncounterMenu()
            break
        # exit right away
        if libtcod.console_is_window_closed():
            sys.exit()
        # check to see if player is exiting out of the campaign
        # (no VP awarded in that case)
        elif campaign.exiting:
            battle = None
            return
        libtcod.console_flush()
        GetEncounterInput()
    # award or subtract VP
    campaign.day_vp += battle.vp_total
    if battle.vp_total < 0:
        text = 'You have lost ' + str(abs(battle.vp_total))
    elif battle.vp_total == 0:
        text = 'You gain no'
    else:
        text = 'You are awarded ' + str(battle.vp_total)
    text += ' VP for this encounter.'
    battle = None
    Message(text)
# get input and do encounter actions
def GetEncounterInput():
    """Poll one round of mouse/keyboard input and run encounter actions.

    Handles, in order: mouse-over / right-click unit info, global hotkeys
    (menus, info screens, settings, sound), the END/SPACE "advance phase"
    sequence that drives the whole turn (Orders -> Pivot -> Movement ->
    Rotate Turret -> Fire Main Gun -> Fire MGs -> enemy/friendly actions
    -> new turn), and finally per-phase key handling.

    NOTE(review): indentation in this block was reconstructed from a
    whitespace-mangled source; verify nesting against the original file.
    """
    # check for keyboard or mouse input
    libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
    # mouse stuff first
    mx, my = mouse.cx, mouse.cy
    # check mouse position against last recorded one
    if (mx, my) != battle.mouseover:
        battle.mouseover = (mx, my)
        # update map info console
        UpdateMapInfoCon(mx, my)
        RenderEncounter()
    # see if right mouse button was clicked
    if mouse.rbutton:
        UnitInfo(mx, my)
        RenderEncounter()
    # if ESCAPE key is pressed, open encounter menu
    if key.vk == libtcod.KEY_ESCAPE:
        EncounterMenu()
    # help display
    elif key.vk == libtcod.KEY_F1 or key.vk == libtcod.KEY_1:
        ShowHelp()
    # tank info display
    elif key.vk == libtcod.KEY_F2 or key.vk == libtcod.KEY_2:
        ShowTankInfo()
    # crew info display
    elif key.vk == libtcod.KEY_F3 or key.vk == libtcod.KEY_3:
        ShowCrewInfo()
    # settings
    elif key.vk == libtcod.KEY_F4 or key.vk == libtcod.KEY_4:
        ShowSettings()
    # campaign stats
    elif key.vk == libtcod.KEY_F5 or key.vk == libtcod.KEY_5:
        ShowCampaignStats()
    # screenshot
    elif key.vk == libtcod.KEY_F6 or key.vk == libtcod.KEY_6:
        SaveScreenshot()
    # sound toggle
    elif key.vk == libtcod.KEY_F7 or key.vk == libtcod.KEY_7:
        campaign.sounds = not campaign.sounds
        if campaign.sounds:
            PopUp("Sound turned on")
        else:
            PopUp("Sound turned off")
    # backspace key can cancel issue order input mode
    elif key.vk == libtcod.KEY_BACKSPACE:
        if battle.phase == 'Issue Order':
            battle.selected_order = None # clear any selected order
            NewPhase('Orders')
    # if the END or SPACE BAR keys are pressed, the game shifts to the next phase
    # can also be triggered by game
    if key.vk == libtcod.KEY_END or key.vk == libtcod.KEY_SPACE or battle.trigger_phase:
        battle.trigger_phase = False
        # end of set spot sectors sub-phase, do spotting then orders phase
        if battle.phase == 'Set Spot Sectors':
            NewPhase('Spotting')
            DoSpotting()
            NewPhase('Orders')
            SaveGame()
            return
        # end of Orders phase, start of Crew Action phase
        if battle.phase == 'Orders':
            # check for abandoning the tank
            for crewman in tank.crew:
                if crewman.order == 'Abandon Tank':
                    text = ('You are about to abandon your tank. It ' +
                        'will be destroyed. Are you certain?')
                    if PopUp(text, confirm=True):
                        tank.AbandonTank()
                        return
            # Change Gun Load
            crew_member = GetCrewByPosition('Loader')
            if crew_member.order == 'Change Gun Load':
                tank.ChangeGunLoadMenu()
                UpdateTankCon()
                RenderEncounter()
            # NOTE(review): SaveGame placement relative to the Change Gun
            # Load branch was ambiguous in the mangled source — confirm
            SaveGame()
            if SetupPivot():
                NewPhase('Pivot Tank')
                SaveGame()
                return
            else:
                # set this but only so that the next if statement is
                # immediately triggered
                battle.phase = 'Pivot Tank'
        if battle.phase == 'Pivot Tank':
            # check for pivot order
            crew_member = GetCrewByPosition('Driver')
            if crew_member.order == 'Pivot Tank':
                # facing 4 appears to mean "no new facing chosen" — TODO confirm
                if tank.new_facing != 4:
                    PivotTank()
                else:
                    # if pivot order was given but tank did not pivot,
                    # any moving status from a previous turn is lost
                    if tank.moving:
                        tank.moving = False
                        UpdateTankCon()
            # check for player tank movement
            NewPhase('Tank Movement')
            MoveTank()
            # check for rotating turret
            if SetupRotateTurret():
                NewPhase('Rotate Turret')
                SaveGame()
                return
            else:
                battle.phase = 'Rotate Turret'
        if battle.phase == 'Rotate Turret':
            # clear old facing for turret
            tank.old_t_facing = tank.turret_facing
            UpdateMapOverlay()
            # check for firing main gun, or attempt repairing it
            if SetupMainGun():
                UpdateMapOverlay()
                RenderEncounter()
                SaveGame()
                return
            else:
                battle.phase = 'Fire Main Gun'
        if battle.phase == 'Fire Main Gun':
            # finish up main gun sub-phase
            # clear any target, clear RoF flag
            battle.target = None
            tank.has_rof = False
            UpdateMapOverlay()
            RenderEncounter()
            # resolve any outstanding hits on alive enemy units
            for unit in battle.enemy_units:
                if not unit.alive: continue
                if len(unit.hit_record) > 0:
                    unit.ResolveHits()
            # check for firing MGs
            if SetupFireMGs():
                SaveGame()
                return
            else:
                battle.phase = 'Fire MGs'
        if battle.phase == 'Fire MGs':
            # clear any target
            battle.target = None
            # throw smoke grenade phase
            NewPhase('Smoke Grenades')
            HandleSmokeGrenades()
            # fire smoke mortar
            NewPhase('Smoke Mortar')
            HandleSmokeMortar()
            # check for restocking ready rack
            if SetupReadyRack():
                PopUp('Loader may move shells into or out of the Ready Rack')
                MainGunAmmoMenu()
                RenderEncounter()
            ##### End of Action Phase, crew has chance to recover #####
            NewPhase('Crew Recovery')
            for crewman in tank.crew:
                crewman.RecoveryRoll()
            # if we got an ambush, skip this section but reset flag
            if battle.friendly_ambush:
                battle.friendly_ambush = False
            else:
                ##### Enemy Action #####
                NewPhase('Enemy Action')
                # do an action for each active enemy unit on the board
                for unit in battle.enemy_units:
                    if not unit.alive: continue
                    unit.DoAction()
                    UpdateMapOverlay()
                    RenderEncounter()
                # check to see if tank has been knocked out by enemy action
                if battle.result != 'Undetermined':
                    return
            ##### Friendly Action #####
            # Skip if no alive enemy units
            if not AllEnemiesDead():
                NewPhase('Friendly Action')
                # resolve friendly attacks against each active enemy unit on the board
                result = False
                for unit in battle.enemy_units:
                    if not unit.alive: continue
                    if unit.FriendlyAction():
                        result = True
                    UpdateMapOverlay()
                    RenderEncounter()
                if not result:
                    PopUp('No results from Friendly Action.')
                ##### Random Events #####
                # NOTE(review): nesting of Random Events inside the
                # all-enemies-dead guard reconstructed — confirm upstream
                NewPhase('Random Events')
                RandomEvent()
            ##### Repair Attempts #####
            NewPhase('Attempt Repairs')
            AttemptRepairs()
            # check to trigger scenario loss
            if not tank.alive:
                return
            # check to trigger scenario victory
            if AllEnemiesDead():
                Message('You have won the encounter!')
                RenderEncounter()
                battle.result = 'Victory'
                return
            ##### Start of New Encounter Turn #####
            battle.rounds_passed += 1
            # reset the player tank for a new turn
            tank.Reset()
            # reset enemy units for new turn
            for unit in battle.enemy_units:
                if not unit.alive: continue
                unit.Reset()
            # reset Battle Leadership effect
            if battle.battle_leadership:
                Message('Battle Leadership no longer in effect.')
                battle.battle_leadership = False
            # test for new battle leadership effect
            if GetCrewByPosition('Commander').SkillCheck('Battle Leadership'):
                Message('Battle Leadership now in effect.')
                battle.battle_leadership = True
            # deplete smoke
            NewPhase('Smoke Depletion')
            DepleteSmoke()
            CalculateSmokeFactors()
            # rebuild list of orders and set spot ability for crew
            for crewman in tank.crew:
                crewman.BuildOrdersList()
                crewman.SetSpotAbility()
            # see if spot sectors need to be selected
            if CheckSpotSectors():
                # select first crew that can choose one spot sector
                for crewman in tank.crew:
                    if crewman.spot == 'Any One Sector':
                        battle.selected_crew = crewman
                        break
                NewPhase('Set Spot Sectors')
            else:
                # do spotting phase, then orders
                DoSpotting()
                NewPhase('Orders')
            SaveGame()
    # get pressed key
    key_char = chr(key.c)
    # debug commands
    if DEBUG:
        # knock out player tank
        if key_char == 'k' and (key.lctrl or key.rctrl):
            tank.alive = False
            battle.result = 'Tank Lost'
            RenderEncounter()
            return
        # win encounter
        elif key_char == 'v' and (key.lctrl or key.rctrl):
            PopUp('You have won the encounter!')
            RenderEncounter()
            battle.result = 'Victory'
            return
        # immobilize tank
        elif key_char == 'i' and (key.lctrl or key.rctrl):
            PopUp('Your tank is immobilized!')
            tank.moving = False
            tank.immobilized = True
            UpdateTankCon()
            RenderEncounter()
            return
        # apply a minor damage result
        elif key_char == 'd' and (key.lctrl or key.rctrl):
            tank.TakeDamage()
            RenderEncounter()
            return
    # select spot sector mode
    if battle.phase == 'Set Spot Sectors':
        # select next crew member that can select a spot sector
        if key_char in ['s', 'S'] or key.vk == libtcod.KEY_DOWN:
            battle.selected_crew = battle.selected_crew.next
            while battle.selected_crew.spot != 'Any One Sector':
                battle.selected_crew = battle.selected_crew.next
            UpdateTankCon()
            UpdateMapOverlay()
            RenderEncounter()
        # select previous crew member that can select a spot sector
        elif key_char in ['w', 'W'] or key.vk == libtcod.KEY_UP:
            battle.selected_crew = battle.selected_crew.prev
            while battle.selected_crew.spot != 'Any One Sector':
                battle.selected_crew = battle.selected_crew.prev
            UpdateTankCon()
            UpdateMapOverlay()
            RenderEncounter()
        # rotate selected sector clockwise (sectors 0-5 wrap around)
        elif key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT:
            battle.selected_crew.spot_sector += 1
            if battle.selected_crew.spot_sector > 5:
                battle.selected_crew.spot_sector = 0
            UpdateMapOverlay()
            RenderEncounter()
        # rotate selected sector counter clockwise
        elif key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT:
            battle.selected_crew.spot_sector -= 1
            if battle.selected_crew.spot_sector < 0:
                battle.selected_crew.spot_sector = 5
            UpdateMapOverlay()
            RenderEncounter()
    # orders input mode - can select crew, toggle hatch state, change ammo options,
    # and switch to 'Issue Order' input mode
    elif battle.phase == 'Orders':
        # select next crew member
        if key_char in ['s', 'S'] or key.vk == libtcod.KEY_DOWN:
            battle.selected_crew = battle.selected_crew.next
            UpdateTankCon()
            RenderEncounter()
        # select previous crew member
        elif key_char in ['w', 'W'] or key.vk == libtcod.KEY_UP:
            battle.selected_crew = battle.selected_crew.prev
            UpdateTankCon()
            RenderEncounter()
        # toggle hatch status for selected crew member
        elif key_char in ['h', 'H']:
            tank.ToggleHatch(battle.selected_crew)
            UpdateTankCon()
            RenderEncounter()
        # issue an order to selected crew member from a menu
        elif key_char in ['o', 'O']:
            # only allow if a valid crew member is selected
            if battle.selected_crew is None: return
            if battle.selected_crew.NoActions(): return
            NewPhase('Issue Order')
        # cycle through ammo reload selections
        elif key_char in ['r', 'R']:
            tank.CycleReload()
            UpdateTankCon()
            RenderEncounter()
        # toggle use of ready rack
        elif key_char in ['t', 'T']:
            tank.use_rr = not tank.use_rr
            UpdateTankCon()
            RenderEncounter()
        # switch to another order for selected crewman
        elif key_char in ['a', 'd'] or key.vk in [libtcod.KEY_LEFT, libtcod.KEY_RIGHT]:
            # only allow if a valid crew member is selected
            if battle.selected_crew is None: return
            if battle.selected_crew.NoActions(): return
            # find current order in order list
            n = 0
            for order in battle.selected_crew.orders_list:
                if order.name == battle.selected_crew.order:
                    break
                n += 1
            # find the previous or next one, wrapping at either end
            if key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT:
                if n == len(battle.selected_crew.orders_list) - 1:
                    battle.selected_crew.order = battle.selected_crew.orders_list[0].name
                else:
                    battle.selected_crew.order = battle.selected_crew.orders_list[n+1].name
            else:
                if n == 0:
                    battle.selected_crew.order = battle.selected_crew.orders_list[len(battle.selected_crew.orders_list) - 1].name
                else:
                    battle.selected_crew.order = battle.selected_crew.orders_list[n-1].name
            # reset spot ability to reflect new order
            battle.selected_crew.SetSpotAbility()
            UpdateTankCon()
            RenderEncounter()
        # debug: wound selected crew member
        if DEBUG:
            if key_char == 'w' and (key.lctrl or key.rctrl):
                if battle.selected_crew is None: return
                text = battle.selected_crew.TakeWound(None, None)
                UpdateTankCon()
                RenderEncounter()
                if text is not None:
                    PopUp(battle.selected_crew.name + ' is wounded! Result: ' + text)
            # generate crew report
            elif key_char == 'c' and (key.lctrl or key.rctrl):
                if battle.selected_crew is None: return
                text = battle.selected_crew.GenerateReport()
                for line in text:
                    WriteJournal(line)
                Message('DEBUG: Report added to campaign journal')
    # issue order input mode
    elif battle.phase == 'Issue Order':
        # select previous order in list
        if key_char in ['w', 'W'] or key.vk == libtcod.KEY_UP:
            if battle.selected_order > 0:
                battle.selected_order -= 1
            else:
                battle.selected_order = len(battle.selected_crew.orders_list)-1
            UpdateTankCon()
            RenderEncounter()
        # select next order in list
        elif key_char in ['s', 'S'] or key.vk == libtcod.KEY_DOWN:
            if len(battle.selected_crew.orders_list) > battle.selected_order + 1:
                battle.selected_order += 1
            else:
                battle.selected_order = 0
            UpdateTankCon()
            RenderEncounter()
        # issue selected order
        elif key_char in ['o', 'O']:
            battle.selected_crew.order = battle.selected_crew.orders_list[battle.selected_order].name
            # reset spot ability to reflect new order
            battle.selected_crew.SetSpotAbility()
            Message(battle.selected_crew.name + ' now on ' + battle.selected_crew.order + ' order.')
            battle.selected_order = None # clear any selected order
            NewPhase('Orders')
    # pivot tank mode
    elif battle.phase == 'Pivot Tank':
        if key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT: # pivot clockwise
            if tank.new_facing == 5:
                tank.new_facing = 0
            else:
                tank.new_facing += 1
            UpdateMapOverlay()
            RenderEncounter()
        elif key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT: # pivot counter clockwise
            if tank.new_facing == 0:
                tank.new_facing = 5
            else:
                tank.new_facing -= 1
            UpdateMapOverlay()
            RenderEncounter()
        elif key.vk == libtcod.KEY_ENTER: # commit pivot
            battle.trigger_phase = True
    # rotate turret mode
    elif battle.phase == 'Rotate Turret':
        if key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT: # rotate clockwise
            RotateTurret(True)
        elif key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT: # rotate counter clockwise
            RotateTurret(False)
        elif key.vk == libtcod.KEY_ENTER: # commit rotation
            battle.trigger_phase = True
    # main gun firing mode
    elif battle.phase == 'Fire Main Gun':
        # if not holding RoF, can rotate turret or select a different target
        if not tank.has_rof:
            if key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT: # rotate clockwise
                RotateTurret(True)
            elif key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT: # rotate counter clockwise
                RotateTurret(False)
            # switch firing mode if HE loaded
            elif key_char in ['f', 'F'] and tank.ammo_load == 'HE':
                battle.area_fire = not battle.area_fire
                UpdateTankCon()
                RenderEncounter()
            # select next target
            if key.vk == libtcod.KEY_TAB:
                SelectNextTarget()
                UpdateMapOverlay()
                RenderEncounter()
        # fire gun! (also used to keep firing while holding RoF)
        if key.vk == libtcod.KEY_ENTER:
            FireMainGun()
        # cycle through ammo reload selections
        elif key_char in ['r', 'R']:
            tank.CycleReload()
            UpdateTankCon()
            RenderEncounter()
        # toggle use of ready rack
        elif key_char in ['t', 'T']:
            tank.use_rr = not tank.use_rr
            UpdateTankCon()
            RenderEncounter()
    # firing MGs
    elif battle.phase == 'Fire MGs':
        # if co-ax can fire, can rotate turret
        if tank.coax_mg_can_fire:
            if key_char in ['d', 'D'] or key.vk == libtcod.KEY_RIGHT: # rotate clockwise
                RotateTurret(True)
            elif key_char in ['a', 'A'] or key.vk == libtcod.KEY_LEFT: # rotate counter clockwise
                RotateTurret(False)
        # activate a different MG (0=co-ax, 1=bow, 2=AA — inferred from the
        # can_fire flags checked below; confirm against MG setup code)
        if tank.coax_mg_can_fire or tank.bow_mg_can_fire or tank.aa_mg_can_fire:
            if key_char in ['m', 'M']:
                if tank.active_mg == 0:
                    if tank.bow_mg_can_fire:
                        tank.active_mg = 1
                    elif tank.aa_mg_can_fire:
                        tank.active_mg = 2
                elif tank.active_mg == 1:
                    if tank.aa_mg_can_fire:
                        tank.active_mg = 2
                    elif tank.coax_mg_can_fire:
                        tank.active_mg = 0
                elif tank.active_mg == 2:
                    if tank.coax_mg_can_fire:
                        tank.active_mg = 0
                    elif tank.bow_mg_can_fire:
                        tank.active_mg = 1
                UpdateMapOverlay()
                RenderEncounter()
                SelectNextTarget()
            # select next target
            if key.vk == libtcod.KEY_TAB:
                SelectNextTarget()
                UpdateMapOverlay()
                RenderEncounter()
            # fire an MG
            elif key.vk == libtcod.KEY_ENTER:
                FireMG()
                # no more MGs can fire
                if tank.active_mg == -1:
                    battle.trigger_phase = True
    libtcod.console_flush()
################################################################################
# Set up and Handle an Encounter #
################################################################################
# starts up or loads and continues a battle encounter
def InitEncounter(load=False, counterattack=False, res_level=None):
    """Start up (or load and continue) a battle encounter, then run it.

    load          -- True when resuming a saved battle in progress
    counterattack -- passed through to the new Battle object
    res_level     -- resistance level passed through to the new Battle

    For a new battle this sets up the Battle object, deployment, crew
    orders, initial strikes (artillery / air / advancing fire), and the
    ambush roll before handing control to DoEncounter().

    NOTE(review): indentation in this block was reconstructed from a
    whitespace-mangled source; verify nesting against the original file.
    """
    global battle
    # loading a battle in progress
    if load:
        # find the selected crewman: since pointer is saved, it's pointing to a
        # now non-existing object
        for crew_member in tank.crew:
            if crew_member.name == battle.selected_crew.name:
                battle.selected_crew = crew_member
                break
        # draw consoles for first time
        UpdateDateCon()
        UpdateTankCon()
        UpdateMsgCon()
        PaintMapCon()
        UpdateMapOverlay()
        UpdateMapInfoCon(0,0) # give 0, 0 for mouse position
        RenderEncounter()
    else:
        # set up battle object
        battle = Battle(counterattack=counterattack, res_level=res_level)
        # roll on deployment table for player tank status
        tank.SetDeployment()
        # set up initial list of orders for crew, also set their initial spot ability
        for crewman in tank.crew:
            crewman.BuildOrdersList()
            crewman.SetSpotAbility()
        # draw encounter consoles for first time
        PaintMapCon()
        UpdateMapOverlay()
        UpdateMapInfoCon(0,0) # give 0, 0 for mouse position
        UpdateTankCon()
        UpdateMsgCon()
        UpdateDateCon()
        # first time we're showing the encounter console, so use a zoom-in effect
        RenderEncounter(zoom_in=True)
        Message('Encounter begins!')
        # activate enemy units and draw screen
        ActivateEnemies()
        RenderEncounter()
        # do artillery / air strike if any
        if campaign.day_map.player_node.arty_strike:
            # reset flag
            campaign.day_map.player_node.arty_strike = False
            PopUp('Friendly forces conduct artillery fire against the enemy.')
            result = False
            # play sound effects
            PlaySound('arty_firing')
            for unit in battle.enemy_units:
                if not unit.alive: continue
                if unit.FriendlyAction(artillery=True):
                    result = True
                UpdateMapOverlay()
                RenderEncounter()
            if not result:
                PopUp('No results from Friendly Artillery.')
        elif campaign.day_map.player_node.air_strike:
            # reset flag
            campaign.day_map.player_node.air_strike = False
            PopUp('Friendly forces conduct an air strike against the enemy.')
            result = False
            for unit in battle.enemy_units:
                if not unit.alive: continue
                if unit.FriendlyAction(air_strike=True):
                    result = True
                UpdateMapOverlay()
                RenderEncounter()
            if not result:
                PopUp('No results from air strike.')
        # advancing fire results
        if campaign.day_map.player_node.advancing_fire:
            # reset flag
            campaign.day_map.player_node.advancing_fire = False
            PopUp('You use advancing fire to attack the enemy.')
            result = False
            # sound effects played before advancing fire resolution
            soundfile = GetFiringSound(tank.stats['main_gun'])
            if soundfile is not None:
                PlaySound(soundfile)
                Wait(300)
                PlaySound(soundfile)
                Wait(300)
                PlaySound(soundfile)
            for unit in battle.enemy_units:
                if not unit.alive:
                    continue
                if unit.FriendlyAction(advance_fire=True):
                    result = True
                UpdateMapOverlay()
                RenderEncounter()
            if not result:
                PopUp('No results from advancing fire.')
        # select first crew by default
        battle.selected_crew = tank.crew[0]
        # check to make sure there's at least one enemy left alive after initial
        # attacks
        if not AllEnemiesDead():
            # do ambush roll (ambush occurs on a low roll, see below)
            roll = Roll1D10()
            # apply weather modifiers
            if campaign.weather.fog or campaign.weather.precip != 'None':
                roll -= 1
            # terrain modifiers
            if campaign.day_map.player_node.node_type == 'F': # bocage
                roll -= 2
            # check for keen senses skill: in a counterattack an ambush is
            # friendly, so the skill pushes the roll the other way
            crew_member = GetCrewByPosition('Commander')
            if crew_member.SkillCheck('Keen Senses'):
                if campaign.scen_type != 'Counterattack' and not battle.counterattack:
                    roll += 2
                else:
                    roll -= 2
            # ambush occurs if roll <= 7
            if roll <= 7:
                # counterattack ambush!
                if campaign.scen_type == 'Counterattack' or battle.counterattack:
                    PopUp('Your forces have ambushed the enemy!')
                    battle.friendly_ambush = True
                else:
                    PopUp('Your tank has been ambushed! Enemy gets first attack.')
                    ##### Enemy Action #####
                    NewPhase('Enemy Action')
                    for unit in battle.enemy_units:
                        if not unit.alive: continue
                        unit.DoAction(ambush=True)
                        UpdateMapOverlay()
                        RenderEncounter()
                    # check to see if tank has been knocked out by enemy action
                    # or if commander has been taken out
                    if battle.result != 'Undetermined' or campaign.over:
                        if not campaign.over:
                            EncounterMenu()
                        return
                    ##### Random Events #####
                    NewPhase('Random Events')
                    RandomEvent()
            else:
                PopUp('Enemy units are caught off guard, you have first attack.')
            # set spot ability for crew
            for crew_member in tank.crew:
                crew_member.SetSpotAbility()
            # see if we need to set spot sectors for one or more crewmen
            if CheckSpotSectors():
                # select first crew that can choose one spot sector
                for crew_member in tank.crew:
                    if crew_member.spot == 'Any One Sector':
                        battle.selected_crew = crew_member
                        break
                NewPhase('Set Spot Sectors')
            else:
                # do initial spotting phase
                DoSpotting()
                # next phase is orders
                NewPhase('Orders')
        # if all enemies dead
        else:
            PopUp('Your initial attack has destroyed all enemy forces!')
            # skip right to orders phase
            NewPhase('Orders')
        SaveGame()
    # start the encounter handler
    DoEncounter()
##########################################################################################
# Campaign Functions #
##########################################################################################
# draw the campaign map onto the console
# done when a new map is generated, or a saved game is loaded
def PaintCampaignMap():
    """Draw the campaign map onto the campaign map console.

    Done when a new map is generated, or a saved game is loaded. Uses an
    RNG seeded from the saved map seed so the same map always paints the
    same way. Paints terrain per character cell, then generates and paints
    stone/dirt roads, then draws bocage outlines and patches.

    NOTE(review): indentation in this block was reconstructed from a
    whitespace-mangled source; verify nesting against the original file.
    """
    libtcod.console_clear(c_map_con)
    # create the RNG based on the saved seed
    rng = libtcod.random_new_from_seed(campaign.day_map.seed)
    ##### Determine colour scheme to use based on season and current ground cover
    # Field color, Woods ground color, Coniferous tree, Deciduous tree,
    # village ground, marsh ground
    LATE_SUMMER_TO_MID_AUTUMN = [(140,110,16), (16,60,16), (80,110,80), (80,110,80),
        (60,90,60), (60,30,10)]
    MID_TO_LATE_AUTUMN = [(120,90,16), (16,60,16), (80,110,80), (200,80,20),
        (60,90,60), (60,30,10)]
    EDGE_OF_WINTER = [(110,90,45), (70,45,15), (80,110,80), (80,50,30),
        (80,70,45), (60,90,45)]
    WINTER_OR_GROUND_SNOW = [(240,240,240), (240,240,240), (80,110,80), (80,50,30),
        (240,240,240), (240,240,240)]
    SPRING = [(110,170,110), (36,80,36), (80,110,80), (230,135,210),
        (60,90,60), (60,30,10)]
    SUMMER = [(90,150,90), (16,60,16), (80,110,80), (80,110,80),
        (60,90,60), (60,30,10)]
    # snow ground automatically means winter colours
    if campaign.weather.ground in ['Snow', 'Deep Snow']:
        color_scheme = WINTER_OR_GROUND_SNOW
        campaign.color_scheme = 'WINTER_OR_GROUND_SNOW'
    # late autumn, winter with no snow, or early spring (month in index 1)
    elif campaign.current_date[1] in [11, 12, 1, 2, 3]:
        color_scheme = EDGE_OF_WINTER
        campaign.color_scheme = 'EDGE_OF_WINTER'
    # spring
    elif campaign.current_date[1] in [4, 5]:
        color_scheme = SPRING
        campaign.color_scheme = 'SPRING'
    # summer
    elif campaign.current_date[1] in [6, 7]:
        color_scheme = SUMMER
        campaign.color_scheme = 'SUMMER'
    # late summer to mid autumn
    elif campaign.current_date[1] in [8, 9]:
        color_scheme = LATE_SUMMER_TO_MID_AUTUMN
        campaign.color_scheme = 'LATE_SUMMER_TO_MID_AUTUMN'
    # autumn
    else:
        color_scheme = MID_TO_LATE_AUTUMN
        campaign.color_scheme = 'MID_TO_LATE_AUTUMN'
    ##### Paint base display characters for each coordinate #####
    for y in range(0, C_MAP_CON_HEIGHT):
        for x in range (0, C_MAP_CON_WIDTH):
            parent_node = campaign.day_map.char_locations[(x,y)]
            # Fields and Farm Buildings, Fields, Bocage base
            if parent_node.node_type in ['A', 'B', 'F']:
                c_mod = libtcod.random_get_int(rng, -3, 7)
                (r,g,b) = color_scheme[0]
                bc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
                fc = libtcod.black
                display_char = 0
                # if this is an A area, chance of there being a farm
                # building here instead
                if parent_node.node_type == 'A':
                    if libtcod.random_get_int(rng, 1, 50) == 1:
                        bc = libtcod.grey
                        fc = libtcod.light_grey
                        display_char = 179
            # woods
            elif parent_node.node_type == 'D':
                c_mod = libtcod.random_get_int(rng, -5, 10)
                (r,g,b) = color_scheme[1]
                bc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
                fc = libtcod.black
                display_char = 0
                # chance of a tree greeble
                if libtcod.random_get_int(rng, 1, 10) > 6:
                    c_mod = libtcod.random_get_int(rng, -20, 20)
                    if libtcod.random_get_int(rng, 1, 8) == 1:
                        # coniferous tree
                        display_char = libtcod.CHAR_ARROW2_N
                        (r,g,b) = color_scheme[2]
                        fc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
                    else:
                        # deciduous tree
                        display_char = libtcod.CHAR_SPADE
                        (r,g,b) = color_scheme[3]
                        fc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
            # villages
            elif parent_node.node_type == 'C':
                c_mod = libtcod.random_get_int(rng, -5, 10)
                (r,g,b) = color_scheme[4]
                bc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
                fc = libtcod.black
                display_char = 0
                # if within village building radius, chance of a building here
                dist = GetDistance(x, y, parent_node.x, parent_node.y)
                if dist <= parent_node.village_radius:
                    # buildings are denser toward the village centre
                    chance = int(100.0 * (float(dist) / float(parent_node.village_radius)))
                    if libtcod.random_get_int(rng, 1, 120) >= chance:
                        # dirt background
                        c_mod = libtcod.random_get_int(rng, -5, 10)
                        bc = libtcod.Color(80+c_mod, 50+c_mod, 30+c_mod)
                        # possibly a building, otherwise bare dirt
                        if libtcod.random_get_int(rng, 1, 3) == 3:
                            fc = libtcod.light_grey
                            display_char = 254
            # marshland
            else:
                if libtcod.random_get_int(rng, 1, 3) <= 2:
                    # water
                    c_mod = libtcod.random_get_int(rng, -5, 10)
                    bc = libtcod.Color(10+c_mod, 30+c_mod, 60+c_mod)
                else:
                    # marsh ground
                    c_mod = libtcod.random_get_int(rng, -5, 10)
                    (r,g,b) = color_scheme[4]
                    bc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
                fc = libtcod.black
                display_char = 0
                # possible greeble
                if libtcod.random_get_int(rng, 1, 8) == 1:
                    if libtcod.random_get_int(rng, 1, 5) == 1:
                        c_mod = libtcod.random_get_int(rng, -20, 20)
                        # use deciduous tree colour
                        (r,g,b) = color_scheme[3]
                        fc = libtcod.Color(r+c_mod, g+c_mod, b+c_mod)
                        if libtcod.random_get_int(rng, 1, 8) == 1:
                            display_char = libtcod.CHAR_ARROW2_N
                        else:
                            display_char = libtcod.CHAR_SPADE
                    else:
                        # marsh grass tuft
                        c_mod = libtcod.random_get_int(rng, -5, 10)
                        fc = libtcod.Color(16, 60+c_mod, 16)
                        display_char = 19
            # if this is an edge coordinate, set a little darker
            if (x,y) in parent_node.edges:
                bc = bc * libtcod.lighter_grey
                fc = fc * libtcod.lighter_grey
            # paint the char
            libtcod.console_put_char_ex(c_map_con, x, y, display_char, fc, bc)
    ##### Build Improved Roads #####
    # attempt to generate an improved road linking two nodes
    def GenerateRoad(node1, node2, dirt=False):
        # get path if possible, and link nodes together
        path = GetPath(node1, node2)
        if path != []:
            lastnode = node1
            for node in path:
                if not dirt:
                    lastnode.stone_road_links.append(node)
                    node.stone_road_links.append(lastnode)
                else:
                    lastnode.dirt_road_links.append(node)
                    node.dirt_road_links.append(lastnode)
                lastnode = node
            # improved roads should be extended to edge of map
            if not dirt:
                node1.road_end = True
                node2.road_end = True
    # 80% chance of a vertical improved road running through area
    if libtcod.random_get_int(rng, 1, 10) <= 8:
        # select start and end nodes
        node1 = None
        node2 = None
        for node in random.sample(campaign.day_map.nodes, len(campaign.day_map.nodes)):
            if node.bottom_edge and node.node_type != 'D' and node not in campaign.day_map.blocked_nodes:
                node1 = node
            elif node.top_edge and node.node_type != 'D' and node not in campaign.day_map.blocked_nodes:
                node2 = node
            if node1 is not None and node2 is not None:
                break
        # attempt to build road
        if node1 is not None and node2 is not None:
            GenerateRoad(node1, node2)
    # 20% chance of a crossroad
    if libtcod.random_get_int(rng, 1, 10) <= 2:
        # select start and end nodes
        node1 = None
        node2 = None
        for node in random.sample(campaign.day_map.nodes, len(campaign.day_map.nodes)):
            if node.top_edge or node.bottom_edge: continue
            if node.left_edge and node.node_type != 'D':
                node1 = node
            elif node.right_edge and node.node_type != 'D':
                node2 = node
            if node1 is not None and node2 is not None:
                break
        # attempt to build road
        if node1 is not None and node2 is not None:
            GenerateRoad(node1, node2)
    ##### Build Dirt Roads #####
    # go through village nodes, if they are not already connected to an improved road,
    # try to link it via a dirt road to the nearest node that is connected
    for node1 in campaign.day_map.nodes:
        if node1.node_type == 'C':
            if len(node1.stone_road_links) > 0: continue
            if len(node1.dirt_road_links) > 0: continue
            closest = None
            for node2 in campaign.day_map.nodes:
                if node1 == node2: continue
                if len(node2.stone_road_links) > 0 or len(node2.dirt_road_links) > 0:
                    if closest is None:
                        closest = node2
                        continue
                    dist = GetDistance(node1.x, node1.y, node2.x, node2.y)
                    if dist < GetDistance(node1.x, node1.y, closest.x, closest.y):
                        closest = node2
                        continue
            if closest is not None:
                GenerateRoad(node1, closest, dirt=True)
                continue
            # no improved roads on the map, link to closest village or dirt road
            closest = None
            for node2 in campaign.day_map.nodes:
                if node1 == node2: continue
                if node2.node_type == 'C' or len(node2.dirt_road_links) > 0:
                    if closest is None:
                        closest = node2
                        continue
                    dist = GetDistance(node1.x, node1.y, node2.x, node2.y)
                    if dist < GetDistance(node1.x, node1.y, closest.x, closest.y):
                        closest = node2
                        continue
            if closest is not None:
                GenerateRoad(node1, closest, dirt=True)
                continue
    ##### Paint Roads #####
    # draw a road onto map
    def DrawRoad(line, dirt=False):
        # for each char location along this line, re-paint it
        for (x,y) in line:
            c_mod = libtcod.random_get_int(rng, -5, 10)
            if not dirt:
                col = libtcod.Color(60+c_mod, 60+c_mod, 60+c_mod)
            else:
                col = libtcod.Color(80+c_mod, 50+c_mod, 30+c_mod)
            libtcod.console_put_char_ex(c_map_con, x, y, 219, col, col)
    # dirt road links
    skip_nodes = []
    for node1 in campaign.day_map.nodes:
        if len(node1.dirt_road_links) == 0: continue
        skip_nodes.append(node1)
        for node2 in node1.dirt_road_links:
            # skip_nodes avoids drawing each link twice (once per endpoint)
            if node2 in skip_nodes: continue
            line = GetLine(node1.x, node1.y, node2.x, node2.y)
            DrawRoad(line, dirt=True)
    # stone road links
    skip_nodes = []
    for node1 in campaign.day_map.nodes:
        if len(node1.stone_road_links) == 0: continue
        skip_nodes.append(node1)
        for node2 in node1.stone_road_links:
            if node2 in skip_nodes: continue
            line = GetLine(node1.x, node1.y, node2.x, node2.y)
            DrawRoad(line)
    # extend stone road ends to edge of map
    # if adjacent to another road_end, only pick one to extend
    for node in campaign.day_map.nodes:
        if node.road_end:
            # check that it's not adjacent to one that has already been extended
            # produces a "fork" effect at edge of map
            fork = False
            for node2 in node.links:
                if node2.extended:
                    fork = True
                    break
            if fork: continue
            if node.top_edge:
                line = GetLine(node.x, node.y, node.x, 0)
            elif node.bottom_edge:
                line = GetLine(node.x, node.y, node.x, C_MAP_CON_HEIGHT-1)
            elif node.left_edge:
                line = GetLine(node.x, node.y, 0, node.y)
            elif node.right_edge:
                line = GetLine(node.x, node.y, C_MAP_CON_WIDTH-1, node.y)
            else:
                continue
            DrawRoad(line)
            node.extended = True
    # bocage painting method
    for node in campaign.day_map.nodes:
        if node.node_type == 'F':
            def DrawBocage(x,y):
                c_mod = libtcod.random_get_int(rng, -5, 10)
                col = libtcod.Color(20+c_mod, 60+c_mod, 20+c_mod)
                libtcod.console_put_char_ex(c_map_con, x, y, 219, col, col)
            # create list of node locations
            locations = []
            for y in range(0, C_MAP_CON_HEIGHT):
                for x in range (0, C_MAP_CON_WIDTH):
                    if campaign.day_map.char_locations[(x,y)] == node:
                        locations.append((x,y))
            # draw outline
            for (x,y) in node.edges:
                DrawBocage(x,y)
            # fill in squares: four random hedge rectangles within the node,
            # drawn only over cells that are still blank (char 0)
            for i in range(4):
                n = libtcod.random_get_int(rng, 0, len(locations)-1)
                (x,y) = locations[n]
                w = libtcod.random_get_int(rng, 3, 9)
                h = libtcod.random_get_int(rng, 3, 9)
                for x1 in range(x-w, x+w+1):
                    if (x1,y-h) in locations and libtcod.console_get_char(c_map_con, x1, y-h) == 0:
                        DrawBocage(x1,y-h)
                    if (x1,y+h) in locations and libtcod.console_get_char(c_map_con, x1, y+h) == 0:
                        DrawBocage(x1,y+h)
                for y1 in range(y-h+1, y+h):
                    if (x-w,y1) in locations and libtcod.console_get_char(c_map_con, x-w, y1) == 0:
                        DrawBocage(x-w,y1)
                    if (x+w,y1) in locations and libtcod.console_get_char(c_map_con, x+w, y1) == 0:
                        DrawBocage(x+w,y1)
# draw and update the campaign map overlay
# used to show things that change on the campaign map: area control, player location, etc.
def UpdateCOverlay(highlight_node=None, anim_x=-1, anim_y=-1):
	"""Redraw the campaign map overlay console (c_overlay_con).

	Draws, in order: the frontline between friendly and hostile areas, the
	action target line, start/exit labels, player and area markers, area
	control / resistance text, strike and quest notices, the selected area
	outline, and the animated player indicator.

	highlight_node: if given, that node's edges are drawn in SELECTED_COLOR
	anim_x, anim_y: console location of the animated '@' player indicator;
		-1 means no movement animation is in progress
	"""
	# clear to key colour
	libtcod.console_set_default_background(c_overlay_con, KEY_COLOR)
	libtcod.console_clear(c_overlay_con)
	libtcod.console_set_default_background(c_overlay_con, libtcod.black)
	# highlight frontline between friendly and hostile map areas
	libtcod.console_set_default_foreground(c_overlay_con, FRONTLINE_COLOR)
	for node in campaign.day_map.nodes:
		if not node.friendly_control: continue
		# skip impassible nodes too
		if node in campaign.day_map.blocked_nodes: continue
		for (x,y) in node.edges:
			# check adjacent map character locations
			for (x2,y2) in [(x,y-1), (x-1,y), (x+1,y), (x,y+1)]:
				# adjacent character location is outside of map
				if (x2,y2) not in campaign.day_map.char_locations: continue
				node2 = campaign.day_map.char_locations[(x2,y2)]
				# edge character belonging to a hostile neighbour: frontline
				if node2 != node and not node2.friendly_control:
					# draw the character
					libtcod.console_put_char(c_overlay_con, x2, y2, 178, libtcod.BKGND_SET)
	# set foreground colour based on campaign map colours
	if campaign.color_scheme == 'WINTER_OR_GROUND_SNOW':
		libtcod.console_set_default_foreground(c_overlay_con, libtcod.blue)
	else:
		libtcod.console_set_default_foreground(c_overlay_con, libtcod.white)
	# draw a line to new location if doing campaign action
	# will appear beneath other information drawn below
	if campaign.input_mode != 'None' and campaign.selected_node is not None:
		if campaign.input_mode in ['Move Into Adjacent Area', 'Call in Strike']:
			line = GetLine(campaign.day_map.player_node.x, campaign.day_map.player_node.y,
				campaign.selected_node.x, campaign.selected_node.y)
			for (x, y) in line:
				libtcod.console_put_char(c_overlay_con, x, y, 250, libtcod.BKGND_SET)
	# display start / exit nodes, node center point
	for node in campaign.day_map.nodes:
		if node.start:
			libtcod.console_print_ex(c_overlay_con, node.x, node.y-1,
				libtcod.BKGND_SET, libtcod.CENTER, 'Start')
		elif node.exit:
			libtcod.console_print_ex(c_overlay_con, node.x, node.y-1,
				libtcod.BKGND_SET, libtcod.CENTER, 'Exit')
		# highlight player node or draw node center
		# don't draw player indicator if we are animating it
		if campaign.day_map.player_node == node and anim_x == -1:
			libtcod.console_put_char(c_overlay_con, node.x, node.y, '@', libtcod.BKGND_SET)
			for (x,y) in campaign.day_map.player_node.edges:
				libtcod.console_put_char(c_overlay_con, x, y, libtcod.CHAR_BULLET, libtcod.BKGND_SET)
		else:
			libtcod.console_put_char(c_overlay_con, node.x, node.y, libtcod.CHAR_BULLET,
				libtcod.BKGND_SET)
		# area owner / expected resistance label below center point
		if node.friendly_control:
			libtcod.console_print_ex(c_overlay_con, node.x, node.y+1,
				libtcod.BKGND_SET, libtcod.CENTER, campaign.player_nation)
		elif node.res_known and node.resistance is not None:
			libtcod.console_print_ex(c_overlay_con, node.x, node.y+1,
				libtcod.BKGND_SET, libtcod.CENTER, node.resistance)
		# highlighting node (overwrites player node highlight)
		if highlight_node == node:
			# remember current foreground colour, draw highlight, restore it
			col = libtcod.console_get_default_foreground(c_overlay_con)
			libtcod.console_set_default_foreground(c_overlay_con, SELECTED_COLOR)
			for (x,y) in node.edges:
				libtcod.console_put_char(c_overlay_con, x, y, libtcod.CHAR_BULLET, libtcod.BKGND_SET)
			libtcod.console_set_default_foreground(c_overlay_con, col)
		if not node.friendly_control:
			if node.arty_strike or node.air_strike:
				libtcod.console_print_ex(c_overlay_con, node.x, node.y+2,
					libtcod.BKGND_SET, libtcod.CENTER, 'Area hit by')
				if node.arty_strike:
					text = 'Artillery'
				else:
					text = 'Air Strike'
				libtcod.console_print_ex(c_overlay_con, node.x, node.y+3,
					libtcod.BKGND_SET, libtcod.CENTER, text)
		# active quest node
		if node.quest_type is not None:
			libtcod.console_print_ex(c_overlay_con, node.x, node.y-2,
				libtcod.BKGND_SET, libtcod.CENTER, node.quest_type)
	# highlight selected area if any
	if campaign.input_mode != 'None' and campaign.selected_node is not None:
		libtcod.console_set_default_foreground(c_overlay_con, SELECTED_COLOR)
		for (x,y) in campaign.selected_node.edges:
			libtcod.console_put_char(c_overlay_con, x, y, 219, libtcod.BKGND_SET)
	# draw animated player indicator if any
	if anim_x > -1 and anim_y > -1:
		libtcod.console_set_default_foreground(c_overlay_con, libtcod.white)
		libtcod.console_put_char(c_overlay_con, anim_x, anim_y, '@', libtcod.BKGND_SET)
# draw the campaign action console
def UpdateCActionCon():
	"""Redraw the campaign action console (c_action_con).

	If an action input mode is active, shows the help lines for that mode;
	otherwise lists the available campaign actions with their time costs.
	Always shows the map scroll keys, plus time to sunset if not yet past.
	"""
	libtcod.console_clear(c_action_con)
	# if we're doing an action
	if campaign.input_mode != 'None':
		# we're checking an adjacent area
		if campaign.input_mode == 'Check Adjacent Area':
			lines = CHECK_AREA
		# we're moving into an area
		elif campaign.input_mode == 'Move Into Adjacent Area':
			lines = MOVE_AREA
		elif campaign.input_mode == 'Call in Strike':
			# NOTE(review): HIGHLIGHT appears to be a pair of libtcod
			# colour-control characters used by %c formatting - confirm
			lines = ['Call in Strike on Adjacent Area', '']
			lines.append('[%cTab%c] Cycle through adjacent areas (Shift to reverse)'%HIGHLIGHT)
			lines.append('Call in [%cA%c]rtillery Strike'%HIGHLIGHT)
			# air strikes are unavailable in overcast, fog, or snow
			if campaign.weather.clouds != 'Overcast' and not campaign.weather.fog and campaign.weather.precip != 'Snow':
				lines.append('Call in Ai[%cr%c] Strike'%HIGHLIGHT)
			lines.append('[%cBackspace%c] Cancel action'%HIGHLIGHT)
		dy = 1
		for line in lines:
			libtcod.console_print(c_action_con, 0, dy, line)
			dy += 1
	else:
		# column headers for the action list
		libtcod.console_print(c_action_con, 0, 1, 'Action')
		libtcod.console_print(c_action_con, 0, 2, '------')
		libtcod.console_print(c_action_con, 41, 1, 'Mins. Required')
		libtcod.console_print(c_action_con, 41, 2, '--------------')
		dy = 4
		for (text, time) in campaign.action_list:
			libtcod.console_print(c_action_con, 0, dy, text)
			if time is not None:
				libtcod.console_print(c_action_con, 47, dy, str(time))
			else:
				libtcod.console_print(c_action_con, 47, dy, 'N/A')
			# grey divider line between action entries
			libtcod.console_set_default_foreground(c_action_con, libtcod.dark_grey)
			libtcod.console_hline(c_action_con, 0, dy+1, 50, flag=libtcod.BKGND_DEFAULT)
			libtcod.console_set_default_foreground(c_action_con, libtcod.white)
			dy += 2
	libtcod.console_print(c_action_con, 1, C_ACTION_CON_H-4, '[%cW/S/up/down%c]: Scroll Map'%HIGHLIGHT)
	# display time remaining until sunset if hasn't happened yet
	(sunset_h, sunset_m) = campaign.GetSunset()
	(h, m) = GetTimeUntil(campaign.hour, campaign.minute, sunset_h, sunset_m)
	# negative hours means sunset has already passed
	if h < 0: return
	text = 'Time until sunset: ' + str(h) + ':' + str(m).zfill(2)
	libtcod.console_print(c_action_con, 1, C_ACTION_CON_H-2, text)
# draw the campaign area info console with info based on mouse position
def UpdateCInfoCon(mx, my):
	"""Redraw the campaign area info console based on the mouse position.

	mx, my: mouse cursor location in screen coordinates. Shows the current
	weather, artillery / air strike chances, and details on whichever map
	area (if any) lies under the cursor.
	"""
	libtcod.console_clear(c_info_con)
	# display weather conditions
	DisplayWeather(c_info_con, C_INFO_CON_X-5, 0)
	libtcod.console_set_default_background(c_info_con, libtcod.black)
	libtcod.console_set_default_foreground(c_info_con, libtcod.white)
	# display artillery chance
	libtcod.console_print(c_info_con, 0, 0, 'Artillery Chance')
	libtcod.console_print(c_info_con, 7, 2, '<=' + str(campaign.arty_chance))
	libtcod.console_print(c_info_con, 38, 0, 'Air Strike Chance')
	# air strikes unavailable in overcast, fog, or snow
	if campaign.weather.clouds == 'Overcast' or campaign.weather.fog or campaign.weather.precip == 'Snow':
		text = 'N/A'
	else:
		text = '<=' + str(campaign.air_chance)
	libtcod.console_print(c_info_con, 46, 2, text)
	# make sure mouse cursor is over map window
	if mx < C_MAP_CON_X or mx >= C_MAP_CON_X + C_MAP_CON_WIDTH or my < 4:
		libtcod.console_print_ex(c_info_con, C_INFO_CON_X, 5,
			libtcod.BKGND_NONE, libtcod.CENTER, 'Mouseover an area for info')
		return
	# adjust for offset
	mx -= C_MAP_CON_X
	my = my - 4 + campaign.c_map_y
	# check in case of error
	if (mx,my) not in campaign.day_map.char_locations:
		print ('ERROR: Could not find character location under mouse cursor')
		return
	node = campaign.day_map.char_locations[(mx,my)]
	# terrain description for the node's type code
	if node.node_type == 'A':
		text = 'Farm Buildings and Fields'
	elif node.node_type == 'B':
		text = 'Fields'
	elif node.node_type == 'C':
		text = 'Village'
	elif node.node_type == 'D':
		text = 'Woods'
	elif node.node_type == 'E':
		text = 'Marshland'
	elif node.node_type == 'F':
		text = 'Bocage'
	else:
		text = ''
	libtcod.console_print_ex(c_info_con, C_INFO_CON_X, 5, libtcod.BKGND_NONE,
		libtcod.CENTER, text)
	# area control / expected resistance line
	if node.friendly_control:
		text = 'Friendly Control'
	elif node.res_known:
		text = node.resistance + ' Enemy Resistance Expected'
	else:
		text = 'Unknown Enemy Resistance Level'
	libtcod.console_print_ex(c_info_con, C_INFO_CON_X, 7, libtcod.BKGND_NONE,
		libtcod.CENTER, text)
	# active quest details, if this is the quest area
	if campaign.quest_active:
		if node.quest_type is not None:
			text = 'Active Quest: ' + node.quest_type
			libtcod.console_print_ex(c_info_con, C_INFO_CON_X, 9,
				libtcod.BKGND_NONE, libtcod.CENTER, text)
			text = 'VP Bonus: ' + str(node.quest_vp_bonus)
			libtcod.console_print_ex(c_info_con, C_INFO_CON_X, 10,
				libtcod.BKGND_NONE, libtcod.CENTER, text)
			if node.quest_time_limit is not None:
				(h, m) = node.quest_time_limit
				text = 'Expires: ' + str(h) + ':' + str(m).zfill(2)
				libtcod.console_print_ex(c_info_con, C_INFO_CON_X, 11,
					libtcod.BKGND_NONE, libtcod.CENTER, text)
# set up "check adjacent area" action
def SetupCheckArea():
	"""Switch the campaign interface into 'Check Adjacent Area' input mode."""
	# default the selection to the first area linked to the player's area
	if campaign.selected_node is None:
		campaign.selected_node = campaign.day_map.player_node.links[0]
	campaign.input_mode = 'Check Adjacent Area'
	# refresh the interface to reflect the new mode
	for redraw in (UpdateCActionCon, UpdateCOverlay, RenderCampaign):
		redraw()
# check the selected area for enemy resistance
def CheckArea():
	"""Resolve the 'Check Adjacent Area' action on campaign.selected_node.

	Reveals the area's resistance level, spends 15 mins (unless the check
	was flagged as free), may trigger crew chatter, and can complete a
	RECON quest in the checked area.
	"""
	# if friendly control, don't do anything
	if campaign.selected_node.friendly_control or campaign.selected_node.resistance is None:
		return
	# if known resistance level, don't do anything
	if campaign.selected_node.res_known:
		return
	# set flag to known resistance level
	campaign.selected_node.res_known = True
	# display results message
	text = campaign.selected_node.resistance + ' enemy resistance reported in this area.'
	ShowLabel(campaign.selected_node.x+C_MAP_CON_X, campaign.selected_node.y+4-campaign.c_map_y, text)
	# spend time if not free action
	if not campaign.free_check:
		campaign.SpendTime(0, 15)
	else:
		# clear the flag
		campaign.free_check = False
	# reset input mode
	campaign.input_mode = 'None'
	# chance of crew reaction
	if campaign.selected_node.resistance == 'Heavy':
		if Roll1D10() <= 2:
			CrewTalk(random.choice(CREW_TALK_HEAVY_RES))
	# might have completed a quest
	if campaign.selected_node.quest_type is not None:
		if campaign.selected_node.quest_type == 'RECON':
			campaign.AddStat('Quests Completed', 1)
			text = ('Congratulations, commander. You have reported your ' +
				'reconnaissance of the requested map area and have earned ' +
				str(campaign.selected_node.quest_vp_bonus) + ' bonus VP.')
			PopUp(text)
			WriteJournal('RECON quest completed')
			# award VP
			campaign.day_vp += campaign.selected_node.quest_vp_bonus
			# reset node and campaign flag
			campaign.selected_node.quest_type = None
			campaign.selected_node.quest_vp_bonus = None
			campaign.quest_active = False
	SaveGame()
	UpdateCActionCon()
	UpdateCOverlay()
	RenderCampaign()
# select the next adjacent area
def SelectNextArea():
	"""Cycle campaign.selected_node through valid areas adjacent to the player.

	In a Counterattack scenario, only friendly-controlled linked areas that
	are on the top map edge or adjacent to at least one enemy-held area are
	eligible; otherwise all linked areas are. Candidates are cycled in
	clockwise heading order around the player's area; holding Shift (read
	from the global key state) reverses direction. Sets
	campaign.selected_node to None if no candidates exist.
	"""
	# build list of possible areas
	nodes = []
	if campaign.scen_type == 'Counterattack':
		for node in campaign.day_map.player_node.links:
			if not node.friendly_control: continue
			# eligible if on the top map edge, or adjacent to at least one
			# enemy-controlled area (bug fix: the old inner loop's 'continue'
			# was a no-op, appending the node once per hostile link and
			# producing duplicate entries in the cycling list)
			if node.top_edge or any(not link_node.friendly_control for link_node in node.links):
				nodes.append(node)
	else:
		for node in campaign.day_map.player_node.links:
			nodes.append(node)
	# no nodes could be found
	if len(nodes) == 0:
		campaign.selected_node = None
		return
	# nothing selected yet: take the first candidate
	if campaign.selected_node is None:
		campaign.selected_node = nodes[0]
		return
	# sort by degree heading to player node
	def GetHeading(node):
		# angle from the player node to this node, mapped to 0-359 degrees
		# with 0 at the top of the screen
		rads = atan2(node.y-campaign.day_map.player_node.y, node.x-campaign.day_map.player_node.x)
		rads %= 2*pi
		degs = degrees(rads) + 90
		if degs >= 360: degs -= 360
		return int(degs)
	node_list = []
	for node in nodes:
		node_list.append((node, GetHeading(node)))
	node_list.sort(key=lambda node: node[1])
	# find the current selected node
	n = 0
	for (node, heading) in node_list:
		if node == campaign.selected_node: break
		n += 1
	# reverse cycling direction if shift is down
	if key.shift:
		# at start of list
		if n == 0:
			new_node = node_list[-1]
		else:
			new_node = node_list[n-1]
	else:
		# not at end of list
		if n < len(node_list) - 1:
			new_node = node_list[n+1]
		else:
			new_node = node_list[0]
	# set new selected node (heading is not used here)
	(campaign.selected_node, heading) = new_node
	UpdateCOverlay()
	RenderCampaign()
# set up "move to adjacent area" action
def SetupMoveArea():
	"""Switch the campaign interface into 'Move Into Adjacent Area' mode."""
	if campaign.selected_node is None:
		SelectNextArea()
		# no adjacent area could be selected: abort without changing mode
		if campaign.selected_node is None:
			return
	campaign.input_mode = 'Move Into Adjacent Area'
	# refresh the interface to reflect the new mode
	for redraw in (UpdateCActionCon, UpdateCOverlay, RenderCampaign):
		redraw()
# move into selected area
def MoveArea():
	"""Resolve the 'Move Into Adjacent Area' action.

	Spends movement time (depends on road type and weather), optionally
	offers advancing fire into hostile areas, animates the move, then
	either triggers a battle encounter, starts a DEFEND quest battle, or
	records an unopposed entry, finishing with PostEncounter() handling.
	"""
	# determine how much time to spend
	text = 'You move into a new area'
	if campaign.selected_node in campaign.day_map.player_node.stone_road_links:
		time_req = STONE_ROAD_MOVE_TIME
		text += ' along an improved road'
	elif campaign.selected_node in campaign.day_map.player_node.dirt_road_links:
		time_req = DIRT_ROAD_MOVE_TIME
		text += ' along a dirt road'
	else:
		time_req = NO_ROAD_MOVE_TIME
	# bad ground, precipitation, or fog slows movement further
	if campaign.weather.ground != 'Dry' or campaign.weather.precip != 'None' or campaign.weather.fog:
		time_req += GROUND_MOVE_TIME_MODIFIER
	text += ' which takes ' + str(time_req) + ' minutes.'
	# if target area is enemy controlled, see if can use advancing fire
	# must have at least 6 HE shells
	# NEW: main gun must also be operational
	gun_malfunction = 'Main Gun Malfunction' in tank.damage_list or 'Main Gun Broken' in tank.damage_list
	total_he = tank.general_ammo['HE'] + tank.rr_ammo['HE']
	# NEW: handle situations when there is no node selected
	friendly_control = False
	if campaign.selected_node is not None:
		if campaign.selected_node.friendly_control:
			friendly_control = True
	if not gun_malfunction and not friendly_control and total_he >= 6:
		text += ' Use advancing fire (requires 1-6 HE rounds; currently have '
		text += str(total_he) + ' rounds)?'
		if PopUp(text, confirm=True):
			# determine number of rounds required and expend shells, pulling
			# from general stores first, then ready rack
			rounds_req = Roll1D6()
			for r in range(rounds_req):
				if tank.general_ammo['HE'] > 0:
					tank.general_ammo['HE'] -= 1
				elif tank.rr_ammo['HE'] > 0:
					tank.rr_ammo['HE'] -= 1
			# set flag
			campaign.selected_node.advancing_fire = True
			# show result
			text = 'You expend ' + str(rounds_req) + ' HE round'
			if rounds_req > 1: text += 's'
			text += ' entering the area.'
			PopUp(text)
	else:
		PopUp(text)
	campaign.SpendTime(0, time_req)
	# play sound effect
	PlaySound('sherman_movement')
	# movement animation
	if campaign.animations:
		line = GetLine(campaign.day_map.player_node.x, campaign.day_map.player_node.y,
			campaign.selected_node.x, campaign.selected_node.y)
		for (x,y) in line:
			UpdateCOverlay(anim_x=x, anim_y=y)
			RenderCampaign()
			Wait(100)
	# move player to target node
	campaign.day_map.player_node = campaign.selected_node
	# clean up and reset input mode
	campaign.selected_node = None
	campaign.input_mode = 'None'
	UpdateCOverlay()
	UpdateCActionCon()
	RenderCampaign()
	# if not under friendly control, possibly trigger a combat encounter
	# if we moved during a counterattack, battle is automatic
	if campaign.scen_type == 'Counterattack' or not campaign.day_map.player_node.friendly_control:
		# battle roll
		roll = Roll1D10()
		# denser terrain makes enemy contact more likely
		if campaign.day_map.player_node.node_type == 'A':
			roll += 1
		elif campaign.day_map.player_node.node_type in ['C', 'F']:
			roll += 2
		# target score to beat depends on expected resistance
		if campaign.day_map.player_node.resistance == 'Light':
			target_score = 8
		elif campaign.day_map.player_node.resistance == 'Medium':
			target_score = 6
		else:
			target_score = 4
		# check for capture quest: automatic battle
		if campaign.day_map.player_node.quest_type is not None:
			if campaign.day_map.player_node.quest_type in ['CAPTURE', 'RESCUE']:
				target_score = 1
		# counterattack: automatic battle
		elif campaign.scen_type == 'Counterattack':
			target_score = 1
		if roll < target_score:
			PopUp('You meet no resistance in this area.')
			no_combat = True
			# award exp to crew
			for crew in tank.crew:
				crew.AwardExp(1)
			# chance of crew reaction
			if Roll1D10() == 1:
				CrewTalk(random.choice(CREW_TALK_NO_RES))
		else:
			PopUp('A battle encounter is triggered!')
			WriteJournal('')
			text = 'Battle encounter triggered at ' + str(campaign.hour) + ':' + str(campaign.minute).zfill(2)
			text += ', ' + campaign.GetTerrainDesc(campaign.day_map.player_node) + ' terrain'
			WriteJournal(text)
			campaign.SpendTime(0, 15)
			# enter encounter
			InitEncounter()
			# if we're exiting, don't bother re-drawing the screen
			if campaign.exiting:
				return
			no_combat = False
		# do post-encounter stuff and return
		PostEncounter(no_combat)
	else:
		# possible DEFEND mission
		if campaign.day_map.player_node.quest_type is not None:
			if campaign.day_map.player_node.quest_type == 'DEFEND':
				m = random.choice([15, 30, 45])
				PopUp('You arrive to defend the map area. ' + str(m) +
					' minutes later, the expected attack occurs.')
				campaign.SpendTime(0, m)
				InitEncounter(counterattack=True)
				if campaign.exiting:
					return
				PostEncounter(False)
				return
	# check for sunset
	campaign.CheckSunset()
	SaveGame()
	UpdateCActionCon()
	UpdateCOverlay()
	RenderCampaign()
# Await for an enemy attack in the counterattack mission
# if no_time, then we are triggering an attack right away
def AwaitEnemy(no_time=False):
	"""Wait for an enemy attack during a Counterattack mission, then run it.

	no_time: if True, skip the waiting-period roll and trigger the attack
	immediately. The encounter's resistance level defaults to the day's
	level, but is taken from a random adjacent enemy-held area if any.
	"""
	if not no_time:
		PopUp('You await an enemy counterattack.')
		# roll for how long it takes until the next enemy attack
		d1, d2, roll = Roll2D6()
		# apply modifier based on expected resistance for the day
		if campaign.scen_res == 'Medium':
			roll += 2
		elif campaign.scen_res == 'Heavy':
			roll += 4
		# higher modified rolls mean a shorter wait (15 mins - 2.5 hours)
		h = 1
		if roll >= 11:
			m = 15
		elif roll >= 8:
			m = 30
		elif roll >= 6:
			m = 45
		elif roll >= 4:
			h = 2
			m = 0
		else:
			h = 2
			m = 30
		# let time pass
		campaign.SpendTime(h, m)
		# if sunset has hit, don't do an attack
		campaign.CheckSunset()
		if campaign.sunset:
			return
	# show message and start attack
	PopUp('A battle encounter is triggered!')
	WriteJournal('')
	text = 'Battle encounter triggered at ' + str(campaign.hour) + ':' + str(campaign.minute).zfill(2)
	text += ', ' + campaign.GetTerrainDesc(campaign.day_map.player_node) + ' terrain'
	WriteJournal(text)
	campaign.SpendTime(0, 15)
	# determine encounter resistance level: default is day resistance level
	res_level = campaign.scen_res
	# prefer the resistance of a random adjacent enemy-held area
	nodes = []
	for node in campaign.day_map.player_node.links:
		if not node.friendly_control:
			nodes.append(node)
	if len(nodes) > 0:
		node = random.choice(nodes)
		res_level = node.resistance
	# enter encounter
	InitEncounter(res_level=res_level)
	# if we're exiting, don't bother re-drawing the screen
	if campaign.exiting:
		return
	UpdateCActionCon()
	UpdateCOverlay()
	RenderCampaign()
	# do post-encounter stuff
	PostEncounter()
	campaign.CheckSunset()
	SaveGame()
	UpdateCActionCon()
	UpdateCOverlay()
	RenderCampaign()
# set up call in artillery strike action
def SetupCallStrike():
	"""Switch the campaign interface into 'Call in Strike' input mode.

	Robustness/consistency fix: like SetupMoveArea(), abort if no adjacent
	area could be selected, since CallStrike() dereferences
	campaign.selected_node unconditionally.
	"""
	if campaign.selected_node is None:
		SelectNextArea()
		# no adjacent area available: don't enter the mode with no target
		if campaign.selected_node is None:
			return
	campaign.input_mode = 'Call in Strike'
	UpdateCActionCon()
	UpdateCOverlay()
	RenderCampaign()
# attempt to call in an artillery or air strike
def CallStrike(key_char):
	"""Attempt to call an artillery or air strike on the selected area.

	key_char: the pressed key; 'a'/'A' requests artillery (15 mins),
	anything else (reached via 'r'/'R') requests an air strike (30 mins).
	Success is rolled against campaign.arty_chance / campaign.air_chance;
	each success lowers the corresponding chance to a floor of 2.
	"""
	# if target area is friendly, don't allow!
	if campaign.selected_node.friendly_control:
		return
	# if air strike called and not allowed, return
	if key_char in ['r', 'R'] and (campaign.weather.clouds == 'Overcast' or campaign.weather.fog or campaign.weather.precip == 'Snow'):
		return
	# if target area already has been hit by either, return
	if campaign.selected_node.air_strike or campaign.selected_node.arty_strike:
		return
	campaign.input_mode = 'None'
	# calculate time required and odds of success, spend the time and try to call in strike
	d1, d2, roll = Roll2D6()
	success = False
	if key_char in ['a', 'A']:
		campaign.SpendTime(0, 15)
		if roll <= campaign.arty_chance:
			success = True
			text = 'Success: Friendly artillery strikes target area'
			# set flag in area
			campaign.selected_node.arty_strike = True
			# each success makes future artillery strikes less likely
			if campaign.arty_chance > 2:
				campaign.arty_chance -= 1
		else:
			text = 'Friendly artillery is unable to strike target area'
	else:
		campaign.SpendTime(0, 30)
		if roll <= campaign.air_chance:
			success = True
			text = 'Success: Friendly air forces strike target area'
			# set flag in area
			campaign.selected_node.air_strike = True
			# each success makes future air strikes less likely
			if campaign.air_chance > 2:
				campaign.air_chance -= 1
		else:
			text = 'Friendly air forces are unable to strike target area'
	x,y = campaign.selected_node.x+C_MAP_CON_X, campaign.selected_node.y+4-campaign.c_map_y
	ShowLabel(x, y, text)
	# play sound and show animation if enabled
	if success:
		# clear label
		Wait(400)
		libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
		libtcod.console_flush()
		PlaySound('arty_firing')
		ArtyStrikeAnimation(x, y)
		# chance of crew reaction
		if key_char in ['a', 'A']:
			if Roll1D10() <= 3:
				CrewTalk(random.choice(CREW_TALK_ARTY_STRIKE))
	else:
		if key_char in ['a', 'A']:
			# chance of crew reaction
			if Roll1D10() <= 3:
				CrewTalk(random.choice(CREW_TALK_NO_ARTY_STRIKE))
	UpdateCActionCon()
	UpdateCInfoCon(mouse.cx, mouse.cy)
	UpdateCOverlay()
	RenderCampaign()
# do post-encounter, or post-area capture, maintenance stuff
# called by MoveArea as well as DoCampaignDay() when loading right into an
# encounter in progress
def PostEncounter(no_combat=False):
	"""Handle maintenance after a battle encounter or unopposed area entry.

	no_combat: True if the area was entered without a battle. Handles
	commander loss, repair attempts, end-of-day conditions (tank lost or
	damaged beyond repair), area capture and quest rewards, crew exp and
	replacements, immobilization, sunset checks, and map regeneration when
	the exit area is captured.
	"""
	# check to see if we're ending the campaign because commander is dead or sent home
	if CheckCommander():
		campaign.exiting = True
	# check for repair attempts if we're not exiting out of the campaign and we just
	# finished a battle encounter
	if not campaign.exiting and not no_combat:
		AttemptRepairs(post_encounter=True)
	# check for exiting campaign or tank destroyed or damaged
	if campaign.exiting or not tank.alive or tank.swiss_cheese:
		campaign.sunset = True
		if not tank.alive:
			WriteJournal('Player tank knocked out, action day is over')
		elif tank.swiss_cheese:
			WriteJournal('Player tank damaged beyond repair, action day is over')
		else:
			# plain exit: no end-of-day processing
			return
		# award exp for the day to crew, check for level gain
		campaign.EndOfDay()
		return
	# reset tank after encounter
	tank.ResetAfterEncounter()
	# if player node was not previously under player control, change it now and
	# award VP for capturing area
	if not campaign.day_map.player_node.friendly_control:
		campaign.day_map.player_node.friendly_control = True
		campaign.AwardCaptureVP(campaign.day_map.player_node)
		# record captured area
		campaign.AddStat('Map Areas Captured', 1)
		# check quests
		if campaign.day_map.player_node.quest_type is not None:
			# award VP for capture or rescue quest
			if campaign.day_map.player_node.quest_type in ['CAPTURE', 'RESCUE']:
				campaign.AddStat('Quests Completed', 1)
				text = ('Congratulations, commander. You have captured the ' +
					'requested map area')
				if campaign.day_map.player_node.quest_type == 'RESCUE':
					text += ' and rescued your allied units'
				text += ('. You have earned ' +
					str(campaign.day_map.player_node.quest_vp_bonus) +
					' bonus VP.')
				PopUp(text)
				WriteJournal(campaign.day_map.player_node.quest_type +
					' quest completed.')
				# award VP
				campaign.day_vp += campaign.day_map.player_node.quest_vp_bonus
				# reset node and campaign flag
				# this will cancel any RECON mission in the area as well
				campaign.day_map.player_node.quest_type = None
				campaign.day_map.player_node.quest_vp_bonus = None
				campaign.day_map.player_node.quest_time_limit = None
				campaign.quest_active = False
	else:
		# possible completion of DEFEND mission
		if campaign.day_map.player_node.quest_type is not None:
			if campaign.day_map.player_node.quest_type == 'DEFEND':
				campaign.AddStat('Quests Completed', 1)
				text = ('Congratulations, commander. You have defended the ' +
					'requested map area')
				PopUp(text)
				WriteJournal('DEFEND quest completed.')
				campaign.day_vp += campaign.day_map.player_node.quest_vp_bonus
				campaign.day_map.player_node.quest_type = None
				campaign.day_map.player_node.quest_vp_bonus = None
				campaign.quest_active = False
		# possible defense in counterattack mission
		elif not no_combat and campaign.scen_type == 'Counterattack':
			campaign.AwardCaptureVP(campaign.day_map.player_node, counterattack=True)
	# award exp to crew for capturing area
	for crew in tank.crew:
		crew.AwardExp(1)
	# crew recovers, check for crew replacements
	ReplaceCrew()
	# if tank is immobilized, day of combat also ends
	if tank.immobilized:
		WriteJournal('Player tank immobilized, action day is over')
		campaign.sunset = True
		# award exp for the day to crew
		for crew in tank.crew:
			d1, d2, roll = Roll2D6()
			crew.AwardExp(roll)
		CampaignMenu()
		return
	campaign.CheckSunset()
	# trigger view tank to allow player to change hatches, gun load, etc.
	if not campaign.sunset and not no_combat:
		WriteJournal('Battle encounter ended at ' + str(campaign.hour) + ':' + str(campaign.minute).zfill(2))
		PopUp('Set up your tank for the next battle.')
		CampaignViewTank()
		RenderCampaign()
	# we're continuing the day, check if we need to add the "head home" campaign action
	campaign.BuildActionList()
	UpdateCActionCon()
	# if sunset hasn't hit, check for some game events
	if not campaign.sunset:
		# if player just captured the exit area,
		# reset nodes, generate new start and exit areas, move player to new start area
		if campaign.scen_type != 'Counterattack' and campaign.day_map.player_node.exit:
			PopUp('You captured the exit area! Press Enter to move to new map.')
			WriteJournal('Captured exit area and moved to new map')
			libtcod.console_clear(con)
			libtcod.console_print_ex(con, SCREEN_XM, int(SCREEN_HEIGHT/2),
				libtcod.BKGND_NONE, libtcod.CENTER, 'Generating Campaign Map...')
			libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
			libtcod.console_flush()
			# generate a new map, retrying until a usable one is produced
			good_map = False
			while not good_map:
				campaign.nodes = []
				good_map = GenerateCampaignMap()
			PaintCampaignMap()
			# move player view to starting node of new map
			campaign.MoveViewTo(campaign.day_map.player_node)
			# set initial input mode to check adjacent area, no time cost
			SetupCheckArea()
			campaign.free_check = True
			RenderCampaign()
			PopUp('Select an adjacent area to check for enemy resistance.')
		# otherwise, check for a counterattack advance or campaign event trigger
		else:
			if campaign.scen_type == 'Counterattack':
				campaign.DoEnemyAdvance()
			else:
				campaign.RandomCampaignEvent()
	SaveGame()
	UpdateDateCon()
	UpdateCActionCon()
	UpdateCOverlay()
	RenderCampaign()
# return the highest crew level to use for replacements
def GetHighestCrewLevel():
	"""Return the highest level among living, fit crewmen (1 if none qualify).

	Used as the upper bound for the level of replacement crew.
	"""
	levels = [crewman.level for crewman in tank.crew
		if crewman.alive and not crewman.v_serious_wound]
	# if the entire crew is dead or sent home, fall back to level 1
	return max(levels) if levels else 1
# set the next and previous pointers for all crewmembers
def SetCrewPointers():
	"""Link each crewman to his next/previous neighbour, wrapping around."""
	crew = tank.crew
	size = len(crew)
	for i, crewman in enumerate(crew):
		# Python's negative indexing handles the wrap at the front of the list
		crewman.prev = crew[i - 1]
		crewman.next = crew[(i + 1) % size]
# replace any dead or very seriously injured crew in the player tank
# replacing 1+ crew takes 30 mins., unless counterattack
def ReplaceCrew():
	"""Replace dead or very seriously wounded crewmen with new recruits.

	Shows notifications, writes final service reports to the journal,
	spawns replacements (level capped by the highest surviving crewman),
	removes lost crewmen, re-orders and re-links the crew list, and spends
	30 mins if any replacement occurred during a campaign day (free in a
	Counterattack scenario).
	"""
	replaced = False
	# calculate upper limit for level of replacement crew
	highest_level = GetHighestCrewLevel()
	# check for replacements
	for crewman in tank.crew:
		# generate a replacement crew member for the dead and very seriously wounded
		if not crewman.alive or crewman.v_serious_wound:
			# display a notification window
			text = crewman.name + ' has been '
			if not crewman.alive:
				text += 'killed in action.'
			else:
				text += 'severely wounded and has been sent home.'
			text += ' A final report on their service will be added to the campaign journal.'
			PopUp(text)
			# record final report to campaign journal
			text = crewman.GenerateReport()
			for line in text:
				WriteJournal(line)
			replaced = True
			new_crew = SpawnCrewMember(None, crewman.position, crewman.rank_level, replacement=True, old_member=crewman)
			# determine level of replacement crewman
			new_level = libtcod.random_get_int(0, 1, highest_level)
			new_crew.SetLevel(new_level)
			text = crewman.name + ' is replaced by ' + new_crew.name
			text += ' in the ' + new_crew.position + "'s position"
			PopUp(text)
			WriteJournal(text)
			ShowSkills(new_crew)
		else:
			# reset status flags
			crewman.stunned = False
			crewman.unconscious = False
	# remove the dead and very seriously wounded; record in campaign stats
	# (iterate in reverse so removals don't skip entries)
	for crewman in reversed(tank.crew):
		if not crewman.alive or crewman.v_serious_wound:
			if not crewman.alive:
				campaign.AddStat('Crewmen KIA', 1)
			else:
				campaign.AddStat('Crewmen Sent Home', 1)
			tank.crew.remove(crewman)
	# re-order tank crew list
	CREW_ORDER = ['Commander', 'Gunner', 'Loader', 'Driver', 'Asst. Driver']
	def GetCrewOrder(crew):
		# sort key: fixed position order defined above
		return CREW_ORDER.index(crew.position)
	tank.crew.sort(key = GetCrewOrder)
	# reset next and previous pointers for all crew
	SetCrewPointers()
	# if we replaced 1+ crew and we're in a campaign day, takes time
	# unless in counterattack
	if replaced and campaign.day_in_progress and campaign.scen_type != 'Counterattack':
		PopUp('Crew replacement took 30 mins')
		campaign.SpendTime(0, 30)
# setup resupply attempt
def SetupResupply():
	"""Attempt a resupply stop.

	Succeeds on a 1D10 roll of 7 or less; takes 60 mins normally, but in a
	Counterattack scenario takes only 15 mins and always succeeds. On
	success, refills smoke stores and opens the main gun ammo menu.
	"""
	# do roll
	roll = Roll1D10()
	# spend time required
	if campaign.scen_type == 'Counterattack':
		time_used = 15
		# resupply is automatic in a counterattack
		roll = 1
	else:
		time_used = 60
	campaign.SpendTime(0, time_used)
	if roll <= 7:
		PopUp('Supply trucks arrive; you may replenish your ammo stores.')
		# replenish smoke grenade and bomb stores
		tank.smoke_grenades = 6
		tank.smoke_bombs = 15
		campaign.resupply = True
		MainGunAmmoMenu()
		campaign.resupply = False
	else:
		PopUp('You wait for resupply but the supply trucks are delayed and never arrive.')
	# update day clock
	UpdateCActionCon()
	RenderCampaign()
	# check for sunset
	campaign.CheckSunset()
# show the tank stats and allow the player to change ammo load, etc.
def CampaignViewTank(load_ammo_menu=False):
	"""Show the tank stats screen between encounters.

	Lets the player cycle crew, toggle hatches, change the gun load, ready
	rack use, reload selection, and open the main gun ammo menu. Blocks in
	an input loop until Enter is pressed (with a confirmation if resupplying
	with no ammo loaded).

	load_ammo_menu: if True, open the main gun ammo menu on the first key.
	"""
	# darken screen
	libtcod.console_clear(con)
	libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
		0.0, 0.7)
	CON_X = MENU_CON_XM - int(TANK_CON_WIDTH/2)
	CON_Y = int(MENU_CON_HEIGHT/2) - int(TANK_CON_HEIGHT/2)
	# automatically refill smoke grenades and bombs if resupplying
	if campaign.resupply:
		tank.smoke_grenades = 6
		tank.smoke_bombs = 15
	# select first crew member if none selected yet
	# (fix: identity comparison with None instead of '==')
	if campaign.selected_crew is None:
		campaign.selected_crew = tank.crew[0]
	exit_view = False
	while not exit_view:
		# generate and display menu
		libtcod.console_clear(menu_con)
		libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
			clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
		libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
		libtcod.console_print_ex(menu_con, MENU_CON_XM, 1,
			libtcod.BKGND_NONE, libtcod.CENTER, 'Player Tank View')
		libtcod.console_set_default_foreground(menu_con, libtcod.white)
		# refresh the tank display
		UpdateTankCon()
		libtcod.console_blit(tank_con, 0, 0, TANK_CON_WIDTH, TANK_CON_HEIGHT, menu_con, CON_X, CON_Y)
		libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
		libtcod.console_flush()
		refresh_display = False
		while not refresh_display:
			libtcod.console_flush()
			# exit right away
			if libtcod.console_is_window_closed(): sys.exit()
			# get input from user
			libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
			# exit view
			if key.vk == libtcod.KEY_ENTER:
				# if we're resupplying and haven't loaded any ammo,
				# confirm that player wants to continue
				if campaign.resupply and campaign.day_in_progress:
					total = 0
					for ammo_type in AMMO_TYPES:
						if ammo_type in tank.general_ammo:
							total += tank.general_ammo[ammo_type]
					if total == 0:
						text = 'You have not loaded any ammo, are you sure you want to continue?'
						if not PopUp(text, confirm=True, skip_update=True):
							refresh_display = True
							continue
				exit_view = True
				break
			# get pressed key
			key_char = chr(key.c)
			# select next crew member
			if key_char in ['s', 'S'] or key.vk == libtcod.KEY_DOWN:
				campaign.selected_crew = campaign.selected_crew.next
				refresh_display = True
			# select previous crew member
			elif key_char in ['w', 'W'] or key.vk == libtcod.KEY_UP:
				campaign.selected_crew = campaign.selected_crew.prev
				refresh_display = True
			# toggle hatch status for selected crew member
			elif key_char in ['h', 'H']:
				tank.ToggleHatch(campaign.selected_crew)
				refresh_display = True
			# cycle through ammo reload selections
			elif key_char in ['r', 'R']:
				tank.CycleReload()
				refresh_display = True
			# toggle use of ready rack
			elif key_char in ['t', 'T']:
				tank.use_rr = not tank.use_rr
				refresh_display = True
			# change gun load
			elif key_char in ['g', 'G']:
				tank.ChangeGunLoadMenu()
				refresh_display = True
			# open main gun ammo menu, can be triggered by function var
			elif load_ammo_menu or key_char in ['m', 'M']:
				load_ammo_menu = False
				MainGunAmmoMenu(no_dark=True)
				refresh_display = True
# display the main gun ammunition menu
# allow player to move shells around, load new ammo if possible
def MainGunAmmoMenu(no_dark=False):
    """Interactive menu for moving main gun shells between the supply pool,
    the tank's general stores, and the ready rack.

    no_dark: if True, skip the screen-darkening blit (caller already did it).
    Reads/writes globals: tank, campaign, con, menu_con, key, mouse.
    Blocks until the player closes the menu with Enter.
    """
    # direction flag; if true we are moving shells into tank / ready rack
    inward = True
    # keys to use for moving shells: (supply key, ready rack key) per column
    KEY_CODES = [('U', 'J'), ('I', 'K'), ('O', 'L'), ('P', ';')]
    # darken screen
    if not no_dark:
        libtcod.console_clear(con)
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
            0.0, 0.7)
    exit_menu = False
    while not exit_menu:
        # refresh menu console
        libtcod.console_clear(menu_con)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_alignment(menu_con, libtcod.CENTER)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print(menu_con, MENU_CON_XM, 1, 'Main Gun Ammunition Menu')
        x = MENU_CON_XM-28
        # display row titles
        libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
        libtcod.console_set_default_background(menu_con, libtcod.darkest_grey)
        libtcod.console_set_alignment(menu_con, libtcod.LEFT)
        libtcod.console_print(menu_con, x+2, 5, 'Ammo')
        # supply row only shown when resupply is possible
        if campaign.resupply:
            libtcod.console_print(menu_con, x+2, 7, 'Supply')
            libtcod.console_rect(menu_con, x+2, 7, 48, 2, False, flag=libtcod.BKGND_SET)
        libtcod.console_print(menu_con, x+2, 12, 'General')
        libtcod.console_print(menu_con, x+3, 13, 'Stores')
        libtcod.console_rect(menu_con, x+2, 12, 48, 2, False, flag=libtcod.BKGND_SET)
        libtcod.console_print(menu_con, x+2, 17, 'Ready')
        libtcod.console_print(menu_con, x+3, 18, 'Rack')
        libtcod.console_rect(menu_con, x+2, 17, 48, 2, False, flag=libtcod.BKGND_SET)
        libtcod.console_set_default_background(menu_con, libtcod.black)
        # display info for each ammo type and totals
        libtcod.console_set_alignment(menu_con, libtcod.RIGHT)
        x += 16
        key_code = 0
        total_g = 0
        total_rr = 0
        for ammo_type in AMMO_TYPES:
            if ammo_type in tank.general_ammo:
                # ammo type header
                libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
                libtcod.console_print(menu_con, x, 5, ammo_type)
                libtcod.console_set_default_foreground(menu_con, libtcod.white)
                # display amount available in supply
                if campaign.resupply:
                    if ammo_type in ['HE', 'AP', 'WP']:
                        # common types are unlimited: show infinity-like glyph
                        text = chr(236)
                    else:
                        # limited types: show remaining stock or '-' if none
                        text = '-'
                        if ammo_type == 'HCBI' and campaign.hcbi > 0:
                            text = str(campaign.hcbi)
                        elif ammo_type == 'HVAP' and campaign.hvap > 0:
                            text = str(campaign.hvap)
                        elif ammo_type == 'APDS' and campaign.apds > 0:
                            text = str(campaign.apds)
                    libtcod.console_print(menu_con, x, 7, text)
                # display amount in general stores and ready rack
                text = str(tank.general_ammo[ammo_type])
                libtcod.console_print(menu_con, x, 12, text)
                total_g += tank.general_ammo[ammo_type]
                text = str(tank.rr_ammo[ammo_type])
                libtcod.console_print(menu_con, x, 17, text)
                total_rr += tank.rr_ammo[ammo_type]
                # display key commands
                libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
                (k1, k2) = KEY_CODES[key_code]
                # only display supply commands if resupply is available
                if campaign.resupply:
                    if not inward:
                        text = chr(24) + k1 + chr(24)
                    else:
                        text = chr(25) + k1 + chr(25)
                    libtcod.console_print(menu_con, x, 10, text)
                    libtcod.console_set_char_foreground(menu_con, x-1, 10, KEY_HIGHLIGHT_COLOR)
                # directional arrows for general stores <-> ready rack
                if inward:
                    text = chr(25) + k2 + chr(25)
                else:
                    text = chr(24) + k2 + chr(24)
                libtcod.console_print(menu_con, x, 15, text)
                libtcod.console_set_char_foreground(menu_con, x-1, 15, KEY_HIGHLIGHT_COLOR)
                x += 8
                key_code += 1
        # display total and max for general stores and ready rack
        libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
        libtcod.console_print(menu_con, 90, 5, 'Max')
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        text = str(total_g) + '/' + str(tank.stats['main_gun_rounds'])
        libtcod.console_print(menu_con, 90, 12, text)
        text = str(total_rr) + '/' + str(tank.stats['rr_size'])
        libtcod.console_print(menu_con, 90, 17, text)
        # display frame
        libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
        libtcod.console_print_frame(menu_con, MENU_CON_XM-28, 3, 52, 18,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        # display ammo type info
        libtcod.console_set_alignment(menu_con, libtcod.LEFT)
        libtcod.console_print(menu_con, MENU_CON_XM-46, 23, 'HE: High Explosive, ' +
            'used against infantry targets and in advancing fire')
        libtcod.console_print(menu_con, MENU_CON_XM-46, 24, 'AP: Armour-Piercing, ' +
            'used against armoured targets')
        libtcod.console_print(menu_con, MENU_CON_XM-46, 25, 'WP: White Phosporous, ' +
            'generates smoke and can pin infantry')
        libtcod.console_print(menu_con, MENU_CON_XM-46, 26, 'HCBI: Hexachlorothane-Base Initiating, ' +
            'generates a great deal of smoke but no other effect')
        libtcod.console_print(menu_con, MENU_CON_XM-46, 27, 'HVAP: High Velocity Armour-Piercing, ' +
            'used against armoured targets, more effective than AP')
        libtcod.console_print(menu_con, MENU_CON_XM-46, 28, 'APDS: Armour-Piercing Discarding Sabot, ' +
            'used against armoured targets, more effective than AP')
        # (removed a stray no-op string literal 'APDS' that was left here)
        # display instructions
        libtcod.console_set_alignment(menu_con, libtcod.CENTER)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-6, 'Use listed keys to move ammunition')
        libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-5,
            '[%cAlt%c] Switch Direction of Move'%HIGHLIGHT)
        libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-4,
            '[%c+Shift%c] Move 10 shells'%HIGHLIGHT)
        libtcod.console_print(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
            '[%cEnter%c] Close Menu and Continue'%HIGHLIGHT)
        # blit menu console to screen
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0,
            MENU_CON_X, MENU_CON_Y)
        libtcod.console_flush()
        refresh_menu = False
        # inner input loop: runs until a state change forces a menu redraw
        while not refresh_menu and not exit_menu:
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # update screen
            libtcod.console_flush()
            # exit right away
            if libtcod.console_is_window_closed():
                sys.exit()
            if key.vk == libtcod.KEY_ENTER:
                exit_menu = True
            # refresh menu if alt status has changed
            elif key.vk == libtcod.KEY_ALT:
                inward = not inward
                refresh_menu = True
                continue
            # get pressed key
            key_char = chr(key.c)
            # ready rack commands (ignores SHIFT)
            if key_char in ['j', 'k', 'l', ';', 'J', 'K', 'L', ':']:
                # get the index number of the ammo type to use
                if key_char in ['j', 'J']:
                    n = 0
                elif key_char in ['k', 'K']:
                    n = 1
                elif key_char in ['l', 'L']:
                    n = 2
                else:
                    n = 3
                i = 0
                new_type = None
                for ammo_type in AMMO_TYPES:
                    if ammo_type in tank.rr_ammo:
                        if i == n:
                            new_type = ammo_type
                        i += 1
                # only three ammo types, fourth one selected
                if new_type is None:
                    continue
                # move a shell from ready rack to general stores if
                # there is room for it
                if not inward:
                    if tank.rr_ammo[new_type] > 0 and total_g < tank.stats['main_gun_rounds'] + EXTRA_AMMO:
                        tank.rr_ammo[new_type] -= 1
                        tank.general_ammo[new_type] += 1
                        PlaySound('shell_move')
                # move a shell from general stores to rr
                else:
                    if tank.general_ammo[new_type] > 0 and total_rr < tank.stats['rr_size']:
                        tank.general_ammo[new_type] -= 1
                        tank.rr_ammo[new_type] += 1
                        PlaySound('shell_move')
                refresh_menu = True
            # move shells between supply and general stores
            elif campaign.resupply and key_char in ['u', 'i', 'o', 'p', 'U', 'I', 'O', 'P']:
                if key_char in ['u', 'U']:
                    n = 0
                elif key_char in ['i', 'I']:
                    n = 1
                elif key_char in ['o', 'O']:
                    n = 2
                else:
                    n = 3
                # SHIFT moves shells ten at a time
                if key.shift:
                    amount = 10
                else:
                    amount = 1
                # get the ammo type
                i = 0
                new_type = None
                for ammo_type in AMMO_TYPES:
                    if ammo_type in tank.general_ammo:
                        if i == n:
                            new_type = ammo_type
                        i += 1
                # only three ammo types, fourth one selected
                if new_type is None:
                    continue
                # remove a shell from general stores
                # if limited amounts, replace into stores (but lost after this morning)
                if not inward:
                    if tank.general_ammo[new_type] >= amount:
                        tank.general_ammo[new_type] -= amount
                        if new_type == 'HCBI':
                            campaign.hcbi += amount
                        elif new_type == 'HVAP':
                            campaign.hvap += amount
                        elif new_type == 'APDS':
                            campaign.apds += amount
                        PlaySound('shell_move')
                # add to general stores if there's room
                # if limited amounts, check that there are enough left
                else:
                    if total_g + amount > tank.stats['main_gun_rounds'] + EXTRA_AMMO:
                        amount = tank.stats['main_gun_rounds'] + EXTRA_AMMO - total_g
                    # BUGFIX: if stores are already at (or over) capacity the
                    # clamped amount can be <= 0; skip instead of moving a
                    # zero/negative quantity and playing the sound
                    if amount <= 0:
                        continue
                    if new_type == 'HCBI':
                        if campaign.hcbi < amount: continue
                        campaign.hcbi -= amount
                    elif new_type == 'HVAP':
                        if campaign.hvap < amount: continue
                        campaign.hvap -= amount
                    elif new_type == 'APDS':
                        if campaign.apds < amount: continue
                        campaign.apds -= amount
                    tank.general_ammo[new_type] += amount
                    PlaySound('shell_move')
                refresh_menu = True
            # update screen
            libtcod.console_flush()
# display the campaign menu
# if tank is not alive or day has ended, show that
def CampaignMenu():
    """Draw the in-game campaign menu and wait for player input.

    Returns True if the player chose 'Q' (save game and return to the main
    menu), False otherwise.  Reads globals: campaign, tank, con, menu_con,
    key, mouse.  Blocks until ESC (or Q) is pressed.
    """
    # darken screen
    libtcod.console_clear(con)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0,
        0.0, 0.7)
    # generate and display menu
    libtcod.console_clear(menu_con)
    libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
        clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
    libtcod.console_set_alignment(menu_con, libtcod.CENTER)
    libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
    libtcod.console_print(menu_con, MENU_CON_XM, 1, 'Campaign Menu')
    libtcod.console_set_default_foreground(menu_con, libtcod.white)
    # day is over
    if campaign.sunset:
        libtcod.console_print(menu_con, MENU_CON_XM, 4, '[%cESC%c] Return to Campaign Calendar'%HIGHLIGHT)
    else:
        libtcod.console_print(menu_con, MENU_CON_XM, 4, '[%cESC%c] Return to Game'%HIGHLIGHT)
        # save-and-quit option only offered while the day is in progress
        libtcod.console_print(menu_con, MENU_CON_XM, 5, '[%cQ%c] Save Game, Return to Main Menu'%HIGHLIGHT)
    text = 'VP Today: ' + str(campaign.day_vp)
    libtcod.console_print(menu_con, MENU_CON_XM, 8, text)
    text = 'Campaign VP: ' + str(campaign.day_vp + campaign.vp)
    libtcod.console_print(menu_con, MENU_CON_XM, 9, text)
    # status line: tank condition / end of day
    if not tank.alive:
        libtcod.console_print(menu_con, MENU_CON_XM, 10, 'Your tank has been destroyed')
    elif tank.swiss_cheese:
        libtcod.console_print(menu_con, MENU_CON_XM, 10, 'Your tank has been damaged beyond repair')
    elif tank.immobilized:
        libtcod.console_print(menu_con, MENU_CON_XM, 10, 'Your tank has been immobilized')
    elif campaign.sunset:
        libtcod.console_print(menu_con, MENU_CON_XM, 10, 'The combat day has ended')
    libtcod.console_set_alignment(menu_con, libtcod.LEFT)
    libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
    libtcod.console_flush()
    exit_menu = False
    while not exit_menu:
        # get input from user
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        if key.vk == libtcod.KEY_ESCAPE:
            exit_menu = True
        # get pressed key
        key_char = chr(key.c)
        # NOTE(review): 'Q' also requires tank.alive here although the
        # drawn option only depends on sunset — confirm intended
        if not campaign.sunset and tank.alive:
            if key_char in ['q', 'Q']: return True
        # exit right away
        if libtcod.console_is_window_closed():
            sys.exit()
        libtcod.console_flush()
    # re-draw screen if still playing
    if not campaign.sunset and tank.alive:
        RenderCampaign()
    return False
# render campaign consoles to screen
def RenderCampaign(no_flush=False):
    """Composite all campaign-layer consoles onto the display console
    and (optionally) flush to the screen.

    no_flush: if True, draw everything but skip the final screen flush
    (caller will flush later).  Draw order matters: overlay is blitted
    over the map, and the dividing lines are drawn last.
    """
    # blit consoles to display console
    libtcod.console_clear(con)
    # display menu bar
    DisplayMenuBar()
    libtcod.console_blit(date_con, 0, 0, DATE_CON_WIDTH, DATE_CON_HEIGHT, con, 1, 2)
    # map window is scrolled vertically via campaign.c_map_y
    libtcod.console_blit(c_map_con, 0, campaign.c_map_y, C_MAP_CON_WINDOW_W,
        C_MAP_CON_WINDOW_H, con, C_MAP_CON_X, 4)
    # overlay blitted with 0.0 background alpha so the map shows through
    libtcod.console_blit(c_overlay_con, 0, campaign.c_map_y, C_MAP_CON_WINDOW_W,
        C_MAP_CON_WINDOW_H, con, C_MAP_CON_X, 4, 1.0, 0.0)
    libtcod.console_blit(c_action_con, 0, 0, C_ACTION_CON_W, C_ACTION_CON_H, con, 1, 4)
    libtcod.console_blit(c_info_con, 0, 0, C_INFO_CON_W, C_INFO_CON_H, con, 1, C_ACTION_CON_H+5)
    # lines between console displays
    libtcod.console_hline(con, 1, 1, SCREEN_WIDTH-2, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_hline(con, 1, 3, SCREEN_WIDTH-2, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_hline(con, 1, C_ACTION_CON_H+4, C_ACTION_CON_W, flag=libtcod.BKGND_DEFAULT)
    libtcod.console_vline(con, C_ACTION_CON_W+1, 4, SCREEN_HEIGHT-4, flag=libtcod.BKGND_DEFAULT)
    # blit display console to screen and update screen
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
    if not no_flush:
        libtcod.console_flush()
##########################################################################################
#                                    Campaign Day Map                                    #
##########################################################################################

# GetPath - based on http://stackoverflow.com/questions/4159331/python-speed-up-an-a-star-pathfinding-algorithm
# and http://www.policyalmanac.org/games/aStarTutorial.htm
def GetPath(node1, node2, enemy_blocks=False):
    """A* search over the campaign day map.

    Returns the list of nodes to traverse for the shortest path from
    node1 to node2 (the start node itself is excluded), or [] if no
    path exists.  If enemy_blocks is True, zones not under friendly
    control are treated as impassable.
    """
    # wipe per-node pathfinding data left over from any previous search
    for map_node in campaign.day_map.nodes:
        map_node.ClearPathInfo()

    # heuristic: straight-line distance between two map positions
    def _direct_dist(x1, y1, x2, y2):
        return GetDistance(x1, y1, x2, y2)

    # walk the parent pointers back to the start, then flip the order
    def _build_path(final_node):
        trail = []
        step = final_node
        while True:
            trail.append(step)
            if step.parent is None:
                break  # reached the start node
            step = step.parent
        trail.reverse()
        del trail[0]  # drop the starting node itself
        return trail

    node1.h = _direct_dist(node1.x, node1.y, node2.x, node2.y)
    node1.f = node1.g + node1.h
    frontier = set([node1])  # nodes that may still be expanded
    expanded = set()         # nodes already expanded
    while frontier:
        # expand the frontier node with the lowest f score
        current = min(frontier, key=lambda inst: inst.f)
        # we've reached our destination
        if current == node2:
            return _build_path(current)
        frontier.remove(current)
        expanded.add(current)
        # consider every node linked to the current one
        for neighbour in current.links:
            # skip already-expanded nodes
            if neighbour in expanded: continue
            # skip blocked nodes
            if neighbour in campaign.day_map.blocked_nodes: continue
            # optionally skip enemy-held areas
            if enemy_blocks and not neighbour.friendly_control: continue
            # entering a type 'D' node is heavily penalized
            step_cost = 100 if neighbour.node_type == 'D' else 1
            tentative_g = current.g + step_cost
            if neighbour not in frontier:
                # first time seen: record scores and add to frontier
                neighbour.g = tentative_g
                neighbour.h = _direct_dist(neighbour.x, neighbour.y, node2.x, node2.y)
                neighbour.f = neighbour.g + neighbour.h
                neighbour.parent = current
                frontier.add(neighbour)
            elif tentative_g < neighbour.g:
                # already queued: keep the cheaper route
                neighbour.parent = current
                neighbour.g = tentative_g
                neighbour.f = neighbour.g + neighbour.h
    # frontier exhausted without reaching node2: no path possible
    return []
##########################################################################################
# randomly generate a map for a day of the campaign
def GenerateCampaignMap():
    """Randomly generate the campaign day map: nodes, zone membership,
    links, terrain, start/exit nodes and resistance levels.

    Returns True on success; False when generation failed (no usable
    start/exit node, or no path between them) so the caller can retry.
    """
    MIN_DIST = 7    # minimum distance between node centres
    NUM_NODES = 55  # number of nodes to try to create

    # check to make sure that this position is not within the minimum distance to
    # another already-existing map node
    def TooClose(x, y):
        for node in campaign.day_map.nodes:
            if GetDistance(x, y, node.x, node.y) <= MIN_DIST:
                return True
        return False

    # create a new instance of the day map class
    campaign.day_map = CampaignDayMap()
    # clear the currently selected node if any
    campaign.selected_node = None
    # generate the map nodes: up to 300 random placements
    for tries in range(0, 300):
        # find a random location on the map board
        x = libtcod.random_get_int(0, 3, C_MAP_CON_WIDTH-4)
        y = libtcod.random_get_int(0, 3, C_MAP_CON_HEIGHT-4)
        # check that it's not within the minimum distance away from another
        # map node
        if TooClose(x, y):
            continue
        # create the node
        campaign.day_map.nodes.append(MapNode(x, y))
        # break if we have enough nodes
        if len(campaign.day_map.nodes) >= NUM_NODES:
            break
    # create list of character locations and set their node membership:
    # every map cell belongs to its nearest node (Voronoi-style regions)
    for y in range(0, C_MAP_CON_HEIGHT):
        for x in range (0, C_MAP_CON_WIDTH):
            # find nearest node to this coordinate
            nearest = None
            nearest_dist = None
            for node in campaign.day_map.nodes:
                dist = GetDistance(x, y, node.x, node.y)
                if nearest is None:
                    nearest = node
                    nearest_dist = dist
                    continue
                if dist < nearest_dist:
                    nearest = node
                    nearest_dist = dist
            # create the new character location
            campaign.day_map.char_locations[(x,y)] = nearest

    # check an adjacent coordinate to see if it belongs to a different map node
    def CheckCoord(x, xmod, y, ymod, node):
        node2 = campaign.day_map.char_locations[(x+xmod,y+ymod)]
        if node2 != node:
            # add to set of edge coordinates
            node.edges.add((x,y))
            # create (symmetric) link if not already present
            if node2 not in node.links:
                node.links.append(node2)
                node2.links.append(node)

    # determine edge coordinates of node, and generate links to adjacent nodes
    for y in range(0, C_MAP_CON_HEIGHT):
        for x in range (0, C_MAP_CON_WIDTH):
            parent_node = campaign.day_map.char_locations[(x,y)]
            # check the four adjacent coordinates; if any are off the map or
            # belong to a different area, this is an edge coordinate
            if x-1<0 or x+1>=C_MAP_CON_WIDTH or y-1<0 or y+1>=C_MAP_CON_HEIGHT:
                parent_node.edges.add((x,y))
                continue
            CheckCoord(x, -1, y, 0, parent_node)
            CheckCoord(x, 1, y, 0, parent_node)
            CheckCoord(x, 0, y, -1, parent_node)
            CheckCoord(x, 0, y, 1, parent_node)
    # set node terrain chances based on day terrain type if any;
    # each entry is (max 2D6 roll, node type) checked in order
    today = GetToday()
    if 'terrain' in today:
        if today['terrain'] == 'bocage':
            terrain_chances = [(4,'C'), (5,'A'), (8,'F'), (10,'B'), (11,'D'), (12,'E')]
        elif today['terrain'] == 'forest':
            terrain_chances = [(3,'C'), (4,'A'), (8,'D'), (10,'B'), (11,'E')]
    else:
        terrain_chances = [(4,'C'), (5,'A'), (8,'B'), (9,'D'), (12,'E')]
        # NOTE(review): a 'terrain' value that is neither bocage nor forest
        # would leave terrain_chances unset (NameError below) — confirm the
        # calendar data only ever uses those two values
    # sort links for each node and set terrain types
    for node in campaign.day_map.nodes:
        node.links.sort(key=attrgetter('y', 'x'))
        d1, d2, roll = Roll2D6()
        for (target_score, node_type) in terrain_chances:
            if roll <= target_score:
                node.node_type = node_type
                break
        # set special settings for particular nodes types
        if node.node_type == 'C': # village
            # for village nodes, we need to determine the distance of the
            # closest edge coordinate from the node center, and use this for
            # the radius of the village buildings
            closest = 100
            for (x,y) in node.edges:
                dist = GetDistance(x, y, node.x, node.y)
                if dist < closest:
                    closest = dist
            node.village_radius = closest - 1
            # seems to sometimes be possible to have 0 radius villages, so check
            if node.village_radius == 0:
                node.village_radius = 1
        elif node.node_type == 'E': # marshland
            campaign.day_map.blocked_nodes.add(node) # mark as impassible
    ##### Prune any adjacent villages #####
    # visit nodes in random order; a village adjacent to another village is
    # demoted to type 'A'
    for node in random.sample(campaign.day_map.nodes, len(campaign.day_map.nodes)):
        if node.node_type == 'C':
            for linked_node in node.links:
                if linked_node.node_type == 'C':
                    node.node_type = 'A'
    ##### Sever links with impassible nodes #####
    for node1 in campaign.day_map.nodes:
        if node1 in campaign.day_map.blocked_nodes:
            # remove all links from other nodes to this one
            for node2 in node1.links:
                node2.links.remove(node1)
            # remove all links from this node to others
            node1.links = []
    # mark map edge nodes
    for x in range (0, C_MAP_CON_WIDTH):
        campaign.day_map.char_locations[(x,0)].top_edge = True
        campaign.day_map.char_locations[(x,C_MAP_CON_HEIGHT-1)].bottom_edge = True
    for y in range (0, C_MAP_CON_HEIGHT):
        campaign.day_map.char_locations[(0,y)].left_edge = True
        campaign.day_map.char_locations[(C_MAP_CON_WIDTH-1,y)].right_edge = True
    start_node = None
    exit_node = None
    # determine start node
    for node in random.sample(campaign.day_map.nodes, len(campaign.day_map.nodes)):
        if node in campaign.day_map.blocked_nodes: continue
        # in counterattack missions, we start on the top edge
        if campaign.scen_type == 'Counterattack':
            if not node.top_edge: continue
        else:
            if not node.bottom_edge: continue
        node.start = True
        # set player node to this node
        campaign.day_map.player_node = node
        node.friendly_control = True
        start_node = node
        break
    # counterattack missions also have an 'exit' node (picked on the
    # opposite edge from the start node)
    for node in random.sample(campaign.day_map.nodes, len(campaign.day_map.nodes)):
        if campaign.scen_type == 'Counterattack':
            if not node.bottom_edge: continue
        else:
            if not node.top_edge: continue
        if node not in campaign.day_map.blocked_nodes:
            node.exit = True
            exit_node = node
            break
    # Make sure a path is possible from start to exit node; if not, return
    # false since map generation has failed
    if start_node is None or exit_node is None:
        return False
    if GetPath(start_node, exit_node) == []:
        return False
    # for Counterattack missions, make sure we have a start node
    else:
        # NOTE(review): this branch is unreachable in practice — start_node
        # was already checked for None above; kept as-is pending confirmation
        if start_node is None: return False
    # determine area resistance levels
    for node in campaign.day_map.nodes:
        # skip impassible nodes
        if node in campaign.day_map.blocked_nodes: continue
        # do roll and apply modifiers
        roll = Roll1D10()
        if node.node_type == 'A':
            roll += 1
        elif node.node_type == 'B':
            roll -= 1
        elif node.node_type == 'C':
            roll += 2
        elif node.node_type == 'D':
            roll -= 2
        if len(node.stone_road_links) > 0:
            roll += 2
        if len(node.dirt_road_links) > 0:
            roll += 1
        # check modified roll against odds for different day resistance levels
        if campaign.scen_res == 'Light':
            if roll <= 7:
                area_res = 'Light'
            else:
                area_res = 'Medium'
        elif campaign.scen_res == 'Medium':
            if roll <= 5:
                area_res = 'Light'
            elif roll <= 9:
                area_res = 'Medium'
            else:
                area_res = 'Heavy'
        else:
            if roll <= 4:
                area_res = 'Light'
            elif roll <= 8:
                area_res = 'Medium'
            else:
                area_res = 'Heavy'
        node.resistance = area_res
        # if counterattack scenario, set all map nodes to friendly control
        if campaign.scen_type == 'Counterattack':
            node.friendly_control = True
    # use the default seed to generate a random seed to use to map painting
    # seed is an unsigned 32 bit int
    campaign.day_map.seed = libtcod.random_get_int(0, 0, 2147483647)
    # paint the map console for the first time
    PaintCampaignMap()
    # map complete!
    return True
##########################################################################################
# check for awards, or rank promotions for the tank commander
def CheckAwardsPromotions(new_month=False):
    """Award decorations and check for a commander rank promotion.

    new_month: True when called at the start of a new calendar month; this
    additionally runs the monthly decoration check and resets the recorded
    best one-day VP score.  Mutates crew members and campaign state.
    """
    # check for purple heart awards for USA players
    # (no award for light wound)
    if campaign.player_nation == 'USA':
        for crewman in tank.crew:
            if crewman.serious_wound or crewman.v_serious_wound or not crewman.alive:
                crewman.AwardDecoration('Purple Heart')
    crewman = GetCrewByPosition('Commander')
    # only check for other awards for commander if start of new month
    if new_month:
        # roll 2D6 and add to highest one-day VP score
        d1, d2, roll = Roll2D6()
        award_score = roll + campaign.record_day_vp
        # go through awards (highest requirement first) and find highest
        # that can be awarded
        for (award_name, text, score_req) in reversed(campaign.decorations):
            if award_score >= score_req:
                crewman.AwardDecoration(award_name)
                break
        # reset highest one-day VP score for new month
        campaign.record_day_vp = 0
    # check for commander promotion; no promotion if dead
    if not crewman.alive:
        return
    # check through ranks in reverse order, finding the highest new rank that can be
    # awarded; stop when we reach the commander's current rank
    for n in range(7, -1, -1):
        if n == crewman.rank_level: break
        a, b, vp_req = campaign.ranks[n]
        if campaign.vp >= vp_req:
            crewman.rank_level = n
            WriteJournal(crewman.name + ' promoted to rank of ' + crewman.GetRank())
            text = ('Congratulations, commander. Due to your continued service ' +
                'and leadership, you have been promoted to the rank of ' +
                crewman.GetRank())
            PopUp(text)
            break
# check the commander's health
# if we're not in casual mode and he's killed or seriously injured, game is over
def CheckCommander():
    """End the campaign if the commander is dead or very seriously wounded.

    Returns True when the campaign ended, False when it continues.
    NOTE(review): the casual-commander early exit returns None (falsy) —
    callers that test truthiness are fine, but confirm none compare to False.
    Side effects on campaign end: high score added, savegame deleted,
    journal written to disk, final stats shown.
    """
    # casual mode: commander status never ends the campaign
    if campaign.casual_commander: return
    crewman = GetCrewByPosition('Commander')
    if crewman.v_serious_wound or not crewman.alive:
        ##### Campaign is over #####
        campaign.over = True
        if not crewman.alive:
            PopUp('You are dead. Your campaign is over')
        else:
            PopUp('You have been seriously injured and are sent home. Your campaign is over.')
        # add high score
        AddHighScore()
        os.remove('savegame')
        # record final journal entries
        # NOTE(review): a commander who is both dead and very seriously
        # wounded is logged as wounded here but announced as dead above —
        # confirm intended precedence
        text = 'Campaign Over: '
        if crewman.v_serious_wound:
            text += 'Commander was very seriously wounded and was sent home.'
        else:
            text += 'Commander was killed in action.'
        WriteJournal(text)
        # record campaign stats to journal
        WriteJournal('')
        WriteJournal('******')
        WriteJournal('Final Campaign Stats')
        text = ('Days of Combat: ' + str(campaign.stats['Days of Combat']) +
            '/' + str(len(campaign.days)))
        WriteJournal(text)
        text = 'Total Victory Points: ' + str(campaign.vp + campaign.day_vp)
        WriteJournal(text)
        # record remainder of campaign stats
        for stat_name in C_STATS:
            text = stat_name + ': '
            if stat_name not in campaign.stats:
                text += '0'
            else:
                text += str(campaign.stats[stat_name])
            WriteJournal(text)
        WriteJournal('')
        WriteJournal('******')
        # write journal to file
        RecordJournal()
        # display final campaign stats to player
        ShowCampaignStats()
        return True
    return False
# generate a random model of sherman based on current date and rarity
def RandomPlayerTankModel():
    """Pick a player tank model at random, weighted by each model's
    rarity factor for the current date.

    Falls back to 'M4 Turret A' (with an error message) if no model
    could be selected.
    """
    # collect every model currently available, with its rarity weight
    candidates = []
    weight_total = 0
    for veh_type in campaign.player_veh_list:
        weight = campaign.GetRF(veh_type)
        if weight > 0:
            candidates.append((veh_type, weight))
            weight_total += weight
    # randomize order so equal-weight models have no positional bias
    random.shuffle(candidates)
    # weighted pick: walk the list, subtracting weights until one matches
    pick = libtcod.random_get_int(0, 1, weight_total)
    chosen = ''
    for (veh_type, weight) in candidates:
        if pick <= weight:
            chosen = veh_type
            break
        pick -= weight
    if chosen == '':
        print ('ERROR: Could not randomly choose a new tank model')
        return 'M4 Turret A'
    return chosen
# prompt the player for a tank name
def GetTankName():
    """Ask the player to name their tank; assign a random name if the
    input is left blank."""
    libtcod.console_set_default_background(con, libtcod.black)
    libtcod.console_clear(con)
    chosen = GetInput(con, 'Choose a name for your Sherman tank', 25, 17, random_list=TANK_NAMES)
    # fall back to a random name when nothing was entered
    if chosen == '':
        chosen = random.choice(TANK_NAMES)
    tank.SetName(chosen)
# prompt the player for a commander name
def GetCommanderName(crewman):
    """Ask the player for the commander's name; generate a random one
    if the input is left blank."""
    libtcod.console_set_default_background(con, libtcod.black)
    libtcod.console_clear(con)
    entered = GetInput(con, 'Enter your name', 25, NAME_MAX_LEN, get_name=True)
    if entered != '':
        crewman.name = entered
    else:
        # nothing entered: let the crewman pick a random name
        crewman.GenerateName()
# handle either assigning a new crewman or removing a crewmen when switching between a tank
# without an assistant driver position and one with
def CheckPlayerTankPositions():
    """Reconcile the crew roster with the new tank's assistant driver
    position: spawn a recruit if the position exists but is empty, or
    reassign the crewman away if the position no longer exists."""
    position_exists = 'no_asst_driver' not in tank.stats
    occupant = GetCrewByPosition('Asst. Driver')
    if position_exists and occupant is None:
        # need a new asst. driver: spawn one at a random level up to the
        # most experienced current crewman
        cap = GetHighestCrewLevel()
        recruit = SpawnCrewMember(None, 'Asst. Driver', 0)
        recruit.SetLevel(libtcod.random_get_int(0, 1, cap))
        SetCrewPointers()
        text = recruit.name + ' joins the tank crew as the Assistant Driver.'
        PopUp(text)
        WriteJournal(text)
        ShowSkills(recruit)
    elif not position_exists and occupant is not None:
        # have an extra asst. driver: remove him from the crew
        tank.crew.remove(occupant)
        SetCrewPointers()
        text = ('Your new tank has no assistant driver position. ' + occupant.name +
            ' is reassigned elsewhere.')
        PopUp(text)
        WriteJournal(text)
# check the player tank after a campaign day; if it's been disabled then it's repaired,
# if it's destroyed or damaged beyond repair then the player gets a new tank model
def CheckPlayerTank():
    """Post-day tank maintenance: replace a destroyed/unrepairable tank
    with a new model, or repair a surviving one; heal light and serious
    crew wounds."""
    # check for awards and/or rank promotions
    CheckAwardsPromotions()
    if not tank.alive or tank.swiss_cheese:
        # check for crew replacements
        ReplaceCrew()
        # select tank, or assign a new tank
        if campaign.unlimited_tank_selection:
            tank_type = ShowTankInfo(select_tank=True)
        else:
            tank_type = RandomPlayerTankModel()
        text = ('HQ assigns your crew to a ' + tank_type + ' tank.')
        PopUp(text)
        tank.alive = True
        tank.unit_type = tank_type
        tank.Setup()
        SetVehicleStats(tank)
        # get the name for the new tank
        GetTankName()
        WriteJournal('New player tank: ' + tank.unit_type + ' "' + tank.name + '"')
        # check for asst driver changeup
        CheckPlayerTankPositions()
        # reset crew orders, hatch status, and spot ability
        for crewman in tank.crew:
            crewman.order = crewman.default_order
            crewman.CheckHatch()
            crewman.SetSpotAbility()
    else:
        if tank.immobilized:
            tank.immobilized = False
            PopUp('Your tank is repaired for the next day of combat.')
        # clear damage list
        # NOTE(review): cleared for any surviving tank, not only immobilized
        # ones — confirm against original indentation
        tank.damage_list = []
    # any crewman with light or serious wounds are healed
    for crewman in tank.crew:
        crewman.light_wound = False
        crewman.serious_wound = False
# return a pointer to the current date in the list of campaign days
def GetToday():
    """Return the calendar-day dict matching campaign.current_date,
    or None if no day in the calendar matches."""
    for day in campaign.days:
        same_date = (int(day['year']) == campaign.current_date[0]
            and int(day['month']) == campaign.current_date[1]
            and int(day['date']) == campaign.current_date[2])
        if same_date:
            return day
    return None
# set the campaign date record to a given date in the calendar
def SetToday(new_date):
    """Point campaign.current_date ([year, month, date] as ints) at the
    given calendar-day dict."""
    for slot, field in enumerate(('year', 'month', 'date')):
        campaign.current_date[slot] = int(new_date[field])
# advance the combat calendar to the next day in the calendar
# show fade-in and fade-out animations and comment for day (location or refit)
# if action day, set up the campaign variables
def AdvanceDay():
    """Advance the campaign calendar by one day (or set the start day),
    play the date fade-in/out animation, and set up the day's campaign
    variables (refitting vs. action day)."""
    # screen row where the date text is drawn
    DATE_Y = 24
    # get a pointer to the current date in the list of calendar days
    today = GetToday()
    # clear screen
    libtcod.console_clear(0)
    libtcod.console_flush()
    libtcod.console_set_alignment(0, libtcod.CENTER)
    # if this first day of a new campaign ([0,0,0] means 'not started yet')
    if campaign.current_date == [0,0,0]:
        # set to selected start day in the combat calendar
        SetToday(campaign.days[campaign.start_date])
    # advancing to a new day
    else:
        n = campaign.days.index(GetToday())
        # still have at least one more day in the campaign
        if n < len(campaign.days) - 1:
            # advance date one day
            new_date = campaign.days[n+1]
            # start of new calendar month
            if campaign.current_date[1] != int(new_date['month']):
                # check for awards and/or promotions for commander
                CheckAwardsPromotions(new_month=True)
        ##### Reached end of campaign #####
        else:
            PopUp('You have survived and reached the end of the ' +
                'campaign! Congratulations!')
            campaign.over = True
            AddHighScore()
            os.remove('savegame')
            # record final journal entry
            WriteJournal('Campaign Over: End of campaign calendar')
            RecordJournal()
            ShowCampaignStats()
            campaign.exiting = True
            return
        # advance to next date in calendar
        SetToday(campaign.days[n+1])
    # clear screen (in case we did end-of-month stuff)
    libtcod.console_clear(0)
    libtcod.console_flush()
    date_text = campaign.GetDate()
    # do date fade-in
    for c in range(0, 255, 5):
        libtcod.console_set_default_foreground(0, libtcod.Color(c,c,c))
        libtcod.console_print(0, SCREEN_XM, DATE_Y, date_text)
        libtcod.console_flush()
        Wait(2)
    # get a pointer to the new current day
    today = GetToday()
    # record new day in journal
    WriteJournal('')
    WriteJournal('******')
    text = campaign.GetDate()
    if today['comment'] == 'Refitting':
        text += ': Start of refitting period'
    else:
        text += ': ' + today['comment']
    WriteJournal(text)
    # fade-in day comment
    text = today['comment']
    for c in range(0, 255, 5):
        libtcod.console_set_default_foreground(0, libtcod.Color(c,c,c))
        libtcod.console_print(0, SCREEN_XM, DATE_Y+3, text)
        libtcod.console_flush()
        Wait(2)
    # current date is start of a refitting period
    if today['comment'] == 'Refitting':
        campaign.gyro_skill_avail = True # set gyrostabilizer skill flag
        campaign.action_day = False # set action flag
        # generate new tank model to offer if tank selection is 'strict'
        if not campaign.unlimited_tank_selection:
            campaign.tank_on_offer = RandomPlayerTankModel()
    else:
        # reset offered tank
        campaign.tank_on_offer = ''
        # set action flag
        campaign.action_day = True
        # get string descriptions of day's action
        res = today['resistance_level']
        if res == 'L':
            campaign.scen_res = 'Light'
        elif res == 'M':
            campaign.scen_res = 'Medium'
        else:
            campaign.scen_res = 'Heavy'
        mission = today['mission']
        if mission == 'A':
            campaign.scen_type = 'Advance'
        elif mission == 'B':
            campaign.scen_type = 'Battle'
        else:
            campaign.scen_type = 'Counterattack'
        # write to journal
        WriteJournal('Action Day:')
        WriteJournal(' Mission: ' + campaign.scen_type)
        WriteJournal(' Expected Resistance: ' + campaign.scen_res)
        (h, m) = campaign.GetSunrise()
        WriteJournal(' Sun rose at ' + str(h) + ':' + str(m).zfill(2))
    # pause to show text
    Wait(200)
    # fade out
    for i in range(255, 0, -10):
        libtcod.console_set_fade(i, libtcod.black)
        libtcod.console_flush()
        Wait(1)
    libtcod.console_set_fade(255, libtcod.black)
    libtcod.console_set_alignment(0, libtcod.LEFT)
# run through the campaign calendar and get player input
def RunCalendar(load_day):
    """Run the campaign calendar screen: the main between-battles loop.

    Draws the calendar / refit / mission screen, handles player input
    (advance day, begin combat day, view tank, switch tanks, save & quit),
    and dispatches into InitCampaignDay() for action days.

    load_day: True when resuming a saved campaign; the save is loaded and,
    if a combat day was in progress, it is resumed immediately.
    Returns True on save-and-quit, None when exiting via campaign.exiting.
    Relies on module globals: campaign, tank, con, key, mouse.
    """
    # loading a campaign
    if load_day:
        load_day = False
        LoadGame()
        # set fullscreen mode based on saved settings
        if campaign.fullscreen:
            libtcod.sys_force_fullscreen_resolution(campaign.fs_res_x, campaign.fs_res_y)
            libtcod.console_set_fullscreen(True)
        # loading a campaign day in progress
        if campaign.day_in_progress:
            InitCampaignDay(load=True)
            if campaign.exiting: return
            campaign.day_in_progress = False
            CheckPlayerTank()
            # action day is over so reset the flag
            campaign.action_day = False
    # starting a new campaign
    else:
        SaveGame()
    # load campaign map if any into a console
    campaign_map = None
    if campaign.map_file != '':
        campaign_map = LoadXP(campaign.map_file)
    quit_calendar = False
    while not quit_calendar:
        # get pointer to current day in calendar
        today = GetToday()
        # draw screen
        libtcod.console_set_default_background(con, libtcod.black)
        libtcod.console_clear(con)
        DisplayMenuBar()
        libtcod.console_hline(con, 0, 1, SCREEN_WIDTH, flag=libtcod.BKGND_DEFAULT)
        libtcod.console_set_alignment(con, libtcod.CENTER)
        # display frames and division lines
        libtcod.console_set_default_foreground(con, libtcod.light_grey)
        libtcod.console_print_frame(con, 2, 2, 57, 46, clear=False,
            flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_hline(con, 6, 6, 48, flag=libtcod.BKGND_DEFAULT)
        # current calendar date
        libtcod.console_set_default_foreground(con, MENU_TITLE_COLOR)
        libtcod.console_print(con, 30, 4, campaign.GetDate())
        libtcod.console_set_default_foreground(con, libtcod.white)
        # current location or mission
        libtcod.console_print(con, 30, 8, today['comment'])
        # display offered tank model or narrative description of current location / mission
        libtcod.console_set_alignment(con, libtcod.LEFT)
        if campaign.tank_on_offer != '':
            libtcod.console_print(con, 6, 10, 'HQ offers your crew a transfer to a new tank:')
            libtcod.console_print_frame(con, 6, 12, 48, 30,
                clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
            ShowVehicleTypeInfo(campaign.tank_on_offer, con, 8, 14, no_image=True)
        # unlimited selection of tank
        elif today['comment'] == 'Refitting' and campaign.unlimited_tank_selection:
            libtcod.console_print(con, 6, 20, 'You have the option of switching')
            libtcod.console_print(con, 6, 21, ' to a new tank model during the refit.')
            libtcod.console_print(con, 6, 22, 'Press [%cS%c] to select a new model.'%HIGHLIGHT)
        # otherwise, description of current location / mission if any
        elif today['description'] is not None:
            lines = wrap(today['description'], 48, subsequent_indent = ' ')
            y = 10
            for line in lines:
                libtcod.console_print(con, 6, y, line)
                y += 1
                # cut off if too long
                if y >= 43:
                    break
        libtcod.console_set_alignment(con, libtcod.CENTER)
        # display mission info if action day
        if campaign.action_day:
            libtcod.console_print(con, 30, 37, 'Mission Type: ' + campaign.scen_type)
            libtcod.console_print(con, 30, 38, 'Expected Resistance for the day: ' + campaign.scen_res)
            DisplayWeather(con, 25, 40)
            # reset colours after the weather display
            libtcod.console_set_default_foreground(con, libtcod.white)
            libtcod.console_set_default_background(con, libtcod.black)
        # finally, display current campaign VP score
        libtcod.console_print(con, 30, 45, 'Current VP Score: ' + str(campaign.vp))
        # display campaign map if any
        if campaign_map is not None:
            libtcod.console_blit(campaign_map, 0, 0, 0, 0, con, 63, 2)
            # display current location on map if any is set
            today = GetToday()
            if 'map_x' in today and 'map_y' in today:
                x = int(today['map_x'])
                y = int(today['map_y'])
                libtcod.console_put_char(con, x+63, y+2, '@', flag=libtcod.BKGND_NONE)
        # display menu options
        y = 54
        libtcod.console_set_alignment(con, libtcod.LEFT)
        if campaign.action_day:
            text = '[%cB%c]egin combat day'%HIGHLIGHT
        else:
            text = '[%cA%c]dvance to next day'%HIGHLIGHT
        libtcod.console_print(con, 65, y, text)
        libtcod.console_print(con, 65, y+1, '[%cV%c]iew Tank'%HIGHLIGHT)
        if today['comment'] == 'Refitting':
            if campaign.tank_on_offer != '':
                libtcod.console_print(con, 65, y+2, '[%cS%c]witch to Offered Tank'%
                    HIGHLIGHT)
            elif campaign.unlimited_tank_selection:
                libtcod.console_print(con, 65, y+2, '[%cS%c]witch to New Tank'%
                    HIGHLIGHT)
        libtcod.console_print(con, 65, y+3, 'Save and [%cQ%c]uit'%
            HIGHLIGHT)
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
        libtcod.console_flush()
        refresh = False
        while not refresh:
            # exit right away
            if libtcod.console_is_window_closed():
                sys.exit()
            # exiting the campaign
            if campaign.exiting: return
            # get player input
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # DEBUG / mapping: right-click reports map coordinates
            if DEBUG and mouse.rbutton:
                mx, my = mouse.cx, mouse.cy
                if mx >= 63 and my >= 2:
                    print ('Mouse pos: ' + str(mx - 63) + ',' + str(my - 2))
            # get pressed key
            key_char = chr(key.c)
            # help display
            if key.vk == libtcod.KEY_F1 or key.vk == libtcod.KEY_1:
                ShowHelp()
                refresh = True
            # tank info display
            elif key.vk == libtcod.KEY_F2 or key.vk == libtcod.KEY_2:
                ShowTankInfo()
                refresh = True
            # crew info display
            elif key.vk == libtcod.KEY_F3 or key.vk == libtcod.KEY_3:
                ShowCrewInfo()
                refresh = True
            # settings
            elif key.vk == libtcod.KEY_F4 or key.vk == libtcod.KEY_4:
                ShowSettings()
                refresh = True
            # campaign stats
            elif key.vk == libtcod.KEY_F5 or key.vk == libtcod.KEY_5:
                ShowCampaignStats()
                refresh = True
            # screenshot
            elif key.vk == libtcod.KEY_F6 or key.vk == libtcod.KEY_6:
                SaveScreenshot()
                refresh = True
            # sound toggle
            elif key.vk == libtcod.KEY_F7 or key.vk == libtcod.KEY_7:
                campaign.sounds = not campaign.sounds
                if campaign.sounds:
                    PopUp("Sound turned on")
                else:
                    PopUp("Sound turned off")
                refresh = True
            # save and quit
            if key_char in ['q', 'Q']:
                SaveGame()
                return True
            # view the player tank
            elif key_char in ['v', 'V']:
                campaign.resupply = True
                CampaignViewTank()
                campaign.resupply = False
                refresh = True
            # advance to next day (debug: can always advance day)
            elif key_char in ['a', 'A']:
                if not campaign.action_day or DEBUG:
                    campaign.saw_action = False
                    AdvanceDay()
                    SaveGame()
                    refresh = True
            # start combat day
            elif campaign.action_day and key_char in ['b', 'B']:
                campaign.tank_on_offer = ''
                InitCampaignDay()
                if campaign.exiting: return
                campaign.day_in_progress = False
                CheckPlayerTank()
                # action day is over so reset the flag
                campaign.action_day = False
                campaign.saw_action = True
                refresh = True
            # switch to a new tank, only possible during a refit
            if today['comment'] == 'Refitting':
                if key_char in ['s', 'S'] and (campaign.tank_on_offer != '' or campaign.unlimited_tank_selection):
                    if PopUp('Switching to a new tank cannot be undone. Are you sure?', confirm=True):
                        if campaign.unlimited_tank_selection:
                            tank_type = ShowTankInfo(select_tank=True)
                            tank.unit_type = tank_type
                        else:
                            tank.unit_type = campaign.tank_on_offer
                            campaign.tank_on_offer = ''
                        # rebuild the tank and re-seat the crew in the new model
                        tank.Setup()
                        SetVehicleStats(tank)
                        GetTankName()
                        CheckPlayerTankPositions()
                        for crew_member in tank.crew:
                            crew_member.order = crew_member.default_order
                            crew_member.CheckHatch()
                            crew_member.SetSpotAbility()
                        refresh = True
            libtcod.console_flush()
# display campaign settings and allow player to choose
def SetCampaignSettings():
    """Display the campaign-settings menu and let the player choose options.

    Toggles campaign.unlimited_tank_selection and campaign.casual_commander,
    and selects campaign.start_date (an index into campaign.days). Returns
    when the player presses Enter; settings are stored on the module-global
    campaign object. Relies on globals: campaign, menu_con, key, mouse.
    """
    libtcod.console_clear(0)
    exit_menu = False
    while not exit_menu:
        # generate and display menu
        libtcod.console_clear(menu_con)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 1,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Campaign Settings')
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 3,
            libtcod.BKGND_NONE, libtcod.CENTER, 'These settings cannot be changed once a campaign has begun')
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 4,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Your selections will be displayed alongside your final score')
        x = 30
        # tank selection setting
        text = '[%cT%c]ank Selection: '%HIGHLIGHT
        if campaign.unlimited_tank_selection:
            text += 'Unlimited'
        else:
            text += 'Strict'
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print(menu_con, x, 12, text)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        if campaign.unlimited_tank_selection:
            libtcod.console_print(menu_con, x, 14, 'You may choose any available model of tank when starting your campaign, when')
            libtcod.console_print(menu_con, x, 15, ' replacing a destroyed tank, or during a refit period.')
        else:
            libtcod.console_print(menu_con, x, 14, 'You must begin the campaign with the assigned tank model, and must accept the')
            libtcod.console_print(menu_con, x, 15, ' model offered to you when replacing a destroyed tank. One randomly selected model')
            libtcod.console_print(menu_con, x, 16, ' will be offered to you during each refit period. Tank model selection is weighted')
            libtcod.console_print(menu_con, x, 17, ' by historical availability and rarity.')
        # commander replacement setting
        text = '[%cC%c]ommander Replacement: '%HIGHLIGHT
        if campaign.casual_commander:
            text += 'Casual'
        else:
            text += 'Realistic'
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print(menu_con, x, 21, text)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        if campaign.casual_commander:
            libtcod.console_print(menu_con, x, 23, 'If your tank commander is killed or sent home due to injuries, he will be replaced')
            libtcod.console_print(menu_con, x, 24, ' by a new crewman and you may continue playing.')
        else:
            libtcod.console_print(menu_con, x, 23, 'If your tank commander is killed or sent home due to injuries, your campaign is over.')
        # campaign start date setting
        text = 'Campaign Start Date: '
        day = campaign.days[campaign.start_date]
        year = int(day['year'])
        month = int(day['month'])
        date = int(day['date'])
        text += campaign.GetDate(lookup_date = [year, month, date])
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print(menu_con, x, 26, text)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        libtcod.console_print(menu_con, x, 28, '[%cA/D%c] to change starting date of campaign.'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cC/T%c] Toggle Setting'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Continue'%HIGHLIGHT)
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        refresh = False
        while not refresh:
            libtcod.console_flush()
            # get input from user
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # exit right away
            if libtcod.console_is_window_closed(): sys.exit()
            # continue with these settings
            if key.vk == libtcod.KEY_ENTER:
                exit_menu = True
                break
            # get pressed key
            key_char = chr(key.c)
            if key_char in ['t', 'T']:
                campaign.unlimited_tank_selection = not campaign.unlimited_tank_selection
                refresh = True
            elif key_char in ['c', 'C']:
                campaign.casual_commander = not campaign.casual_commander
                refresh = True
            elif key_char in ['a', 'A']:
                # move start date back one day, wrapping to the last day
                if campaign.start_date == 0:
                    campaign.start_date = len(campaign.days) - 1
                else:
                    campaign.start_date -= 1
                refresh = True
            elif key_char in ['d', 'D']:
                # move start date forward one day, wrapping to the first day
                if campaign.start_date < len(campaign.days) - 1:
                    campaign.start_date += 1
                else:
                    campaign.start_date = 0
                refresh = True
# allow player to choose from a list of available campaigns to play
def ChooseCampaign():
    """Let the player pick a campaign from the available campaign files.

    Scans DATAPATH for .xml files and reads each one's <name> and
    <description> elements for display. Returns the filename of the
    selected campaign, or None when no campaign files were found.
    Relies on module globals: menu_con, key, mouse.
    """
    # build list of available campaigns: (filename, name, description)
    campaign_list = []
    filenames = next(os.walk(DATAPATH))[2]
    for f in filenames:
        if not f.endswith('.xml'):
            continue
        # try to parse this xml file
        test = xml.parse(DATAPATH + f)
        if test is None:
            continue
        # get campaign name and description
        root = test.getroot()
        name = root.find('name').text
        description = root.find('description').text
        campaign_list.append((f, name, description))
    if not campaign_list:
        PopUp('Error: No Campaign files found!')
        return None
    # sort the list reverse alphabetically (by filename)
    campaign_list.sort(reverse=True)
    # select first one in list by default
    selected = 0
    libtcod.console_clear(0)
    exit_menu = False
    while not exit_menu:
        # generate and display menu
        libtcod.console_clear(menu_con)
        libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 1,
            libtcod.BKGND_NONE, libtcod.CENTER, 'Select a Campaign')
        libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
        # display info on selected campaign
        (f, name, description) = campaign_list[selected]
        libtcod.console_set_default_foreground(menu_con, SELECTED_COLOR)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, 12,
            libtcod.BKGND_NONE, libtcod.CENTER, name)
        libtcod.console_set_default_foreground(menu_con, libtcod.white)
        lines = wrap(description, 80, subsequent_indent = ' ')
        y = 14
        for line in lines:
            libtcod.console_print(menu_con, 35, y, line)
            y += 1
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-3,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cA/D/Left/Right%c] Select Different Campaign'%HIGHLIGHT)
        libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT-2,
            libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Continue with Selected Campaign'%HIGHLIGHT)
        libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
        refresh = False
        while not refresh:
            libtcod.console_flush()
            # get input from user
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # exit right away
            if libtcod.console_is_window_closed(): sys.exit()
            # get pressed key
            key_char = chr(key.c)
            if key.vk == libtcod.KEY_LEFT or key_char in ['a', 'A']:
                # previous campaign, wrapping around
                selected = (selected - 1) % len(campaign_list)
                refresh = True
            elif key.vk == libtcod.KEY_RIGHT or key_char in ['d', 'D']:
                # next campaign, wrapping around
                selected = (selected + 1) % len(campaign_list)
                refresh = True
            # return selected campaign filename (index directly rather than
            # searching the list with a counter)
            elif key.vk == libtcod.KEY_ENTER:
                return campaign_list[selected][0]
# set up and start a new campaign
def NewCampaign():
    """Set up and start a brand-new campaign.

    Creates the module-global campaign and tank objects, lets the player
    choose a campaign file and settings, writes the campaign journal
    header, generates the player tank and its crew, then enters the
    campaign calendar via RunCalendar(False).
    """
    global tank, battle, campaign
    TutorialMessage('welcome')
    # create a new campaign object and an empty battle pointer
    campaign = Campaign()
    battle = None
    # allow player to select from available campaigns
    campaign.campaign_file = ChooseCampaign()
    # write the header entries for the campaign journal
    WriteJournal('*** Armoured Commander Campaign Journal ***')
    WriteJournal('Program Version: ' + VERSION + SUBVERSION)
    WriteJournal('')
    WriteJournal('Campaign Started at ' + datetime.now().strftime("%I:%M%p on %B %d, %Y"))
    WriteJournal('')
    # load basic campaign info into campaign object
    LoadCampaignInfo()
    WriteJournal('Campaign Name: ' + campaign.campaign_name)
    # allow player to select campaign settings
    SetCampaignSettings()
    # record the chosen settings in the journal
    text = ' Unlimited Tank Selection: '
    if campaign.unlimited_tank_selection:
        text += 'On'
    else:
        text += 'Off'
    WriteJournal(text)
    text = ' Casual Commander Replacement: '
    if campaign.casual_commander:
        text += 'On'
    else:
        text += 'Off'
    WriteJournal(text)
    WriteJournal('')
    # set starting date now
    AdvanceDay()
    # if tank selection is unlimited, allow player to select tank model
    tank_type = None
    if campaign.unlimited_tank_selection:
        tank_type = ShowTankInfo(select_tank=True)
    # create a new player tank object
    tank = PlayerTank(tank_type)
    SetVehicleStats(tank)
    # get or generate tank name
    GetTankName()
    WriteJournal('Starting Tank: ' + tank.unit_type + ' "' + tank.name + '"')
    # set up player tank crew
    # Commander
    crewman = SpawnCrewMember(None, 'Commander', 3)
    crewman.SetLevel(3)
    # get or generate commander name
    GetCommanderName(crewman)
    PopUp(crewman.name + ' is assigned as your tank Commander. You may add/upgrade ' +
        'his skills now or save your skill points for later.')
    # allow player to spend skill points on commander
    ShowSkills(crewman)
    # Gunner
    crewman = SpawnCrewMember(None, 'Gunner', 2)
    crewman.SetLevel(2)
    PopUp(crewman.name + ' is assigned as your Gunner.')
    ShowSkills(crewman)
    # Loader
    crewman = SpawnCrewMember(None, 'Loader', 1)
    PopUp(crewman.name + ' is assigned as your Loader.')
    ShowSkills(crewman)
    # Driver
    crewman = SpawnCrewMember(None, 'Driver', 0)
    PopUp(crewman.name + ' is assigned as your Driver.')
    ShowSkills(crewman)
    # some tank models have no assistant driver
    if 'no_asst_driver' not in tank.stats:
        crewman = SpawnCrewMember(None, 'Asst. Driver', 0)
        PopUp(crewman.name + ' is assigned as your Assistant Driver.')
        ShowSkills(crewman)
    # start the campaign calendar
    RunCalendar(False)
# start or continue a campaign day of action
def InitCampaignDay(load=False):
    """Start (or, with load=True, resume) a campaign day of action.

    load=False: resets the campaign for a new day, runs resupply and the
    travel-to-start-area sequence, generates the campaign day map, and
    handles the mission-specific opening (counterattack wave or a free
    adjacent-area check). load=True: restores consoles/pointers from a
    saved game instead. Either way control then passes to DoCampaignDay();
    on sunset the day's VP are banked into the campaign total.
    Relies on module globals: campaign, tank, battle, con, mouse.
    """
    global campaign, tank, battle
    # if we're loading from a saved game
    if load:
        # reset the selected crew pointer
        if campaign.selected_crew is not None:
            campaign.selected_crew = tank.crew[0]
        # reset the exit flag
        campaign.exiting = False
        # paint the campaign map console
        PaintCampaignMap()
        # set up other consoles
        UpdateCActionCon()
        UpdateCInfoCon(mouse.cx, mouse.cy)
        libtcod.console_clear(con)
    else:
        # record the day of combat
        campaign.AddStat('Days of Combat', 1)
        # reset the camapaign object for a new day
        campaign.ResetForNewDay()
        # set flag that day is in progress
        campaign.day_in_progress = True
        # create an empty battle object
        battle = None
        # set time to dawn
        (campaign.hour, campaign.minute) = campaign.GetSunrise()
        # allow player to set up tank
        campaign.resupply = True
        campaign.GenerateAmmo()
        # pop right into the main gun ammo menu
        CampaignViewTank(load_ammo_menu=True)
        campaign.resupply = False
        campaign.ClearAmmo()
        # display mission type briefing if first time for this mission
        if campaign.scen_type == 'Advance':
            TutorialMessage('advance_mission')
        elif campaign.scen_type == 'Battle':
            TutorialMessage('battle_mission')
        elif campaign.scen_type == 'Counterattack':
            TutorialMessage('counterattack_mission')
        # head to start area: apply time and ammo usage
        roll = Roll1D10()
        hours_elapsed = int(floor(roll / 2)) + 1
        ammo_expended = roll * 2
        # don't expend more HE shells than we have
        if tank.general_ammo['HE'] < ammo_expended:
            ammo_expended = tank.general_ammo['HE']
        tank.general_ammo['HE'] -= ammo_expended
        campaign.hour += hours_elapsed
        UpdateDateCon()
        # display message with time and ammo expended
        libtcod.console_clear(con)
        libtcod.console_set_alignment(con, libtcod.CENTER)
        text = 'Heading to your start area took ' + str(hours_elapsed) + ' hour'
        if hours_elapsed > 1:
            text += 's'
        text += ' and your tank expended ' + str(ammo_expended) + ' HE round'
        if ammo_expended > 1 or ammo_expended == 0:
            text += 's'
        text += '.'
        libtcod.console_print(con, SCREEN_XM, 21, text)
        libtcod.console_print(con, SCREEN_XM, SCREEN_HEIGHT-16, 'Press Enter to continue')
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
        libtcod.console_flush()
        WaitForEnter()
        # reset and clear console
        libtcod.console_clear(con)
        libtcod.console_print(con, SCREEN_XM, int(SCREEN_HEIGHT/2), 'Generating Campaign Map...')
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
        libtcod.console_flush()
        libtcod.console_set_default_background(con, libtcod.black)
        libtcod.console_set_alignment(con, libtcod.LEFT)
        # generate Campaign Day Map
        # generate a new map, retrying until generation succeeds
        good_map = False
        while not good_map:
            campaign.nodes = []
            good_map = GenerateCampaignMap()
        # paint the campaign map console
        PaintCampaignMap()
        # set up other consoles
        UpdateCActionCon()
        UpdateCInfoCon(mouse.cx, mouse.cy)
        libtcod.console_clear(con)
        # determine whether tank is lead tank for the day
        tank.SetLeadTank()
        # make sure player node is on screen
        campaign.MoveViewTo(campaign.day_map.player_node)
        # counterattack missions start with one wave of enemy attack
        if campaign.scen_type == 'Counterattack':
            campaign.DoEnemyAdvance()
        # all other missions allow an initial check of an adjacent area, no time cost
        else:
            SetupCheckArea()
            campaign.free_check = True
            PopUp('Select an adjacent area to check for enemy resistance')
            RenderCampaign()
        # save the game before continuing
        SaveGame()
    DoCampaignDay()
    # if we just finished a campaign day
    if campaign.sunset:
        # add VP earned from day to total
        campaign.vp += campaign.day_vp
        # set highest one-day VP score this month if higher
        if campaign.day_vp > campaign.record_day_vp:
            campaign.record_day_vp = campaign.day_vp
        campaign.day_vp = 0
        campaign.day_in_progress = False
# handle campaign actions for a day of action in the campaign
def DoCampaignDay():
    """Main input loop for a campaign day of action.

    Dispatches player input according to campaign.input_mode ('Check
    Adjacent Area', 'Move Into Adjacent Area', 'Call in Strike', or none),
    plus global function keys, map scrolling, and debug commands. Returns
    when campaign.exiting or campaign.sunset is set.
    Relies on module globals: campaign, battle, tank, key, mouse.
    """
    global key, mouse
    # cancel a campaign action in progress
    def CancelAction():
        campaign.input_mode = 'None'
        UpdateCActionCon()
        UpdateCOverlay()
        RenderCampaign()
    # make sure player node is on screen
    campaign.MoveViewTo(campaign.day_map.player_node)
    # if we are loading a saved game and we were in an encounter, init it now
    if battle is not None:
        InitEncounter(load=True)
        if campaign.exiting: return
        PostEncounter()
    else:
        # draw consoles and screen for first time
        UpdateDateCon()
        UpdateCActionCon()
        UpdateCOverlay()
        RenderCampaign()
    exit_campaign = False
    while not exit_campaign:
        # check for exit flag
        if campaign.exiting or campaign.sunset:
            return
        # exit right away
        if libtcod.console_is_window_closed():
            sys.exit()
        # check for keyboard or mouse input
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
        # do mouse stuff first
        mx, my = mouse.cx, mouse.cy
        # check mouse position against last recorded one
        if (mx, my) != campaign.mouseover:
            campaign.mouseover = (mx, my)
            # update campaign info console
            UpdateCInfoCon(mx, my)
            RenderCampaign()
        # campaign menu
        if key.vk == libtcod.KEY_ESCAPE:
            if CampaignMenu():
                campaign.exiting = True
                SaveGame()
                return
        # help display
        elif key.vk == libtcod.KEY_F1 or key.vk == libtcod.KEY_1:
            ShowHelp()
        # tank info display
        elif key.vk == libtcod.KEY_F2 or key.vk == libtcod.KEY_2:
            ShowTankInfo()
        # crew info display
        elif key.vk == libtcod.KEY_F3 or key.vk == libtcod.KEY_3:
            ShowCrewInfo()
        # settings
        elif key.vk == libtcod.KEY_F4 or key.vk == libtcod.KEY_4:
            ShowSettings()
        # campaign stats
        elif key.vk == libtcod.KEY_F5 or key.vk == libtcod.KEY_5:
            ShowCampaignStats()
        # screenshot
        elif key.vk == libtcod.KEY_F6 or key.vk == libtcod.KEY_6:
            SaveScreenshot()
        # sound toggle
        elif key.vk == libtcod.KEY_F7 or key.vk == libtcod.KEY_7:
            campaign.sounds = not campaign.sounds
            if campaign.sounds:
                PopUp("Sound turned on")
            else:
                PopUp("Sound turned off")
        # get pressed key
        key_char = chr(key.c)
        # debug commands: end campaign day
        if DEBUG:
            if key_char == 'e' and (key.lctrl or key.rctrl):
                (new_hour, new_minute) = campaign.GetSunset()
                campaign.hour = new_hour
                campaign.minute = new_minute
                campaign.CheckSunset()
            # generate a new campaign day map
            elif key_char == 'g' and (key.lctrl or key.rctrl):
                # generate a new map
                good_map = False
                while not good_map:
                    campaign.nodes = []
                    good_map = GenerateCampaignMap()
                PaintCampaignMap()
                UpdateCOverlay()
                RenderCampaign()
        # map scrolling
        if key_char in ['w', 'W'] or key.vk == libtcod.KEY_UP:
            campaign.c_map_y -= 10
            campaign.CheckYOffset()
            UpdateCInfoCon(mx, my)
            RenderCampaign()
        elif key_char in ['s', 'S'] or key.vk == libtcod.KEY_DOWN:
            campaign.c_map_y += 10
            campaign.CheckYOffset()
            UpdateCInfoCon(mx, my)
            RenderCampaign()
        # if we're in check adjacent area mode
        if campaign.input_mode == 'Check Adjacent Area':
            if key.vk == libtcod.KEY_TAB:
                SelectNextArea()
                campaign.MoveViewTo(campaign.selected_node)
            elif key.vk == libtcod.KEY_ENTER:
                campaign.MoveViewTo(campaign.selected_node)
                CheckArea()
                campaign.CheckSunset()
                campaign.RandomCampaignEvent()
            # cancel action if not free check
            elif key.vk == libtcod.KEY_BACKSPACE and not campaign.free_check:
                CancelAction()
        # we're in move to area mode
        elif campaign.input_mode == 'Move Into Adjacent Area':
            if key.vk == libtcod.KEY_TAB:
                SelectNextArea()
                campaign.MoveViewTo(campaign.selected_node)
            elif key.vk == libtcod.KEY_ENTER:
                campaign.MoveViewTo(campaign.selected_node)
                MoveArea()
                if campaign.exiting: return
                campaign.CheckSunset()
                # in case we've moved to a new map, shift view to selected area
                campaign.MoveViewTo(campaign.selected_node)
                # we don't check for a RandomCampaignEvent() here because it's
                # handled by PostEncounter
            # cancel action
            elif key.vk == libtcod.KEY_BACKSPACE:
                CancelAction()
        # call strike mode
        elif campaign.input_mode == 'Call in Strike':
            if key.vk == libtcod.KEY_TAB:
                SelectNextArea()
                campaign.MoveViewTo(campaign.selected_node)
            elif key_char in ['a', 'r']:
                campaign.MoveViewTo(campaign.selected_node)
                CallStrike(key_char)
                campaign.CheckSunset()
                campaign.RandomCampaignEvent()
            # cancel action
            elif key.vk == libtcod.KEY_BACKSPACE:
                CancelAction()
        # otherwise, we can choose a new campaign action
        else:
            # only allowed if not counterattack mission
            if campaign.scen_type != 'Counterattack':
                if key_char in ['c', 'C']:
                    SetupCheckArea()
                    campaign.MoveViewTo(campaign.selected_node)
                elif key_char in ['a', 'A']:
                    SetupCallStrike()
                    campaign.MoveViewTo(campaign.selected_node)
            # only allowed in counterattack
            else:
                # await counterattack
                if key_char in ['a', 'A']:
                    AwaitEnemy()
            # allowed in any mission (with some restrictions checked in function)
            if key_char in ['e', 'E']:
                SetupMoveArea()
                campaign.MoveViewTo(campaign.selected_node)
            elif key_char in ['r', 'R']:
                if PopUp('Spend 15 mins. trying to get resupplied?', confirm=True):
                    SetupResupply()
                    # if sunset has hit, don't trigger a battle
                    campaign.CheckSunset()
                    if campaign.sunset:
                        return
                    if campaign.scen_type == 'Counterattack':
                        AwaitEnemy(no_time=True)
                    else:
                        RenderCampaign()
            elif key_char in ['v', 'V']:
                CampaignViewTank()
                RenderCampaign()
            elif key_char in ['h', 'H']:
                # check that we can head home
                if [i for i in ENDING_DAMAGES if i in tank.damage_list]:
                    if PopUp('Return to HQ and allow the rest of your ' +
                        'battlegroup to continue on alone?',
                        confirm=True):
                        campaign.HeadHome()
        libtcod.console_flush()
##########################################################################################
# Images, Sound, and Music #
##########################################################################################
# load an image, fix its path and filename, and return it
def LoadImage(image_name):
    """Load the named PNG image from the data directory and return it."""
    return libtcod.image_load(DATAPATH + image_name + '.png')
# return the proper sound file to use for the given main gun type
def GetFiringSound(gun_type):
    """Return the name of the firing sound effect for the given main gun
    type, or None when no sound is defined for it."""
    sound_for_gun = {
        '20L': '20_mm_gun', '50L': '20_mm_gun',
        '75': '75_mm_gun', '75L': '75_mm_gun', '75LL': '75_mm_gun',
        '76L': '76_mm_gun', '76LL': '76_mm_gun',
        '88L': '88_mm_gun', '88LL': '88_mm_gun',
    }
    return sound_for_gun.get(gun_type)
# try to init SDL mixer and load sound files
def InitMixer():
    """Try to initialize the SDL mixer and pre-load all sound effect files.

    On failure to open the audio device, clears the MIXER_ACTIVE flag so
    the rest of the game runs silently. Loaded chunks are stored in the
    module-global SOUNDS dict keyed by sound name.
    """
    # NOTE: the original also declared 'global mixer_active', a vestigial
    # lowercase duplicate of MIXER_ACTIVE that was never assigned; removed.
    global MIXER_ACTIVE, SOUNDS
    if not MIXER_ACTIVE: return
    SOUND_LIST = ['20_mm_gun', '75_mm_gun', '76_mm_gun', '88_mm_gun', 'main_gun_misfire',
        'menu_select', 'aa_mg_firing', 'armour_save',
        'arty_firing', 'bow_mg_firing', 'coax_mg_firing', 'dice_roll',
        'engine_noise', 'german_rifle_fire', 'german_mg_fire', 'infantry_moving',
        'panzerfaust_firing', 'radio', 'screenshot', 'shell_move',
        'sherman_movement', 'smoke_hit', 'tank_knocked_out',
        'hatch_open', 'hatch_close', 'he_hit', 'ap_hit', 'main_gun_miss',
        'new_skill'
    ]
    mixer.Mix_Init(mixer.MIX_INIT_OGG)
    # 44.1 kHz stereo with a 1024-sample buffer; -1 signals failure
    if mixer.Mix_OpenAudio(44100, mixer.MIX_DEFAULT_FORMAT, 2, 1024) == -1:
        print('Unable to init sounds.')
        MIXER_ACTIVE = False
        return
    mixer.Mix_AllocateChannels(16)
    # load the sounds into memory
    for sound_name in SOUND_LIST:
        SOUNDS[sound_name] = mixer.Mix_LoadWAV(('sounds' + os.sep + sound_name + '.wav').encode('ascii'))
    print('Sound mixer initialized.')
# play a sound
def PlaySound(sound_name):
    """Play the named sound effect on the first free mixer channel.

    Silently does nothing when the mixer is inactive, when sounds are
    disabled in the campaign settings, or when the named sound was never
    loaded (or failed to load) into SOUNDS.
    """
    if not MIXER_ACTIVE: return
    if campaign is not None:
        if not campaign.sounds:
            return
    # idiom fix: 'x not in y' rather than 'not x in y'
    if sound_name not in SOUNDS: return
    if SOUNDS[sound_name] is None: return
    # -1: let SDL_mixer pick any free channel; 0: play once, no looping
    mixer.Mix_PlayChannel(-1, SOUNDS[sound_name], 0)
##########################################################################################
# Main Menu #
##########################################################################################
# display game credits
def DisplayCredits():
    """Display the scrolling game credits screen.

    Scrolls CREDITS_TEXT upward one line every 0.75 seconds, wrapping
    after the full text plus 40 blank lines has passed. P pauses the
    scroll; Enter or Escape returns to the caller.
    Relies on module globals: con, key, mouse, CREDITS_TEXT.
    """
    current_line = 0
    paused = False
    # animation timing
    animate_time = time.time()
    exit_menu = False
    while not exit_menu:
        refresh = False
        libtcod.console_clear(con)
        libtcod.console_print_frame(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,
            clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
        libtcod.console_set_alignment(con, libtcod.CENTER)
        libtcod.console_set_default_foreground(con, MENU_TITLE_COLOR)
        libtcod.console_print(con, SCREEN_XM, 3, '-- Credits --')
        libtcod.console_set_default_foreground(con, libtcod.white)
        # draw the visible window of the credits text
        n = 0
        for line in CREDITS_TEXT:
            y = SCREEN_HEIGHT - 10 - (current_line - n)
            n += 1
            # skip lines that have scrolled off the top, stop below the bottom
            if y < 10: continue
            if y > SCREEN_HEIGHT-9: break
            libtcod.console_print(con, SCREEN_XM, y, line)
        libtcod.console_print(con, SCREEN_XM, SCREEN_HEIGHT-4, '[%cP%c] to Pause'%HIGHLIGHT)
        libtcod.console_print(con, SCREEN_XM, SCREEN_HEIGHT-3, '[%cEnter or ESC%c] to Return'%HIGHLIGHT)
        libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
        libtcod.console_flush()
        while not refresh:
            # get input from user
            libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
            # exit right away
            if libtcod.console_is_window_closed():
                sys.exit()
            elif key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE:
                return
            elif chr(key.c) in ['p', 'P']:
                paused = not paused
            # update animation
            if not paused:
                if time.time() - animate_time >= 0.75:
                    animate_time = time.time()
                    current_line += 1
                    # wrap around after the text has fully scrolled past
                    if current_line >= len(CREDITS_TEXT) + 40:
                        current_line = 0
                    refresh = True
# display a list of high scores
def DisplayHighScores():
    """Display the table of high scores from the 'bones' shelve file.

    Each entry in bones.score_list is a tuple of (tank name, commander
    name, VP score, outcome text, unlimited-tank flag, casual-commander
    flag, campaign name). Returns silently if the bones file cannot be
    read. Relies on module globals: con, key, mouse.
    """
    try:
        # open bones file
        save = shelve.open('bones')
        bones = save['bones']
        save.close()
    # narrowed from a bare except so SystemExit / KeyboardInterrupt propagate
    except Exception:
        print('ERROR: Could not open bones file.')
        return
    libtcod.console_clear(con)
    libtcod.console_print_frame(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,
        clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
    libtcod.console_set_alignment(con, libtcod.CENTER)
    libtcod.console_set_default_foreground(con, MENU_TITLE_COLOR)
    libtcod.console_print(con, SCREEN_XM, 2, '-- High Scores --')
    libtcod.console_set_default_foreground(con, libtcod.white)
    libtcod.console_set_alignment(con, libtcod.LEFT)
    # column headers
    libtcod.console_print(con, 10, 6, 'Campaign')
    libtcod.console_print(con, 30, 6, 'Tank')
    libtcod.console_print(con, 50, 6, 'Commander')
    libtcod.console_print(con, 70, 6, 'VP Score')
    libtcod.console_print(con, 84, 6, 'Outcome')
    libtcod.console_hline(con, 10, 7, 120, flag=libtcod.BKGND_DEFAULT)
    y = 9
    # renamed loop variable from 'tank' to avoid shadowing the module-global
    # player tank object
    for (tank_name, commander, score, outcome, ts, cc, campaign_name) in bones.score_list:
        libtcod.console_print(con, 10, y, campaign_name)
        libtcod.console_print(con, 30, y, tank_name)
        libtcod.console_print(con, 50, y, commander)
        text = str(score)
        # add notes for campaign options: U = unlimited tanks, C = casual commander
        if ts or cc:
            text += ' ('
            if ts: text += 'U'
            if cc: text += 'C'
            text += ')'
        libtcod.console_print(con, 70, y, text)
        libtcod.console_print(con, 84, y, outcome)
        y += 1
    libtcod.console_set_alignment(con, libtcod.CENTER)
    libtcod.console_print(con, SCREEN_XM, SCREEN_HEIGHT-6, '[%cEnter%c] Continue'%HIGHLIGHT)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
    libtcod.console_flush()
    WaitForEnter()
    PlaySound('menu_select')
# Display the main menu for the game
def MainMenu():
# grab a random entry from the graveyard to display in the main menu
def GetRandomGrave():
    """Return a random entry from the bones-file graveyard, or None if the
    file cannot be read or the graveyard is empty."""
    try:
        save = shelve.open('bones')
        bones = save['bones']
        save.close()
        if len(bones.graveyard) == 0:
            return None
        return random.choice(bones.graveyard)
    # narrowed from a bare except so SystemExit / KeyboardInterrupt propagate
    except Exception:
        print('ERROR: Could not open bones file.')
        return None
# update the cloud layer console
def UpdateCloudLayer():
    """Scroll the main-menu cloud layer one column left and draw the
    rightmost column, continuing any cloud in progress or randomly
    starting a new one.

    Cloud state lives in module globals: cloud_altitude, cloud_height,
    cloud_length (total), and c_cloud_length (columns left to draw).
    """
    global cloud_altitude, cloud_height, cloud_length, c_cloud_length
    # shift console content over one column to the left
    libtcod.console_set_key_color(title_cloud_layer, libtcod.black)
    libtcod.console_blit(title_cloud_layer, 1, 0, SCREEN_WIDTH-1,
        30, title_cloud_layer, 0, 0)
    libtcod.console_set_key_color(title_cloud_layer, KEY_COLOR)
    # blank the newly exposed rightmost column
    libtcod.console_set_default_background(title_cloud_layer, KEY_COLOR)
    libtcod.console_rect(title_cloud_layer, SCREEN_WIDTH-1,
        0, 1, 30, False, flag=libtcod.BKGND_SET)
    libtcod.console_set_default_background(title_cloud_layer, libtcod.white)
    # see if we need to continue drawing a cloud
    if c_cloud_length > 0:
        h = cloud_height
        # taper the first and last column of the cloud
        if c_cloud_length == cloud_length or c_cloud_length == 1:
            h -= 2
        libtcod.console_rect(title_cloud_layer, SCREEN_WIDTH-1,
            30 - cloud_altitude - int(h/2), 1, h, False,
            flag=libtcod.BKGND_SET)
        c_cloud_length -= 1
    # otherwise, chance of new cloud
    elif libtcod.random_get_int(0, 1, 14) == 1:
        # generate a new altitude, height, length
        cloud_altitude = libtcod.random_get_int(0, 10, 29)
        cloud_height = random.choice([3, 5, 7])
        cloud_length = libtcod.random_get_int(0, 10, 17)
        c_cloud_length = cloud_length
# randomly change the height of the ground being drawn
def GetNewGroundHeight(current_height):
roll = libtcod.random_get_int(0, 1, 10)
if roll == 1 and current_height > 3:
return current_height - 1
elif roll == 10 and current_height < 8:
return current_height + 1
return current_height
# draw the main menu image layers to the main console
def DrawLayers():
libtcod.console_blit(title_background, 0, 0, 149, 30, con, 0, 2)
libtcod.console_blit(title_cloud_layer, 0, 0, 149, 30, con, 0, 2)
libtcod.console_blit(title_ground_layer, 0, 0, 149, 30, con, 0, 2)
libtcod.console_blit(title, 0, 0, 80, 25, con, 34, 4)
# Start of actual Main Menu stuff:
# generate consoles for main menu
# generate background console
title_background = libtcod.console_new(149, 30)
COLOR1 = libtcod.Color(255,255,114)
COLOR2 = libtcod.Color(193,8,3)
for y in range(0, 30):
color = libtcod.color_lerp(COLOR1, COLOR2, float(y) / 30.0)
libtcod.console_set_default_background(title_background, color)
libtcod.console_rect(title_background, 0, y, 149, 1, False, flag=libtcod.BKGND_SET)
# generate title text console: "ARMOURED COMMANDER"
title = libtcod.console_new(80, 25)
libtcod.console_set_key_color(title, KEY_COLOR)
libtcod.console_set_default_background(title, KEY_COLOR)
libtcod.console_clear(title)
libtcod.console_set_default_background(title, libtcod.black)
y = 0
for line in TITLE:
x = 0
black = False
for width in line:
if black:
libtcod.console_rect(title, x, y, width, 1, False, flag=libtcod.BKGND_SET)
black = False
else:
black = True
x += width
y += 1
# generate cloud layer console
title_cloud_layer = libtcod.console_new(SCREEN_WIDTH, 30)
libtcod.console_set_default_background(title_cloud_layer, KEY_COLOR)
libtcod.console_clear(title_cloud_layer)
libtcod.console_set_key_color(title_cloud_layer, KEY_COLOR)
libtcod.console_set_default_background(title_cloud_layer, libtcod.white)
global cloud_altitude, cloud_height, cloud_length, c_cloud_length
c_cloud_length = 0
for x in range(0, SCREEN_WIDTH):
UpdateCloudLayer()
# generate ground layer console
title_ground_layer = libtcod.console_new(SCREEN_WIDTH, 30)
libtcod.console_set_default_background(title_ground_layer, KEY_COLOR)
libtcod.console_clear(title_ground_layer)
libtcod.console_set_default_background(title_ground_layer, TITLE_GROUND_COLOR)
libtcod.console_set_key_color(title_ground_layer, KEY_COLOR)
ground_height = libtcod.random_get_int(0, 3, 8)
for x in range(0, SCREEN_WIDTH):
libtcod.console_rect(title_ground_layer, x, 30-ground_height, 1,
ground_height, False, flag=libtcod.BKGND_SET)
ground_height = GetNewGroundHeight(ground_height)
# animation timing
ground_click = time.time()
cloud_click = time.time()
# grab a random graveyard entry to display
tombstone = GetRandomGrave()
exit_game = False
while not exit_game:
# flag to say that there's a good saved game that can (presumably) be loaded
good_saved_game = False
# display main menu
libtcod.console_set_default_background(con, libtcod.black)
libtcod.console_clear(con)
libtcod.console_set_alignment(con, libtcod.CENTER)
libtcod.console_print(con, SCREEN_XM, 33, 'The World War II Tank Commander Roguelike')
if os.path.exists('savegame.dat'):
libtcod.console_print(con, SCREEN_XM, 36, '[%cC%c]ontinue Campaign:'%
HIGHLIGHT)
# get info from saved game
save = shelve.open('savegame')
game_info = save['info']
save.close()
# check saved game version against current
# also checks against a list of compatible previous versions
if game_info.game_version != VERSION and game_info.game_version not in COMPATIBLE_VERSIONS:
libtcod.console_set_default_foreground(con, libtcod.light_red)
text = 'Saved game does not match current game version'
libtcod.console_print(con, SCREEN_XM, 38, text)
text = 'You must reload this saved game with version ' + game_info.game_version
libtcod.console_print(con, SCREEN_XM, 39, text)
else:
good_saved_game = True
libtcod.console_set_default_foreground(con, libtcod.light_grey)
libtcod.console_print(con, SCREEN_XM, 38, game_info.campaign_name)
libtcod.console_print(con, SCREEN_XM, 39, game_info.commander_name)
libtcod.console_print(con, SCREEN_XM, 40, game_info.tank_name)
libtcod.console_print(con, SCREEN_XM, 41, game_info.current_date)
libtcod.console_set_default_foreground(con, libtcod.white)
libtcod.console_print(con, SCREEN_XM, 43, '[%cN%c]ew Campaign'%
HIGHLIGHT)
libtcod.console_print(con, SCREEN_XM, 44, '[%cH%c]igh Scores'%
HIGHLIGHT)
libtcod.console_print(con, SCREEN_XM, 45, '[%cV%c]iew Credits'%
HIGHLIGHT)
libtcod.console_print(con, SCREEN_XM, 46, '[%cQ%c]uit'%
HIGHLIGHT)
libtcod.console_set_default_foreground(con, libtcod.light_grey)
libtcod.console_print_ex(con, SCREEN_XM, SCREEN_HEIGHT-7, libtcod.BKGND_NONE, libtcod.CENTER,
"version " + VERSION + SUBVERSION)
text = 'Copyright 2015-2017 Gregory Adam Scott'
libtcod.console_print_ex(con, SCREEN_XM, SCREEN_HEIGHT-4, libtcod.BKGND_NONE, libtcod.CENTER, text)
text = 'Free Software under the GNU General Public License'
libtcod.console_print_ex(con, SCREEN_XM, SCREEN_HEIGHT-3, libtcod.BKGND_NONE, libtcod.CENTER, text)
libtcod.console_print_ex(con, SCREEN_XM, SCREEN_HEIGHT-2, libtcod.BKGND_NONE, libtcod.CENTER, WEBSITE)
libtcod.console_print_ex(con, SCREEN_XM, SCREEN_HEIGHT-1, libtcod.BKGND_NONE, libtcod.CENTER, GITHUB)
# display ascii poppy
libtcod.console_set_default_foreground(con, libtcod.red)
libtcod.console_set_alignment(con, libtcod.LEFT)
libtcod.console_print(con, 4, SCREEN_HEIGHT-6, ",--.")
libtcod.console_print(con, 3, SCREEN_HEIGHT-5, ".\\ /.")
libtcod.console_print(con, 2, SCREEN_HEIGHT-4, "( () )")
libtcod.console_print(con, 3, SCREEN_HEIGHT-3, "`/ \\'")
libtcod.console_print(con, 4, SCREEN_HEIGHT-2, "`--'")
libtcod.console_set_default_foreground(con, libtcod.white)
libtcod.console_set_alignment(con, libtcod.CENTER)
if datetime.now().month == 11 and datetime.now().day == 11:
text = 'NEVER AGAIN'
else:
text = 'In Memoriam'
libtcod.console_print(con, 16, SCREEN_HEIGHT-3, text)
# New: display gravestone info if any
if tombstone is not None:
libtcod.console_set_default_background(con, libtcod.dark_grey)
libtcod.console_set_default_foreground(con, libtcod.black)
y = 8
libtcod.console_rect(con, SCREEN_WIDTH-28, SCREEN_HEIGHT-y-2, 16, 1, False, flag=libtcod.BKGND_SET)
libtcod.console_rect(con, SCREEN_WIDTH-29, SCREEN_HEIGHT-y-1, 18, 1, False, flag=libtcod.BKGND_SET)
libtcod.console_rect(con, SCREEN_WIDTH-30, SCREEN_HEIGHT-y, 20, 8, False, flag=libtcod.BKGND_SET)
for text in tombstone:
libtcod.console_print(con, SCREEN_WIDTH-20, SCREEN_HEIGHT-y, text)
y -= 1
libtcod.console_set_default_background(con, libtcod.black)
libtcod.console_set_default_foreground(con, libtcod.white)
# do an initial draw of the animation layers
DrawLayers()
refresh_menu = False
while not refresh_menu and not exit_game:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
# exit right away
if libtcod.console_is_window_closed():
sys.exit()
key_char = chr(key.c)
if key_char in ['c', 'C']:
if os.path.exists('savegame.dat') and good_saved_game:
PlaySound('menu_select')
RunCalendar(True)
tombstone = GetRandomGrave()
refresh_menu = True
elif key_char in ['n', 'N']:
PlaySound('menu_select')
# if there's already a savegame, make sure we want to replace it
if os.path.exists('savegame.dat'):
if PopUp('Starting a new campaign will erase the currently saved one in progress. Are you sure?', confirm=True, skip_update=True):
NewCampaign()
else:
NewCampaign()
tombstone = GetRandomGrave()
refresh_menu = True
elif key_char in ['h', 'H']:
PlaySound('menu_select')
DisplayHighScores()
refresh_menu = True
elif key_char in ['v', 'V']:
PlaySound('menu_select')
DisplayCredits()
refresh_menu = True
elif key_char in ['q', 'Q']:
exit_game = True
# update animated ground layer
if time.time() - ground_click >= 0.15:
animation_click = time.time()
# re-draw the ground layer image
libtcod.console_set_key_color(title_ground_layer, libtcod.black)
libtcod.console_blit(title_ground_layer, 1, 0, SCREEN_WIDTH-1,
30, title_ground_layer, 0, 0)
libtcod.console_set_key_color(title_ground_layer, KEY_COLOR)
libtcod.console_set_default_background(title_ground_layer, KEY_COLOR)
libtcod.console_rect(title_ground_layer, SCREEN_WIDTH-1,
0, 1, 30-ground_height, False,
flag=libtcod.BKGND_SET)
libtcod.console_set_default_background(title_ground_layer, TITLE_GROUND_COLOR)
libtcod.console_rect(title_ground_layer, SCREEN_WIDTH-1,
30-ground_height, 1, ground_height, False,
flag=libtcod.BKGND_SET)
ground_height = GetNewGroundHeight(ground_height)
DrawLayers()
# reset timer
ground_click = time.time()
# update animated cloud layer
if time.time() - cloud_click >= 0.30:
UpdateCloudLayer()
DrawLayers()
# reset timer
cloud_click = time.time()
# blit main console to screen
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
libtcod.console_flush()
##########################################################################################
#                                      Main Script                                       #
##########################################################################################
# Names shared between the game's functions as module-level globals.
# NOTE(review): `global` at module scope is a no-op; these statements only
# serve as documentation of which names are treated as globals.
global mouse, key
global map_con, overlay_con, map_info_con, msg_con, tank_con, date_con, menu_con, text_con
global c_map_con, c_overlay_con, c_action_con, c_info_con
global tk_table
global campaign, battle
# set campaign variable to None, will be reset later on
campaign = None
def CreateConsole(w, h, bc, fc, a):
    """Allocate, configure and clear a new off-screen libtcod console.

    w, h -- console dimensions in cells
    bc, fc -- default background and foreground colours
    a -- default text alignment
    Returns the ready-to-use console handle.
    """
    console = libtcod.console_new(w, h)
    for configure, value in (
            (libtcod.console_set_default_background, bc),
            (libtcod.console_set_default_foreground, fc),
            (libtcod.console_set_alignment, a)):
        configure(console, value)
    libtcod.console_clear(console)
    return console
# set up empty bones file if doesn't exist yet
if not os.path.exists('bones.dat'):
    print ('No bones file found; creating a new empty bones file.')
    bones = Bones()
    save = shelve.open('bones', 'n')
    save['bones'] = bones
    save.close()
# set up basic stuff
os.environ['SDL_VIDEO_CENTERED'] = '1' # center window on screen
libtcod.console_set_custom_font('terminal8x12_armcom.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_ASCII_INROW, 0, 0)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, NAME + ' - ' + VERSION + SUBVERSION,
    fullscreen=False, renderer=libtcod.RENDERER_OPENGL2, vsync=True)
libtcod.sys_set_fps(LIMIT_FPS)
# set defaults for screen console
libtcod.console_set_default_background(0, libtcod.black)
libtcod.console_set_default_foreground(0, libtcod.white)
# create the main display console
con = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
libtcod.console_set_default_background(con, libtcod.black)
libtcod.console_set_default_foreground(con, libtcod.white)
libtcod.console_set_alignment(con, libtcod.LEFT)
libtcod.console_clear(con)
# create game consoles
map_con = CreateConsole(MAP_CON_WIDTH, MAP_CON_HEIGHT, libtcod.black, libtcod.black,
    libtcod.LEFT) # map
overlay_con = CreateConsole(MAP_CON_WIDTH, MAP_CON_HEIGHT, KEY_COLOR, libtcod.black,
    libtcod.LEFT) # map overlay
libtcod.console_set_key_color(overlay_con, KEY_COLOR)
c_map_con = CreateConsole(C_MAP_CON_WIDTH, C_MAP_CON_HEIGHT, libtcod.black, libtcod.black,
    libtcod.LEFT) # campaign map
c_overlay_con = CreateConsole(C_MAP_CON_WIDTH, C_MAP_CON_HEIGHT, KEY_COLOR, libtcod.white,
    libtcod.LEFT) # campaign map overlay
libtcod.console_set_key_color(c_overlay_con, KEY_COLOR)
map_info_con = CreateConsole(MAP_INFO_CON_WIDTH, MAP_INFO_CON_HEIGHT, libtcod.black,
    libtcod.white, libtcod.CENTER) # map info
msg_con = CreateConsole(MSG_CON_WIDTH, MSG_CON_HEIGHT, libtcod.black, libtcod.white,
    libtcod.LEFT) # messages
tank_con = CreateConsole(TANK_CON_WIDTH, TANK_CON_HEIGHT, libtcod.black, libtcod.white,
    libtcod.LEFT) # tank info
date_con = CreateConsole(DATE_CON_WIDTH, DATE_CON_HEIGHT, libtcod.black, libtcod.white,
    libtcod.LEFT) # date, time, etc. info
menu_con = CreateConsole(MENU_CON_WIDTH, MENU_CON_HEIGHT, libtcod.black, libtcod.white,
    libtcod.LEFT) # menu console
text_con = CreateConsole(TEXT_CON_WIDTH, TEXT_CON_HEIGHT, libtcod.black, libtcod.white,
    libtcod.LEFT) # text display console
c_action_con = CreateConsole(C_ACTION_CON_W, C_ACTION_CON_H, libtcod.black, libtcod.white,
    libtcod.LEFT) # campaign action console
c_info_con = CreateConsole(C_INFO_CON_W, C_INFO_CON_H, libtcod.black, libtcod.white,
    libtcod.LEFT) # campaign message console
# create mouse and key event holders
mouse = libtcod.Mouse()
key = libtcod.Key()
# try to start up steamworks; if it fails, it may just mean that Steam is offline, so do nothing
try:
    steamworks = STEAMWORKS()
    steamworks.initialize()
except:
    pass
# set up colour control for highlighting command keys
libtcod.console_set_color_control(libtcod.COLCTRL_1, KEY_HIGHLIGHT_COLOR, libtcod.black)
# display loading screen
libtcod.console_clear(con)
libtcod.console_set_alignment(con, libtcod.CENTER)
libtcod.console_print(con, SCREEN_XM, 30, 'Loading...')
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
libtcod.console_flush()
# init SDL mixer
InitMixer()
# start main menu
MainMenu()
| sudasana/armcom | armcom.py | Python | gpl-3.0 | 572,525 | [
"BLAST",
"Firefly"
] | 394d407fa6c733d7ad67c3f5bbf2cb1873475f3ec2b0821c03158c56c6c19207 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2014 Vlad Temian <vladtemian@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Cezar <celnazli@bitdefender.com>
# Copyright (c) 2015 Chris Rebert <code@rebertia.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Jared Garst <cultofjared@gmail.com>
# Copyright (c) 2017 Martin <MartinBasti@users.noreply.github.com>
# Copyright (c) 2017 Christopher Zurcher <zurcher@users.noreply.github.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Checkers for various standard library functions."""
import sys
import six
import astroid
from astroid.bases import Instance
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers import utils
# Names of builtins that open file objects.
OPEN_FILES = {'open', 'file'}
# Qualified names used to recognise specific standard-library call targets.
UNITTEST_CASE = 'unittest.case'
THREADING_THREAD = 'threading.Thread'
COPY_COPY = 'copy.copy'
OS_ENVIRON = 'os._Environ'
# Module where the builtin open() actually lives; this differs between
# Python 3 (_io) and Python 2 (__builtin__).
if sys.version_info >= (3, 0):
    OPEN_MODULE = '_io'
else:
    OPEN_MODULE = '__builtin__'
def _check_mode_str(mode):
    """Return True if *mode* is a valid mode string for the builtin open().

    Accepts r/w/a (plus x on Python 3) combined with b, t, + and U,
    applying the same constraints as open() itself.
    """
    # Anything that is not a string cannot be a valid mode.
    if not isinstance(mode, six.string_types):
        return False
    # Syntax check: only known flag characters, each used at most once.
    flags = set(mode)
    allowed = "rwatb+U"
    creating = False
    if six.PY3:
        allowed += "x"
        creating = "x" in flags
    if flags - set(allowed) or len(mode) > len(flags):
        return False
    # Decode the individual flags.
    reading = "r" in flags
    writing = "w" in flags
    appending = "a" in flags
    text = "t" in flags
    binary = "b" in flags
    if "U" in flags:
        # Universal newlines implies reading and conflicts with any
        # write/append/create mode.
        if writing or appending or (creating and six.PY3):
            return False
        reading = True
        if not six.PY3:
            binary = True
    # Text and binary modes are mutually exclusive.
    if text and binary:
        return False
    # Exactly one base mode (read/write/append/create) must be selected.
    total = reading + writing + appending + (creating if six.PY3 else 0)
    if total != 1:
        return False
    if six.PY3:
        return True
    # Python 2 only: after folding "U" away, the first character has to
    # name the base mode.
    if "U" in mode:
        mode = mode.replace("U", "")
        if "r" not in mode:
            mode = "r" + mode
    return mode[0] in ("r", "w", "a", "U")
class StdlibChecker(BaseChecker):
    """Checker for problematic uses of standard-library functions.

    Emits warnings for: invalid open() modes, datetime.time used in a
    boolean context, redundant unittest asserts on constants, deprecated
    stdlib methods, threading.Thread created without a target, and
    shallow copies of os.environ.
    """

    __implements__ = (IAstroidChecker,)
    name = 'stdlib'

    msgs = {
        'W1501': ('"%s" is not a valid mode for open.',
                  'bad-open-mode',
                  'Python supports: r, w, a[, x] modes with b, +, '
                  'and U (only with r) options. '
                  'See http://docs.python.org/2/library/functions.html#open'),
        'W1502': ('Using datetime.time in a boolean context.',
                  'boolean-datetime',
                  'Using datetime.time in a boolean context can hide '
                  'subtle bugs when the time they represent matches '
                  'midnight UTC. This behaviour was fixed in Python 3.5. '
                  'See http://bugs.python.org/issue13936 for reference.',
                  {'maxversion': (3, 5)}),
        'W1503': ('Redundant use of %s with constant '
                  'value %r',
                  'redundant-unittest-assert',
                  'The first argument of assertTrue and assertFalse is '
                  'a condition. If a constant is passed as parameter, that '
                  'condition will be always true. In this case a warning '
                  'should be emitted.'),
        'W1505': ('Using deprecated method %s()',
                  'deprecated-method',
                  'The method is marked as deprecated and will be removed in '
                  'a future version of Python. Consider looking for an '
                  'alternative in the documentation.'),
        'W1506': ('threading.Thread needs the target function',
                  'bad-thread-instantiation',
                  'The warning is emitted when a threading.Thread class '
                  'is instantiated without the target function being passed. '
                  'By default, the first parameter is the group param, not the target param. '),
        'W1507': ('Using copy.copy(os.environ). Use os.environ.copy() '
                  'instead. ',
                  'shallow-copy-environ',
                  'os.environ is not a dict object but proxy object, so '
                  'shallow copy has still effects on original object. '
                  'See https://bugs.python.org/issue15373 for reference. '),
        }

    # Deprecated stdlib callables: key 0 applies to both major versions;
    # keys 2 and 3 map the version that introduced the deprecation to the
    # affected qualified names.
    deprecated = {
        0: [
            'cgi.parse_qs', 'cgi.parse_qsl',
            'ctypes.c_buffer',
            'distutils.command.register.register.check_metadata',
            'distutils.command.sdist.sdist.check_metadata',
            'tkinter.Misc.tk_menuBar',
            'tkinter.Menu.tk_bindForTraversal',
            ],
        2: {
            (2, 6, 0): [
                'commands.getstatus',
                'os.popen2',
                'os.popen3',
                'os.popen4',
                'macostools.touched',
                ],
            (2, 7, 0): [
                'unittest.case.TestCase.assertEquals',
                'unittest.case.TestCase.assertNotEquals',
                'unittest.case.TestCase.assertAlmostEquals',
                'unittest.case.TestCase.assertNotAlmostEquals',
                'unittest.case.TestCase.assert_',
                'xml.etree.ElementTree.Element.getchildren',
                'xml.etree.ElementTree.Element.getiterator',
                'xml.etree.ElementTree.XMLParser.getiterator',
                'xml.etree.ElementTree.XMLParser.doctype',
                ],
            },
        3: {
            (3, 0, 0): [
                'inspect.getargspec',
                'unittest.case.TestCase._deprecate.deprecated_func',
                ],
            (3, 1, 0): [
                'base64.encodestring', 'base64.decodestring',
                'ntpath.splitunc',
                ],
            (3, 2, 0): [
                'cgi.escape',
                'configparser.RawConfigParser.readfp',
                'xml.etree.ElementTree.Element.getchildren',
                'xml.etree.ElementTree.Element.getiterator',
                'xml.etree.ElementTree.XMLParser.getiterator',
                'xml.etree.ElementTree.XMLParser.doctype',
                ],
            (3, 3, 0): [
                'inspect.getmoduleinfo',
                'logging.warn', 'logging.Logger.warn',
                'logging.LoggerAdapter.warn',
                'nntplib._NNTPBase.xpath',
                'platform.popen',
                ],
            (3, 4, 0): [
                'importlib.find_loader',
                'plistlib.readPlist', 'plistlib.writePlist',
                'plistlib.readPlistFromBytes',
                'plistlib.writePlistToBytes',
                ],
            (3, 4, 4): [
                'asyncio.tasks.async',
                ],
            (3, 5, 0): [
                'fractions.gcd',
                'inspect.getargvalues',
                'inspect.formatargspec', 'inspect.formatargvalues',
                'inspect.getcallargs',
                'platform.linux_distribution', 'platform.dist',
                ],
            (3, 6, 0): [
                'importlib._bootstrap_external.FileLoader.load_module',
                ],
            },
        }

    def _check_bad_thread_instantiation(self, node):
        # With positional-only arguments, the first one binds to `group`,
        # not `target`, which is almost never what was intended.
        if not node.kwargs and node.args:
            self.add_message('bad-thread-instantiation', node=node)

    def _check_shallow_copy_environ(self, node):
        # copy.copy(os.environ) copies the proxy object; mutating the copy
        # still affects the real environment (bpo-15373).
        arg = utils.get_argument_from_call(node, position=0)
        for inferred in arg.inferred():
            if inferred.qname() == OS_ENVIRON:
                self.add_message('shallow-copy-environ', node=node)
                break

    @utils.check_messages('bad-open-mode', 'redundant-unittest-assert',
                          'deprecated-method',
                          'bad-thread-instantiation',
                          'shallow-copy-environ')
    def visit_call(self, node):
        """Visit a Call node and dispatch to the individual checks."""
        try:
            for inferred in node.func.infer():
                if inferred is astroid.Uninferable:
                    continue
                if inferred.root().name == OPEN_MODULE:
                    if getattr(node.func, 'name', None) in OPEN_FILES:
                        self._check_open_mode(node)
                if inferred.root().name == UNITTEST_CASE:
                    self._check_redundant_assert(node, inferred)
                if isinstance(inferred, astroid.ClassDef) and inferred.qname() == THREADING_THREAD:
                    self._check_bad_thread_instantiation(node)
                if isinstance(inferred, astroid.FunctionDef) and inferred.qname() == COPY_COPY:
                    self._check_shallow_copy_environ(node)
                self._check_deprecated_method(node, inferred)
        except astroid.InferenceError:
            return

    @utils.check_messages('boolean-datetime')
    def visit_unaryop(self, node):
        # `not <expr>` evaluates <expr> in a boolean context.
        if node.op == 'not':
            self._check_datetime(node.operand)

    @utils.check_messages('boolean-datetime')
    def visit_if(self, node):
        self._check_datetime(node.test)

    @utils.check_messages('boolean-datetime')
    def visit_ifexp(self, node):
        self._check_datetime(node.test)

    @utils.check_messages('boolean-datetime')
    def visit_boolop(self, node):
        for value in node.values:
            self._check_datetime(value)

    def _check_deprecated_method(self, node, inferred):
        """Emit deprecated-method if the inferred callee is deprecated."""
        py_vers = sys.version_info[0]

        if isinstance(node.func, astroid.Attribute):
            func_name = node.func.attrname
        elif isinstance(node.func, astroid.Name):
            func_name = node.func.name
        else:
            # Not interested in other nodes.
            return

        # Reject nodes which aren't of interest to us.
        acceptable_nodes = (astroid.BoundMethod,
                            astroid.UnboundMethod,
                            astroid.FunctionDef)
        if not isinstance(inferred, acceptable_nodes):
            return

        qname = inferred.qname()
        if qname in self.deprecated[0]:
            self.add_message('deprecated-method', node=node,
                             args=(func_name, ))
        else:
            # Only report deprecations that apply to the interpreter
            # version pylint itself is running under.
            for since_vers, func_list in self.deprecated[py_vers].items():
                if since_vers <= sys.version_info and qname in func_list:
                    self.add_message('deprecated-method', node=node,
                                     args=(func_name, ))
                    break

    def _check_redundant_assert(self, node, infer):
        # assertTrue/assertFalse called with a constant is always-true.
        if (isinstance(infer, astroid.BoundMethod) and
                node.args and isinstance(node.args[0], astroid.Const) and
                infer.name in ['assertTrue', 'assertFalse']):
            self.add_message('redundant-unittest-assert',
                             args=(infer.name, node.args[0].value, ),
                             node=node)

    def _check_datetime(self, node):
        """ Check that a datetime was infered.
        If so, emit boolean-datetime warning.
        """
        try:
            infered = next(node.infer())
        except astroid.InferenceError:
            return
        if (isinstance(infered, Instance) and
                infered.qname() == 'datetime.time'):
            self.add_message('boolean-datetime', node=node)

    def _check_open_mode(self, node):
        """Check that the mode argument of an open or file call is valid."""
        try:
            mode_arg = utils.get_argument_from_call(node, position=1,
                                                    keyword='mode')
        except utils.NoSuchArgumentError:
            return
        if mode_arg:
            mode_arg = utils.safe_infer(mode_arg)
            if (isinstance(mode_arg, astroid.Const)
                    and not _check_mode_str(mode_arg.value)):
                self.add_message('bad-open-mode', node=node,
                                 args=mode_arg.value)
def register(linter):
    """Auto-registration hook invoked by pylint to plug in this checker."""
    checker = StdlibChecker(linter)
    linter.register_checker(checker)
| AtomLinter/linter-pylama | bin/deps/pylint/checkers/stdlib.py | Python | mit | 12,544 | [
"VisIt"
] | cbfe63408816a049730b04c40481ad65731bb49ad2c759f19387f939ec32cdc3 |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import ctypes
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.fci import direct_spin1
from pyscf.fci import selected_ci
# Handle to the compiled FCI helper library shipped with pyscf.
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
    """Contract the two-electron Hamiltonian with a selected-CI vector.

    Spin-symmetric (identical alpha/beta string sets) variant: only the
    alpha-alpha and beta-beta-with-alpha cross terms are evaluated by the
    C library; the final transpose_sum symmetrizes the result to cover
    the remaining spin cases.
    """
    ci_coeff, nelec, ci_strs = selected_ci._unpack(civec_strs, nelec)
    if link_index is None:
        link_index = selected_ci._all_linkstr_index(ci_strs, norb, nelec)
    cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
    # alpha and beta string spaces are identical in this spin0 variant
    na, nlinka = nb, nlinkb = cd_indexa.shape[:2]

    eri = ao2mo.restore(1, eri, norb)
    # antisymmetrized integrals restricted to lower-triangular pair indices,
    # used for the same-spin (aa|aa) contraction
    eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
    idx,idy = numpy.tril_indices(norb, -1)
    idx = idx * norb + idy
    eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
    lib.transpose_sum(eri1, inplace=True)
    eri1 *= .5
    fcivec = ci_coeff.reshape(na,nb)
    # (aa|aa)
    ci1 = numpy.zeros_like(fcivec)
    if nelec[0] > 1:
        ma, mlinka = mb, mlinkb = dd_indexa.shape[:2]
        libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
                                   fcivec.ctypes.data_as(ctypes.c_void_p),
                                   ci1.ctypes.data_as(ctypes.c_void_p),
                                   ctypes.c_int(norb),
                                   ctypes.c_int(na), ctypes.c_int(nb),
                                   ctypes.c_int(ma), ctypes.c_int(mlinka),
                                   dd_indexa.ctypes.data_as(ctypes.c_void_p))
    # fold effective one-electron-like contributions into the integrals
    # before the opposite-spin contraction
    h_ps = numpy.einsum('pqqs->ps', eri) * (.5/nelec[0])
    eri1 = eri.copy()
    for k in range(norb):
        eri1[:,:,k,k] += h_ps
        eri1[k,k,:,:] += h_ps
    eri1 = ao2mo.restore(4, eri1, norb)
    lib.transpose_sum(eri1, inplace=True)
    eri1 *= .5
    # (bb|aa)
    libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
                               fcivec.ctypes.data_as(ctypes.c_void_p),
                               ci1.ctypes.data_as(ctypes.c_void_p),
                               ctypes.c_int(norb),
                               ctypes.c_int(na), ctypes.c_int(nb),
                               ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                               cd_indexa.ctypes.data_as(ctypes.c_void_p),
                               cd_indexb.ctypes.data_as(ctypes.c_void_p))
    # symmetrize: ci1 <- ci1 + ci1.T covers the spin-swapped terms
    lib.transpose_sum(ci1, inplace=True)
    return selected_ci._as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
    """Grow the selected-CI string space based on coefficient magnitudes.

    Keeps strings whose largest CI coefficient exceeds
    myci.ci_coeff_cutoff, selects new candidate strings from them, and
    re-embeds the old CI coefficients into the enlarged (sorted) space.
    The alpha and beta string sets are kept identical (Sz=0 symmetry).
    """
    if isinstance(civec_strs, (tuple, list)):
        # list of CI vectors sharing the same string set
        nelec, (strsa, strsb) = selected_ci._unpack(civec_strs[0], nelec)[1:]
        ci_coeff = lib.asarray(civec_strs)
    else:
        ci_coeff, nelec, (strsa, strsb) = selected_ci._unpack(civec_strs, nelec)
    na = nb = len(strsa)
    ci0 = ci_coeff.reshape(-1,na,nb)
    abs_ci = abs(ci0).max(axis=0)
    eri = ao2mo.restore(1, eri, norb)
    eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
    # keep alpha strings whose largest coefficient passes the cutoff
    civec_a_max = abs_ci.max(axis=1)
    ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
    civec_a_max = civec_a_max[ci_aidx]
    strsa = strsa[ci_aidx]
    # select additional strings reachable from the kept ones
    strsa_add = selected_ci.select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
    strsa = numpy.append(strsa, strsa_add)
    aidx = numpy.argsort(strsa)
    strsa = strsa[aidx]
    # positions of the previously-kept strings inside the sorted new set
    aidx = numpy.where(aidx < len(ci_aidx))[0]
    # Sz=0: beta string set is identical to the alpha set
    ci_bidx = ci_aidx
    strsb = strsa
    bidx = aidx
    ma = mb = len(strsa)

    # scatter the old coefficients into the enlarged space
    cs = []
    for i in range(ci0.shape[0]):
        ci1 = numpy.zeros((ma,mb))
        tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
        lib.takebak_2d(ci1, tmp, aidx, bidx)
        cs.append(selected_ci._as_SCIvector(ci1, (strsa,strsb)))

    # restore the caller's original vector shape conventions
    if ci_coeff[0].ndim == 0 or ci_coeff[0].shape[-1] != nb:
        cs = [c.ravel() for c in cs]
    if (isinstance(ci_coeff, numpy.ndarray) and
            ci_coeff.shape[0] == na or ci_coeff.shape[0] == na*nb):
        cs = cs[0]
    return cs
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
    """Hamiltonian diagonal for the spin-symmetric selected-CI space.

    Symmetrizes the general selected-CI diagonal over the alpha/beta
    exchange (average of the matrix and its transpose).
    """
    hdiag = selected_ci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
    na = len(ci_strs[0])
    hdiag_mat = hdiag.reshape(na,na)
    lib.transpose_sum(hdiag_mat, inplace=True)
    hdiag *= .5
    return hdiag
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
    """Solve the selected-CI problem using this module's SelectedCI solver.

    Thin wrapper delegating to direct_spin1._kfactory.
    NOTE(review): orbsym and wfnsym are accepted but not forwarded here;
    confirm whether symmetry handling is intentional via **kwargs only.
    """
    return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
                                  level_shift, tol, lindep, max_cycle,
                                  max_space, nroots, davidson_only,
                                  pspace_size, select_cutoff=select_cutoff,
                                  ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
                                  **kwargs)
# Re-export the density-matrix helpers from the general selected-CI module;
# they apply unchanged to the spin-symmetric case.
make_rdm1s = selected_ci.make_rdm1s
make_rdm2s = selected_ci.make_rdm2s
make_rdm1 = selected_ci.make_rdm1
make_rdm2 = selected_ci.make_rdm2

trans_rdm1s = selected_ci.trans_rdm1s
trans_rdm1 = selected_ci.trans_rdm1
class SelectedCI(selected_ci.SelectedCI):
    """Selected-CI solver specialized for identical alpha/beta string sets."""

    def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):
        # The argument civec_strs is a CI vector in function FCISolver.contract_2e.
        # Save and patch self._strs to make this contract_2e function compatible to
        # FCISolver.contract_2e.
        if getattr(civec_strs, '_strs', None) is not None:
            self._strs = civec_strs._strs
        else:
            assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))
            civec_strs = selected_ci._as_SCIvector(civec_strs, self._strs)
        return contract_2e(eri, civec_strs, norb, nelec, link_index)

    def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
        # delegate to the module-level spin-symmetrized implementation
        return make_hdiag(h1e, eri, ci_strs, norb, nelec)

    # use the spin-symmetric string-space expansion defined in this module
    enlarge_space = enlarge_space

# Short alias for the solver class.
SCI = SelectedCI
if __name__ == '__main__':
    # Smoke test: compare the selected-CI energy of a small H8 cluster
    # against a conventional FCI reference.
    from functools import reduce
    from pyscf import gto
    from pyscf import scf
    from pyscf.fci import direct_spin0

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
        ['H', ( 1., 2. , 3. )],
        ['H', ( 1., 2. , 4. )],
    ]
    mol.basis = 'sto-3g'
    mol.build()
    m = scf.RHF(mol)
    m.kernel()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # transform one- and two-electron integrals to the MO basis
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
    eri = eri.reshape(norb,norb,norb,norb)

    e1, c1 = kernel(h1e, eri, norb, nelec)
    e2, c2 = direct_spin0.kernel(h1e, eri, norb, nelec)
    # print energy, deviation from a stored reference value, and FCI diff
    print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
| sunqm/pyscf | pyscf/fci/selected_ci_spin0.py | Python | apache-2.0 | 7,596 | [
"PySCF"
] | 3863569a49cbdb2c8e39edc8f868aca843250f096b1759e58f82e95422956860 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests many modules to compute energy of LiH."""
from __future__ import absolute_import
import os
import numpy
import scipy.sparse
import unittest
from openfermion.config import *
from openfermion.hamiltonians import *
from openfermion.ops import *
from openfermion.transforms import *
from openfermion.utils import *
class LiHIntegrationTest(unittest.TestCase):
    def setUp(self):
        """Build the LiH/STO-3G test fixture.

        Loads precomputed molecular data from disk and derives the
        Hamiltonians (full and frozen-core), the FCI RDM, the fermionic
        and qubit operators, and sparse matrix forms used by the test.
        """
        # Set up molecule.
        geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]
        basis = 'sto-3g'
        multiplicity = 1
        filename = os.path.join(THIS_DIRECTORY, 'data',
                                'H1-Li1_sto-3g_singlet_1.45')
        self.molecule = MolecularData(
            geometry, basis, multiplicity, filename=filename)
        self.molecule.load()

        # Get molecular Hamiltonian.
        self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
        # Frozen-core variant: orbital 0 occupied, remaining orbitals active.
        self.molecular_hamiltonian_no_core = (
            self.molecule.
            get_molecular_hamiltonian(occupied_indices=[0],
                                      active_indices=range(1,
                                                           self.molecule.
                                                           n_orbitals)))

        # Get FCI RDM.
        self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)

        # Get explicit coefficients.
        self.nuclear_repulsion = self.molecular_hamiltonian.constant
        self.one_body = self.molecular_hamiltonian.one_body_tensor
        self.two_body = self.molecular_hamiltonian.two_body_tensor

        # Get fermion Hamiltonian.
        self.fermion_hamiltonian = normal_ordered(get_fermion_operator(
            self.molecular_hamiltonian))

        # Get qubit Hamiltonian.
        self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)

        # Get explicit coefficients.
        # NOTE(review): these three assignments duplicate the ones above -
        # harmless but redundant.
        self.nuclear_repulsion = self.molecular_hamiltonian.constant
        self.one_body = self.molecular_hamiltonian.one_body_tensor
        self.two_body = self.molecular_hamiltonian.two_body_tensor

        # Get matrix form.
        self.hamiltonian_matrix = get_sparse_operator(
            self.molecular_hamiltonian)
        self.hamiltonian_matrix_no_core = get_sparse_operator(
            self.molecular_hamiltonian_no_core)
def test_all(self):
# Test reverse Jordan-Wigner.
fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test mapping to interaction operator.
fermion_hamiltonian = get_fermion_operator(self.molecular_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test RDM energy.
fci_rdm_energy = self.nuclear_repulsion
fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor *
self.one_body)
fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor *
self.two_body)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Confirm expectation on qubit Hamiltonian using reverse JW matches.
qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
qubit_energy = 0.0
for term, coefficient in qubit_rdm.terms.items():
qubit_energy += coefficient * self.qubit_hamiltonian.terms[term]
self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)
# Confirm fermionic RDMs can be built from measured qubit RDMs.
new_fermi_rdm = get_interaction_rdm(qubit_rdm)
fermi_rdm_energy = new_fermi_rdm.expectation(
self.molecular_hamiltonian)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Test sparse matrices.
energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
self.assertAlmostEqual(energy, self.molecule.fci_energy)
expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
self.assertAlmostEqual(expected_energy, energy)
# Make sure you can reproduce Hartree-Fock energy.
hf_state = jw_hartree_fock_state(
self.molecule.n_electrons, count_qubits(self.qubit_hamiltonian))
hf_density = get_density_matrix([hf_state], [1.])
expected_hf_density_energy = expectation(self.hamiltonian_matrix,
hf_density)
expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
self.assertAlmostEqual(expected_hf_density_energy,
self.molecule.hf_energy)
# Check that frozen core result matches frozen core FCI from psi4.
# Recore frozen core result from external calculation.
self.frozen_core_fci_energy = -7.8807607374168
no_core_fci_energy = scipy.linalg.eigh(
self.hamiltonian_matrix_no_core.todense())[0][0]
self.assertAlmostEqual(no_core_fci_energy,
self.frozen_core_fci_energy)
# Check that the freeze_orbitals function has the same effect as the
# as the occupied_indices option of get_molecular_hamiltonian.
frozen_hamiltonian = freeze_orbitals(
get_fermion_operator(self.molecular_hamiltonian), [0, 1])
self.assertTrue(frozen_hamiltonian ==
get_fermion_operator(self.molecular_hamiltonian_no_core))
| jarrodmcc/OpenFermion | src/openfermion/tests/_lih_integration_test.py | Python | apache-2.0 | 6,229 | [
"Psi4"
] | aa8fa19bb148f979135cf74217ee58e7e1cb59ba1a5fa1dfb21226feb25ab371 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************
**espressopp.Int3D**
********************
.. function:: espressopp.__Int3D(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.__Int3D.x(v, [0)
:param v:
:param [0:
:type v:
:type [0:
:rtype:
.. function:: espressopp.__Int3D.y(v, [1)
:param v:
:param [1:
:type v:
:type [1:
:rtype:
.. function:: espressopp.__Int3D.z(v, [2)
:param v:
:param [2:
:type v:
:type [2:
:rtype:
.. function:: espressopp.toInt3DFromVector(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toInt3D(\*args)
:param \*args:
:type \*args:
"""
from _espressopp import Int3D
from espressopp import esutil
# This injects additional methods into the Int3D class and pulls it
# into this module
class __Int3D(Int3D) :
    __metaclass__ = esutil.ExtendBaseClass
    # Keep a handle on the original (extension-exported) constructor before
    # the Python-level __init__ below shadows it.
    __originit = Int3D.__init__

    def __init__(self, *args):
        """Construct an Int3D from a flexible argument list.

        Accepted forms:
        * no arguments          -> the zero vector (0, 0, 0)
        * another Int3D         -> component-wise copy
        * a 3-element iterable  -> its elements become x, y, z
        * a single int          -> broadcast to all three components
        * three values          -> taken as x, y, z

        Raises TypeError for anything else.
        """
        if len(args) == 0:
            # Bug fix: default components must be ints. The previous
            # default of 0.0 passed floats into an integer vector type.
            x = y = z = 0
        elif len(args) == 1:
            arg0 = args[0]
            if isinstance(arg0, Int3D):
                x = arg0.x
                y = arg0.y
                z = arg0.z
            # test whether the argument is iterable and has 3 elements
            elif hasattr(arg0, '__iter__') and len(arg0) == 3:
                x, y, z = arg0
            elif isinstance(arg0, int):
                x = y = z = arg0
            else :
                raise TypeError("Cannot initialize Int3D from %s" % (args))
        elif len(args) == 3 :
            x, y, z = args
        else :
            raise TypeError("Cannot initialize Int3D from %s" % (args))
        # No `return` here: __init__ must return None anyway.
        self.__originit(x, y, z)

    # create setters and getters for the three components
    @property
    def x(self): return self[0]

    @x.setter
    def x(self, v): self[0] = v

    @property
    def y(self) : return self[1]

    @y.setter
    def y(self, v) : self[1] = v

    @property
    def z(self) : return self[2]

    @z.setter
    def z(self, v) : self[2] = v

    # string conversion
    def __str__(self) :
        return str((self[0], self[1], self[2]))

    def __repr__(self) :
        return 'Int3D' + str(self)
def toInt3DFromVector(*args):
    """Convert the arguments to an Int3D.

    Succeeds only when x, y and z are fully specified: either an existing
    Int3D (returned unchanged), a single 3-element iterable, or three
    separate components. Anything else raises TypeError.
    """
    if len(args) == 3:
        return Int3D(*args)
    if len(args) == 1:
        candidate = args[0]
        if isinstance(candidate, Int3D):
            return candidate
        if hasattr(candidate, '__iter__') and len(candidate) == 3:
            return Int3D(*args)
    raise TypeError("Specify x, y and z.")
def toInt3D(*args):
    """Coerce the arguments into an Int3D.

    A lone Int3D argument is passed through untouched; any other argument
    list is handed directly to the Int3D constructor.
    """
    passthrough = len(args) == 1 and isinstance(args[0], Int3D)
    return args[0] if passthrough else Int3D(*args)
| capoe/espressopp.soap | src/Int3D.py | Python | gpl-3.0 | 3,721 | [
"ESPResSo"
] | bed7ef5dbdc76889c4224314267ef11d95035e96b5892901833958e806abfdf0 |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
import numbers
import warnings
import copy
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import clone
from ..base import RegressorMixin
from ..base import is_classifier
from ..base import MultiOutputMixin
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import check_scalar
from ..utils.deprecation import deprecated
from ..utils.validation import _check_sample_weight
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from ._tree import _build_pruned_tree_ccp
from ._tree import ccp_pruning_path
from . import _tree, _splitter, _criterion
# Public API of this module.
__all__ = [
    "DecisionTreeClassifier",
    "DecisionTreeRegressor",
    "ExtraTreeClassifier",
    "ExtraTreeRegressor",
]
# =============================================================================
# Types and constants
# =============================================================================
# Dtypes expected by the Cython tree code: DTYPE for the input matrix X,
# DOUBLE for targets and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
# Map criterion names to their Cython implementations (classification).
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
# TODO: Remove "mse" and "mae" in version 1.2.
# Regression criteria; "mse"/"mae" are deprecated aliases of
# "squared_error"/"absolute_error" (deprecation warnings are emitted
# from fit() when those aliases are used).
CRITERIA_REG = {
    "squared_error": _criterion.MSE,
    "mse": _criterion.MSE,
    "friedman_mse": _criterion.FriedmanMSE,
    "absolute_error": _criterion.MAE,
    "mae": _criterion.MAE,
    "poisson": _criterion.Poisson,
}
# Splitter implementations keyed by the `splitter` parameter, for dense
# and sparse input respectively.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {
    "best": _splitter.BestSparseSplitter,
    "random": _splitter.RandomSparseSplitter,
}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for decision trees.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def __init__(
        self,
        *,
        criterion,
        splitter,
        max_depth,
        min_samples_split,
        min_samples_leaf,
        min_weight_fraction_leaf,
        max_features,
        max_leaf_nodes,
        random_state,
        min_impurity_decrease,
        class_weight=None,
        ccp_alpha=0.0,
    ):
        # Estimator convention: __init__ only records hyper-parameters
        # verbatim; all validation is deferred to fit().
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.random_state = random_state
        self.min_impurity_decrease = min_impurity_decrease
        self.class_weight = class_weight
        self.ccp_alpha = ccp_alpha
    def get_depth(self):
        """Return the depth of the decision tree.
        The depth of a tree is the maximum distance between the root
        and any leaf.
        Returns
        -------
        self.tree_.max_depth : int
            The maximum depth of the tree.
        """
        check_is_fitted(self)
        return self.tree_.max_depth
    def get_n_leaves(self):
        """Return the number of leaves of the decision tree.
        Returns
        -------
        self.tree_.n_leaves : int
            Number of leaves.
        """
        check_is_fitted(self)
        return self.tree_.n_leaves
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree from the training set (X, y).

        Validates all hyper-parameters and the input data, then delegates
        tree construction to the Cython builders (depth-first, or
        best-first when ``max_leaf_nodes`` is given).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training input samples; converted to ``dtype=np.float32`` (and
            to CSC format if sparse) when ``check_input`` is True.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            Target values (class labels for classifiers, real values for
            regressors).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, samples are equally weighted.
        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        random_state = check_random_state(self.random_state)
        check_scalar(
            self.ccp_alpha,
            name="ccp_alpha",
            target_type=numbers.Real,
            min_val=0.0,
        )
        if check_input:
            # Need to validate separately here.
            # We can't pass multi_output=True because that would allow y to be
            # csr.
            check_X_params = dict(dtype=DTYPE, accept_sparse="csc")
            check_y_params = dict(ensure_2d=False, dtype=None)
            X, y = self._validate_data(
                X, y, validate_separately=(check_X_params, check_y_params)
            )
            if issparse(X):
                X.sort_indices()
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError(
                        "No support for np.int64 index based sparse matrices"
                    )
            if self.criterion == "poisson":
                # Poisson deviance is only defined for non-negative targets
                # with a strictly positive total.
                if np.any(y < 0):
                    raise ValueError(
                        "Some value(s) of y are negative which is"
                        " not allowed for Poisson regression."
                    )
                if np.sum(y) <= 0:
                    raise ValueError(
                        "Sum of y is not positive which is "
                        "necessary for Poisson regression."
                    )
        # Determine output settings
        n_samples, self.n_features_in_ = X.shape
        is_classification = is_classifier(self)
        y = np.atleast_1d(y)
        expanded_class_weight = None
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            check_classification_targets(y)
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                y_original = np.copy(y)
            # Encode labels per output as integers in [0, n_classes_k).
            y_encoded = np.zeros(y.shape, dtype=int)
            for k in range(self.n_outputs_):
                classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_encoded
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original
                )
            self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Check parameters
        if self.max_depth is not None:
            check_scalar(
                self.max_depth,
                name="max_depth",
                target_type=numbers.Integral,
                min_val=1,
            )
        max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth
        if isinstance(self.min_samples_leaf, numbers.Integral):
            check_scalar(
                self.min_samples_leaf,
                name="min_samples_leaf",
                target_type=numbers.Integral,
                min_val=1,
            )
            min_samples_leaf = self.min_samples_leaf
        else:  # float
            check_scalar(
                self.min_samples_leaf,
                name="min_samples_leaf",
                target_type=numbers.Real,
                min_val=0.0,
                include_boundaries="neither",
            )
            min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
        if isinstance(self.min_samples_split, numbers.Integral):
            check_scalar(
                self.min_samples_split,
                name="min_samples_split",
                target_type=numbers.Integral,
                min_val=2,
            )
            min_samples_split = self.min_samples_split
        else:  # float
            check_scalar(
                self.min_samples_split,
                name="min_samples_split",
                target_type=numbers.Real,
                min_val=0.0,
                max_val=1.0,
                include_boundaries="right",
            )
            min_samples_split = int(ceil(self.min_samples_split * n_samples))
            min_samples_split = max(2, min_samples_split)
        # A split must leave min_samples_leaf samples on each side, so it
        # can never be satisfied with fewer than 2 * min_samples_leaf.
        min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
        check_scalar(
            self.min_weight_fraction_leaf,
            name="min_weight_fraction_leaf",
            target_type=numbers.Real,
            min_val=0.0,
            max_val=0.5,
        )
        if isinstance(self.max_features, str):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_in_)))
                else:
                    max_features = self.n_features_in_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_in_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_in_)))
            else:
                raise ValueError(
                    "Invalid value for max_features. "
                    "Allowed string values are 'auto', "
                    "'sqrt' or 'log2'."
                )
        elif self.max_features is None:
            max_features = self.n_features_in_
        elif isinstance(self.max_features, numbers.Integral):
            check_scalar(
                self.max_features,
                name="max_features",
                target_type=numbers.Integral,
                min_val=1,
                include_boundaries="left",
            )
            max_features = self.max_features
        else:  # float
            check_scalar(
                self.max_features,
                name="max_features",
                target_type=numbers.Real,
                min_val=0.0,
                max_val=1.0,
                include_boundaries="right",
            )
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_in_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if self.max_leaf_nodes is not None:
            check_scalar(
                self.max_leaf_nodes,
                name="max_leaf_nodes",
                target_type=numbers.Integral,
                min_val=2,
            )
        # -1 signals "unlimited" to the tree builders below.
        max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes
        check_scalar(
            self.min_impurity_decrease,
            name="min_impurity_decrease",
            target_type=numbers.Real,
            min_val=0.0,
        )
        if len(y) != n_samples:
            raise ValueError(
                "Number of labels=%d does not match number of samples=%d"
                % (len(y), n_samples)
            )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, DOUBLE)
        if expanded_class_weight is not None:
            # Combine user-provided sample weights with class weights.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        if sample_weight is None:
            min_weight_leaf = self.min_weight_fraction_leaf * n_samples
        else:
            min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight)
        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](
                    self.n_outputs_, self.n_classes_
                )
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples)
            # TODO: Remove in v1.2
            if self.criterion == "mse":
                warnings.warn(
                    "Criterion 'mse' was deprecated in v1.0 and will be "
                    "removed in version 1.2. Use `criterion='squared_error'` "
                    "which is equivalent.",
                    FutureWarning,
                )
            elif self.criterion == "mae":
                warnings.warn(
                    "Criterion 'mae' was deprecated in v1.0 and will be "
                    "removed in version 1.2. Use `criterion='absolute_error'` "
                    "which is equivalent.",
                    FutureWarning,
                )
        else:
            # Make a deepcopy in case the criterion has mutable attributes that
            # might be shared and modified concurrently during parallel fitting
            criterion = copy.deepcopy(criterion)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](
                criterion,
                self.max_features_,
                min_samples_leaf,
                min_weight_leaf,
                random_state,
            )
        if is_classifier(self):
            self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_)
        else:
            self.tree_ = Tree(
                self.n_features_in_,
                # TODO: tree shouldn't need this in this case
                np.array([1] * self.n_outputs_, dtype=np.intp),
                self.n_outputs_,
            )
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(
                splitter,
                min_samples_split,
                min_samples_leaf,
                min_weight_leaf,
                max_depth,
                self.min_impurity_decrease,
            )
        else:
            builder = BestFirstTreeBuilder(
                splitter,
                min_samples_split,
                min_samples_leaf,
                min_weight_leaf,
                max_depth,
                max_leaf_nodes,
                self.min_impurity_decrease,
            )
        builder.build(self.tree_, X, y, sample_weight)
        # For single-output classification, unwrap the per-output lists so
        # classes_/n_classes_ are scalars/arrays rather than length-1 lists.
        if self.n_outputs_ == 1 and is_classifier(self):
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        self._prune_tree()
        return self
    def _validate_X_predict(self, X, check_input):
        """Validate the training data on predict (probabilities)."""
        if check_input:
            X = self._validate_data(X, dtype=DTYPE, accept_sparse="csr", reset=False)
            if issparse(X) and (
                X.indices.dtype != np.intc or X.indptr.dtype != np.intc
            ):
                raise ValueError("No support for np.int64 index based sparse matrices")
        else:
            # The number of features is checked regardless of `check_input`
            self._check_n_features(X, reset=False)
        return X
    def predict(self, X, check_input=True):
        """Predict class or regression value for X.
        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The predicted classes, or the predict values.
        """
        check_is_fitted(self)
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]
        # Classification
        if is_classifier(self):
            if self.n_outputs_ == 1:
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)
            else:
                class_type = self.classes_[0].dtype
                predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1), axis=0
                    )
                return predictions
        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]
            else:
                return proba[:, :, 0]
    def apply(self, X, check_input=True):
        """Return the index of the leaf that each sample is predicted as.
        .. versionadded:: 0.17
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        X_leaves : array-like of shape (n_samples,)
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        check_is_fitted(self)
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)
    def decision_path(self, X, check_input=True):
        """Return the decision path in the tree.
        .. versionadded:: 0.18
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        indicator : sparse matrix of shape (n_samples, n_nodes)
            Return a node indicator CSR matrix where non zero elements
            indicates that the samples goes through the nodes.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.decision_path(X)
    def _prune_tree(self):
        """Prune tree using Minimal Cost-Complexity Pruning."""
        check_is_fitted(self)
        # ccp_alpha == 0 means pruning is disabled.
        if self.ccp_alpha == 0.0:
            return
        # build pruned tree
        if is_classifier(self):
            n_classes = np.atleast_1d(self.n_classes_)
            pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_)
        else:
            pruned_tree = Tree(
                self.n_features_in_,
                # TODO: the tree shouldn't need this param
                np.array([1] * self.n_outputs_, dtype=np.intp),
                self.n_outputs_,
            )
        _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)
        self.tree_ = pruned_tree
    def cost_complexity_pruning_path(self, X, y, sample_weight=None):
        """Compute the pruning path during Minimal Cost-Complexity Pruning.
        See :ref:`minimal_cost_complexity_pruning` for details on the pruning
        process.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels) as integers or strings.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. Splits are also
            ignored if they would result in any single class carrying a
            negative weight in either child node.
        Returns
        -------
        ccp_path : :class:`~sklearn.utils.Bunch`
            Dictionary-like object, with the following attributes.
            ccp_alphas : ndarray
                Effective alphas of subtree during pruning.
            impurities : ndarray
                Sum of the impurities of the subtree leaves for the
                corresponding alpha value in ``ccp_alphas``.
        """
        # Fit an unpruned clone so the full path is available.
        est = clone(self).set_params(ccp_alpha=0.0)
        est.fit(X, y, sample_weight=sample_weight)
        return Bunch(**ccp_pruning_path(est.tree_))
    @property
    def feature_importances_(self):
        """Return the feature importances.
        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.
        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.
        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            Normalized total reduction of criteria by feature
            (Gini importance).
        """
        check_is_fitted(self)
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
`n_features_` is deprecated in 1.0 and will be removed in
1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeRegressor : A decision tree regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The :meth:`predict` method operates using the :func:`numpy.argmax`
function on the outputs of :meth:`predict_proba`. This means that in
case the highest predicted probabilities are tied, the classifier will
predict the tied class with the lowest index in :term:`classes_`.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(
self,
*,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
class_weight=None,
ccp_alpha=0.0,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
ccp_alpha=ccp_alpha,
)
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels) as integers or strings.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. Splits are also
            ignored if they would result in any single class carrying a
            negative weight in either child node.

        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : DecisionTreeClassifier
            Fitted estimator.
        """
        # All heavy lifting (validation, tree growing, pruning) happens in the
        # shared BaseDecisionTree.fit; this override exists so the docstring
        # can state the classifier-specific return type.
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=check_input,
        )
        return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, : self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, : self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
    @deprecated(  # type: ignore
        "The attribute `n_features_` is deprecated in 1.0 and will be removed "
        "in 1.2. Use `n_features_in_` instead."
    )
    @property
    def n_features_(self):
        """Deprecated read-only alias for ``n_features_in_``."""
        return self.n_features_in_
    def _more_tags(self):
        # Estimator tag: decision trees natively support multilabel targets.
        return {"multilabel": True}
class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"squared_error", "friedman_mse", "absolute_error", \
"poisson"}, default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion and minimizes the L2
loss using the mean of each terminal node, "friedman_mse", which uses
mean squared error with Friedman's improvement score for potential
splits, "absolute_error" for the mean absolute error, which minimizes
the L1 loss using the median of each terminal node, and "poisson" which
uses reduction in Poisson deviance to find splits.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. versionadded:: 0.24
Poisson deviance criterion.
.. deprecated:: 1.0
Criterion "mse" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="squared_error"` which is equivalent.
.. deprecated:: 1.0
Criterion "mae" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="absolute_error"` which is equivalent.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
feature_importances_ : ndarray of shape (n_features,)
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
`n_features_` is deprecated in 1.0 and will be removed in
1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeClassifier : A decision tree classifier.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, X, y, cv=10)
... # doctest: +SKIP
...
array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50...,
0.16..., 0.11..., -0.73..., -0.30..., -0.00...])
"""
def __init__(
self,
*,
criterion="squared_error",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
ccp_alpha=0.0,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
ccp_alpha=ccp_alpha,
)
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (real numbers). Use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node.

        check_input : bool, default=True
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : DecisionTreeRegressor
            Fitted estimator.
        """
        # All heavy lifting (validation, tree growing, pruning) happens in the
        # shared BaseDecisionTree.fit; this override exists so the docstring
        # can state the regressor-specific return type.
        super().fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=check_input,
        )
        return self
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order="C")
averaged_predictions = np.zeros(
shape=grid.shape[0], dtype=np.float64, order="C"
)
self.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions
)
return averaged_predictions
    @deprecated(  # type: ignore
        "The attribute `n_features_` is deprecated in 1.0 and will be removed "
        "in 1.2. Use `n_features_in_` instead."
    )
    @property
    def n_features_(self):
        """Deprecated read-only alias for ``n_features_in_``."""
        return self.n_features_in_
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : {"random", "best"}, default="random"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, {"auto", "sqrt", "log2"} or None, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Used to pick randomly the `max_features` used at each split.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
`n_features_` is deprecated in 1.0 and will be removed in
1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
ExtraTreeRegressor : An extremely randomized tree regressor.
sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.
sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.
sklearn.ensemble.RandomForestClassifier : A random forest classifier.
sklearn.ensemble.RandomForestRegressor : A random forest regressor.
sklearn.ensemble.RandomTreesEmbedding : An ensemble of
totally random trees.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import BaggingClassifier
>>> from sklearn.tree import ExtraTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> extra_tree = ExtraTreeClassifier(random_state=0)
>>> cls = BaggingClassifier(extra_tree, random_state=0).fit(
... X_train, y_train)
>>> cls.score(X_test, y_test)
0.8947...
"""
def __init__(
self,
*,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
class_weight=None,
ccp_alpha=0.0,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_decrease=min_impurity_decrease,
random_state=random_state,
ccp_alpha=ccp_alpha,
)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"squared_error", "friedman_mse", "absolute_error", \
        "poisson"}, default="squared_error"
    The function to measure the quality of a split. Supported criteria
    are "squared_error" for the mean squared error, which is equal to
    variance reduction as feature selection criterion, "friedman_mse",
    which uses mean squared error with Friedman's improvement score for
    potential splits, "absolute_error" for the mean absolute error, which
    minimizes the L1 loss using the median of each terminal node, and
    "poisson" which uses reduction in Poisson deviance to find splits.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. versionadded:: 0.24
Poisson deviance criterion.
.. deprecated:: 1.0
Criterion "mse" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="squared_error"` which is equivalent.
.. deprecated:: 1.0
Criterion "mae" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="absolute_error"` which is equivalent.
splitter : {"random", "best"}, default="random"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, {"auto", "sqrt", "log2"} or None, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Used to pick randomly the `max_features` used at each split.
See :term:`Glossary <random_state>` for details.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
max_features_ : int
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
`n_features_` is deprecated in 1.0 and will be removed in
1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
feature_importances_ : ndarray of shape (n_features,)
Return impurity-based feature importances (the higher, the more
important the feature).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
ExtraTreeClassifier : An extremely randomized tree classifier.
sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.
sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import BaggingRegressor
>>> from sklearn.tree import ExtraTreeRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> extra_tree = ExtraTreeRegressor(random_state=0)
>>> reg = BaggingRegressor(extra_tree, random_state=0).fit(
... X_train, y_train)
>>> reg.score(X_test, y_test)
0.33...
"""
def __init__(
self,
*,
criterion="squared_error",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features="auto",
random_state=None,
min_impurity_decrease=0.0,
max_leaf_nodes=None,
ccp_alpha=0.0,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
random_state=random_state,
ccp_alpha=ccp_alpha,
)
| manhhomienbienthuy/scikit-learn | sklearn/tree/_classes.py | Python | bsd-3-clause | 70,831 | [
"Brian"
] | 2fda80a9fe831b44eb9d370abc18612197854841e95d40a6a683beeef5167016 |
__author__ = 'Victoria'
from src.common.Observable import *
from src.game.AttackSpell import *
from random import *
class HeroState(object):
    """Abstract base for the hero's behavioural states.

    Concrete states override the movement/attack hooks below; the defaults
    are deliberate no-ops so a state only reacts to the events it handles.
    """

    def __init__(self, name):
        # Stored for identification; cooperative super() keeps the MRO intact.
        self._name = name
        super(HeroState, self).__init__()

    def north(self):
        pass

    def south(self):
        pass

    def attack(self):
        pass
class RoamState(HeroState):
    """Wandering state: moving in either direction keeps the hero roaming."""

    def north(self):
        # NOTE: stores the *class* (not an instance) as the state marker,
        # mirroring the original behaviour.
        self.currentState = RoamState
        print("In the roam state going north")

    def south(self):
        self.currentState = RoamState
        print("In the roam state going south")
class AttackState(HeroState):
    """Combat state: the only meaningful action here is attacking."""

    def attack(self):
        # Stores the class itself as the marker, like RoamState does.
        self.currentState = AttackState
        print("Attack state")
class Hero(Observable,Observer, AttackSpell):
    """The player character.

    Acts as both an Observable (notifies its observers) and an Observer
    (reacts to notifications), and mixes in AttackSpell for spell types.
    Movement/attack behaviour is delegated to a state object
    (RoamState / AttackState).
    """
    def __init__(self, name):
        super(Hero, self).__init__()
        self._name = name
        self.ready = None          # set True when an action is queued
        self._health = 10
        self._healthMax = 10
        # NOTE(review): the hero instance itself is passed as the state's
        # "name" argument — looks accidental, confirm intent.
        self.roamState = RoamState(self)
        self.attackState = AttackState(self)
        self.currentState = None
        self.setRoam()
        self.notifyObservers()
    def __str__(self):
        return self.__class__.__name__
    def northB(self):
        self.ready = True
        self.currentState.north()
    # NOTE(review): ``south`` is defined twice in this class; this first
    # definition is shadowed by the later one (which does not set ready).
    def south(self):
        self.ready = True
        self.currentState.south()
    def roam(self):
        # Interactive roaming: asks the user for a direction, then has a
        # 50% chance (randint(0,1)) of spawning a monster encounter.
        self.ready = True
        if self.currentState:
            d = input( "What direction would you like to go in north or south? \n")
            if d == "north":
                self.currentState.north()
            if d == 'south':
                self.currentState.south()
            if randint(0,1):
                print ("%s comes across a monster!" % self._name)
                self.monster = Monster(self)
                self.monster.display()
                self.monster.attack()
                self.attacked()
    def attacked(self):
        # Interactive combat. The while/else construct means the else body
        # runs when the loop condition is False from the start (currentState
        # is still roamState, because RoamState inherits a no-op attack()).
        self.ready = True
        self.currentState.attack()
        while self.currentState is not self.roamState:
            # while self.currentState != self.roamState:
            print ("%s is being silly, there is nothing to attack!" % self._name)
            pass
        else:
            self.ready = True
            if self.currentState:
                # NOTE(review): prints the Monster *class*, not an instance.
                print ("How would you like to attack the %s?" % Monster)
                text = input("spell or weapon? \n")
                if text == 'spell':
                    useSpell = input("Would you like to use fireball, thunderbolt, or shield \n")
                    if useSpell == 'fireball':
                        print ("%s attacked the monster with a %s" % (self._name, FireBall()))
                        # NOTE(review): relies on the module-level global
                        # ``monst`` created in __main__, not self.monster —
                        # NameError if called outside the demo script.
                        monst.doDamage()
                        if monst._health >= 0:
                            self.attacked()
    def setRoam(self):
        self.currentState = self.roamState
    def setAttack(self):
        self.currentState = self.attackState
    def northward(self):
        self.currentState.north()
    def south(self):
        self.currentState.south()
    def attacking(self):
        self.currentState.attack()
    def act(self):
        # Perform a queued action; the flag handling means a move consumes
        # readiness before the attack branch is checked.
        if self.ready:
            self.ready = False
            self.northward()
        if self.ready:
            self.attacking()
            self.ready = False
    def accept(self, visitor):
        visitor.visit(self)
    def visit(self, room):
        # Visitor-pattern hook: the hero "visits" a room interactively.
        room.occupy(self)
        print ("Would you like to open the chest?")
        text = input("yes or no \n")
        if text == 'yes':
            room.chest(self)
        if text == 'no':
            pass
        room.attack(self)
        room.chest(self)
        room.getItem(self)
    def healthStatus(self):
        print ("%s's health: %d/%d" % (self._name, self._health, self._healthMax))
    def update(self, Observable):
        # Observer callback — replays movement and attack on notification.
        self.northward()
        self.south()
        self.attacking()
class Room(object):
    """A visitable location.

    Every method only narrates what happened via print(); nothing holds
    state.  ``accept`` is the visitor-pattern entry point.
    """

    def accept(self, visitor):
        """Double-dispatch: hand this room to the visitor."""
        visitor.visit(self)

    def __str__(self):
        return type(self).__name__

    def occupy(self, occupier):
        print("A room is being visited by", occupier)

    def chest(self, opener):
        print(opener,"has opened the chest.")

    def getItem(self, item):
        # NOTE(review): prints the ThunderBolt class object, not an instance.
        print("You have received an %s, from the chest" % ThunderBolt)

    def north(self, north):
        print(self, "is going north in the room", north)

    def attack(self, occupy):
        # NOTE(review): prints the Hero/Monster classes, not the combatants.
        print(Hero,"attacked by", Monster)
# Concrete room variants; they only differ by name (shown via Room.__str__).
class Room1(Room):pass
class Room2(Room): pass
class Visitor:
    """Marker base for visitor objects; renders as the concrete class name."""

    def __str__(self):
        return type(self).__name__
class Monster(Observable, Observer):
    """An enemy actor; like Hero it is both Observable and Observer."""
    def __init__(self, name):
        super(Monster, self).__init__()
        self._name = name
        self.ready = None
        # NOTE(review): starts at 1/10 health — presumably intentional so a
        # single hit can kill it; confirm against game design.
        self._health = 1
        self._healthMax = 10
        self.roamState = RoamState(self)
        self.attackState = AttackState(self)
        self.currentState = None
        self.setRoam()
    def accept(self, visitor):
        visitor.visit(self)
    def visit(self, room):
        # NOTE(review): Room defines no ``monster`` method — this would
        # raise AttributeError if ever called; verify.
        room.monster(self)
        room.occupy(self)
    def roam(self):
        self.ready = True
        # self.currentState.north()
        print ("%s am a monster roaming" % self._name)
        self.notifyObservers()
    def attack(self):
        self.ready = True
        self.currentState.attack()
        print ("%s am a monster attacking youuu!" % self._name)
        #self.doDamage()
    def doDamage(self):
        # Random damage in [0, 2], clamped so health never goes negative
        # from this subtraction alone.
        self.damage = min(
            max(randint(0, 2) - randint(0, 2), 0),
            self._health)
        self._health -= self.damage
        if self.damage == 0:
            print ("monster avoids heros's attack.")
        else:
            print ("hero injures monster!")
            # An extra flat point of damage on any successful hit.
            self._health -= 1
        if self._health <= 0:
            print ("Monster died!")
    def act(self):
        if self.ready:
            self.roam()
            self.ready = False
    def healthStatus(self):
        print ("%s's health: %d/%d" % (self._name, self._health, self._healthMax))
    def update(self, Observable):
        # Observer callback: roam again when notified.
        self.roam()
    def setRoam(self):
        self.currentState = self.roamState
    def northward(self):
        self.currentState.north()
    def setAttack(self):
        self.currentState = self.attackState
    def attacking(self):
        self.currentState.attack()
    def display(self):
        print ("%s am a monster about to attack you!" % self._name)
if __name__ == '__main__':
    # Demo: wire a hero and a monster together as mutual observers, then
    # start an interactive roam.  NOTE(review): Hero.attacked() depends on
    # this module-level ``monst`` global existing.
    hero = Hero('hero')
    room = Room1()
    monst = Monster('monster')
    # state = hero.roamState.attack()
    # state = hero.attackState.attack()
    # state = hero.roamState.north()
    hero.addObserver(monst)
    monst.addObserver(hero)
    # hero.south()
    hero.roam()
    # hero.attacked()
    # hero.attack()
    # monst.roam()
    # room.accept(hero)
    # monst.attack()
    # monst.doDamage()
    # hero.healthStatus()
    # monst.healthStatus()
    # room.attack(monst)
| victorianorton/SimpleRPGGame | src/game/SimpleGame.py | Python | mit | 6,900 | [
"VisIt"
] | 379302933ca97feb3737c645aaa7d8b61656b4750236d001982b93a1522f5696 |
import unittest
import numpy as np
import pysal
#import pysal.spreg as EC
from scipy import sparse
from pysal.contrib.handler import Model
from functools import partial
# Convenience aliases: OLS builds a handler Model pre-configured for OLS;
# PEGP resolves example data files shipped with pysal.
OLS = partial(Model, mtype='OLS')
PEGP = pysal.examples.get_path
class TestOLS(unittest.TestCase):
    """Regression test: OLS on the Columbus example data with a *sparse*
    design matrix, checking a battery of diagnostics against frozen values."""
    def setUp(self):
        # Columbus example: y = home value, X = [income, crime], rook weights.
        db = pysal.open(PEGP('columbus.dbf'),'r')
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.weights.rook_from_shapefile(PEGP("columbus.shp"))
    def test_OLS(self):
        # The point of this test: the estimator must accept a scipy CSR matrix.
        self.X = sparse.csr_matrix(self.X)
        ols = OLS(self.y, self.X, w=self.w, spat_diag=True, moran=True, \
                name_y='home value', name_x=['income','crime'], \
                name_ds='columbus', nonspat_diag=True, white_test=True)
        # All expected values below are frozen outputs from a known-good run.
        np.testing.assert_array_almost_equal(ols.aic, \
                408.73548964604873 ,7)
        np.testing.assert_array_almost_equal(ols.ar2, \
                0.32123239427957662 ,7)
        np.testing.assert_array_almost_equal(ols.betas, \
                np.array([[ 46.42818268], [  0.62898397], \
                [ -0.48488854]]), 7)
        bp = np.array([2, 5.7667905131212587, 0.05594449410070558])
        ols_bp = np.array([ols.breusch_pagan['df'], ols.breusch_pagan['bp'], ols.breusch_pagan['pvalue']])
        np.testing.assert_array_almost_equal(bp, ols_bp, 7)
        np.testing.assert_array_almost_equal(ols.f_stat, \
            (12.358198885356581, 5.0636903313953024e-05), 7)
        jb = np.array([2, 39.706155069114878, 2.387360356860208e-09])
        ols_jb = np.array([ols.jarque_bera['df'], ols.jarque_bera['jb'], ols.jarque_bera['pvalue']])
        np.testing.assert_array_almost_equal(ols_jb,jb, 7)
        white = np.array([5, 2.90606708, 0.71446484])
        ols_white = np.array([ols.white['df'], ols.white['wh'], ols.white['pvalue']])
        np.testing.assert_array_almost_equal(ols_white,white, 7)
        np.testing.assert_equal(ols.k, 3)
        kb = {'df': 2, 'kb': 2.2700383871478675, 'pvalue': 0.32141595215434604}
        for key in kb:
            self.assertAlmostEqual(ols.koenker_bassett[key], kb[key], 7)
        np.testing.assert_array_almost_equal(ols.lm_error, \
            (4.1508117035117893, 0.041614570655392716),7)
        np.testing.assert_array_almost_equal(ols.lm_lag, \
            (0.98279980617162233, 0.32150855529063727), 7)
        np.testing.assert_array_almost_equal(ols.lm_sarma, \
            (4.3222725729143736, 0.11519415308749938), 7)
        np.testing.assert_array_almost_equal(ols.logll, \
            -201.3677448230244 ,7)
        np.testing.assert_array_almost_equal(ols.mean_y, \
            38.436224469387746,7)
        np.testing.assert_array_almost_equal(ols.moran_res[0], \
            0.20373540938,7)
        np.testing.assert_array_almost_equal(ols.moran_res[1], \
            2.59180452208,7)
        np.testing.assert_array_almost_equal(ols.moran_res[2], \
            0.00954740031251,7)
        np.testing.assert_array_almost_equal(ols.mulColli, \
            12.537554873824675 ,7)
        np.testing.assert_equal(ols.n, 49)
        np.testing.assert_equal(ols.name_ds, 'columbus')
        np.testing.assert_equal(ols.name_gwk, None)
        np.testing.assert_equal(ols.name_w, 'unknown')
        np.testing.assert_equal(ols.name_x, ['CONSTANT', 'income', 'crime'])
        np.testing.assert_equal(ols.name_y, 'home value')
        np.testing.assert_array_almost_equal(ols.predy[3], np.array([
            33.53969014]),7)
        np.testing.assert_array_almost_equal(ols.r2, \
            0.34951437785126105 ,7)
        np.testing.assert_array_almost_equal(ols.rlm_error, \
            (3.3394727667427513, 0.067636278225568919),7)
        np.testing.assert_array_almost_equal(ols.rlm_lag, \
            (0.17146086940258459, 0.67881673703455414), 7)
        np.testing.assert_equal(ols.robust, 'unadjusted')
        np.testing.assert_array_almost_equal(ols.schwarz, \
            414.41095054038061,7 )
        np.testing.assert_array_almost_equal(ols.sig2, \
            231.4568494392652,7 )
        np.testing.assert_array_almost_equal(ols.sig2ML, \
            217.28602192257551,7 )
        np.testing.assert_array_almost_equal(ols.sig2n, \
            217.28602192257551, 7)
        np.testing.assert_array_almost_equal(ols.t_stat[2][0], \
            -2.65440864272,7)
        np.testing.assert_array_almost_equal(ols.t_stat[2][1], \
            0.0108745049098,7)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| TaylorOshan/pysal | pysal/contrib/handler/tests/test_ols_sparse.py | Python | bsd-3-clause | 4,665 | [
"COLUMBUS"
] | 59398de822e1743ff5d60df9141818317850823e328d3e8c531e9f7c7af62340 |
# -*- coding: utf-8 -*-
# 145. Binary Tree postorder Traversal
#
# Given a binary tree, return the postorder traversal of its nodes' values.
#
# For example:
# Given binary tree {1,#,2,3},
# 1
# \
# 2
# /
# 3
# return [3,2,1].
#
# Note: Recursive solution is trivial, could you do it iteratively?
#
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    # recursive
    def postorderTraversal(self, root):
        """Return the post-order traversal of the tree rooted at *root*.

        :type root: TreeNode
        :rtype: List[int]
        """
        res = []
        self.postorder(root, res)
        return res

    def postorder(self, root, values):
        """Append the values of *root*'s subtree to *values* in post-order
        (left, right, node).  A None root contributes nothing."""
        if root:
            self.postorder(root.left, values)
            self.postorder(root.right, values)
            values.append(root.val)

    # iterative
    # Each node is pushed twice with a label: 'visit' the first time (its
    # children still need traversing) and 'get' the second time (both
    # subtrees are done, so its value may be emitted).  Pushing
    # (node, 'get'), then right, then left means the stack pops in
    # left-subtree, right-subtree, node order — exactly post-order.
    def postorderTraversal2(self, root):
        """Iterative post-order traversal using an explicit labelled stack.

        :type root: TreeNode
        :rtype: List[int]
        """
        if root is None:
            # Bug fix: the original pushed (None, 'visit') and crashed with
            # AttributeError on an empty tree.
            return []
        result = []  # List[int] stores tree node values
        stack = [(root, 'visit')]  # List[(TreeNode, str)]
        while stack:
            node, label = stack.pop()
            if label == 'visit':
                stack.append((node, 'get'))
                if node.right:
                    stack.append((node.right, 'visit'))
                if node.left:
                    stack.append((node.left, 'visit'))
            else:
                result.append(node.val)
        return result
if __name__ == '__main__':
    # Build the sample tree:   5
    #                         / \
    #                       4     8
    #                      /
    #                    11
    #                      \
    #                       2
    root = TreeNode(5)
    root.left = TreeNode(4)
    root.right = TreeNode(8)
    root.left.left = TreeNode(11)
    root.left.left.right = TreeNode(2)
    # Fix: print() calls (single argument) work on both Python 2 and 3;
    # the original Python-2-only print statements were a SyntaxError on 3.
    print(Solution().postorderTraversal(root))
    print(Solution().postorderTraversal2(root))
"VisIt"
] | 335c899e3f0f6b8893aeb11364d41e8e26a7e93ff3b71cfa552232ea2e5e959b |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
import json
from io import open
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
    """Checks Kpoint's fractional/cartesian coordinate accessors and label."""
    def setUp(self):
        # A labelled k-point in a 10 Å cubic lattice.
        self.lattice = Lattice.cubic(10.0)
        self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")
    def test_properties(self):
        self.assertEqual(self.kpoint.frac_coords[0], 0.1)
        self.assertEqual(self.kpoint.frac_coords[1], 0.4)
        self.assertEqual(self.kpoint.frac_coords[2], -0.5)
        # a/b/c are shorthand for the three fractional coordinates.
        self.assertEqual(self.kpoint.a, 0.1)
        self.assertEqual(self.kpoint.b, 0.4)
        self.assertEqual(self.kpoint.c, -0.5)
        self.assertEqual(self.lattice, Lattice.cubic(10.0))
        # Cartesian coords are fractional coords scaled by the 10 Å lattice.
        self.assertEqual(self.kpoint.cart_coords[0], 1.0)
        self.assertEqual(self.kpoint.cart_coords[1], 4.0)
        self.assertEqual(self.kpoint.cart_coords[2], -5.0)
        self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLine_test(unittest.TestCase):
    """Round-trips serialized band structures (Cu2O, CaO, NiO) from JSON and
    checks band/projection data, band edges and gaps against frozen values.

    NOTE(review): setUp itself performs assertions (on the Cu2O projections)
    before rebinding self.bs to the CaO data — a setUp failure will surface
    as an error in every test; consider moving those into a test method.
    """
    def setUp(self):
        with open(os.path.join(test_dir, "Cu2O_361_bandstructure.json"),
                  "r", encoding='utf-8') as f:
            d = json.load(f)
        self.bs = BandStructureSymmLine.from_dict(d)
        self.assertListEqual(self.bs._projections[Spin.up][10][12][Orbital.s], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "wrong projections")
        self.assertListEqual(self.bs._projections[Spin.up][25][0][Orbital.dyz], [0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069], "wrong projections")
        self.assertAlmostEqual(self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
        self.assertAlmostEqual(self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
        self.assertAlmostEqual(self.bs.get_projections_on_elts_and_orbitals({'Cu':['s','d']})[Spin.up][25][0]['Cu']['s'], 0.0027)
        self.assertAlmostEqual(self.bs.get_projections_on_elts_and_orbitals({'Cu':['s','d']})[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
        # CaO: non-spin-polarized case used by most of the tests below.
        with open(os.path.join(test_dir, "CaO_2605_bandstructure.json"), "r",
                  encoding='utf-8') as f:
            d = json.load(f)
        #print d.keys()
        self.bs = BandStructureSymmLine.from_dict(d)
        #print self.bs.as_dict().keys()
        #this doesn't really test as_dict() -> from_dict very well
        #self.assertEqual(self.bs.as_dict().keys(), d.keys())
        self.one_kpoint = self.bs.kpoints[31]
        self.assertEqual(self.bs._nb_bands, 16)
        self.assertAlmostEqual(self.bs._bands[Spin.up][5][10], 0.5608)
        self.assertAlmostEqual(self.bs._bands[Spin.up][5][10], 0.5608)
        self.assertEqual(self.bs._branches[5]['name'], "L-U")
        self.assertEqual(self.bs._branches[5]['start_index'], 80)
        self.assertEqual(self.bs._branches[5]['end_index'], 95)
        self.assertAlmostEqual(self.bs._distance[70], 4.2335127528765737)
        # NiO: spin-polarized case.
        with open(os.path.join(test_dir, "NiO_19009_bandstructure.json"),
                  "r", encoding='utf-8') as f:
            d = json.load(f)
        self.bs_spin = BandStructureSymmLine.from_dict(d)
        #this doesn't really test as_dict() -> from_dict very well
        #self.assertEqual(self.bs_spin.as_dict().keys(), d.keys())
        self.assertEqual(self.bs_spin._nb_bands, 27)
        self.assertAlmostEqual(self.bs_spin._bands[Spin.up][5][10], 0.262)
        self.assertAlmostEqual(self.bs_spin._bands[Spin.down][5][10],
                               1.6156)
    def test_properties(self):
        self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
        self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
        self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
        self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
        self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
        self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
        self.assertEqual(self.one_kpoint.label, "W")
        self.assertAlmostEqual(self.bs.efermi, 2.6211967, "wrong fermi energy")
    def test_get_branch(self):
        self.assertAlmostEqual(self.bs.get_branch(110)[0]['name'], "U-W")
    def test_is_metal(self):
        # Both CaO and NiO are insulators.
        self.assertFalse(self.bs.is_metal(), "wrong metal assignment")
        self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
    def test_get_cbm(self):
        cbm = self.bs.get_cbm()
        self.assertAlmostEqual(cbm['energy'], 5.8709, "wrong CBM energy")
        self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
        self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
        self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
        cbm_spin = self.bs_spin.get_cbm()
        self.assertAlmostEqual(cbm_spin['energy'], 8.0458, "wrong CBM energy")
        self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
        self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
        self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
        self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].label, "\Gamma", "wrong CBM kpoint label")
    def test_get_vbm(self):
        vbm = self.bs.get_vbm()
        self.assertAlmostEqual(vbm['energy'], 2.2361, "wrong VBM energy")
        self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
        self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
        self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
        self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm['kpoint'].label, "\Gamma", "wrong VBM kpoint label")
        vbm_spin = self.bs_spin.get_vbm()
        self.assertAlmostEqual(vbm_spin['energy'], 5.731, "wrong VBM energy")
        self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
        self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
        self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
        self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
        self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
    def test_get_band_gap(self):
        bg = self.bs.get_band_gap()
        self.assertAlmostEqual(bg['energy'], 3.6348, "wrong gap energy")
        self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
        self.assertFalse(bg['direct'], "wrong nature of the gap")
        bg_spin = self.bs_spin.get_band_gap()
        self.assertAlmostEqual(bg_spin['energy'], 2.3148, "wrong gap energy")
        self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
        self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| migueldiascosta/pymatgen | pymatgen/electronic_structure/tests/test_bandstructure.py | Python | mit | 8,456 | [
"pymatgen"
] | da4b07b57b3835560d36dce513ffdc2371cc858a3089a97385756b0799fc08ab |
class node(object):
    """
    A node in the AST built while parsing command lines.

    Attributes are free-form (populated from keyword arguments); only
    ``kind`` is mandatory.  The specialised repr makes parser debugging
    a little easier.
    """
    def __init__(self, **kwargs):
        assert 'kind' in kwargs
        self.__dict__.update(kwargs)

    def dump(self, indent='  '):
        return _dump(self, indent)

    def __repr__(self):
        # Everything except 'kind' is rendered as key=value, keys sorted.
        attrs = {k: v for k, v in self.__dict__.items() if k != 'kind'}
        body = ' '.join('%s=%r' % (k, attrs[k]) for k in sorted(attrs))
        return '%sNode(%s)' % (self.kind.title(), body)

    def __eq__(self, other):
        return isinstance(other, node) and self.__dict__ == other.__dict__

    def __hash__(self):
        # Hash is derived from the attribute *names* only, matching the
        # original implementation.
        return hash(tuple(sorted(self.__dict__)))
class nodevisitor(object):
    """Base visitor for the bash AST.

    ``visit`` walks a node recursively, dispatching each node to the
    matching ``visit<kind>`` hook via ``_visitnode``.  For container kinds,
    a hook may return a falsy (non-None) value to prune the walk below
    that node; ``visitnodeend`` fires after a node's subtree is done.
    Subclasses override only the hooks they care about.
    """
    def _visitnode(self, n, *args, **kwargs):
        # Generic pre-hook, then kind-specific dispatch (e.g. visitword).
        k = n.kind
        self.visitnode(n)
        return getattr(self, 'visit%s' % k)(n, *args, **kwargs)
    def visit(self, n):
        k = n.kind
        if k == 'operator':
            self._visitnode(n, n.op)
        elif k == 'list':
            dochild = self._visitnode(n, n.parts)
            # None (the default hook return) means "do descend".
            if dochild is None or dochild:
                for child in n.parts:
                    self.visit(child)
        elif k == 'reservedword':
            self._visitnode(n, n.word)
        elif k == 'pipe':
            self._visitnode(n, n.pipe)
        elif k == 'pipeline':
            dochild = self._visitnode(n, n.parts)
            if dochild is None or dochild:
                for child in n.parts:
                    self.visit(child)
        elif k == 'compound':
            dochild = self._visitnode(n, n.list, n.redirects)
            if dochild is None or dochild:
                for child in n.list:
                    self.visit(child)
                for child in n.redirects:
                    self.visit(child)
        elif k in ('if', 'for', 'while', 'until'):
            # All four control structures share the generic parts walk.
            dochild = self._visitnode(n, n.parts)
            if dochild is None or dochild:
                for child in n.parts:
                    self.visit(child)
        elif k == 'command':
            dochild = self._visitnode(n, n.parts)
            if dochild is None or dochild:
                for child in n.parts:
                    self.visit(child)
        elif k == 'function':
            dochild = self._visitnode(n, n.name, n.body, n.parts)
            if dochild is None or dochild:
                for child in n.parts:
                    self.visit(child)
        elif k == 'redirect':
            dochild = self._visitnode(n, n.input, n.type, n.output, n.heredoc)
            if dochild is None or dochild:
                # Output may be a plain fd (int) or a word node.
                if isinstance(n.output, node):
                    self.visit(n.output)
                if n.heredoc:
                    self.visit(n.heredoc)
        elif k in ('word', 'assignment'):
            dochild = self._visitnode(n, n.word)
            if dochild is None or dochild:
                for child in n.parts:
                    self.visit(child)
        elif k in ('parameter', 'tilde', 'heredoc'):
            self._visitnode(n, n.value)
        elif k in ('commandsubstitution', 'processsubstitution'):
            dochild = self._visitnode(n, n.command)
            if dochild is None or dochild:
                self.visit(n.command)
        else:
            raise ValueError('unknown node kind %r' % k)
        self.visitnodeend(n)
    def visitnode(self, n):
        pass
    def visitnodeend(self, n):
        pass
    def visitoperator(self, n, op):
        pass
    def visitlist(self, n, parts):
        pass
    def visitpipe(self, n, pipe):
        pass
    def visitpipeline(self, n, parts):
        pass
    def visitcompound(self, n, list, redirects):
        pass
    def visitif(self, node, parts):
        pass
    def visitfor(self, node, parts):
        pass
    def visitwhile(self, node, parts):
        pass
    def visituntil(self, node, parts):
        pass
    def visitcommand(self, n, parts):
        pass
    def visitfunction(self, n, name, body, parts):
        pass
    def visitword(self, n, word):
        pass
    def visitassignment(self, n, word):
        pass
    def visitreservedword(self, n, word):
        pass
    def visitparameter(self, n, value):
        pass
    def visittilde(self, n, value):
        pass
    def visitredirect(self, n, input, type, output, heredoc):
        pass
    def visitheredoc(self, n, value):
        pass
    def visitprocesssubstitution(self, n, command):
        pass
    def visitcommandsubstitution(self, n, command):
        pass
def _dump(tree, indent='  '):
    """Pretty-print an AST rooted at *tree* as an indented string.

    Raises TypeError when *tree* is not a node.  Used by node.dump().
    """
    def _format(n, level=0):
        if isinstance(n, node):
            d = dict(n.__dict__)
            kind = d.pop('kind')
            # Nested lists get one extra indentation level.
            if kind == 'list' and level > 0:
                level = level + 1
            fields = []
            # The source snippet 's' is always shown first when present.
            v = d.pop('s', None)
            if v:
                fields.append(('s', _format(v, level)))
            for k, v in sorted(d.items()):
                # Falsy values and 'parts' (handled last) are skipped here.
                if not v or k == 'parts':
                    continue
                llevel = level
                if isinstance(v, node):
                    llevel += 1
                    fields.append((k, '\n' + (indent * llevel) + _format(v, llevel)))
                else:
                    fields.append((k, _format(v, level)))
            if kind == 'function':
                # name/body duplicate information already present in parts.
                fields = [f for f in fields if f[0] not in ('name', 'body')]
            v = d.pop('parts', None)
            if v:
                fields.append(('parts', _format(v, level)))
            return ''.join([
                '%sNode' % kind.title(),
                '(',
                ', '.join(('%s=%s' % field for field in fields)),
                ')'])
        elif isinstance(n, list):
            lines = ['[']
            lines.extend((indent * (level + 1) + _format(x, level + 1) + ','
                          for x in n))
            # Close the bracket on its own line only for non-empty lists.
            if len(lines) > 1:
                lines.append(indent * (level) + ']')
            else:
                lines[-1] += ']'
            return '\n'.join(lines)
        return repr(n)
    if not isinstance(tree, node):
        raise TypeError('expected node, got %r' % tree.__class__.__name__)
    return _format(tree)
def findfirstkind(parts, kind):
    """Return the index of the first node in *parts* whose kind is *kind*.

    Returns -1 when no node matches (or *parts* is empty).
    """
    # Fix: the original loop variable was named ``node``, shadowing the
    # module-level ``node`` class.
    for index, part in enumerate(parts):
        if part.kind == kind:
            return index
    return -1
class posconverter(nodevisitor):
    """Walks the tree and replaces each node's (start, end) ``pos`` span
    with ``s``, the corresponding substring of the original input."""

    def __init__(self, string):
        self.string = string

    def visitnode(self, node):
        assert hasattr(node, 'pos'), 'node %r is missing pos attr' % node
        begin, finish = node.__dict__.pop('pos')
        node.s = self.string[begin:finish]
class posshifter(nodevisitor):
    """Walks the tree and shifts every node's (start, end) position by a
    fixed offset."""

    def __init__(self, count):
        self.count = count

    def visitnode(self, node):
        offset = self.count
        node.pos = (node.pos[0] + offset, node.pos[1] + offset)
"VisIt"
] | 24ae9d556f9d8afc6e1339366f7cf62745465ac0bfccccf96ef211f49ea16ecf |
import numpy as np
from mpl_toolkits.basemap import pyproj
from datetime import datetime
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
class BGrid_POP(object):
    """
    BGrid object for POP

    Holds the horizontal/vertical grid of a POP B-grid over a requested
    (xrange, yrange) subdomain and derives land/sea masks from the depth
    fields.  Depths are divided by 100 — presumably a cm -> m conversion
    (POP convention); confirm against the source files.
    """
    def __init__(self, lon_t, lat_t, lon_u, lat_u, angle, h_t, h_u, z_t, z_w, name, xrange, yrange):
        self.name = name
        self.xrange = xrange
        self.yrange = yrange
        # Subset all 2-D fields to the requested (inclusive) index ranges.
        self.lon_t = lon_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
        self.lat_t = lat_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
        self.lon_u = lon_u[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
        self.lat_u = lat_u[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
        self.angle = angle[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
        self.h_t = h_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1] / 100.
        self.h_u = h_u[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1] / 100.
        # Broadcast the 1-D vertical levels to full 3-D (z, y, x) arrays.
        self.z_t = np.tile(z_t,(self.h_t.shape[1],self.h_t.shape[0],1)).T / 100.
        self.z_w = np.tile(z_w,(self.h_t.shape[1],self.h_t.shape[0],1)).T / 100.
        # Cell-corner (vertex) coordinates come from the staggered grid,
        # offset by one index relative to the cell centers.
        self.lon_t_vert = lon_u[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1]
        self.lat_t_vert = lat_u[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1]
        self.lon_u_vert = lon_t[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2]
        self.lat_u_vert = lat_t[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2]
        # compute the mask at t point from h_t: every level at or below the
        # local bottom depth is flagged as land (0).
        self.mask_t = np.ones(self.z_t.shape)
        for n in range(self.z_t.shape[0]):
            depth = self.z_w[n,0,0]
            rtol=1e-6
            midx = np.where(np.abs(self.h_t - depth) <= rtol * np.abs(depth))
            self.mask_t[n:,midx[0],midx[1]] = 0.
        # compute the mask at u point from h_u (same scheme as above)
        self.mask_u = np.ones(self.z_t.shape)
        for n in range(self.z_t.shape[0]):
            depth = self.z_w[n,0,0]
            rtol=1e-6
            midx = np.where(np.abs(self.h_u - depth) <= rtol * np.abs(depth))
            self.mask_u[n:,midx[0],midx[1]] = 0.
| dcherian/pyroms | pyroms_toolbox/pyroms_toolbox/BGrid_POP/BGrid_POP.py | Python | bsd-3-clause | 2,079 | [
"NetCDF"
] | bbd932cfc67078858e8c42297cbb9ed3676cfb4cde36834a8e22732bb893e3b0 |
#!/usr/bin/env python
#
# A basic functional test of the total impact API
#
import urllib2
import urllib
import json
import time
import sys
import pickle
from pprint import pprint
from optparse import OptionParser
# Gold reference data keyed by (namespace, id) alias.  Each entry lists the
# alias/biblio keys the API must return and, for metrics, the *minimum*
# expected value (live counts only grow over time).
TEST_ITEMS = {
    ('doi', '10.1371/journal.pcbi.1000361') :
        {
            'aliases': ['doi', "title", "url"],
            'biblio': [u'authors', u'journal', u'year', u'title'],
            'metrics' : {
                'wikipedia:mentions' : 1,
                u'plosalm:crossref': 133,
                'plosalm:html_views': 17455,
                'plosalm:pdf_views': 2106,
                u'plosalm:pmc_abstract': 19,
                u'plosalm:pmc_figure': 71,
                u'plosalm:pmc_full-text': 1092,
                u'plosalm:pmc_pdf': 419,
                u'plosalm:pmc_supp-data': 157,
                u'plosalm:pmc_unique-ip': 963,
                u'plosalm:pubmed_central': 102,
                u'plosalm:scopus': 218
            }
        },
    ('url', 'http://total-impact.org/') : #note trailing slash
        {
            'aliases': ["url"],
            'biblio': ['title'],
            'metrics' : {
                'delicious:bookmarks' : 65
            }
        },
    ('url', 'http://total-impact.org'): #no trailing slash
        {
            'aliases': ["url"],
            'biblio': ['title'],
            'metrics' : {
                'topsy:tweets' : 282,
                'topsy:influential_tweets' : 26
            }
        },
    ('doi', '10.5061/dryad.18') :
        {
            'aliases': ['doi', 'url', 'title'],
            'biblio': [u'authors', u'year', u'repository', u'title'],
            'metrics' : {
                'dryad:most_downloaded_file' : 63,
                'dryad:package_views' : 149,
                'dryad:total_downloads' : 169
            }
        },
    ('github', 'egonw,cdk') :
        {
            'aliases': ['github', 'url', 'title'],
            'biblio': [u'last_push_date', u'create_date', u'description', u'title', u'url', u'owner', 'h1'],
            'metrics' : {
                'github:forks' : 27,
                'github:watchers' : 31
            }
        },
    ('url', 'http://nescent.org/'):
        {
            'aliases': ['url'],
            'biblio': [u'title', "h1"],
            'metrics' : {}
        },
    ('url', 'http://www.slideshare.net/cavlec/manufacturing-serendipity-12176916') :
        {
            'aliases' : ['url', 'title'],
            'biblio': [u'username', u'repository', u'created', u'h1', u'genre', u'title'],
            'metrics' : {
                'slideshare:downloads' : 4,
                'slideshare:views' : 337,
                'slideshare:favorites' : 2
            }
        }
    }
class TotalImpactAPI:
    """Thin HTTP client for a locally running total-impact server.

    NOTE(review): uses Python-2-only urllib/urllib2 APIs; porting to py3
    would require urllib.request/urllib.parse.
    """
    base_url = 'http://localhost:5001/'

    def request_item(self, alias):
        """ Attempt to obtain an item from the server using the given
            namespace and namespace id. For example, 
            namespace = 'pubmed', nid = '234234232'
            Will request the item related to pubmed item 234234232
        """
        (namespace, nid) = alias
        url = self.base_url + urllib.quote('item/%s/%s' % (namespace, nid))
        req = urllib2.Request(url)
        data = {} # fake a POST
        response = urllib2.urlopen(req, data)
        tiid = json.loads(urllib.unquote(response.read()))
        print "tiid %s for %s" %(tiid, alias)
        return tiid

    def request_item_result(self, item_id):
        # GET the current state of a previously requested item (by tiid).
        url = self.base_url + urllib.quote('item/%s' % (item_id))
        req = urllib2.Request(url)
        response = urllib2.urlopen(req)
        return json.loads(response.read())
def checkItem(item, data, alias, items_for_use, options):
    """Validate every section of one item's API response.

    Returns True only when the biblio, aliases and metrics sections all
    match the gold data for *alias*.
    """
    if options.debug:
        # Fix: print() call (single argument) works on both Python 2 and 3;
        # the original used a Python-2-only print statement.
        print("Checking %s result (%s)..." % (alias, item))
    success = True
    for section in ["biblio", "aliases", "metrics"]:
        result = checkItemSection(alias,
                                  item,
                                  section,
                                  data[section],
                                  items_for_use[alias],
                                  options)
        if not result:
            success = False
    return success
def checkItemSection(alias, id, section, api_response, gold_item, options):
    """Check one section of an item's API response against the gold data.

    section is 'aliases', 'biblio' or 'metrics'; returns True on a match.
    Metric values must be *at least* the gold value (live counts only grow).
    """
    success = True
    if options.debug:
        print("Checking %s result (%s)..." % (alias, id))

    # Check aliases are correct
    if section == "aliases":
        gold_aliases = gold_item['aliases']
        alias_result = set(api_response.keys())
        # The API adds two bookkeeping keys on top of the gold aliases.
        expected_result = set(gold_aliases + [u'last_modified', u'created'])
        if alias_result == expected_result:
            if options.debug:
                print("ALIASES CORRECT! %s" % (alias_result,))
        else:
            if options.debug:
                print("ALIASES **NOT** CORRECT, for %s, %s, have %s, want %s"
                      % (alias, id, alias_result, expected_result))
            success = False

    # Check biblio are correct
    elif section == "biblio":
        gold_biblio = gold_item['biblio']
        biblio_result = set(api_response.keys()) if api_response else set()
        expected_result = set(gold_biblio + ['genre'])
        if biblio_result == expected_result:
            if options.debug:
                print("BIBLIO CORRECT! %s" % (biblio_result,))
        else:
            if options.debug:
                print("BIBLIO **NOT** CORRECT, have %s, want %s"
                      % (biblio_result, expected_result))
            success = False

    # Check we've got some metric values
    elif section == "metrics":
        gold_metrics = gold_item['metrics']
        for metric in gold_metrics.keys():
            try:
                # First provider value for this metric; next(iter(...))
                # works on both py2 and py3 (dict.values()[0] does not).
                metric_data = next(iter(api_response[metric].values()))
            except KeyError:
                # didn't return anything. problem!
                if options.debug:
                    print("METRICS **NOT** CORRECT for %s: metric missing" % (metric,))
                success = False
            # expect the returned value to be equal or larger than reference
            if success:
                # Bug fix: compare against this metric's gold value; the
                # original compared against the whole gold_metrics dict.
                if metric_data >= gold_metrics[metric]:
                    if options.debug:
                        print("METRICS CORRECT! %s" % (metric_data,))
                else:
                    if options.debug:
                        print("METRICS **NOT** CORRECT for %s - %s, expected at least %s"
                              % (metric, metric_data, gold_metrics[metric]))
                    return False

    if options.debug:
        print()  # blank line
    return success
if __name__ == '__main__':
    # Command-line driver: queue N copies of each test item on the server,
    # then poll until every response validates against the gold data.
    parser = OptionParser()
    parser.add_option("-n", "--numrepeats", dest="numrepeats",
                      default=1, help="Number of repeated requests to make")
    parser.add_option("-i", "--items", dest="numdiverseitems",
                      default=999,
                      help="Number of diverse items to use (up to max defined)")
    parser.add_option("-m", "--missing", dest="missing",
                      default=False, action="store_true",
                      help="Display any outstanding items")
    parser.add_option("-p", "--printdata", dest="printdata",
                      default=False, action="store_true", help="Display item data")
    parser.add_option("-v", "--verbose", dest="debug",
                      default=False, action="store_true", help="Display verbose debug data")
    (options, args) = parser.parse_args()
    item_count = int(options.numrepeats)
    num_diverse_items = min(len(TEST_ITEMS), int(options.numdiverseitems))
    aliases = TEST_ITEMS.keys()[0:num_diverse_items]
    items_for_use = dict((alias, TEST_ITEMS[alias]) for alias in aliases)
    ti = TotalImpactAPI()
    complete = {}
    itemid = {}
    for alias in aliases:
        complete[alias] = {}
        itemid[alias] = {}
        for idx in range(item_count):
            # Request the items to be generated
            itemid[alias][idx] = ti.request_item(alias)
            complete[alias][idx] = False
    # Poll loop: re-check every incomplete item until all pass, then exit 0.
    while True:
        for idx in range(item_count):
            for alias in aliases:
                if not complete[alias][idx]:
                    if options.missing:
                        print alias, idx, itemid[alias][idx]
                    itemdata = ti.request_item_result(itemid[alias][idx])
                    complete[alias][idx] = checkItem(
                        itemid[alias][idx],
                        itemdata,
                        alias,
                        items_for_use,
                        options
                        )
                    if complete[alias][idx] and options.printdata:
                        pprint(itemdata)
        total = sum([sum(complete[alias].values()) for alias in aliases])
        print "%i of %i responses are complete" %(total, item_count * len(aliases))
        if total == item_count * len(aliases):
            sys.exit(0)
        time.sleep(0.5)
| total-impact/total-impact-core | extras/functional_tests/functional_test.py | Python | mit | 8,918 | [
"CDK"
] | 4fd538a0e0307610a6776d870f82d75c7db7487ccd5e49c5c0f8f50375e67f09 |
"""
Scematic Diagram of PCA
-----------------------
Figure 7.2
A distribution of points drawn from a bivariate Gaussian and centered on the
origin of x and y. PCA defines a rotation such that the new axes (x' and y')
are aligned along the directions of maximal variance (the principal components)
with zero covariance. This is equivalent to minimizing the square of the
perpendicular distances between the points and the principal components.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set parameters and draw the random sample
np.random.seed(42)
r = 0.9
sigma1 = 0.25
sigma2 = 0.08
rotation = np.pi / 6
s = np.sin(rotation)
c = np.cos(rotation)
X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T
R = np.array([[c, -s],
[s, c]])
X = np.dot(R, X)
#------------------------------------------------------------
# Plot the diagram
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)
# draw axes
ax.annotate(r'$x$', (-r, 0), (r, 0),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
ax.annotate(r'$y$', (0, -r), (0, r),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
# draw rotated axes
ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
# scatter points
ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2)
# draw lines
vnorm = np.array([s, -c])
for v in (X.T):
d = np.dot(v, vnorm)
v1 = v - d * vnorm
ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k')
# draw ellipses
for sigma in (1, 2, 3):
ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2,
rotation * 180. / np.pi,
ec='k', fc='gray', alpha=0.2, zorder=1))
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.show()
| nhuntwalker/astroML | book_figures/chapter7/fig_PCA_rotation.py | Python | bsd-2-clause | 3,000 | [
"Gaussian"
] | 00be0b21cebafa6d7b0a179efd5c90daca6e4f7f80c04aa20e1a7d664cff0d0c |
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import urllib
import copy
import itertools
import mask
import os
from collections import defaultdict
class COCO:
    """API for loading, indexing and visualizing a COCO annotation file.

    NOTE: this is Python 2 code (print statements, `unicode`, integer
    division in showAnns); it will not run unmodified under Python 3.
    """
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if not annotation_file == None:
            print 'loading annotations into memory...'
            tic = time.time()
            dataset = json.load(open(annotation_file, 'r'))
            assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset))
            print 'Done (t=%0.2fs)'%(time.time()- tic)
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        # create index: id -> object lookup tables plus the image->anns and
        # category->images reverse indices derived from the raw dataset dict.
        print 'creating index...'
        anns,cats,imgs = dict(),dict(),dict()
        imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann

        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img

        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
            # NOTE(review): this branch assumes 'annotations' is present
            # whenever 'categories' is -- confirm for category-only files.
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])

        print 'index created!'

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def info(self):
        """
        Print information about the annotation file.
        :return:
        """
        for key, value in self.dataset['info'].items():
            print '%s: %s'%(key, value)

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds  (int array)     : get anns for given imgs
               catIds  (int array)     : get anns for given cats
               areaRng (float array)   : get anns for given area range (e.g. [0 inf])
               iscrowd (boolean)       : get anns for given crowd label (False or True)
        :return: ids (int array)       : integer array of ann ids
        """
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        catIds = catIds if type(catIds) == list else [catIds]

        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            if not len(imgIds) == 0:
                # only annotations for images that are actually indexed
                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            anns = anns if len(catIds)  == 0 else [ann for ann in anns if ann['category_id'] in catIds]
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        if not iscrowd == None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array)  : get cats for given cat names
        :param supNms (str array)  : get cats for given supercategory names
        :param catIds (int array)  : get cats for given cat ids
        :return: ids (int array)   : integer array of cat ids
        """
        catNms = catNms if type(catNms) == list else [catNms]
        supNms = supNms if type(supNms) == list else [supNms]
        catIds = catIds if type(catIds) == list else [catIds]

        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            cats = self.dataset['categories']
            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name']          in catNms]
            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id']            in catIds]
        ids = [cat['id'] for cat in cats]
        return ids

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        catIds = catIds if type(catIds) == list else [catIds]

        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            # intersect with each category's image set (image must have ALL cats)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if type(ids) == list:
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array)       : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if type(ids) == list:
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    def loadImgs(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if type(ids) == list:
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception("datasetType not supported")
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                # one random (lightened) color per annotation
                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon
                        for seg in ann['segmentation']:
                            # NOTE: len(seg)/2 relies on Python 2 integer division
                            poly = np.array(seg).reshape((len(seg)/2, 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask
                        t = self.imgs[ann['image_id']]
                        if type(ann['segmentation']['counts']) == list:
                            rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                        else:
                            rle = [ann['segmentation']]
                        m = mask.decode(rle)
                        img = np.ones( (m.shape[0], m.shape[1], 3) )
                        if ann['iscrowd'] == 1:
                            color_mask = np.array([2.0,166.0,101.0])/255
                        if ann['iscrowd'] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:,:,i] = color_mask[i]
                        ax.imshow(np.dstack( (img, m*0.5) ))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk]>0):
                            plt.plot(x[sk],y[sk], linewidth=3, color=c)
                    plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
                    plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print ann['caption']

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param   resFile (str)     : file name of result file
        :return: res (obj)         : result api object
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]

        print 'Loading and preparing results...     '
        tic = time.time()
        if type(resFile) == str or type(resFile) == unicode:
            anns = json.load(open(resFile))
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
            'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            # keep only images that actually have result captions
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id+1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                if not 'segmentation' in ann:
                    # synthesize a rectangular polygon from the bbox
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation results
                ann['area'] = mask.area([ann['segmentation']])[0]
                if not 'bbox' in ann:
                    ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                x = s[0::3]
                y = s[1::3]
                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1-x0)*(y1-y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0,y0,x1-x0,y1-y0]
        print 'DONE (t=%0.2fs)'%(time.time()- tic)

        res.dataset['annotations'] = anns
        res.createIndex()
        return res

    def download( self, tarDir = None, imgIds = [] ):
        '''
        Download COCO images from mscoco.org server.
        :param tarDir (str): COCO results directory name
               imgIds (list): images to be downloaded
        :return:
        '''
        if tarDir is None:
            print 'Please specify target directory'
            return -1
        if len(imgIds) == 0:
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            # skip files that are already present
            if not os.path.exists(fname):
                urllib.urlretrieve(img['coco_url'], fname)
            print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)

    def loadNumpyAnnotations(self, data):
        """
        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
        :param  data (numpy.ndarray)
        :return: annotations (python nested list)
        """
        print("Converting ndarray to lists...")
        assert(type(data) == np.ndarray)
        print(data.shape)
        assert(data.shape[1] == 7)
        N = data.shape[0]
        ann = []
        for i in range(N):
            if i % 1000000 == 0:
                print("%d/%d" % (i,N))
            ann += [{
                'image_id'  : int(data[i, 0]),
                'bbox'  : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
                'score' : data[i, 5],
                'category_id': int(data[i, 6]),
                }]
        return ann
| wenhuchen/ETHZ-Bootstrapped-Captioning | visual-concepts/coco/PythonAPI/pycocotools/coco.py | Python | bsd-3-clause | 16,953 | [
"VisIt"
] | 6cf0c67f60cb87372e2944a9d1199eff9b1bad66271c8c48ce520a7181decbe5 |
"""
Acceptance tests for Home Page (My Courses / My Libraries).
"""
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import LibraryLocator
from unittest import skip
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.index import DashboardPage
class CreateLibraryTest(WebAppTest):
    """
    Test that we can create a new content library on the studio home page.
    """

    def setUp(self):
        """
        Log in as staff and prepare the dashboard (home) page object.
        """
        super(CreateLibraryTest, self).setUp()
        self.auth_page = AutoAuthPage(self.browser, staff=True)
        self.dashboard_page = DashboardPage(self.browser)

    def test_create_library(self):
        """
        From the home page:
            Click "New Library"
            Fill out the form
            Submit the form
        We should be redirected to the edit view for the library
        Return to the home page
        The newly created library should now appear in the list of libraries
        """
        lib_name = "New Library Name"
        lib_org = "TestOrgX"
        lib_number = "TESTLIB"

        self.auth_page.visit()
        self.dashboard_page.visit()

        # The library must not exist yet, but the button to create one must.
        self.assertFalse(self.dashboard_page.has_library(
            name=lib_name, org=lib_org, number=lib_number))
        self.assertTrue(self.dashboard_page.has_new_library_button())

        # Walk through the "New Library" form and submit it.
        self.dashboard_page.click_new_library()
        self.assertTrue(self.dashboard_page.is_new_library_form_visible())
        self.dashboard_page.fill_new_library_form(lib_name, lib_org, lib_number)
        self.assertTrue(self.dashboard_page.is_new_library_form_valid())
        self.dashboard_page.submit_new_library_form()

        # The next page is the library edit view; make sure it loads:
        edit_page = LibraryEditPage(self.browser,
                                    LibraryLocator(lib_org, lib_number))
        edit_page.wait_for_page()

        # Then go back to the home page and make sure the new library is listed there:
        self.dashboard_page.visit()
        self.assertTrue(self.dashboard_page.has_library(
            name=lib_name, org=lib_org, number=lib_number))
| valtech-mooc/edx-platform | common/test/acceptance/tests/studio/test_studio_home.py | Python | agpl-3.0 | 2,146 | [
"VisIt"
] | 9428df4b4044f677849108f79971d3b68edc73fb8dd4ed48246c97fda728fe0c |
#!/usr/bin/env python
import unittest
import shutil
import os
import subprocess
import sys
from distutils.version import StrictVersion
def run(command):
    """Run *command* in a shell; return a (stderr output, exit status) pair."""
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _stdout, stderr = proc.communicate()
    return (stderr, proc.returncode)
class _01_CheckEnv(unittest.TestCase):
    """Verify that PATH, PYTHONPATH and MIDAS_DB are configured for MIDAS."""

    def setUp(self):
        # Collect the file names reachable through PATH and PYTHONPATH so the
        # assertions below can look for the MIDAS scripts/package by name.
        self.path_contents = []
        for _ in os.environ['PATH'].strip(':').split(':'):
            if os.path.isdir(_): self.path_contents += os.listdir(_)
        self.python_contents = []
        for _ in os.environ['PYTHONPATH'].strip(':').split(':'):
            if os.path.isdir(_): self.python_contents += os.listdir(_)

    def test_class(self):
        self.assertTrue(
            'run_midas.py' in self.path_contents,
            msg="""\n\n'run_midas.py' not found in PATH environmental variable.\nMake sure '/path/to/MIDAS/scripts' has been added to your PATH:\nexport PATH=$PATH:/path/to/MIDAS/scripts"""
            )
        self.assertTrue(
            'midas' in self.python_contents,
            msg="""\n\n'midas' not found in PYTHONPATH environmental variable.\nMake sure '/path/to/MIDAS' has been added to your PYTHONPATH:\nexport PYTHONPATH=$PYTHONPATH:/path/to/MIDAS"""
            )
        self.assertTrue(
            'MIDAS_DB' in os.environ,
            msg="""\n\n'MIDAS_DB' environmental variable not set.\nSet this variable and rerun the test:\nexport MIDAS_DB=/path/to/midas_db_v1.1"""
            )
class _02_ImportDependencies(unittest.TestCase):
    """Check that every third-party dependency of MIDAS can be imported."""

    def setUp(self):
        # Attempt each import independently so all failures are reported at once.
        self.failures = []
        try: import numpy
        except Exception: self.failures.append('numpy')
        try: import pandas
        except Exception: self.failures.append('pandas')
        try: import pysam
        except Exception: self.failures.append('pysam')
        try: import midas
        except Exception: self.failures.append('midas')
        try: import Bio.SeqIO
        except Exception: self.failures.append('Bio.SeqIO')

    def test_class(self):
        self.assertTrue(len(self.failures)==0,
            msg="""\n\nThe following dependencies failed to import: %s.\nMake sure that dependencies have been properly installed""" % str(self.failures))
class _03_CheckVersions(unittest.TestCase):
    """Check that installed dependency versions meet the minimum requirements."""

    def setUp(self):
        # __import__('Bio.SeqIO') returns the top-level package (Bio), whose
        # __version__ attribute is what gets compared below.
        self.modules = ['numpy', 'pandas', 'pysam', 'Bio.SeqIO']
        self.installeds = [module.__version__ for module in map(__import__, self.modules)]
        self.requireds = ['1.7.0', '0.17.1', '0.8.1', '1.6.2']

    def test_class(self):
        for module, installed, required in zip(self.modules, self.installeds, self.requireds):
            # StrictVersion only accepts up to three numeric components, so
            # truncate longer version strings (e.g. '1.2.3.4' -> '1.2.3').
            if len(installed.split('.')) > 3:
                installed = '.'.join(installed.split('.')[0:3])
            self.assertTrue(
                StrictVersion(installed) >= StrictVersion(required),
                msg="""\n\nImported library '%s %s' is out of date. Required version is >= %s""" % (module, installed, required) )
class _04_HelpText(unittest.TestCase):
    """Every CLI entry point and subcommand should print help and exit 0."""

    def test_class(self):
        for cmd in ('run_midas.py -h',
                    'run_midas.py species -h',
                    'run_midas.py genes -h',
                    'run_midas.py snps -h',
                    'merge_midas.py -h',
                    'merge_midas.py species -h',
                    'merge_midas.py genes -h',
                    'merge_midas.py snps -h'):
            stderr, status = run(cmd)
            self.assertTrue(status == 0, msg=stderr)
class _05_RunSpecies(unittest.TestCase):
    """Run the MIDAS species workflow on the bundled test reads."""

    def test_class(self):
        stderr, status = run('run_midas.py species ./sample -1 ./test.fq.gz -n 100')
        self.assertTrue(status == 0, msg=stderr)
class _06_RunGenes(unittest.TestCase):
    """Run the MIDAS genes workflow for a single test species."""

    def test_class(self):
        stderr, status = run('run_midas.py genes ./sample -1 ./test.fq.gz -n 100 --species_id Bacteroides_vulgatus_57955')
        self.assertTrue(status == 0, msg=stderr)
class _07_RunSNPs(unittest.TestCase):
    """Run the MIDAS snps workflow for a single test species."""

    def test_class(self):
        stderr, status = run('run_midas.py snps ./sample -1 ./test.fq.gz -n 100 --species_id Bacteroides_vulgatus_57955')
        self.assertTrue(status == 0, msg=stderr)
class _08_MergeSpecies(unittest.TestCase):
    """Merge per-sample species abundances into one table."""

    def test_class(self):
        stderr, status = run('merge_midas.py species ./species -i ./sample -t list')
        self.assertTrue(status == 0, msg=stderr)
class _09_MergeGenes(unittest.TestCase):
    """Merge per-sample gene results for the test species."""

    def test_class(self):
        stderr, status = run('merge_midas.py genes ./genes -i ./sample -t list --species_id Bacteroides_vulgatus_57955 --sample_depth 0.0')
        self.assertTrue(status == 0, msg=stderr)
class _10_MergeSNPs(unittest.TestCase):
    """Merge per-sample SNP results for the test species."""

    def test_class(self):
        stderr, status = run('merge_midas.py snps ./snps -i ./sample -t list --species_id Bacteroides_vulgatus_57955 --all_samples --all_sites --max_sites 10000')
        self.assertTrue(status == 0, msg=stderr)
class _11_SNPdiversity(unittest.TestCase):
    """Compute SNP diversity statistics from the merged SNP output."""

    def test_class(self):
        stderr, status = run('snp_diversity.py snps/Bacteroides_vulgatus_57955')
        self.assertTrue(status == 0, msg=stderr)
class _12_CallConsensus(unittest.TestCase):
    """Call consensus sequences from the merged SNP output."""

    def test_class(self):
        stderr, status = run('call_consensus.py snps/Bacteroides_vulgatus_57955')
        self.assertTrue(status == 0, msg=stderr)
class _13_CompareGeneContent(unittest.TestCase):
    """Compare gene content between samples from the merged genes output."""

    def test_class(self):
        stderr, status = run('compare_genes.py genes/Bacteroides_vulgatus_57955')
        self.assertTrue(status == 0, msg=stderr)
class _14_QueryByCompound(unittest.TestCase):
    """Query the sample by a KEGG compound id."""

    def test_class(self):
        stderr, status = run('query_by_compound.py -i sample -t list -c C00312')
        self.assertTrue(status == 0, msg=stderr)
class _15_BuildDB(unittest.TestCase):
    """Unpack the test genomes and build a MIDAS database from them."""

    def test_class(self):
        # Note: as in the original, only the final (build) command's exit
        # status is asserted; the tar extraction status is not checked.
        stderr, status = run('tar -zxvf genomes.tar.gz')
        stderr, status = run('build_midas_db.py genomes genomes.mapfile db --threads 10')
        self.assertTrue(status == 0, msg=stderr)
if __name__ == '__main__':
    try:
        # Run the suite from the directory containing this file so that the
        # relative fixture paths (./test.fq.gz, ./genomes.tar.gz) resolve.
        dir_name = os.path.dirname(os.path.abspath(__file__))
        os.chdir(dir_name)
        unittest.main(exit=False)
        # Remove the output directories created by the tests.
        for dir in ['sample', 'species', 'genes', 'snps', 'genomes', 'db']:
            shutil.rmtree(dir)
    except:
        # NOTE(review): bare except (also catches KeyboardInterrupt); cleanup
        # below is best-effort and any error is deliberately swallowed.
        print("")
        for dir in ['sample', 'species', 'genes', 'snps', 'genomes', 'db']:
            if os.path.exists(dir): shutil.rmtree(dir)
| snayfach/PhyloCNV | test/test_midas.py | Python | gpl-3.0 | 5,928 | [
"pysam"
] | d194e0c84d8a0067e58fa02fb687a6825705cc3142b89a9689b63b46933e7c58 |
# Copyright 2010 Dan Smith <dsmith@danplanet.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import os
import sys
import time
import logging
from chirp import chirp_common, errors, directory, util
from chirp.settings import RadioSetting, RadioSettingGroup, \
RadioSettingValueInteger, RadioSettingValueBoolean, \
RadioSettingValueString, RadioSettingValueList, RadioSettings
LOG = logging.getLogger(__name__)

# Set CHIRP_NOCACHE in the environment to bypass the in-memory channel cache.
NOCACHE = "CHIRP_NOCACHE" in os.environ

DUPLEX = {0: "", 1: "+", 2: "-"}
MODES = {0: "FM", 1: "AM"}
STEPS = list(chirp_common.TUNING_STEPS)
STEPS.append(100.0)

# Start from the full CTCSS tone list and drop the entries these radios
# do not use.
KENWOOD_TONES = list(chirp_common.TONES)
KENWOOD_TONES.remove(159.8)
KENWOOD_TONES.remove(165.5)
KENWOOD_TONES.remove(171.3)
KENWOOD_TONES.remove(177.3)
KENWOOD_TONES.remove(183.5)
KENWOOD_TONES.remove(189.9)
KENWOOD_TONES.remove(196.6)
KENWOOD_TONES.remove(199.5)

THF6_MODES = ["FM", "WFM", "AM", "LSB", "USB", "CW"]

# Serializes access to the serial port across threads (see command()).
LOCK = threading.Lock()
COMMAND_RESP_BUFSIZE = 8
# Remember the last baud rate / delimiter that worked so get_id() tries
# them first next time.
LAST_BAUD = 9600
LAST_DELIMITER = ("\r", " ")

# The Kenwood TS-2000 uses ";" as a CAT command message delimiter, and all
# others use "\r".  Also, TS-2000 doesn't space delimit the command fields,
# but others do.
def command(ser, cmd, *args):
    """Send @cmd (with optional @args) to the radio via serial port @ser and
    return the radio's response with the trailing delimiter stripped.

    The global LOCK serializes port access; LAST_DELIMITER supplies the
    message terminator and field separator negotiated by get_id().
    """
    global LOCK, LAST_DELIMITER, COMMAND_RESP_BUFSIZE

    start = time.time()

    LOCK.acquire()

    if args:
        cmd += LAST_DELIMITER[1] + LAST_DELIMITER[1].join(args)
    cmd += LAST_DELIMITER[0]

    LOG.debug("PC->RADIO: %s" % cmd.strip())
    ser.write(cmd)

    result = ""
    # Accumulate the reply in small chunks until the delimiter arrives,
    # giving up after ~0.5s of total elapsed time.
    while not result.endswith(LAST_DELIMITER[0]):
        result += ser.read(COMMAND_RESP_BUFSIZE)
        if (time.time() - start) > 0.5:
            LOG.error("Timeout waiting for data")
            break

    if result.endswith(LAST_DELIMITER[0]):
        LOG.debug("RADIO->PC: %s" % result.strip())
        result = result[:-1]
    else:
        LOG.error("Giving up")

    # NOTE(review): release is not in a finally block, so an exception from
    # ser.write()/ser.read() would leave LOCK held.
    LOCK.release()

    return result.strip()
def get_id(ser):
    """Get the ID of the radio attached to @ser.

    Probes every supported baud rate / delimiter combination (starting with
    the pair that worked last time) and returns the model string reported by
    the radio's "ID" command.  Raises errors.RadioError if nothing answers.
    """
    global LAST_BAUD
    bauds = [9600, 19200, 38400, 57600]
    # Try the most recently successful baud rate first.
    bauds.remove(LAST_BAUD)
    bauds.insert(0, LAST_BAUD)

    global LAST_DELIMITER
    command_delimiters = [("\r", " "), (";", "")]

    for i in bauds:
        for delimiter in command_delimiters:
            LAST_DELIMITER = delimiter
            LOG.info("Trying ID at baud %i with delimiter \"%s\"" %
                     (i, repr(delimiter)))
            ser.baudrate = i
            # Flush any stale partial command/response before probing.
            ser.write(LAST_DELIMITER[0])
            ser.read(25)
            resp = command(ser, "ID")

            # most kenwood radios
            if " " in resp:
                LAST_BAUD = i
                return resp.split(" ")[1]

            # TS-2000
            if "ID019" == resp:
                LAST_BAUD = i
                return "TS-2000"

    raise errors.RadioError("No response from radio")
def get_tmode(tone, ctcss, dcs):
    """Map the radio's tone/ctcss/dcs flag strings to a tone-mode name.

    Precedence is DCS, then tone squelch, then plain tone; when every flag
    is off (or empty) the result is the empty string.
    """
    if dcs and int(dcs) == 1:
        return "DTCS"
    for flag, mode in ((ctcss, "TSQL"), (tone, "Tone")):
        if int(flag):
            return mode
    return ""
def iserr(result):
    """Return True when @result is one of the radio's error replies."""
    return result in ("N", "?")
class KenwoodLiveRadio(chirp_common.LiveRadio):
    """Base class for all live-mode kenwood radios.

    Subclasses override the _cmd_* builders and the mem-spec parse/format
    hooks; this class implements the common read/write/erase flow and a
    per-channel memory cache (disabled via the CHIRP_NOCACHE env var).
    """
    BAUD_RATE = 9600
    VENDOR = "Kenwood"
    MODEL = ""

    _vfo = 0                 # VFO index used in every memory command
    _upper = 200             # highest valid memory number
    _kenwood_split = False   # whether the model supports split memories
    _kenwood_valid_tones = list(chirp_common.TONES)

    def __init__(self, *args, **kwargs):
        chirp_common.LiveRadio.__init__(self, *args, **kwargs)

        self._memcache = {}

        if self.pipe:
            self.pipe.timeout = 0.1
            radio_id = get_id(self.pipe)
            if radio_id != self.MODEL.split(" ")[0]:
                raise Exception("Radio reports %s (not %s)" % (radio_id,
                                                               self.MODEL))

            # Disable auto-information mode while CHIRP drives the radio.
            command(self.pipe, "AI", "0")

    # --- command-string builders (overridable per model) ---

    def _cmd_get_memory(self, number):
        return "MR", "%i,0,%03i" % (self._vfo, number)

    def _cmd_get_memory_name(self, number):
        return "MNA", "%i,%03i" % (self._vfo, number)

    def _cmd_get_split(self, number):
        return "MR", "%i,1,%03i" % (self._vfo, number)

    def _cmd_set_memory(self, number, spec):
        if spec:
            spec = "," + spec
        return "MW", "%i,0,%03i%s" % (self._vfo, number, spec)

    def _cmd_set_memory_name(self, number, name):
        return "MNA", "%i,%03i,%s" % (self._vfo, number, name)

    def _cmd_set_split(self, number, spec):
        return "MW", "%i,1,%03i,%s" % (self._vfo, number, spec)

    def get_raw_memory(self, number):
        return command(self.pipe, *self._cmd_get_memory(number))

    def get_memory(self, number):
        """Read memory @number from the radio (or the cache) as a Memory."""
        if number < 0 or number > self._upper:
            raise errors.InvalidMemoryLocation(
                "Number must be between 0 and %i" % self._upper)
        if number in self._memcache and not NOCACHE:
            return self._memcache[number]

        result = command(self.pipe, *self._cmd_get_memory(number))
        if result == "N" or result == "E":
            # Radio reports an empty/unprogrammed channel.
            mem = chirp_common.Memory()
            mem.number = number
            mem.empty = True
            self._memcache[mem.number] = mem
            return mem
        elif " " not in result:
            LOG.error("Not sure what to do with this: `%s'" % result)
            raise errors.RadioError("Unexpected result returned from radio")

        value = result.split(" ")[1]
        spec = value.split(",")

        mem = self._parse_mem_spec(spec)
        self._memcache[mem.number] = mem

        # The channel name comes back from a separate query.
        result = command(self.pipe, *self._cmd_get_memory_name(number))
        if " " in result:
            value = result.split(" ", 1)[1]
            if value.count(",") == 2:
                _zero, _loc, mem.name = value.split(",")
            else:
                _loc, mem.name = value.split(",")

        if mem.duplex == "" and self._kenwood_split:
            result = command(self.pipe, *self._cmd_get_split(number))
            if " " in result:
                value = result.split(" ", 1)[1]
                self._parse_split_spec(mem, value.split(","))

        return mem

    def _make_mem_spec(self, mem):
        # Implemented by subclasses: Memory -> tuple of spec fields.
        pass

    def _parse_mem_spec(self, spec):
        # Implemented by subclasses: spec fields -> Memory.
        pass

    def _parse_split_spec(self, mem, spec):
        mem.duplex = "split"
        mem.offset = int(spec[2])

    def _make_split_spec(self, mem):
        return ("%011i" % mem.offset, "0")

    def set_memory(self, memory):
        """Write @memory to the radio, then its name, then any split info."""
        if memory.number < 0 or memory.number > self._upper:
            raise errors.InvalidMemoryLocation(
                "Number must be between 0 and %i" % self._upper)

        spec = self._make_mem_spec(memory)
        spec = ",".join(spec)
        r1 = command(self.pipe, *self._cmd_set_memory(memory.number, spec))
        if not iserr(r1):
            # Give the radio time to commit before setting the name.
            time.sleep(0.5)
            r2 = command(self.pipe, *self._cmd_set_memory_name(memory.number,
                                                               memory.name))
            if not iserr(r2):
                memory.name = memory.name.rstrip()
                self._memcache[memory.number] = memory
            else:
                raise errors.InvalidDataError("Radio refused name %i: %s" %
                                              (memory.number,
                                               repr(memory.name)))
        else:
            raise errors.InvalidDataError("Radio refused %i" % memory.number)

        if memory.duplex == "split" and self._kenwood_split:
            spec = ",".join(self._make_split_spec(memory))
            result = command(self.pipe, *self._cmd_set_split(memory.number,
                                                             spec))
            if iserr(result):
                raise errors.InvalidDataError("Radio refused %i" %
                                              memory.number)

    def erase_memory(self, number):
        # Only channels previously seen in the cache are erased.
        if number not in self._memcache:
            return

        resp = command(self.pipe, *self._cmd_set_memory(number, ""))
        if iserr(resp):
            raise errors.RadioError("Radio refused delete of %i" % number)
        del self._memcache[number]

    # --- small typed wrappers around single get/set commands ---

    def _kenwood_get(self, cmd):
        resp = command(self.pipe, cmd)
        if " " in resp:
            return resp.split(" ", 1)
        else:
            if resp == cmd:
                # Radio echoed the command with no value.
                return [resp, ""]
            else:
                raise errors.RadioError("Radio refused to return %s" % cmd)

    def _kenwood_set(self, cmd, value):
        resp = command(self.pipe, cmd, value)
        if resp[:len(cmd)] == cmd:
            return
        raise errors.RadioError("Radio refused to set %s" % cmd)

    def _kenwood_get_bool(self, cmd):
        _cmd, result = self._kenwood_get(cmd)
        return result == "1"

    def _kenwood_set_bool(self, cmd, value):
        return self._kenwood_set(cmd, str(int(value)))

    def _kenwood_get_int(self, cmd):
        _cmd, result = self._kenwood_get(cmd)
        return int(result)

    def _kenwood_set_int(self, cmd, value, digits=1):
        return self._kenwood_set(cmd, ("%%0%ii" % digits) % value)

    def set_settings(self, settings):
        """Push changed RadioSetting values to the radio, recursing into
        setting groups and dispatching on each value's concrete type."""
        for element in settings:
            if not isinstance(element, RadioSetting):
                self.set_settings(element)
                continue
            if not element.changed():
                continue
            if isinstance(element.value, RadioSettingValueBoolean):
                self._kenwood_set_bool(element.get_name(), element.value)
            elif isinstance(element.value, RadioSettingValueList):
                options = self._SETTINGS_OPTIONS[element.get_name()]
                self._kenwood_set_int(element.get_name(),
                                      options.index(str(element.value)))
            elif isinstance(element.value, RadioSettingValueInteger):
                if element.value.get_max() > 9:
                    digits = 2
                else:
                    digits = 1
                self._kenwood_set_int(element.get_name(),
                                      element.value, digits)
            elif isinstance(element.value, RadioSettingValueString):
                self._kenwood_set(element.get_name(), str(element.value))
            else:
                LOG.error("Unknown type %s" % element.value)
class KenwoodOldLiveRadio(KenwoodLiveRadio):
    """Base class for older Kenwood live radios limited to OLD_TONES."""
    _kenwood_valid_tones = list(chirp_common.OLD_TONES)

    def set_memory(self, memory):
        """Store *memory*, rejecting tones this generation cannot do."""
        # 69.3 Hz is in OLD_TONES but not usable on these radios.
        usable = [t for t in chirp_common.OLD_TONES if t != 69.3]
        # Check repeater tone first, then squelch tone, as before.
        for tone in (memory.rtone, memory.ctone):
            if tone not in usable:
                raise errors.UnsupportedToneError("This radio does not support " +
                                                  "tone %.1fHz" % tone)
        return KenwoodLiveRadio.set_memory(self, memory)
@directory.register
class THD7Radio(KenwoodOldLiveRadio):
    """Kenwood TH-D7"""
    MODEL = "TH-D7"
    # This model can program odd (TX != RX) splits.
    _kenwood_split = True
    # Option tables for list-type menu settings, keyed by the radio's
    # command name; the list index is the integer sent on the wire.
    _SETTINGS_OPTIONS = {
        "BAL": ["4:0", "3:1", "2:2", "1:3", "0:4"],
        "BEP": ["Off", "Key", "Key+Data", "All"],
        "BEPT": ["Off", "Mine", "All New"],  # D700 has fourth "All"
        "DS": ["Data Band", "Both Bands"],
        "DTB": ["A", "B"],
        "DTBA": ["A", "B", "A:TX/B:RX"],  # D700 has fourth A:RX/B:TX
        "DTX": ["Manual", "PTT", "Auto"],
        "ICO": ["Kenwood", "Runner", "House", "Tent", "Boat", "SSTV",
                "Plane", "Speedboat", "Car", "Bicycle"],
        "MNF": ["Name", "Frequency"],
        "PKSA": ["1200", "9600"],
        "POSC": ["Off Duty", "Enroute", "In Service", "Returning",
                 "Committed", "Special", "Priority", "Emergency"],
        "PT": ["100ms", "200ms", "500ms", "750ms",
               "1000ms", "1500ms", "2000ms"],
        "SCR": ["Time", "Carrier", "Seek"],
        "SV": ["Off", "0.2s", "0.4s", "0.6s", "0.8s", "1.0s",
               "2s", "3s", "4s", "5s"],
        "TEMP": ["F", "C"],
        "TXI": ["30sec", "1min", "2min", "3min", "4min", "5min",
                "10min", "20min", "30min"],
        "UNIT": ["English", "Metric"],
        "WAY": ["Off", "6 digit NMEA", "7 digit NMEA", "8 digit NMEA",
                "9 digit NMEA", "6 digit Magellan", "DGPS"],
    }

    def get_features(self):
        """Describe the TH-D7's capabilities."""
        rf = chirp_common.RadioFeatures()
        rf.has_settings = True
        rf.has_dtcs = False
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.has_mode = True
        rf.has_tuning_step = False
        rf.can_odd_split = True
        rf.valid_duplexes = ["", "-", "+", "split"]
        rf.valid_modes = MODES.values()
        rf.valid_tmodes = ["", "Tone", "TSQL"]
        rf.valid_characters = \
            chirp_common.CHARSET_ALPHANUMERIC + "/.-+*)('&%$#! ~}|{"
        rf.valid_name_length = 7
        rf.memory_bounds = (1, self._upper)
        return rf

    def _make_mem_spec(self, mem):
        """Serialize *mem* into the field tuple for a memory write."""
        if mem.duplex in " -+":
            duplex = util.get_dict_rev(DUPLEX, mem.duplex)
            offset = mem.offset
        else:
            # "split" and friends carry no offset in this record.
            duplex = 0
            offset = 0
        spec = (
            "%011i" % mem.freq,
            "%X" % STEPS.index(mem.tuning_step),
            "%i" % duplex,
            "0",
            "%i" % (mem.tmode == "Tone"),
            "%i" % (mem.tmode == "TSQL"),
            "",  # DCS Flag
            # Tone indices are 1-based on the wire.
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone) + 1),
            "",  # DCS Code
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone) + 1),
            "%09i" % offset,
            "%i" % util.get_dict_rev(MODES, mem.mode),
            "%i" % ((mem.skip == "S") and 1 or 0))
        return spec

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[2])
        mem.freq = int(spec[3], 10)
        mem.tuning_step = STEPS[int(spec[4], 16)]
        mem.duplex = DUPLEX[int(spec[5])]
        mem.tmode = get_tmode(spec[7], spec[8], spec[9])
        # Tone fields are 1-based indices into the valid tone table.
        mem.rtone = self._kenwood_valid_tones[int(spec[10]) - 1]
        mem.ctone = self._kenwood_valid_tones[int(spec[12]) - 1]
        if spec[11] and spec[11].isdigit():
            # Strip the trailing flag digit before indexing DTCS_CODES.
            mem.dtcs = chirp_common.DTCS_CODES[int(spec[11][:-1]) - 1]
        else:
            LOG.warn("Unknown or invalid DCS: %s" % spec[11])
        if spec[13]:
            mem.offset = int(spec[13])
        else:
            mem.offset = 0
        mem.mode = MODES[int(spec[14])]
        mem.skip = int(spec[15]) and "S" or ""
        return mem

    def get_settings(self):
        """Read all supported settings from the radio into groups."""
        main = RadioSettingGroup("main", "Main")
        aux = RadioSettingGroup("aux", "Aux")
        tnc = RadioSettingGroup("tnc", "TNC")
        save = RadioSettingGroup("save", "Save")
        display = RadioSettingGroup("display", "Display")
        dtmf = RadioSettingGroup("dtmf", "DTMF")
        radio = RadioSettingGroup("radio", "Radio",
                                  aux, tnc, save, display, dtmf)
        sky = RadioSettingGroup("sky", "SkyCommand")
        aprs = RadioSettingGroup("aprs", "APRS")
        top = RadioSettings(main, radio, aprs, sky)
        # (command, destination group, human-readable label)
        bools = [("AMR", aprs, "APRS Message Auto-Reply"),
                 ("AIP", aux, "Advanced Intercept Point"),
                 ("ARO", aux, "Automatic Repeater Offset"),
                 ("BCN", aprs, "Beacon"),
                 ("CH", radio, "Channel Mode Display"),
                 # ("DIG", aprs, "APRS Digipeater"),
                 ("DL", main, "Dual"),
                 ("LK", main, "Lock"),
                 ("LMP", main, "Lamp"),
                 ("TSP", dtmf, "DTMF Fast Transmission"),
                 ("TXH", dtmf, "TX Hold"),
                 ]
        for setting, group, name in bools:
            value = self._kenwood_get_bool(setting)
            rs = RadioSetting(setting, name,
                              RadioSettingValueBoolean(value))
            group.append(rs)
        lists = [("BAL", main, "Balance"),
                 ("BEP", aux, "Beep"),
                 ("BEPT", aprs, "APRS Beep"),
                 ("DS", tnc, "Data Sense"),
                 ("DTB", tnc, "Data Band"),
                 ("DTBA", aprs, "APRS Data Band"),
                 ("DTX", aprs, "APRS Data TX"),
                 # ("ICO", aprs, "APRS Icon"),
                 ("MNF", main, "Memory Display Mode"),
                 ("PKSA", aprs, "APRS Packet Speed"),
                 ("POSC", aprs, "APRS Position Comment"),
                 ("PT", dtmf, "DTMF Speed"),
                 ("SV", save, "Battery Save"),
                 ("TEMP", aprs, "APRS Temperature Units"),
                 ("TXI", aprs, "APRS Transmit Interval"),
                 # ("UNIT", aprs, "APRS Display Units"),
                 ("WAY", aprs, "Waypoint Mode"),
                 ]
        for setting, group, name in lists:
            value = self._kenwood_get_int(setting)
            options = self._SETTINGS_OPTIONS[setting]
            rs = RadioSetting(setting, name,
                              RadioSettingValueList(options,
                                                    options[value]))
            group.append(rs)
        # (command, group, label, minimum, maximum)
        ints = [("CNT", display, "Contrast", 1, 16),
                ]
        for setting, group, name, minv, maxv in ints:
            value = self._kenwood_get_int(setting)
            rs = RadioSetting(setting, name,
                              RadioSettingValueInteger(minv, maxv, value))
            group.append(rs)
        # (command, group, label, maximum length)
        strings = [("MES", display, "Power-on Message", 8),
                   ("MYC", aprs, "APRS Callsign", 8),
                   ("PP", aprs, "APRS Path", 32),
                   ("SCC", sky, "SkyCommand Callsign", 8),
                   ("SCT", sky, "SkyCommand To Callsign", 8),
                   # ("STAT", aprs, "APRS Status Text", 32),
                   ]
        for setting, group, name, length in strings:
            _cmd, value = self._kenwood_get(setting)
            rs = RadioSetting(setting, name,
                              RadioSettingValueString(0, length, value))
            group.append(rs)
        return top
@directory.register
class THD7GRadio(THD7Radio):
    """Kenwood TH-D7G"""
    MODEL = "TH-D7G"

    def get_features(self):
        """Same feature set as the TH-D7, with 8-character names."""
        features = super(THD7GRadio, self).get_features()
        features.valid_name_length = 8
        return features
@directory.register
class TMD700Radio(KenwoodOldLiveRadio):
    """Kenwood TH-D700"""
    MODEL = "TM-D700"
    # This model can program odd (TX != RX) splits.
    _kenwood_split = True

    def get_features(self):
        """Describe the TM-D700's capabilities."""
        rf = chirp_common.RadioFeatures()
        rf.has_dtcs = True
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.has_mode = False
        rf.has_tuning_step = False
        rf.can_odd_split = True
        rf.valid_duplexes = ["", "-", "+", "split"]
        rf.valid_modes = ["FM"]
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS"]
        rf.valid_characters = chirp_common.CHARSET_ALPHANUMERIC
        rf.valid_name_length = 8
        rf.memory_bounds = (1, self._upper)
        return rf

    def _make_mem_spec(self, mem):
        """Serialize *mem* into the field tuple for a memory write."""
        if mem.duplex in " -+":
            duplex = util.get_dict_rev(DUPLEX, mem.duplex)
        else:
            duplex = 0
        spec = (
            "%011i" % mem.freq,
            "%X" % STEPS.index(mem.tuning_step),
            "%i" % duplex,
            "0",
            "%i" % (mem.tmode == "Tone"),
            "%i" % (mem.tmode == "TSQL"),
            "%i" % (mem.tmode == "DTCS"),
            # Tone indices are 1-based on the wire.
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone) + 1),
            # DCS code is 1-based with a trailing flag digit appended.
            "%03i0" % (chirp_common.DTCS_CODES.index(mem.dtcs) + 1),
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone) + 1),
            "%09i" % mem.offset,
            "%i" % util.get_dict_rev(MODES, mem.mode),
            "%i" % ((mem.skip == "S") and 1 or 0))
        return spec

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[2])
        mem.freq = int(spec[3])
        mem.tuning_step = STEPS[int(spec[4], 16)]
        mem.duplex = DUPLEX[int(spec[5])]
        mem.tmode = get_tmode(spec[7], spec[8], spec[9])
        # Tone fields are 1-based indices into the valid tone table.
        mem.rtone = self._kenwood_valid_tones[int(spec[10]) - 1]
        mem.ctone = self._kenwood_valid_tones[int(spec[12]) - 1]
        if spec[11] and spec[11].isdigit():
            # Strip the trailing flag digit (see "%03i0" above).
            mem.dtcs = chirp_common.DTCS_CODES[int(spec[11][:-1]) - 1]
        else:
            LOG.warn("Unknown or invalid DCS: %s" % spec[11])
        if spec[13]:
            mem.offset = int(spec[13])
        else:
            mem.offset = 0
        mem.mode = MODES[int(spec[14])]
        mem.skip = int(spec[15]) and "S" or ""
        return mem
@directory.register
class TMV7Radio(KenwoodOldLiveRadio):
    """Kenwood TM-V7"""
    MODEL = "TM-V7"
    mem_upper_limit = 200  # Will be updated

    def get_features(self):
        """Describe the TM-V7's capabilities (dual-band sub-devices)."""
        rf = chirp_common.RadioFeatures()
        rf.has_dtcs = False
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.has_mode = False
        rf.has_tuning_step = False
        rf.valid_modes = ["FM"]
        rf.valid_tmodes = ["", "Tone", "TSQL"]
        rf.valid_characters = chirp_common.CHARSET_ALPHANUMERIC
        rf.valid_name_length = 7
        rf.has_sub_devices = True
        rf.memory_bounds = (1, self._upper)
        return rf

    def _make_mem_spec(self, mem):
        """Serialize *mem*; this model has no DCS, offset or skip fields."""
        spec = (
            "%011i" % mem.freq,
            "%X" % STEPS.index(mem.tuning_step),
            "%i" % util.get_dict_rev(DUPLEX, mem.duplex),
            "0",
            "%i" % (mem.tmode == "Tone"),
            "%i" % (mem.tmode == "TSQL"),
            "0",
            # Tone indices are 1-based on the wire.
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone) + 1),
            "000",
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone) + 1),
            "",
            "0")
        return spec

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[2])
        mem.freq = int(spec[3])
        mem.tuning_step = STEPS[int(spec[4], 16)]
        mem.duplex = DUPLEX[int(spec[5])]
        if int(spec[7]):
            mem.tmode = "Tone"
        elif int(spec[8]):
            mem.tmode = "TSQL"
        # Tone fields are 1-based indices into the valid tone table.
        mem.rtone = self._kenwood_valid_tones[int(spec[10]) - 1]
        mem.ctone = self._kenwood_valid_tones[int(spec[12]) - 1]
        return mem

    def get_sub_devices(self):
        """Expose the VHF and UHF halves as separate radios."""
        return [TMV7RadioVHF(self.pipe), TMV7RadioUHF(self.pipe)]

    def __test_location(self, loc):
        """Probe whether memory location *loc* exists on this band.

        Tries to write a test memory into an empty location; failure
        means *loc* is past the band's limit.  NOTE(review): called via
        name mangling; presumably used by split detection — confirm.
        """
        mem = self.get_memory(loc)
        if not mem.empty:
            # Memory was not empty, must be valid
            return True
        # Mem was empty (or invalid), try to set it
        if self._vfo == 0:
            mem.freq = 144000000
        else:
            mem.freq = 440000000
        mem.empty = False
        try:
            self.set_memory(mem)
        except Exception:
            # Failed, so we're past the limit
            return False
        # Erase what we did
        try:
            self.erase_memory(loc)
        except Exception:
            pass  # V7A Can't delete just yet
        return True

    def _detect_split(self):
        # Fixed split point for this model; no probing needed.
        return 50
class TMV7RadioSub(TMV7Radio):
    """Base class for the TM-V7 sub devices"""

    def __init__(self, pipe):
        """Initialize the band sub-device and detect its memory split."""
        TMV7Radio.__init__(self, pipe)
        self._detect_split()
class TMV7RadioVHF(TMV7RadioSub):
    """TM-V7 VHF subdevice"""
    VARIANT = "VHF"
    # Band selector: 0 chooses the VHF side (see __test_location).
    _vfo = 0
class TMV7RadioUHF(TMV7RadioSub):
    """TM-V7 UHF subdevice"""
    VARIANT = "UHF"
    # Band selector: 1 chooses the UHF side (see __test_location).
    _vfo = 1
@directory.register
class TMG707Radio(TMV7Radio):
    """Kenwood TM-G707"""
    MODEL = "TM-G707"

    def get_features(self):
        """Return TM-V7 features adjusted for the single-body TM-G707."""
        features = TMV7Radio.get_features(self)
        # One radio body: no VHF/UHF sub-devices, 180 memories.
        features.has_sub_devices = False
        features.memory_bounds = (1, 180)
        features.valid_bands = [
            (118000000, 174000000),
            (300000000, 520000000),
            (800000000, 999000000),
        ]
        return features
# Tuning steps (kHz) supported by the TH-G71.
THG71_STEPS = [5, 6.25, 10, 12.5, 15, 20, 25, 30, 50, 100]
@directory.register
class THG71Radio(TMV7Radio):
    """Kenwood TH-G71"""
    MODEL = "TH-G71"

    def get_features(self):
        """TM-V7 features plus tuning steps; single body, 6-char names."""
        rf = TMV7Radio.get_features(self)
        rf.has_tuning_step = True
        rf.valid_tuning_steps = list(THG71_STEPS)
        rf.valid_name_length = 6
        rf.has_sub_devices = False
        rf.valid_bands = [(118000000, 174000000),
                          (320000000, 470000000),
                          (800000000, 945000000)]
        return rf

    def _make_mem_spec(self, mem):
        """Serialize *mem*; unlike the TM-V7 this model has an offset."""
        spec = (
            "%011i" % mem.freq,
            "%X" % THG71_STEPS.index(mem.tuning_step),
            "%i" % util.get_dict_rev(DUPLEX, mem.duplex),
            "0",
            "%i" % (mem.tmode == "Tone"),
            "%i" % (mem.tmode == "TSQL"),
            "0",
            # Tone indices are 1-based on the wire.
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone) + 1),
            "000",
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone) + 1),
            "%09i" % mem.offset,
            "%i" % ((mem.skip == "S") and 1 or 0))
        return spec

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[2])
        mem.freq = int(spec[3])
        mem.tuning_step = THG71_STEPS[int(spec[4], 16)]
        mem.duplex = DUPLEX[int(spec[5])]
        if int(spec[7]):
            mem.tmode = "Tone"
        elif int(spec[8]):
            mem.tmode = "TSQL"
        # Tone fields are 1-based indices into the valid tone table.
        mem.rtone = self._kenwood_valid_tones[int(spec[10]) - 1]
        mem.ctone = self._kenwood_valid_tones[int(spec[12]) - 1]
        if spec[13]:
            mem.offset = int(spec[13])
        else:
            mem.offset = 0
        return mem
# Tuning steps (kHz) supported by the TH-F6/TH-F7.
THF6A_STEPS = [5.0, 6.25, 8.33, 9.0, 10.0, 12.5, 15.0, 20.0, 25.0, 30.0, 50.0,
               100.0]
# TH-F6 duplex map: the shared DUPLEX table plus an extra "split" entry.
THF6A_DUPLEX = dict(DUPLEX)
THF6A_DUPLEX[3] = "split"
@directory.register
class THF6ARadio(KenwoodLiveRadio):
    """Kenwood TH-F6"""
    MODEL = "TH-F6"
    _upper = 399
    # This model can program odd (TX != RX) splits.
    _kenwood_split = True
    _kenwood_valid_tones = list(KENWOOD_TONES)

    def get_features(self):
        """Describe the TH-F6's capabilities."""
        rf = chirp_common.RadioFeatures()
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.can_odd_split = True
        rf.valid_modes = list(THF6_MODES)
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS"]
        rf.valid_tuning_steps = list(THF6A_STEPS)
        rf.valid_bands = [(1000, 1300000000)]
        rf.valid_skips = ["", "S"]
        rf.valid_duplexes = THF6A_DUPLEX.values()
        rf.valid_characters = chirp_common.CHARSET_ASCII
        rf.valid_name_length = 8
        rf.memory_bounds = (0, self._upper)
        rf.has_settings = True
        return rf

    def _cmd_set_memory(self, number, spec):
        """MW on bank 0; an empty *spec* erases the memory."""
        if spec:
            spec = "," + spec
        return "MW", "0,%03i%s" % (number, spec)

    def _cmd_get_memory(self, number):
        return "MR", "0,%03i" % number

    def _cmd_get_memory_name(self, number):
        return "MNA", "%03i" % number

    def _cmd_set_memory_name(self, number, name):
        return "MNA", "%03i,%s" % (number, name)

    def _cmd_get_split(self, number):
        # Bank 1 holds the TX side of odd-split memories.
        return "MR", "1,%03i" % number

    def _cmd_set_split(self, number, spec):
        return "MW", "1,%03i,%s" % (number, spec)

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split reply fields.

        Unlike the older models, tone and DCS indices are 0-based here.
        """
        mem = chirp_common.Memory()
        mem.number = int(spec[1])
        mem.freq = int(spec[2])
        mem.tuning_step = THF6A_STEPS[int(spec[3], 16)]
        mem.duplex = THF6A_DUPLEX[int(spec[4])]
        mem.tmode = get_tmode(spec[6], spec[7], spec[8])
        mem.rtone = self._kenwood_valid_tones[int(spec[9])]
        mem.ctone = self._kenwood_valid_tones[int(spec[10])]
        if spec[11] and spec[11].isdigit():
            mem.dtcs = chirp_common.DTCS_CODES[int(spec[11])]
        else:
            LOG.warn("Unknown or invalid DCS: %s" % spec[11])
        if spec[12]:
            mem.offset = int(spec[12])
        else:
            mem.offset = 0
        mem.mode = THF6_MODES[int(spec[13])]
        if spec[14] == "1":
            mem.skip = "S"
        return mem

    def _make_mem_spec(self, mem):
        """Serialize *mem* for a memory write.

        Split memories carry their TX frequency in a separate bank-1
        write (_cmd_set_split), so duplex and offset are zeroed here.
        """
        if mem.duplex in " +-":
            duplex = util.get_dict_rev(THF6A_DUPLEX, mem.duplex)
            offset = mem.offset
        elif mem.duplex == "split":
            duplex = 0
            offset = 0
        else:
            LOG.warn("Bug: unsupported duplex `%s'" % mem.duplex)
            # Fix: the original fell through here with `duplex` and
            # `offset` unbound, which raised UnboundLocalError when the
            # spec tuple below was built.  Fall back to simplex.
            duplex = 0
            offset = 0
        spec = (
            "%011i" % mem.freq,
            "%X" % THF6A_STEPS.index(mem.tuning_step),
            "%i" % duplex,
            "0",
            "%i" % (mem.tmode == "Tone"),
            "%i" % (mem.tmode == "TSQL"),
            "%i" % (mem.tmode == "DTCS"),
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone)),
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone)),
            "%03i" % (chirp_common.DTCS_CODES.index(mem.dtcs)),
            "%09i" % offset,
            "%i" % (THF6_MODES.index(mem.mode)),
            "%i" % (mem.skip == "S"))
        return spec

    # Option tables for list-type menu settings, keyed by command name;
    # the list index is the integer value sent on the wire.
    _SETTINGS_OPTIONS = {
        "APO": ["Off", "30min", "60min"],
        "BAL": ["100%:0%", "75%:25%", "50%:50%", "25%:75%", "%0:100%"],
        "BAT": ["Lithium", "Alkaline"],
        "CKEY": ["Call", "1750Hz"],
        "DATP": ["1200bps", "9600bps"],
        "LAN": ["English", "Japanese"],
        "MNF": ["Name", "Frequency"],
        "MRM": ["All Band", "Current Band"],
        "PT": ["100ms", "250ms", "500ms", "750ms",
               "1000ms", "1500ms", "2000ms"],
        "SCR": ["Time", "Carrier", "Seek"],
        "SV": ["Off", "0.2s", "0.4s", "0.6s", "0.8s", "1.0s",
               "2s", "3s", "4s", "5s"],
        "VXD": ["250ms", "500ms", "750ms", "1s", "1.5s", "2s", "3s"],
    }

    def get_settings(self):
        """Read all supported settings from the radio into groups."""
        main = RadioSettingGroup("main", "Main")
        aux = RadioSettingGroup("aux", "Aux")
        save = RadioSettingGroup("save", "Save")
        display = RadioSettingGroup("display", "Display")
        dtmf = RadioSettingGroup("dtmf", "DTMF")
        top = RadioSettings(main, aux, save, display, dtmf)
        # (command, destination group, human-readable label)
        lists = [("APO", save, "Automatic Power Off"),
                 ("BAL", main, "Balance"),
                 ("BAT", save, "Battery Type"),
                 ("CKEY", aux, "CALL Key Set Up"),
                 ("DATP", aux, "Data Packet Speed"),
                 ("LAN", display, "Language"),
                 ("MNF", main, "Memory Display Mode"),
                 ("MRM", main, "Memory Recall Method"),
                 ("PT", dtmf, "DTMF Speed"),
                 ("SCR", main, "Scan Resume"),
                 ("SV", save, "Battery Save"),
                 ("VXD", aux, "VOX Drop Delay"),
                 ]
        bools = [("ANT", aux, "Bar Antenna"),
                 ("ATT", main, "Attenuator Enabled"),
                 ("ARO", main, "Automatic Repeater Offset"),
                 ("BEP", aux, "Beep for keypad"),
                 ("DL", main, "Dual"),
                 ("DLK", dtmf, "DTMF Lockout On Transmit"),
                 ("ELK", aux, "Enable Locked Tuning"),
                 ("LK", main, "Lock"),
                 ("LMP", display, "Lamp"),
                 ("NSFT", aux, "Noise Shift"),
                 ("TH", aux, "Tx Hold for 1750"),
                 ("TSP", dtmf, "DTMF Fast Transmission"),
                 ("TXH", dtmf, "TX Hold DTMF"),
                 ("TXS", main, "Transmit Inhibit"),
                 ("VOX", aux, "VOX Enable"),
                 ("VXB", aux, "VOX On Busy"),
                 ]
        # (command, group, label, minimum, maximum)
        ints = [("CNT", display, "Contrast", 1, 16),
                ("VXG", aux, "VOX Gain", 0, 9),
                ]
        # (command, group, label, maximum length)
        strings = [("MES", display, "Power-on Message", 8),
                   ]
        for setting, group, name in bools:
            value = self._kenwood_get_bool(setting)
            rs = RadioSetting(setting, name,
                              RadioSettingValueBoolean(value))
            group.append(rs)
        for setting, group, name in lists:
            value = self._kenwood_get_int(setting)
            options = self._SETTINGS_OPTIONS[setting]
            rs = RadioSetting(setting, name,
                              RadioSettingValueList(options,
                                                    options[value]))
            group.append(rs)
        for setting, group, name, minv, maxv in ints:
            value = self._kenwood_get_int(setting)
            rs = RadioSetting(setting, name,
                              RadioSettingValueInteger(minv, maxv, value))
            group.append(rs)
        for setting, group, name, length in strings:
            _cmd, value = self._kenwood_get(setting)
            rs = RadioSetting(setting, name,
                              RadioSettingValueString(0, length, value))
            group.append(rs)
        return top
@directory.register
class THF7ERadio(THF6ARadio):
    """Kenwood TH-F7"""
    # Protocol-identical to the TH-F6; only the model string differs.
    MODEL = "TH-F7"
# TM-D710 wire-value tables: list positions match the protocol indices.
D710_DUPLEX = ["", "+", "-", "split"]
D710_MODES = ["FM", "NFM", "AM"]
D710_SKIP = ["", "S"]
# Tuning steps (kHz).
D710_STEPS = [5.0, 6.25, 8.33, 10.0, 12.5, 15.0, 20.0, 25.0, 30.0, 50.0, 100.0]
@directory.register
class TMD710Radio(KenwoodLiveRadio):
    """Kenwood TM-D710"""
    MODEL = "TM-D710"
    _upper = 999
    _kenwood_valid_tones = list(KENWOOD_TONES)

    def get_features(self):
        """Describe the TM-D710's capabilities."""
        rf = chirp_common.RadioFeatures()
        rf.can_odd_split = True
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS"]
        rf.valid_modes = D710_MODES
        rf.valid_duplexes = D710_DUPLEX
        rf.valid_tuning_steps = D710_STEPS
        # Commas are the protocol field separator, so they cannot
        # appear in memory names.
        rf.valid_characters = chirp_common.CHARSET_ASCII.replace(',', '')
        rf.valid_name_length = 8
        rf.valid_skips = D710_SKIP
        rf.memory_bounds = (0, 999)
        return rf

    def _cmd_get_memory(self, number):
        return "ME", "%03i" % number

    def _cmd_get_memory_name(self, number):
        return "MN", "%03i" % number

    def _cmd_set_memory(self, number, spec):
        return "ME", "%03i,%s" % (number, spec)

    def _cmd_set_memory_name(self, number, name):
        return "MN", "%03i,%s" % (number, name)

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split ME reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[0])
        mem.freq = int(spec[1])
        mem.tuning_step = D710_STEPS[int(spec[2], 16)]
        mem.duplex = D710_DUPLEX[int(spec[3])]
        # Reverse
        if int(spec[5]):
            mem.tmode = "Tone"
        elif int(spec[6]):
            mem.tmode = "TSQL"
        elif int(spec[7]):
            mem.tmode = "DTCS"
        # Tone/DCS indices are 0-based on this model.
        mem.rtone = self._kenwood_valid_tones[int(spec[8])]
        mem.ctone = self._kenwood_valid_tones[int(spec[9])]
        mem.dtcs = chirp_common.DTCS_CODES[int(spec[10])]
        mem.offset = int(spec[11])
        mem.mode = D710_MODES[int(spec[12])]
        # TX Frequency
        if int(spec[13]):
            # A non-zero TX frequency field marks an odd split.
            mem.duplex = "split"
            mem.offset = int(spec[13])
        # Unknown
        mem.skip = D710_SKIP[int(spec[15])]  # Memory Lockout
        return mem

    def _make_mem_spec(self, mem):
        """Serialize *mem*; odd splits go in the TX-frequency field."""
        spec = (
            "%010i" % mem.freq,
            "%X" % D710_STEPS.index(mem.tuning_step),
            "%i" % (0 if mem.duplex == "split"
                    else D710_DUPLEX.index(mem.duplex)),
            "0",  # Reverse
            "%i" % (mem.tmode == "Tone" and 1 or 0),
            "%i" % (mem.tmode == "TSQL" and 1 or 0),
            "%i" % (mem.tmode == "DTCS" and 1 or 0),
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone)),
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone)),
            "%03i" % (chirp_common.DTCS_CODES.index(mem.dtcs)),
            "%08i" % (0 if mem.duplex == "split" else mem.offset),  # Offset
            "%i" % D710_MODES.index(mem.mode),
            "%010i" % (mem.offset if mem.duplex == "split" else 0),  # TX Freq
            "0",  # Unknown
            "%i" % D710_SKIP.index(mem.skip),  # Memory Lockout
        )
        return spec
@directory.register
class THD72Radio(TMD710Radio):
    """Kenwood TH-D72 in live (clone-free) mode.

    The memory record matches the TM-D710's with two extra fields
    (after the tmode flags and after the DCS code), so all indices
    past spec[7] are shifted relative to the parent class.
    """
    MODEL = "TH-D72 (live mode)"
    HARDWARE_FLOW = sys.platform == "darwin"  # only OS X driver needs hw flow

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[0])
        mem.freq = int(spec[1])
        mem.tuning_step = D710_STEPS[int(spec[2], 16)]
        mem.duplex = D710_DUPLEX[int(spec[3])]
        # Reverse
        if int(spec[5]):
            mem.tmode = "Tone"
        elif int(spec[6]):
            mem.tmode = "TSQL"
        elif int(spec[7]):
            mem.tmode = "DTCS"
        mem.rtone = self._kenwood_valid_tones[int(spec[9])]
        mem.ctone = self._kenwood_valid_tones[int(spec[10])]
        mem.dtcs = chirp_common.DTCS_CODES[int(spec[11])]
        mem.offset = int(spec[13])
        mem.mode = D710_MODES[int(spec[14])]
        # TX Frequency
        if int(spec[15]):
            # A non-zero TX frequency field marks an odd split.
            mem.duplex = "split"
            mem.offset = int(spec[15])
        # Lockout
        mem.skip = D710_SKIP[int(spec[17])]  # Memory Lockout
        return mem

    def _make_mem_spec(self, mem):
        """Serialize *mem* with the TH-D72's two extra "0" fields."""
        spec = (
            "%010i" % mem.freq,
            "%X" % D710_STEPS.index(mem.tuning_step),
            "%i" % (0 if mem.duplex == "split"
                    else D710_DUPLEX.index(mem.duplex)),
            "0",  # Reverse
            "%i" % (mem.tmode == "Tone" and 1 or 0),
            "%i" % (mem.tmode == "TSQL" and 1 or 0),
            "%i" % (mem.tmode == "DTCS" and 1 or 0),
            "0",
            "%02i" % (self._kenwood_valid_tones.index(mem.rtone)),
            "%02i" % (self._kenwood_valid_tones.index(mem.ctone)),
            "%03i" % (chirp_common.DTCS_CODES.index(mem.dtcs)),
            "0",
            "%08i" % (0 if mem.duplex == "split" else mem.offset),  # Offset
            "%i" % D710_MODES.index(mem.mode),
            "%010i" % (mem.offset if mem.duplex == "split" else 0),  # TX Freq
            "0",  # Unknown
            "%i" % D710_SKIP.index(mem.skip),  # Memory Lockout
        )
        return spec
@directory.register
class TMV71Radio(TMD710Radio):
    """Kenwood TM-V71"""
    # Protocol-identical to the TM-D710; only the model string differs.
    MODEL = "TM-V71"
@directory.register
class TMD710GRadio(TMD710Radio):
    """Kenwood TM-D710G"""
    MODEL = "TM-D710G"

    @classmethod
    def get_prompts(cls):
        """Flag this driver as experimental to the user."""
        warning = ("This radio driver is currently under development, "
                   "and supports the same features as the TM-D710A/E. "
                   "There are no known issues with it, but you should "
                   "proceed with caution.")
        prompts = chirp_common.RadioPrompts()
        prompts.experimental = warning
        return prompts
# TH-K2 wire-value tables: list positions match the protocol indices.
THK2_DUPLEX = ["", "+", "-"]
THK2_MODES = ["FM", "NFM"]
# Characters the TH-K2 accepts in memory names.
THK2_CHARS = chirp_common.CHARSET_UPPER_NUMERIC + "-/"
@directory.register
class THK2Radio(KenwoodLiveRadio):
    """Kenwood TH-K2"""
    MODEL = "TH-K2"
    _kenwood_valid_tones = list(KENWOOD_TONES)

    def get_features(self):
        """Describe the TH-K2's capabilities (VHF only, 50 memories)."""
        rf = chirp_common.RadioFeatures()
        rf.can_odd_split = False
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.has_tuning_step = False
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS"]
        rf.valid_modes = THK2_MODES
        rf.valid_duplexes = THK2_DUPLEX
        rf.valid_characters = THK2_CHARS
        rf.valid_name_length = 6
        rf.valid_bands = [(136000000, 173990000)]
        rf.valid_skips = ["", "S"]
        rf.valid_tuning_steps = [5.0]
        rf.memory_bounds = (0, 49)
        return rf

    def _cmd_get_memory(self, number):
        return "ME", "%02i" % number

    def _cmd_get_memory_name(self, number):
        return "MN", "%02i" % number

    def _cmd_set_memory(self, number, spec):
        return "ME", "%02i,%s" % (number, spec)

    def _cmd_set_memory_name(self, number, name):
        return "MN", "%02i,%s" % (number, name)

    def _parse_mem_spec(self, spec):
        """Build a chirp_common.Memory from the split ME reply fields."""
        mem = chirp_common.Memory()
        mem.number = int(spec[0])
        mem.freq = int(spec[1])
        # mem.tuning_step =
        mem.duplex = THK2_DUPLEX[int(spec[3])]
        if int(spec[5]):
            mem.tmode = "Tone"
        elif int(spec[6]):
            mem.tmode = "TSQL"
        elif int(spec[7]):
            mem.tmode = "DTCS"
        # Tone/DCS indices are 0-based on this model.
        mem.rtone = self._kenwood_valid_tones[int(spec[8])]
        mem.ctone = self._kenwood_valid_tones[int(spec[9])]
        mem.dtcs = chirp_common.DTCS_CODES[int(spec[10])]
        mem.offset = int(spec[11])
        mem.mode = THK2_MODES[int(spec[12])]
        mem.skip = int(spec[16]) and "S" or ""
        return mem

    def _make_mem_spec(self, mem):
        """Serialize *mem*; rejects tones the radio cannot produce."""
        try:
            rti = self._kenwood_valid_tones.index(mem.rtone)
            cti = self._kenwood_valid_tones.index(mem.ctone)
        except ValueError:
            raise errors.UnsupportedToneError()
        spec = (
            "%010i" % mem.freq,
            "0",
            "%i" % THK2_DUPLEX.index(mem.duplex),
            "0",
            "%i" % int(mem.tmode == "Tone"),
            "%i" % int(mem.tmode == "TSQL"),
            "%i" % int(mem.tmode == "DTCS"),
            "%02i" % rti,
            "%02i" % cti,
            "%03i" % chirp_common.DTCS_CODES.index(mem.dtcs),
            "%08i" % mem.offset,
            "%i" % THK2_MODES.index(mem.mode),
            "0",
            "%010i" % 0,
            "0",
            "%i" % int(mem.skip == "S")
        )
        return spec
# Tuning steps (kHz) supported by the TM-271.
TM271_STEPS = [2.5, 5.0, 6.25, 10.0, 12.5, 15.0, 20.0, 25.0, 30.0, 50.0, 100.0]
@directory.register
class TM271Radio(THK2Radio):
    """Kenwood TM-271"""
    MODEL = "TM-271"

    def get_features(self):
        """TH-K2 protocol with 100 memories and more tuning steps."""
        rf = chirp_common.RadioFeatures()
        rf.can_odd_split = False
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.has_tuning_step = False
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS"]
        rf.valid_modes = THK2_MODES
        rf.valid_duplexes = THK2_DUPLEX
        rf.valid_characters = THK2_CHARS
        rf.valid_name_length = 6
        rf.valid_bands = [(137000000, 173990000)]
        rf.valid_skips = ["", "S"]
        rf.valid_tuning_steps = list(TM271_STEPS)
        rf.memory_bounds = (0, 99)
        return rf

    # Memory numbers are three digits wide on this model (vs two on
    # the TH-K2).
    def _cmd_get_memory(self, number):
        return "ME", "%03i" % number

    def _cmd_get_memory_name(self, number):
        return "MN", "%03i" % number

    def _cmd_set_memory(self, number, spec):
        return "ME", "%03i,%s" % (number, spec)

    def _cmd_set_memory_name(self, number, name):
        return "MN", "%03i,%s" % (number, name)
@directory.register
class TM281Radio(TM271Radio):
    """Kenwood TM-281"""
    MODEL = "TM-281"
    # Protocol-identical to the TM-271; only the reported model differs.
@directory.register
class TM471Radio(THK2Radio):
    """Kenwood TM-471"""
    MODEL = "TM-471"

    def get_features(self):
        """TH-K2 protocol, but UHF band and 100 memories."""
        rf = chirp_common.RadioFeatures()
        rf.can_odd_split = False
        rf.has_dtcs_polarity = False
        rf.has_bank = False
        rf.has_tuning_step = False
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS"]
        rf.valid_modes = THK2_MODES
        rf.valid_duplexes = THK2_DUPLEX
        rf.valid_characters = THK2_CHARS
        rf.valid_name_length = 6
        rf.valid_bands = [(444000000, 479990000)]
        rf.valid_skips = ["", "S"]
        rf.valid_tuning_steps = [5.0]
        rf.memory_bounds = (0, 99)
        return rf

    # Memory numbers are three digits wide on this model.
    def _cmd_get_memory(self, number):
        return "ME", "%03i" % number

    def _cmd_get_memory_name(self, number):
        return "MN", "%03i" % number

    def _cmd_set_memory(self, number, spec):
        return "ME", "%03i,%s" % (number, spec)

    def _cmd_set_memory_name(self, number, name):
        return "MN", "%03i,%s" % (number, name)
| mach327/chirp_fork | chirp/drivers/kenwood_live.py | Python | gpl-3.0 | 44,648 | [
"Elk"
] | 55b75285e64a00cc45a5067485f50a3099f57e95bb2fc8c89518d414bf799829 |
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige.IObject import IObject
from Const import *
from ige.IDataHolder import IDataHolder
import Rules
import math
import random
import Utils, ShipUtils, math, copy, re
from sys import maxint
from ige import GameException, ServerException, log
class IFleet(IObject):
typeID = T_FLEET
def init(self, obj):
    """Initialize the runtime attributes of a freshly created fleet."""
    IObject.init(self, obj)
    # position, movement and targeting
    obj.x = 0.0
    obj.y = 0.0
    obj.oldX = 0.0
    obj.oldY = 0.0
    obj.orbiting = OID_NONE
    obj.closeSystem = OID_NONE
    obj.speed = 0.0
    obj.maxSpeed = 0.0
    obj.signature = 0
    obj.eta = 0.0
    obj.target = OID_NONE
    # energy accounting and ship list
    obj.operEn = 0
    obj.storEn = 0
    obj.maxEn = 0
    obj.operProd = 0.0
    obj.ships = []
    # action queue
    obj.actions = []
    obj.actionIndex = 0
    obj.actionWaitCounter = 1
    # combat / scanner state
    obj.speedBoost = 1.0
    obj.combatPwr = 0
    obj.isMilitary = 0
    obj.scannerPwr = 0
    obj.origScannerPwr = 0
    obj.scannerOn = True
    obj.combatExp = 0
    obj.combatCounter = 0
    obj.combatRetreatWait = 0
    obj.lastUpgrade = 0
    # naming and merge policy
    obj.customname = None
    obj.allowmerge = 1  # states: 0: no merge; 1: normal merging; 2: fleets can merge with this fleet, but this fleet cannot merge with others
def create(self, tran, obj, refObj, owner):
    """Place a new fleet at *refObj* (a system or another fleet).

    Links the fleet into the reference object's bookkeeping lists,
    assigns the first unused automatic "Fleet N" name and registers
    the fleet with its owner.
    """
    obj.owner = owner
    obj.x = refObj.x
    obj.y = refObj.y
    if refObj.type == T_SYSTEM:
        # Born in orbit: attach to the system's fleet lists.
        obj.orbiting = refObj.oid
        obj.closeSystem = refObj.oid
        refObj.fleets.append(obj.oid)
        refObj.closeFleets.append(obj.oid)
        obj.target = OID_NONE
    elif refObj.type == T_FLEET:
        # Split off an in-flight fleet: inherit course and orders.
        obj.oldX = refObj.oldX
        obj.oldY = refObj.oldY
        obj.orbiting = OID_NONE
        obj.closeSystem = refObj.closeSystem
        obj.actions = copy.deepcopy(refObj.actions)
        obj.actionIndex = refObj.actionIndex
        obj.actionWaitCounter = refObj.actionWaitCounter
        system = tran.db[obj.closeSystem]
        system.closeFleets.append(obj.oid)
        obj.target = refObj.target
    # collect names already taken by the owner's fleets
    usedNames = set()
    for fleetID in tran.db[owner].fleets:
        usedNames.add(tran.db[fleetID].name)
    # pick the first free automatic name.  (The original code also
    # computed the name once before the loop, which was immediately
    # overwritten -- that dead assignment is gone.)
    counter = 1
    while True:
        name = u'Fleet %d' % counter
        counter += 1
        if name not in usedNames:
            break
    obj.name = name
    obj.customname = None
    obj.allowmerge = 1
    # insert fleet into owner's fleets
    tran.db[obj.owner].fleets.append(obj.oid)
create.public = 0
create.accLevel = AL_ADMIN
def addNewShip(self, tran, obj, designID):
    """Append one freshly built ship of *designID* to the fleet."""
    design = tran.db[obj.owner].shipDesigns[designID]
    # A new hull arrives undamaged, with full shields and full tanks.
    obj.storEn += design.storEn
    obj.ships.append([designID, design.maxHP, design.shieldHP, 0])
    # recompute the fleet's aggregate stats
    self.cmd(obj).update(tran, obj)
addNewShip.public = 0
def removeShips(self, tran, obj, ships):
    """Remove the given ships; disband the fleet if it becomes empty."""
    for doomed in ships:
        obj.ships.remove(doomed)
    if obj.ships:
        self.cmd(obj).update(tran, obj)
    else:
        log.debug('IFleet', 'removeShips removed last ship')
        self.cmd(obj).disbandFleet(tran, obj)
    return obj
removeShips.public = 1
removeShips.accLevel = AL_OWNER
def deleteDesign(self, tran, obj, designID):
    """Drop every ship of design *designID* from the fleet."""
    remaining = []
    for ship in obj.ships:
        # ship[0] holds the design id
        if ship[0] != designID:
            remaining.append(ship)
    obj.ships = remaining
    self.cmd(obj).update(tran, obj)
deleteDesign.public = 0
def disbandFleet(self, tran, obj):
    """Remove the fleet from its owner, its system and the database.

    Every cleanup step is attempted independently and failures are
    only logged, so one stale cross-reference cannot block deletion
    of the remaining links.
    """
    log.debug('IFleet', 'disbanding fleet', obj.oid, 'of player', obj.owner)
    # unregister from the owning player's fleet list
    try:
        if obj.owner != OID_NONE:
            tran.db[obj.owner].fleets.remove(obj.oid)
    except Exception:
        log.warning('IFleet', 'disbandFleet: cannot remove fleet from owner\'s fleet')
        pass
    # unregister from the orbited system, if any
    if obj.orbiting != OID_NONE:
        try:
            if tran.db.has_key(obj.orbiting):
                tran.db[obj.orbiting].fleets.remove(obj.oid)
        except Exception:
            log.warning('IFleet', 'disbandFleet: cannot remove fleet from system.')
            pass
    # unregister from the close system's closeFleets index
    if obj.closeSystem != OID_NONE:
        try:
            if tran.db.has_key(obj.closeSystem):
                tran.db[obj.closeSystem].closeFleets.remove(obj.oid)
        except Exception:
            log.warning("IFleet", "disbandFleet: cannot remove fleet from the close system.")
    # finally delete the object itself from the database
    try:
        tran.db.delete(obj.oid)
    except KeyError:
        log.warning('IFleet', 'disbandFleet: cannot remove fleet from database.')
disbandFleet.public = 1
disbandFleet.accLevel = AL_FULL
def joinFleet(self, tran, obj, fleetID, force=False):
    """Merge *obj* into another fleet orbiting the same system.

    With fleetID == OID_NONE an idle, unity-relation fleet in the same
    system is searched for automatically and validation problems are
    silently ignored; an explicitly given fleetID raises GameException
    on validation failure instead.  The source fleet is disbanded after
    a successful merge.
    """
    if obj.orbiting == OID_NONE:
        # we are in space
        return
    if obj.allowmerge != 1:
        # owner has turned off auto-joins (join self with other)
        return
    if fleetID == OID_NONE:
        # auto-join mode: failures stay silent
        raiseExps = False
        # find suitable fleet
        system = tran.db[obj.orbiting]
        player = tran.db[obj.owner]
        for tmpID in system.fleets:
            if tmpID == obj.oid:
                continue
            fleet = tran.db[tmpID]
            if fleet.allowmerge == 0 and not force:
                # owner has turned off auto-joins (join other with self)
                continue
            rel = self.cmd(player).getRelationTo(tran, player, fleet.owner)
            if rel == REL_UNITY and Utils.isIdleFleet(fleet):
                fleetID = tmpID
                break
    else:
        # explicit target from a client call: report validation errors
        raiseExps = True
    if fleetID == OID_NONE:
        return
    # join to selected fleet
    fleet = tran.db[fleetID]
    # if the fleet was specified from a client call, validate it:
    if not fleet.owner == obj.owner:
        if raiseExps:
            raise GameException("Fleets do not have the same owner.")
        return
    if not fleet.orbiting == obj.orbiting:
        if raiseExps:
            raise GameException("Fleets are not in the same system.")
        return
    if fleet.allowmerge == 0 and not force:
        # owner has turned off auto-joins (join other with self)
        return
    fleet.ships.extend(obj.ships)
    # transfer resources
    fleet.storEn += obj.storEn
    # update fleet's data
    self.cmd(fleet).update(tran, fleet)
    # disband this fleet
    log.debug('IFleet joinFleet, removing old fleet: source fleet', obj.oid, '; target fleet', fleet.oid)
    self.cmd(obj).disbandFleet(tran, obj)
joinFleet.public = 1
joinFleet.accLevel = AL_FULL
    def splitFleet(self, tran, obj, ships, mEn):
        """Split the given ships off `obj` into a newly created fleet.

        ships -- ship records to move; each must exist in obj.ships and
                 at least one ship has to stay in the original fleet
        mEn -- requested amount of stored energy for the new fleet
               (clamped to the new fleet's capacity and obj's storage)

        Returns (new fleet, original fleet, owner's fleet id list).
        Raises GameException on an invalid ship selection.
        """
        if not len(ships):
            raise GameException('No ships in the new fleet.')
        if len(ships) == len(obj.ships):
            raise GameException('No ships in the original fleet.')
        # check ships
        tmpShips = obj.ships[:]
        for ship in ships:
            if ship not in tmpShips:
                raise GameException("No such ship(s) in the original fleet.")
            tmpShips.remove(ship)
        # create new fleet
        fleet = self.new(T_FLEET)
        tran.db.create(fleet)
        log.debug(obj.oid, "FLEET -- split fleet, new fleet is", fleet.oid)
        # place the new fleet where the original is (system or deep space)
        if obj.orbiting != OID_NONE:
            refObj = tran.db[obj.orbiting]
        else:
            refObj = obj
        self.cmd(fleet).create(tran, fleet, refObj, obj.owner)
        # move ships
        for ship in ships:
            # use server data
            idx = obj.ships.index(ship)
            ship = obj.ships.pop(idx)
            fleet.ships.append(ship)
        # update fleet
        self.cmd(fleet).update(tran, fleet)
        # move en
        move = max(min(mEn, fleet.maxEn, obj.storEn), 0)
        fleet.storEn += move
        obj.storEn -= move
        # share speed boost
        fleet.speedBoost = obj.speedBoost
        # update fleets
        self.cmd(obj).update(tran, obj)
        self.cmd(fleet).update(tran, fleet)
        # return new fleet, old fleet and player's fleets
        return fleet, obj, tran.db[obj.owner].fleets
    splitFleet.public = 1
    splitFleet.accLevel = AL_FULL
def renameFleet(self, tran, obj, name):
if not Utils.isCorrectName(name):
raise GameException('Invalid name. Only characters, digits, space, dot and dash permitted, max. length is 30 characters.')
if re.match("/^Fleet \d+$/",name):
raise GameException('Invalid name. You cannot use the format "Fleet ##" for a custom name.')
names = {}
for fleetID in tran.db[obj.owner].fleets:
names[tran.db[fleetID].customname] = None
if name in names and name != obj.customname:
raise GameException('Name already in use.')
obj.customname = name
return obj.customname
renameFleet.public = 1
renameFleet.accLevel = AL_FULL
def removeFleetName(self, tran, obj):
obj.customname = None
return obj.name
removeFleetName.public = 1
removeFleetName.accLevel = AL_FULL
def setMergeState(self, tran, obj, state):
if not state in [0,1,2]:
raise GameException('Bad join fleet state.') #should we log this? Probably don't need to.
obj.allowmerge = state
return obj.allowmerge
setMergeState.public = 1
setMergeState.accLevel = AL_FULL
    def update(self, tran, obj):
        """Recompute the fleet's derived attributes from its ships.

        Disbands the fleet when it has no ships or an invalid owner.
        Recalculates scanner power, support costs, speed, signature,
        combat power, fixes the closest-system bookkeeping and replaces
        obsolete queued commands. Ships whose design the owner no longer
        has are removed.
        """
        if not (hasattr(obj,'customname')): #added in 0.5.64
            obj.customname = None
            obj.allowmerge = 1
        # if there are no ships -> disband fleet
        if not len(obj.ships) or obj.owner == OID_NONE:
            log.warning(obj.oid, "FLEET - no ships in the fleet -- disbanding")
            self.cmd(obj).disbandFleet(tran, obj)
            return
        # check for duplicates (TODO: remove me, bug was fixed)
        #for ship1 in obj.ships:
        #    duplicates = 0
        #    for ship2 in obj.ships:
        #        if ship1 is ship2:
        #            duplicates += 1
        #    if duplicates != 1:
        #        # regenerate ships
        #        newShips = []
        #        for designID, hp, shield, exp in obj.ships:
        #            newShips.append([designID, hp, shield, exp])
        #        obj.ships = newShips
        #        raise ServerException("Ship duplicates in %s" % obj)
        #
        # reset aggregates before re-summing them over all ships
        obj.origScannerPwr = 0
        obj.operEn = 0
        obj.operProd = 0.0
        obj.maxEn = 0
        obj.maxSpeed = 999999.9
        obj.combatPwr = 0
        obj.isMilitary = 0
        #ships = {}
        # find
        player = tran.db.get(obj.owner, None)
        if not player or player.type not in PLAYER_TYPES or obj.oid not in player.fleets:
            # disband fleet when owner is invalid
            log.warning(obj.oid, "Disbanding fleet - invalid owner", obj)
            self.cmd(obj).disbandFleet(tran, obj)
            return
        obj.signature = 0
        remove = []
        idx = 0
        for designID, hp, shield, exp in obj.ships:
            if designID in player.shipDesigns:
                tech = player.shipDesigns[designID]
                obj.origScannerPwr = max(tech.scannerPwr, obj.origScannerPwr)
                obj.operEn += tech.operEn
                obj.operProd += tech.buildProd * Rules.operProdRatio
                obj.maxEn += tech.storEn
                obj.maxSpeed = min(obj.maxSpeed, tech.speed)
                obj.signature += tech.signature
                obj.combatPwr += int(tech.combatPwr * float(hp + shield) / (tech.maxHP + tech.shieldHP))
                obj.isMilitary = obj.isMilitary or tech.isMilitary
                #ships[tech.signature] = ships.get(tech.signature, 0) + 1
                # clamp hull points to the design's maximum
                if obj.ships[idx][1] > tech.maxHP:
                    log.debug(obj.oid, "Too high maxHP for ship, player", obj.owner)
                    obj.ships[idx][1] = min(obj.ships[idx][1], tech.maxHP)
            else:
                # TODO track this problem
                log.warning("Player has not this designID", player.oid, designID)
                remove.append([designID, hp, shield, exp])
            idx += 1
        # delete ships intended for removal
        for shipSpec in remove:
            obj.ships.remove(shipSpec)
        # misc
        obj.signature = min(obj.signature, Rules.maxSignature)
        obj.signature = max(obj.signature,1) #require fleet signature to be at least 1 now that we removed that from a per-ship basis
        obj.speed = obj.maxSpeed
        # storage
        obj.storEn = min(obj.storEn, obj.maxEn)
        # sort ships only when there is no combat
        # this prevents resorting fleets in combat
        if obj.combatCounter == 0:
            obj.ships = ShipUtils.sortShips(obj.ships)
        else:
            log.debug("Skipping ship (re)sorting [fleet in combat]", obj.oid)
        # closest system
        if not tran.db.has_key(obj.closeSystem) or tran.db[obj.closeSystem].type not in (T_SYSTEM, T_WORMHOLE):
            if obj.orbiting == OID_NONE:
                log.debug("No close system for fleet", obj.oid)
                # select any system
                systemID = tran.db[tran.db[OID_UNIVERSE].galaxies[0]].systems[0]
                obj.closeSystem = systemID
                log.debug(obj.oid, "Setting NULL close system to", systemID)
            else:
                log.debug(obj.oid, "Generating close system from orbiting", obj.orbiting)
                obj.closeSystem = obj.orbiting
            system = tran.db[obj.closeSystem]
            if obj.oid not in system.closeFleets:
                system.closeFleets.append(obj.oid)
        # verify close system
        if tran.db.has_key(obj.closeSystem):
            system = tran.db[obj.closeSystem]
            if system.type in (T_SYSTEM, T_WORMHOLE):
                if obj.oid not in system.closeFleets:
                    log.debug("Adding fleet", obj.oid, "into closeFleets of", system.oid)
                    system.closeFleets.append(obj.oid)
            else:
                log.debug(obj.oid, "Close system is not a system")
                obj.closeSystem = OID_NONE
        else:
            log.debug(obj.oid, "Close system does not exists")
            obj.closeSystem = OID_NONE
        # compute scanner pwr
        if obj.closeSystem:
            system = tran.db[obj.closeSystem]
            # EMR level of the enclosing galaxy dampens the scanner
            emrLevel = tran.db[system.compOf].emrLevel
            obj.scannerPwr = int(obj.origScannerPwr * (2.0 - emrLevel))
        # replace obsolete commands
        for actionTuple in obj.actions[:]:
            try:
                action, target, actionData = actionTuple
            except:
                log.warning(obj.oid, "Removing action", actionTuple)
                obj.actions.remove(actionTuple)
        index = 0
        for action, target, actionData in obj.actions:
            # action codes 2..100 are legacy commands -- migrate them
            if action >= 2 and action <= 100:
                # this is an old action -> replace it by move command if available
                if target != OID_NONE:
                    log.debug(obj.oid, "Replacing action", action, "by action MOVE")
                    obj.actions[index][0] = FLACTION_MOVE
                else:
                    # replace by none action
                    log.debug(obj.oid, "Replacing action", action, "by action NONE")
                    obj.actions[index] = (FLACTION_NONE, None, None)
            if action == FLACTION_DEPLOY and actionData not in player.shipDesigns:
                # deployment of scrapped ship
                log.debug(obj.oid, "invalid ship to deploy")
                obj.actions[index] = (FLACTION_NONE, None, None)
            index += 1
    update.public = 0
    def getScanInfos(self, tran, obj, scanPwr, player):
        """Build scan result data for this fleet as seen by `player`.

        Returns a list with one IDataHolder whose detail level grows with
        scanPwr: position/eta at level1, owner and name at level2,
        military info at level3, ship composition at level4, full scanner
        data for partners. Returns [] for the fleet's own owner or when
        scanPwr is below the first threshold.
        """
        if obj.owner == player.oid:
            return []
        if scanPwr >= Rules.level1InfoScanPwr:
            result = IDataHolder()
            result._type = T_SCAN
            result.scanPwr = scanPwr
            result.oid = obj.oid
            result.x = obj.x
            result.y = obj.y
            result.oldX = obj.oldX
            result.oldY = obj.oldY
            result.eta = obj.eta
            result.signature = obj.signature
            result.type = obj.type
            result.orbiting = obj.orbiting
            # report the move target (as a system) for fleets in flight
            if obj.orbiting == OID_NONE and obj.actionIndex < len(obj.actions):
                target = obj.actions[obj.actionIndex][1]
                targetObj = tran.db[target]
                if targetObj.type == T_PLANET:
                    result.target = targetObj.compOf
                else:
                    result.target = target
        else:
            return []
        if scanPwr >= Rules.level2InfoScanPwr:
            result.owner = obj.owner
            if obj.customname:
                result.name = obj.customname
            else:
                result.name = obj.name
        if scanPwr >= Rules.level3InfoScanPwr:
            result.isMilitary = obj.isMilitary
            result.combatPwr = obj.combatPwr
        if scanPwr >= Rules.level4InfoScanPwr:
            # provide less information
            result.shipScan = {}
            owner = tran.db[obj.owner]
            for designID, hp, shield, exp in obj.ships:
                tech = owner.shipDesigns[designID]
                key = tech.name, tech.combatClass, tech.isMilitary
                result.shipScan[key] = result.shipScan.get(key, 0) + 1
        if scanPwr >= Rules.partnerScanPwr:
            result.scannerPwr = obj.scannerPwr
            result.allowmerge = obj.allowmerge
            result.customname = obj.customname
            result.name = obj.name
        return [result]
    def addAction(self, tran, obj, index, action, targetID, aData):
        """Insert a command into the fleet's command queue at `index`.

        Validates the target for the given action type (REDIRECT, WAIT,
        REPEATFROM and DECLAREWAR take no target; everything else must
        target a scanned wormhole/system/planet in the owner's galaxy)
        and fixes up actionIndex so the currently active command stays
        active. Returns (actions, actionIndex).
        """
        # check if target is valid
        if action == FLACTION_REDIRECT:
            if targetID != OID_NONE:
                raise GameException("This command has no target.")
        elif action == FLACTION_WAIT or action == FLACTION_REPEATFROM:
            if targetID != OID_NONE:
                raise GameException("This command has no target.")
            aData = int(aData)
            if aData < 0:
                raise GameException("Number equal or larger than 1 must be specified.")
        elif action == FLACTION_DECLAREWAR:
            if targetID != OID_NONE:
                raise GameException("This command has no target.")
            if aData == OID_NONE or aData == obj.owner:
                raise GameException("Invalid commander.")
        else:
            target = tran.db[targetID]
            if target.type not in (T_SYSTEM, T_WORMHOLE, T_PLANET):
                raise GameException('Can target wormholes, systems or planets only.')
            if action == FLACTION_ENTERWORMHOLE and target.type != T_WORMHOLE:
                raise GameException('Can only traverse wormholes.')
            if action == FLACTION_DEPLOY and target.type != T_PLANET:
                raise GameException('Can build on/colonize planets only.')
            if len(obj.actions) + 1 > Rules.maxCmdQueueLen:
                raise GameException('Too many commands in the queue.')
            #validate that the target is in the fleet owner's galaxy
            if target.type == T_PLANET:
                systemID = target.compOf
            else:
                systemID = targetID
            owner = tran.db[obj.owner]
            # validate that the player has actually scanned this system
            if systemID not in owner.validSystems:
                raise GameException('You cannot find this system (never scanned).')
            if not owner.galaxies:
                raise GameException('The fleet owner is not in a galaxy.')
            galaxy = tran.db[owner.galaxies[0]]
            if systemID not in galaxy.systems:
                raise GameException('The target system is not in your galaxy.')
        obj.actions.insert(index, (action, targetID, aData))
        # keep the active command active after the insertion
        if index <= obj.actionIndex:
            obj.actionIndex += 1
        if obj.actionIndex >= len(obj.actions) or obj.actionIndex < 0:
            obj.actionIndex = min(index, len(obj.actions) - 1)
        return obj.actions, obj.actionIndex
    addAction.public = 1
    addAction.accLevel = AL_FULL
    def deleteAction(self, tran, obj, index):
        """Delete the command at `index` from the queue.

        A MOVE command in progress cannot be deleted; any other active
        in-flight command is converted into a MOVE toward its target so
        the fleet still arrives somewhere. Returns (actions, actionIndex).
        """
        if index >= len(obj.actions) or index < 0:
            raise GameException('Index out of bounds.')
        if index == obj.actionIndex and obj.orbiting == OID_NONE:
            if obj.actions[index][0] == FLACTION_MOVE:
                raise GameException('Move command in progress cannot be deleted.')
            else:
                # convert action to the move command
                action, targetID, aData = obj.actions[index]
                obj.actions[index] = (FLACTION_MOVE, targetID, aData)
                return obj.actions, obj.actionIndex
        if index == obj.actionIndex and obj.actions[index][0] == FLACTION_WAIT:
            # reset wait counters
            obj.actionWaitCounter = 1
        del obj.actions[index]
        # shift the active index left when an earlier command was removed
        if index <= obj.actionIndex and obj.actionIndex > 0:
            obj.actionIndex -= 1
        return obj.actions, obj.actionIndex
    deleteAction.public = 1
    deleteAction.accLevel = AL_FULL
def setActionIndex(self, tran, obj, index):
if index >= len(obj.actions) or index < 0:
raise GameException('Index out of bounds.')
if obj.orbiting == OID_NONE:
raise GameException('Move command in progress cannot be changed.')
obj.actionIndex = index
return obj.actionIndex
setActionIndex.public = 1
setActionIndex.accLevel = AL_FULL
def moveAction(self, tran, fleet, index, rel):
if index >= len(fleet.actions):
raise GameException('No such item in the command list.')
if index + rel < 0 or index + rel >= len(fleet.actions):
raise GameException('Cannot move.')
if index == fleet.actionIndex:
raise GameException('Cannot move active command.')
if index < fleet.actionIndex:
raise GameException('Cannot move processed command.')
if index + rel <= fleet.actionIndex:
raise GameException('Cannot move before active command.')
action = fleet.actions[index]
del fleet.actions[index]
fleet.actions.insert(index + rel, action)
return fleet.actions
moveAction.public = 1
moveAction.accLevel = AL_FULL
def clearProcessedActions(self, tran, fleet):
if fleet.actionIndex <= 0:
return (fleet.actions, fleet.actionIndex)
for actionIdx in range(0, fleet.actionIndex):
del fleet.actions[0]
fleet.actionIndex = 0
return (fleet.actions, fleet.actionIndex)
clearProcessedActions.public = 1
clearProcessedActions.accLevel = AL_FULL
    def processACTIONPhase(self, tran, obj, data):
        """Process one turn of the fleet's command queue.

        Consumes support energy (damaging ships when the tanks are dry),
        refuels/repairs/upgrades when orbiting, then executes queued
        commands until one of them blocks (move in progress, wait, ...).
        Idle fleets try to merge with other idle fleets in the system.
        """
        #@log.debug("Fleet", obj.oid, "ACTION")
        # update fleet data
        self.cmd(obj).update(tran, obj)
        # consume support
        if obj.storEn >= obj.operEn:
            obj.storEn -= obj.operEn
            # refuel
            refuelled = self.cmd(obj).refuelAndRepairAndRecharge(tran, obj)
        else:
            # try to refuel fleet
            refuelled = self.cmd(obj).refuelAndRepairAndRecharge(tran, obj)
            # there is not enought support -> damage ships
            log.debug('IFleet', 'No support - damaging ships in fleet', obj.oid)
            index = 0
            player = tran.db[obj.owner]
            destroyed = []
            for designID, hp, shield, exp in obj.ships:
                spec = player.shipDesigns[designID]
                operEn = spec.operEn
                if obj.storEn >= spec.operEn:
                    #@log.debug('IFleet', 'Ship SUPPORT OK', shipTechID)
                    obj.storEn -= spec.operEn
                elif obj.storEn > 0:
                    # consume remaining fuel
                    obj.storEn = 0
                else:
                    # apply damage
                    dmg = max(int(spec.maxHP * Rules.shipDecayRatio), 1)
                    if dmg >= hp:
                        destroyed.append(obj.ships[index])
                    else:
                        obj.ships[index][SHIP_IDX_HP] -= dmg
                index += 1
            self.cmd(obj).removeShips(tran, obj, destroyed)
            # if fleet has been destroyed -> abort action processing and send message
            if not tran.db.has_key(obj.oid):
                if obj.orbiting:
                    system = tran.db[obj.orbiting]
                    Utils.sendMessage(tran, player, MSG_FUEL_LOST_ORBITING, system.oid, (obj.name, system.oid))
                else:
                    action, target, actionData = obj.actions[obj.actionIndex]
                    Utils.sendMessage(tran, player, MSG_FUEL_LOST_FLYING, target, (obj.name, target))
                log.debug('IFleet', obj.oid, 'fleet destroyed')
                return
        # upgrade ships
        if obj.orbiting != OID_NONE:
            # autoRepair is part of serviceShips
            self.cmd(obj).serviceShips(tran, obj)
            # record scanner into system scanner overview
            system = tran.db[obj.orbiting]
            system.scannerPwrs[obj.owner] = max(obj.scannerPwr, system.scannerPwrs.get(obj.owner, 0))
        # ACTIONS
        if Utils.isIdleFleet(obj):
            #@log.debug('IFleet', obj.oid, 'fleet idle')
            # reset retreat counter
            obj.combatRetreatWait = 0
            # reset last position to current position
            obj.oldX = obj.x
            obj.oldY = obj.y
            # there is nothing to do - try to join other fleets
            self.cmd(obj).joinFleet(tran, obj, OID_NONE)
            return
        #@log.debug('IFleet', obj.oid, 'processing action', action)
        # execute commands until one of them blocks for this turn
        while not Utils.isIdleFleet(obj):
            action, target, actionData = obj.actions[obj.actionIndex]
            if action == FLACTION_NONE:
                obj.actionIndex += 1
            elif action == FLACTION_DEPLOY:
                if self.cmd(obj).actionDeploy(tran, obj):
                    obj.actionIndex += 1
                break
            elif action == FLACTION_WAIT:
                if obj.actionWaitCounter >= actionData:
                    obj.actionWaitCounter = 1
                    obj.actionIndex += 1
                else:
                    obj.actionWaitCounter += 1
                break #wait should wait, not let move; deindented this to act for completed waits also --RC
            elif action == FLACTION_MOVE:
                if self.cmd(obj).moveToTarget(tran, obj, target):
                    # we are there
                    obj.actionIndex += 1
                break
            elif action == FLACTION_ENTERWORMHOLE:
                if self.cmd(obj).moveToWormhole(tran, obj, target):
                    # we are there
                    obj.actionIndex += 1
                break
            elif action == FLACTION_DECLAREWAR:
                # switch off pact allow military ships
                player = tran.db[obj.owner]
                self.cmd(player).changePactCond(tran, player, actionData,
                    PACT_ALLOW_MILITARY_SHIPS, PACT_OFF, [PACT_ALLOW_MILITARY_SHIPS])
                # next action
                obj.actionIndex +=1
            elif action == FLACTION_REFUEL:
                # check current refuel level
                if self.cmd(obj).moveToTarget(tran, obj, target) and refuelled:
                    # next action
                    obj.actionIndex += 1
                else:
                    break
            elif action == FLACTION_REDIRECT:
                # ok, let's do some magic
                if self.cmd(obj).actionRedirect(tran, obj, refuelled):
                    obj.actionIndex += 1
                else:
                    break
            elif action == FLACTION_REPEATFROM:
                log.debug(obj.oid, "Setting action index to", data)
                if actionData != None:
                    obj.actionIndex = actionData
                else:
                    obj.actionIndex += 1
                break # TODO fix me
            else:
                raise GameException('Unsupported action %d' % action)
                break
        # it there is nothing to do -> join other idle fleets
        # the fleet could joined with another fleet
        if tran.db.has_key(obj.oid) and Utils.isIdleFleet(obj):
            # reset retreat counter
            obj.combatRetreatWait = 0
            # try to join some fleet
            self.cmd(obj).joinFleet(tran, obj, OID_NONE)
    processACTIONPhase.public = 1
    processACTIONPhase.accLevel = AL_ADMIN
    def actionRedirect(self, tran, obj, refuelled):
        """Handle the REDIRECT command for one turn.

        While orbiting, first tries to merge with another redirecting
        fleet of the same owner (resulting fleet <= 20 ships). Otherwise
        moves toward the current redirect target; departures happen only
        with full tanks and every 6th turn. Returns 1 when the command is
        finished, 0 to keep processing it next turn.
        """
        if obj.orbiting != OID_NONE:
            # try to find fleet with the redirect command (<10 ships)
            # and join it
            system = tran.db[obj.orbiting]
            for fleetID in system.fleets:
                fleet = tran.db[fleetID]
                if fleet.owner != obj.owner or obj.oid == fleetID:
                    continue
                if Utils.isIdleFleet(fleet):
                    continue
                action, target, actionData = fleet.actions[fleet.actionIndex]
                # same command, less than 20 ships in the resulting fleet
                if action == FLACTION_REDIRECT and len(fleet.ships) + len(obj.ships) <= 20:
                    # join it
                    log.debug("JOINING", obj.oid, fleetID)
                    self.cmd(obj).joinFleet(tran, obj, fleetID)
                    # "join" targets
                    fleet.actions[fleet.actionIndex] = (
                        action,
                        max(obj.actions[obj.actionIndex][1], target),
                        actionData,
                    )
                    return 0
        # move?
        action, target, actionData = obj.actions[obj.actionIndex]
        if obj.orbiting == OID_NONE or target != OID_NONE:
            # ok, the target was already selected
            if not self.cmd(obj).moveToTarget(tran, obj, target):
                # keep moving
                return 0
            # we are in the system - delete target
            obj.actions[obj.actionIndex] = (action, OID_NONE, actionData)
        # check if current system has a redirection
        player = tran.db[obj.owner]
        if obj.orbiting not in player.shipRedirections:
            # there is no redirection, we are done
            return 1
        # select a new target if tanks are full
        # departure every 6th turn
        turn = tran.db[OID_UNIVERSE].turn
        if refuelled and turn % 6 == 0:
            obj.actions[obj.actionIndex] = (action, player.shipRedirections[obj.orbiting], actionData)
            return 0
        # old code
        # NOTE(review): this tail is still reachable when the fleet is not
        # ready to depart -- it rebuilds the queue as REFUEL + REDIRECT
        # check if current system has any redirection
        player = tran.db[obj.owner]
        if obj.orbiting not in player.shipRedirections:
            return 1
        # form new command queue
        obj.actions = [
            [FLACTION_REFUEL, player.shipRedirections[obj.orbiting], None],
            [FLACTION_REDIRECT, OID_NONE, None],
        ]
        obj.actionIndex = 0
        return 0
    actionRedirect.public = 0
    def actionDeploy(self, tran, obj):
        """Handle the DEPLOY command -- build/colonise using a ship.

        Moves to the target planet first; then looks for a ship of the
        design given in actionData, runs its deploy handlers and builds
        its deploy structures on the planet. The ship is consumed when
        anything was deployed. Returns 1 when the command is finished,
        0 while still moving.
        """
        action, target, actionData = obj.actions[obj.actionIndex]
        if not self.cmd(obj).moveToTarget(tran, obj, target):
            return 0
        # deploy ship
        log.debug('IFleet', 'Deploying on planet - tech', actionData)
        planet = tran.db[target]
        player = tran.db[obj.owner]
        # find ship containing specified building
        for designID, hp, shield, exp in obj.ships:
            tech = player.shipDesigns[designID]
            if designID == actionData:
                removeShip = 0
                for deployHandlerID in tech.deployHandlers: #do handlers first so that structures can deploy on new planets
                    if not (type(deployHandlerID) in (str,int,long)): #just a double check...
                        continue
                    if not deployHandlerID.isdigit():
                        continue
                    log.debug('IFleet -', 'Attempting deploy of',deployHandlerID)
                    try:
                        deployHandlerID = int(deployHandlerID) #just a double check...
                    except:
                        log.warning('IFleet -','Deployment failed: NAN')
                        continue
                    deployHandler = Rules.techs[deployHandlerID]
                    if deployHandler.deployHandlerValidator(tran, obj, planet, deployHandler):
                        try:
                            deployHandler.deployHandlerFunction(tran, obj, planet, deployHandler)
                            Utils.sendMessage(tran, obj, MSG_DELOY_HANDLER, planet.oid, deployHandlerID)
                            removeShip = 1
                        except GameException, e:
                            log.warning('IFleet -','Deploy handler error - internal error')
                            Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_SHLOST, planet.oid, None)
                    else:
                        log.debug('IFleet -', 'Deploy handler - validation failed')
                        Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_SHLOST, planet.oid, None)
                for structTechID in tech.deployStructs:
                    if not (type(structTechID) in (int,long)): #just a double check...
                        continue
                    structTech = Rules.techs[structTechID]
                    # validate
                    if structTech.validateConstrHandler(tran, obj, planet, structTech):
                        # build it
                        if len(planet.slots) < planet.plSlots:
                            try:
                                structTech.finishConstrHandler(tran, obj, planet, structTech)
                                planet.slots.insert(0, Utils.newStructure(tran, structTechID, obj.owner))
                                removeShip = 1
                                Utils.sendMessage(tran, obj, MSG_COMPLETED_STRUCTURE, planet.oid, structTech.id)
                            except GameException, e:
                                # cannot build (planet already occupied?)
                                log.warning('IFleet -', 'Build on planet - cannot complete')
                                Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_SHLOST, planet.oid, None)
                        else:
                            # no free slot
                            log.debug('IFleet -', 'Build on planet - no free slot')
                            Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_NOSLOT, planet.oid, None)
                    else:
                        # cannot build this here TODO report it
                        log.debug('IFleet -', 'Build on planet - cannot build here (validation)')
                if removeShip:
                    self.cmd(obj).removeShips(tran, obj, [[designID, hp, shield, exp]])
                # ship has been deployed
                return 1
        # no suitable ship in fleet TODO report it
        log.debug('IFleet -', 'Deploy ship - no suitable ship')
        return 1
    actionDeploy.public = 0
    def refuelAndRepairAndRecharge(self, tran, obj):
        """Refuel, repair and recharge the fleet at friendly planets.

        Uses the best refuel/repair values among the owner's planets and
        allies with PACT_ALLOW_TANKING in the current system. Returns 1
        when the tanks are at or above the available refuel level (or
        there is nothing to refuel from), 0 otherwise.
        """
        if obj.orbiting == OID_NONE:
            # we are in space
            return 0
        # find ALLIED PLANETS
        system = tran.db[obj.orbiting]
        player = tran.db[obj.owner]
        refuelMax = 0
        refuelInc = 0
        repairShip = 0.0
        for planetID in system.planets:
            planet = tran.db[planetID]
            if planet.owner == OID_NONE:
                continue
            if planet.owner == player.oid:
                refuelMax = max(refuelMax, planet.refuelMax)
                refuelInc = max(refuelInc, planet.refuelInc)
                repairShip = max(repairShip, planet.repairShip)
            elif self.cmd(player).isPactActive(tran, player, planet.owner, PACT_ALLOW_TANKING):
                refuelMax = max(refuelMax, planet.refuelMax)
                refuelInc = max(refuelInc, planet.refuelInc)
                repairShip = max(repairShip, planet.repairShip)
        # repair ships
        self.cmd(obj).autoRepairAndRecharge(tran, obj, forceRepairPerc = repairShip)
        # tank
        if refuelMax == 0:
            return 1
        currentLevel = int(100.0 * obj.storEn / obj.maxEn)
        #@log.debug(obj.oid, "Refuel", currentLevel, refuelMax)
        if currentLevel >= refuelMax:
            # don't burn any fuel if you can refuel
            obj.storEn = min(obj.maxEn, obj.storEn + obj.operEn)
            return 1
        obj.storEn = min(
            int(math.ceil(obj.maxEn * refuelInc / 100.0 + obj.operEn + obj.storEn)),
            int(math.ceil(obj.maxEn * refuelMax / 100.0)),
            obj.maxEn,
        )
        #@log.debug("Refuelling", obj.oid, refuelInc, refuelMax)
        currentLevel = 100.0 * obj.storEn / obj.maxEn
        #@log.debug(obj.oid, "After refuel", currentLevel, refuelMax)
        #@log.debug(obj.oid, "Tanks after refuel", obj.storEn, "/", obj.maxEn)
        return currentLevel >= refuelMax
    refuelAndRepairAndRecharge.public = 0
    def serviceShips(self, tran, obj):
        """Train and upgrade ships while orbiting a friendly system.

        Skipped during combat. Military ships gain experience up to the
        best trainShipMax in the system; designs marked with upgradeTo
        are upgraded, consuming the player's fleetUpgradePool, planet
        upgrade points and strategic resources.
        """
        player = tran.db[obj.owner]
        # check conditions
        # no combat in the system
        system = tran.db[obj.orbiting]
        if system.combatCounter != 0:
            return
        # player's or ally's planet in the system and upgrade facility there
        # check for train facilities too
        upgrPlanets = []
        trainPlanets = []
        trainShipInc = 0.0
        trainShipMax = 0
        for planetID in system.planets:
            planet = tran.db[planetID]
            if planet.owner == player.oid and planet.upgradeShip > 0:
                upgrPlanets.append(planet)
            elif self.cmd(player).isPactActive(tran, player, planet.owner, PACT_ALLOW_TANKING) and planet.upgradeShip > 0:
                upgrPlanets.append(planet)
            if planet.owner == player.oid and planet.trainShipInc > 0.0:
                trainShipInc = max(trainShipInc, planet.trainShipInc)
                trainShipMax = max(trainShipMax, planet.trainShipMax)
        # train ships
        if trainShipInc > 0:
            for index, ship in enumerate(obj.ships):
                spec = player.shipDesigns[ship[SHIP_IDX_DESIGNID]]
                if ship[SHIP_IDX_EXP] / spec.baseExp < trainShipMax and spec.isMilitary:
                    ship[SHIP_IDX_EXP] = min(
                        spec.baseExp * trainShipMax,
                        ship[SHIP_IDX_EXP] + max(int(trainShipInc * spec.baseExp), 1),
                    )
        if not upgrPlanets:
            # no service facility
            return
        upgraded = 0
        # perform upgrade
        for designID in player.shipDesigns.keys():
            spec = player.shipDesigns[designID]
            if spec.upgradeTo:
                #@log.debug("Upgrading design", designID, "to", spec.upgradeTo, "for player", player.oid)
                upgradeToSpec = player.shipDesigns[spec.upgradeTo]
                player.fleetUpgradeInProgress = 1
                # production cost of the upgrade, never below the minimum
                diff = max(
                    Rules.shipMinUpgrade,
                    int((upgradeToSpec.buildProd - spec.buildProd) * Rules.shipUpgradeMod),
                )
                if player.fleetUpgradePool < diff:
                    continue
                # scan all ships for design
                designExists = 0
                for index in xrange(0, len(obj.ships)):
                    if obj.ships[index][SHIP_IDX_DESIGNID] == designID:
                        # find planet with free upgrade points
                        needsUPts = Rules.shipUpgradePts[upgradeToSpec.combatClass]
                        planet = None
                        for tmpPlanet in upgrPlanets:
                            if tmpPlanet.upgradeShip >= needsUPts:
                                planet = tmpPlanet
                                break
                        if not planet:
                            break
                        # check strategic resources
                        neededSR = {}
                        # new design
                        for sr in upgradeToSpec.buildSRes:
                            if not sr in neededSR:
                                neededSR[sr] = 0
                            neededSR[sr] += 1
                        # old design
                        for sr in spec.buildSRes:
                            if not sr in neededSR:
                                neededSR[sr] = 0
                            neededSR[sr] -= 1
                        # check player's resources
                        ok = 1
                        for sr in neededSR:
                            if player.stratRes.get(sr, 0) < neededSR[sr]:
                                Utils.sendMessage(tran, obj, MSG_CANNOT_UPGRADE_SR, obj.oid, (spec.name, upgradeToSpec.name, sr))
                                # skip this ship
                                ok = 0
                        if not ok:
                            # skip this ship
                            break
                        # consume strategic resources
                        for sr in neededSR:
                            player.stratRes[sr] -= neededSR[sr]
                        # upgrade ship
                        log.debug("Upgrading ship in fleet", obj.oid, needsUPts, planet.upgradeShip, planet.oid)
                        # hull penalty proportional to the production gap
                        maxHPRatio = max(0.01, 1.0 - max(upgradeToSpec.buildProd - spec.buildProd, 0) / float(upgradeToSpec.buildProd))
                        obj.ships[index][SHIP_IDX_DESIGNID] = spec.upgradeTo
                        obj.ships[index][SHIP_IDX_HP] = max(1, min(
                            obj.ships[index][1],
                            int(upgradeToSpec.maxHP * maxHPRatio)
                        ))
                        obj.ships[index][SHIP_IDX_SHIELDHP] = upgradeToSpec.shieldHP
                        # cap max experience based on equivilent percentage of experience transfer (prevent high baseExp ship upgrading to low baseExp ships with a higher bonus)
                        obj.ships[index][SHIP_IDX_EXP] = min(obj.ships[index][SHIP_IDX_EXP],int(1.0 * upgradeToSpec.baseExp / spec.baseExp * obj.ships[index][SHIP_IDX_EXP]))
                        upgraded += 1
                        #@log.debug("HP penalty", diff, upgradeToSpec.buildProd, maxHPRatio)
                        player.fleetUpgradePool -= diff
                        designExists = 1
                        # consume upgrade points
                        planet.upgradeShip -= needsUPts
                        # record last upgrade
                        obj.lastUpgrade = tran.db[OID_UNIVERSE].turn
                        # send a message to the player
                        # Utils.sendMessage(tran, obj, MSG_UPGRADED_SHIP, obj.oid, (spec.name, player.shipDesigns[spec.upgradeTo].name))
                        if player.fleetUpgradePool < diff:
                            break
                if player.fleetUpgradePool < diff:
                    break
        # fix fleet stats
        if upgraded > 0:
            self.cmd(obj).update(tran, obj)
    serviceShips.public = 0
    def autoRepairAndRecharge(self, tran, obj, forceRepairPerc = 0.0):
        """Apply per-turn hull repair and shield recharge to all ships.

        forceRepairPerc -- repair percentage granted by a planet; used
        alone when the fleet is out of fuel, otherwise combined with each
        design's own auto-repair values. Shields recharge only when the
        fleet has stored energy.
        """
        player = tran.db[obj.owner]
        idx = 0
        for designID, hp, shields, exp in obj.ships:
            spec = player.shipDesigns[designID]
            if hp < spec.maxHP:
                if obj.storEn == 0:
                    # no fuel -> only the planet's repair facility applies
                    repairFix = 0
                    repairPerc = forceRepairPerc
                else:
                    repairFix = spec.autoRepairFix
                    repairPerc = max(spec.autoRepairPerc, forceRepairPerc)
                if repairFix > 0 or repairPerc > 0:
                    #&log.debug("IFleet - repairing ship", obj.oid, designID, hp, repairFix, repairPerc)
                    obj.ships[idx][SHIP_IDX_HP] = int(min(
                        spec.maxHP,
                        hp + repairFix + max(1, spec.maxHP * repairPerc),
                    ))
            if shields < spec.shieldHP and obj.storEn:
                #@log.debug("IFleet - recharging shields", designID, shields, spec.shieldRechargeFix, spec.shieldRechargePerc)
                obj.ships[idx][SHIP_IDX_SHIELDHP] = int(min(
                    spec.shieldHP,
                    shields + spec.shieldRechargeFix + max(1, spec.shieldHP * spec.shieldRechargePerc),
                ))
            idx += 1
    autoRepairAndRecharge.public = 0
    def moveToWormhole(self, tran, obj, targetID):
        """Move to a wormhole and traverse it to its destination system.

        Returns 0 while still in transit; 1 once the fleet has arrived
        (and, when the target really is a wormhole, has been relocated
        to the destination side with all system bookkeeping updated).
        """
        origin = tran.db[targetID]
        if not (obj.x==origin.x and obj.y==origin.y):
            if not self.cmd(obj).moveToTarget(tran, obj, targetID):
                return 0 #ship hasn't arrived
        # enter wormhole
        if origin.type == T_WORMHOLE: #is wormhole, now enter it!
            destinationWormHole = tran.db[origin.destinationOid]
            # a wormhole leading to itself is a no-op
            if destinationWormHole.oid == targetID:
                return 1
            if obj.oid not in destinationWormHole.fleets:
                destinationWormHole.fleets.append(obj.oid)
            if obj.oid not in destinationWormHole.closeFleets:
                destinationWormHole.closeFleets.append(obj.oid)
            if obj.oid in origin.fleets:
                origin.fleets.remove(obj.oid)
            if obj.oid in origin.closeFleets:
                origin.closeFleets.remove(obj.oid)
            obj.closeSystem = destinationWormHole.oid
            log.debug('IFleet', 'Entering Wormhole - destination ', destinationWormHole.oid)
            obj.orbiting = destinationWormHole.oid
            obj.x = destinationWormHole.x
            obj.y = destinationWormHole.y
            destinationWormHole.scannerPwrs[obj.owner] = max(obj.scannerPwr, destinationWormHole.scannerPwrs.get(obj.owner, 0))
            Utils.sendMessage(tran, obj, MSG_ENTERED_WORMHOLE, destinationWormHole.oid , (origin.name,destinationWormHole.name))
            arrived = 1
        else: #is not wormhole...how'd you ever execute this command? Or is there some weird "terraform wormhole" technology we never forsaw?
            log.warning('IFleet', 'Cannot enter non-existant wormhole at location ', origin.oid)
            #Utils.sendMessage(tran, obj, MSG_ENTERED_WORMHOLE, destinationWormHole.oid , (origin.name,destinationWormHole.name))
            arrived = 1 #since the move part was successful, just ignore this problem for the player
        return arrived
    moveToWormhole.public = 0
    def moveToTarget(self, tran, obj, targetID): #added action passthrough for wormhole move...needed
        """Move the fleet one turn of travel toward targetID.

        Handles leaving orbit (with a combat retreat delay), close-system
        bookkeeping, star-gate speed boosts (including hull damage when
        the boost drops on arrival) and arrival at a planet's system,
        a system or a wormhole. Returns 1 when the fleet has arrived,
        0 otherwise.
        """
        # DON'T move fleet with speed == 0
        if obj.speed <= 0:
            # they cannot arive (never)
            # reset retreat counter
            obj.combatRetreatWait = 0
            return 1
        if targetID == OID_NONE:
            # reset retreat counter
            obj.combatRetreatWait = 0
            return 1
        # reset/remember old values
        obj.oldX = obj.x
        obj.oldY = obj.y
        obj.eta = 0.0
        target = tran.db[targetID]
        obj.target = targetID
        # MOVE to target
        dx = target.x - obj.x
        dy = target.y - obj.y
        #if dx == 0 and dy == 0:
        #    return 1
        if obj.orbiting:
            system = tran.db[obj.orbiting]
            if system.combatCounter > 0:
                # well, there is a combat there -> wait a while and reduce ROF
                obj.combatRetreatWait += 1
                if obj.combatRetreatWait <= Rules.combatRetreatWait:
                    return 0
                # ok, we suffered enough, move away
                # reset counter
                obj.combatRetreatWait = 0
            # speed boost?
            obj.speedBoost = Utils.getSpeedBoost(tran, tran.db[obj.owner], (system, target))
            #
            try:
                system.fleets.remove(obj.oid)
            except ValueError:
                log.warning('IFleet', 'Problem with removing fleet from system.')
            obj.orbiting = OID_NONE
            # change close system to target one
            if obj.closeSystem != OID_NONE: # TODO remove condition in 0.6
                system = tran.db[obj.closeSystem]
                try:
                    system.closeFleets.remove(obj.oid)
                except ValueError:
                    log.warning("IFleet", "Problem with changing the close system.")
            if target.type == T_PLANET:
                system = tran.db[target.compOf]
                system.closeFleets.append(obj.oid)
                obj.closeSystem = system.oid
            elif target.type in (T_SYSTEM, T_WORMHOLE):
                target.closeFleets.append(obj.oid)
                obj.closeSystem = target.oid
            else:
                raise GameException('Unsupported type of target %d for move command.' % target.type)
        dist = math.hypot(dx, dy)
        # distance covered this turn
        maxDelta = obj.speed / Rules.turnsPerDay * obj.speedBoost
        if not maxDelta:
            obj.combatRetreatWait = 0
            return 0
        arrived = 0
        # 0.01 acceptable error
        if dist <= maxDelta + 0.01:
            # we are at destination
            obj.x = target.x
            obj.y = target.y
            if target.type == T_PLANET:
                obj.orbiting = target.compOf
                system = tran.db[obj.orbiting]
                system.fleets.append(obj.oid)
                arrived = 1
            elif target.type == T_SYSTEM or target.type == T_WORMHOLE:
                #@log.debug('IFleet', obj.oid, 'is aproaching orbit of', targetID)
                obj.orbiting = target.oid
                system = tran.db[obj.orbiting]
                system.fleets.append(obj.oid)
                #@log.debug('IFleet', system.oid, 'system fleets', system.fleets)
                arrived = 1
            else:
                raise GameException('Unsupported type of target %d for move command.' % target.type)
        else:
            # move
            obj.x += dx / dist * maxDelta
            obj.y += dy / dist * maxDelta
            # (already moved 1 x maxDelta) (0.01 is acceptable error)
            obj.eta = math.ceil(dist / maxDelta - 1 - 0.01)
        if arrived:
            obj.target = OID_NONE
            # just make sure that this is reset
            obj.combatRetreatWait = 0
            # turn scanner on
            obj.scannerOn = True
            # check the speed boost
            speedBoost = Utils.getSpeedBoost(tran, tran.db[obj.owner], (system,))
            if speedBoost < obj.speedBoost:
                # damage all ships in the fleet
                # damage is based on percentual difference
                percHull = 1.0 - Rules.starGateDamage * (obj.speedBoost / speedBoost - 1.0)
                log.debug(obj.oid, "fleet speed boost too low - damaging ships", speedBoost, obj.speedBoost, percHull)
                Utils.sendMessage(tran, obj, MSG_DAMAGE_BY_SG, obj.orbiting, int((1.0 - percHull) * 100))
                for ship in obj.ships:
                    ship[SHIP_IDX_HP] = max(1, int(ship[SHIP_IDX_HP] * percHull))
                # TODO: send message to player
                obj.speedBoost = 1.0
            # add ship to the scanner pwrs of the system
            system.scannerPwrs[obj.owner] = max(obj.scannerPwr, system.scannerPwrs.get(obj.owner, 0))
        return arrived
    moveToTarget.public = 0
def processFINALPhase(self, tran, obj, data):
    """Accumulate end-of-turn statistics for the fleet's owner.

    Adds this fleet's combat power and support production cost to the
    owner's empire-wide stats, and records the galaxy the fleet is
    currently in so the owner's galaxy list stays up to date.
    """
    owner = tran.db[obj.owner]
    # fleet contributes to the empire-wide totals
    owner.stats.fleetPwr += obj.combatPwr
    owner.stats.fleetSupportProd += obj.operProd
    # remember which galaxy this fleet occupies
    galaxyID = tran.db[obj.closeSystem].compOf
    if galaxyID not in owner.galaxies:
        owner.galaxies.append(galaxyID)

processFINALPhase.public = 1
processFINALPhase.accLevel = AL_ADMIN
##
## Combat related functions
##
def getPreCombatData(self, tran, obj):
    """Compute this fleet's shot lists and target counts for one combat round.

    Returns a tuple (shots, targets, firing):
      - shots maps weapon combat class (0-3) to a list of (attack, weaponID)
      - targets counts the fleet's own ships per combat class
      - firing is True when at least one ship carries a weapon
    Also (re)initialises the per-combat shot-limit bookkeeping attributes
    on the fleet (maxHits, hitCounters, lastHitClass, hitMods), which are
    consumed by applyShot and deleted by distributeExp.
    """
    # compute data
    shots = {0: [], 1: [], 2: [], 3: []}
    targets = [0, 0, 0, 0]
    player = tran.db[obj.owner]
    desCount = {}
    firing = False
    rofMod = 1.0
    # limit number of shots per ship
    obj.maxHits = {0: 0, 1: 0, 2: 0, 3: 0}
    obj.hitCounters = {0: 0, 1: 0, 2: 0, 3: 0}
    obj.lastHitClass = 3
    obj.hitMods = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
    if obj.combatRetreatWait > 0:
        # ROF penalty while the fleet is waiting to retreat
        #@log.debug(obj.oid, "Fleet inactive", obj.combatRetreatWait)
        rofMod *= 0.33
    if obj.storEn == 0:
        # no stored energy -- further rate-of-fire penalty
        rofMod *= 0.33
    for designID, hp, shield, exp in obj.ships:
        tech = player.shipDesigns[designID]
        targets[tech.combatClass] += 1
        desCount[designID] = desCount.get(designID, 0) + 1
        # each ship raises the hit budget of its class by two
        obj.maxHits[tech.combatClass] += 2
        wpnCount = {}
        for weaponID in tech.weaponIDs:
            firing = True
            weapon = Rules.techs[weaponID]
            wpnCount[weaponID] = wpnCount.get(weaponID, 0) + 1
            #
            weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
            # base attack
            attack = (tech.combatAtt + int(weapon.weaponAtt * weaponEff)) * tech.combatAttMultiplier #added multiplier part
            # correct using ship's level
            level = Rules.shipExpToLevel.get(int(exp / tech.baseExp), Rules.shipDefLevel)
            attack = int(attack * Rules.shipLevelEff[level])
            # because ALL counters starts at 1, subtract 3
            count = obj.combatCounter + desCount[designID] + wpnCount[weaponID] - 3
            # add to attacks
            #@log.debug('IFleet', obj.oid, designID, "Count", count, 'Shots', weapon.name, ShipUtils.getRounds(weapon.weaponROF, count))
            for round in xrange(0, ShipUtils.getRounds(weapon.weaponROF * rofMod, count)):
                shots[weapon.weaponClass].append((attack, weaponID))
    log.debug(obj.oid, "Combat limit settings", obj.maxHits)
    return shots, targets, firing

getPreCombatData.public = 0
def applyMine(self, tran, obj, attack, damage, ignoreshield):
    """Apply the damage of one mine hit to a randomly chosen ship of the fleet.

    The mine's nominal damage is scaled by the ratio of its attack value
    to the target ship's missile defense (clamped to 25%..125%), then
    absorbed by shields before hitting the hull.

    Returns (total damage dealt including the shield-blocked part,
    destroyed flag 0/1).
    """
    player = tran.db[obj.owner]
    targetindex = random.randrange(0, len(obj.ships))
    designID, hp, shield, exp = obj.ships[targetindex]
    targetShip = player.shipDesigns[designID]
    level = Rules.shipExpToLevel.get(int(exp / targetShip.baseExp), Rules.shipDefLevel)
    defense = int(targetShip.missileDef * Rules.shipLevelEff[level])
    # determine damage:
    defenseBase = 4  # normal enemy defense to use as part of the ratio
    # the better the defense, the less damage you take from the mine:
    # 25% to 125% damage of normal mine
    damageRatio = min(max(1.0 * (attack + defenseBase) / (attack + defense), 0.25), 1.25)
    damage = int(damage * damageRatio)
    if not damage:
        return 0, 0  # mine did no damage due to low ATT value on mine
    # do damage:
    destroyed = 0
    blocked = 0
    if not ignoreshield and shield > 0:
        blocked = min(shield, damage)
        obj.ships[targetindex][2] -= blocked
        damage -= blocked
    elif ignoreshield and targetShip.hardShield > 0 and shield > 0:
        # BUGFIX: original referenced the undefined name 'ship' here
        # (a copy/paste from applyShot); the design object in this method
        # is called targetShip.
        blocked = min(shield, int(damage * targetShip.hardShield))  # hard shields also reduce penetrating weapons
        obj.ships[targetindex][2] -= blocked
        damage -= blocked
    if shield:  # mines never pierce shields at this time; possible future expansion of the tech
        # NOTE(review): this re-applies shield absorption after the
        # branches above and overwrites 'blocked'; it looks like
        # duplicated logic but is preserved as-is -- confirm upstream
        # before changing combat balance.
        blocked = min(shield, damage)
        damage -= blocked
        obj.ships[targetindex][2] -= blocked
    if damage > 0:
        if hp < damage:
            damage = hp
            destroyed = 1
            self.cmd(obj).removeShips(tran, obj, [obj.ships[targetindex]])
        else:
            obj.ships[targetindex][1] -= damage
    return damage + blocked, destroyed

applyMine.public = 0
def applyShot(self, tran, obj, defense, attack, weaponID, targetClass, target):
    """Resolve one incoming shot against this fleet.

    Picks a ship of the requested combat class to hit (military ships
    first, then civilians; falling through to higher classes when none
    exists), applies the per-class shot-limit bookkeeping set up by
    getPreCombatData, rolls to hit, and applies shield/armour damage.

    Returns (damage dealt including the shield-blocked part,
    destroyed flag 0/1, combat class of the ship that was hit).
    Returns (0, 0, 0) when no target of any class could be found.
    """
    #@log.debug(obj.oid, 'IFleet', 'Apply shot', attack, weaponID, targetClass, target)
    player = tran.db[obj.owner]
    # find correct ship to hit
    target = -1
    targetCiv = 0
    while target == -1:
        index = 0
        found = 0
        for designID, hp, shield, exp in obj.ships:
            design = player.shipDesigns[designID]
            # first pass targets military ships only; second pass
            # (targetCiv == 1) also allows civilian ships
            if design.combatClass == targetClass and (design.isMilitary or targetCiv):
                found = 1
                if Utils.rand(1, 101) < Rules.shipTargetPerc[targetClass]:
                    target = index
                    break
            index += 1
        if not targetCiv:
            targetCiv = 1
            continue
        if not found and targetCiv:
            # no such target class - try to find another one
            log.warning("No such target class in the fleet", obj.oid, targetClass)
            targetClass = targetClass + 1
            targetCiv = 0
            if targetClass > 3:
                return 0, 0, 0
    designID, hp, shield, exp = obj.ships[target]
    ship = player.shipDesigns[designID]
    # compute if ship has been hit
    weapon = Rules.techs[weaponID]
    level = Rules.shipExpToLevel.get(int(exp / ship.baseExp), Rules.shipDefLevel)
    # add system defense bonus to ship inate defense
    if weapon.weaponIsMissile:
        defense += int(ship.missileDef * Rules.shipLevelEff[level])
    else:
        defense += int(ship.combatDef * Rules.shipLevelEff[level])
    destroyed = 0
    destroyedClass = ship.combatClass
    dmg = 0
    blocked = 0
    # limit number of shots
    cClass = weapon.weaponClass
    if cClass < obj.lastHitClass:
        # moving to a lower weapon class: transfer the unused hit budget
        # of the higher classes down (scaled by combatHitXferMod)
        #@log.debug(obj.oid, "Different class", obj.lastHitClass, cClass, obj.maxHits)
        for i in range(obj.lastHitClass - 1, cClass - 1, -1):
            if obj.hitMods[cClass] >= 0.99: # == 1.0
                #@log.debug(obj.oid, "Adding to", i, int(Rules.combatHitXferMod * (obj.maxHits[i + 1] - obj.hitCounters[i + 1])), obj.hitCounters[i + 1])
                obj.maxHits[i] += int(Rules.combatHitXferMod * (obj.maxHits[i + 1] - obj.hitCounters[i + 1]))
            else:
                #@log.debug(obj.oid, "Not transfering hits")
                pass
            obj.maxHits[i + 1] = 0
        #@log.debug(obj.oid, "max hits", obj.maxHits)
        obj.lastHitClass = cClass
    elif cClass > obj.lastHitClass:
        # shots are expected to arrive in descending class order
        log.debug(obj.oid, "INCORRECT ORDER OF SHOTS", obj.lastHitClass, cClass)
    if weapon.weaponROF > 1:
        # fast-firing weapons consume a fraction of the hit budget per shot
        #@log.debug(obj.oid, "Increasing counter", cClass, 1.0 / weapon.weaponROF)
        obj.hitCounters[cClass] += 1.0 / weapon.weaponROF
    else:
        #@log.debug(obj.oid, "Increasing counter", cClass, 1)
        obj.hitCounters[cClass] += 1
    if obj.hitCounters[cClass] > obj.maxHits[cClass]:
        # budget exhausted: reset the counter and lower the hit chance
        obj.hitCounters[cClass] = 0
        obj.hitMods[cClass] *= Rules.combatShipHitMod
        #@log.debug(obj.oid, "Increasing hit penalty", obj.hitMods[cClass], obj.maxHits[cClass], "class", cClass)
    #
    attackChance = obj.hitMods[cClass] * attack / (attack + defense)
    #@log.debug(obj.oid, "Chance to attack", attackChance, obj.hitMods[cClass],
    #@    obj.hitCounters[cClass], obj.maxHits[cClass], "without penalty:", float(attack) / (attack + defense))
    if random.random() <= attackChance:
        player = tran.db[obj.owner]
        weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
        # HIT! -> apply damage
        dmg = ShipUtils.computeDamage(weapon.weaponClass, ship.combatClass, weapon.weaponDmgMin, weapon.weaponDmgMax, weaponEff)
        #@log.debug(obj.oid, 'HIT! att=%d vs def=%d, dmg=%d '% (attack, defense, dmg))
        # shield
        if not weapon.weaponIgnoreShield and shield > 0:
            blocked = min(shield, dmg)
            obj.ships[target][2] -= blocked
            dmg -= blocked
        elif weapon.weaponIgnoreShield and ship.hardShield > 0 and shield > 0:
            blocked = min(shield, int(dmg*(ship.hardShield))) #hard shields also reduce penetrating weapons
            obj.ships[target][2] -= blocked
            dmg -= blocked
        #damage absorbsion by armor
        if ship.damageAbsorb > 0 and dmg > 0:
            dmg = max(0,dmg-ship.damageAbsorb)
        # armour
        if dmg >= hp:
            destroyed = 1
            self.cmd(obj).removeShips(tran, obj, [obj.ships[target]])
            dmg = hp
        else:
            obj.ships[target][1] -= dmg
    #@log.debug(obj.oid, "Damaged", dmg, blocked, destroyed)
    return dmg + blocked, destroyed, destroyedClass

applyShot.public = 0
def distributeExp(self, tran, obj):
    """Distribute the fleet's accumulated combat experience to its ships.

    Experience points are handed out one at a time, round-robin, to the
    military ships of the fleet until the pool is exhausted; civilian
    ships gain nothing. Afterwards the temporary per-combat bookkeeping
    attributes (maxHits, hitCounters, lastHitClass, hitMods) are removed
    from the fleet object.
    """
    # TODO improve
    player = tran.db[obj.owner]
    while obj.combatExp > 0:
        militaryFound = False
        for ship in obj.ships:
            # civilian designs never gain experience
            if not player.shipDesigns[ship[0]].isMilitary:
                continue
            militaryFound = True
            ship[3] += 1          # one experience point per pass
            obj.combatExp -= 1
            if obj.combatExp == 0:
                break
        if not militaryFound:
            # nothing can absorb the experience -- drop the remainder
            break
    # drop per-combat shot-limit bookkeeping
    del obj.maxHits
    del obj.hitCounters
    del obj.lastHitClass
    del obj.hitMods

distributeExp.public = 0
def surrenderTo(self, tran, obj, newOwnerID):
    """Handle losing a battle to the player ``newOwnerID``.

    Currently a stub that always reports no surrender effect; the
    intended behaviour (issuing a MOVE command to the nearest star owned
    by the fleet's owner) is not implemented yet.
    """
    # we've lost the battle - issue MOVE command to the nearest player's star
    return 0

surrenderTo.public = 0
| Lukc/ospace-lukc | server/lib/ige/ospace/IFleet.py | Python | gpl-2.0 | 51,556 | [
"Galaxy"
] | bb99e2eadb69f49d57421ed430a2da194175b45f746e3e83e1c4fe5560563bae |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Input sets for Qchem
"""
import logging
import os
from monty.io import zopen
from pymatgen.io.qchem.inputs import QCInput
from pymatgen.io.qchem.utils import lower_and_check_unique
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
logger = logging.getLogger(__name__)
class QChemDictSet(QCInput):
    """
    Build a QCInput given all the various input parameters. Can be extended by standard implementations below.
    """

    def __init__(
        self,
        molecule,
        job_type,
        basis_set,
        scf_algorithm,
        dft_rung=4,
        pcm_dielectric=None,
        smd_solvent=None,
        custom_smd=None,
        max_scf_cycles=200,
        geom_opt_max_cycles=200,
        plot_cubes=False,
        overwrite_inputs=None,
    ):
        """
        Args:
            molecule (Pymatgen molecule object)
            job_type (str): QChem job type, e.g. "opt", "sp", "freq"
            basis_set (str)
            scf_algorithm (str)
            dft_rung (int): 1-5; selects the functional: 1=B3LYP,
                2=B3LYP+D3(BJ), 3=wB97X-D, 4=wB97X-V, 5=wB97M-V
            pcm_dielectric (str): dielectric constant for PCM solvation
            smd_solvent (str): SMD solvent name, or "custom"/"other" for a
                user-defined solvent described by custom_smd; mutually
                exclusive with pcm_dielectric
            custom_smd (str): seven comma separated values, in order:
                dielectric, refractive index, acidity, basicity, surface
                tension, aromaticity, electronegative halogenicity
            max_scf_cycles (int)
            geom_opt_max_cycles (int): only used when job_type is "opt"
            plot_cubes (bool): request cube-file generation
            overwrite_inputs (dict): This is dictionary of QChem input sections to add or overwrite variables,
                the available sections are currently rem, pcm, solvent, smx, and plots. So the accepted keys
                are one of those section names and the value is a dictionary of key value pairs relevant to
                the section. An example would be adding a new variable to the rem section that sets symmetry
                to false.
                ex. overwrite_inputs = {"rem": {"symmetry": "false"}}
                ***It should be noted that if something like basis is added to the rem dict it will overwrite
                the default basis.***
        """
        self.molecule = molecule
        self.job_type = job_type
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.dft_rung = dft_rung
        self.pcm_dielectric = pcm_dielectric
        self.smd_solvent = smd_solvent
        self.custom_smd = custom_smd
        self.max_scf_cycles = max_scf_cycles
        self.geom_opt_max_cycles = geom_opt_max_cycles
        self.plot_cubes = plot_cubes
        self.overwrite_inputs = overwrite_inputs

        # default PCM cavity construction parameters
        pcm_defaults = {
            "heavypoints": "194",
            "hpoints": "194",
            "radii": "uff",
            "theory": "cpcm",
            "vdwscale": "1.1",
        }

        plots_defaults = {"grid_spacing": "0.05", "total_density": "0"}

        mypcm = {}
        mysolvent = {}
        mysmx = {}
        myplots = {}
        myrem = {}
        myrem["job_type"] = job_type
        myrem["basis"] = self.basis_set
        myrem["max_scf_cycles"] = self.max_scf_cycles
        myrem["gen_scfman"] = "true"
        myrem["xc_grid"] = "3"
        myrem["scf_algorithm"] = self.scf_algorithm
        myrem["resp_charges"] = "true"
        myrem["symmetry"] = "false"
        myrem["sym_ignore"] = "true"
        # choose the density functional from the requested rung
        if self.dft_rung == 1:
            myrem["method"] = "b3lyp"
        elif self.dft_rung == 2:
            myrem["method"] = "b3lyp"
            myrem["dft_D"] = "D3_BJ"
        elif self.dft_rung == 3:
            myrem["method"] = "wb97xd"
        elif self.dft_rung == 4:
            myrem["method"] = "wb97xv"
        elif self.dft_rung == 5:
            myrem["method"] = "wb97mv"
        else:
            raise ValueError("dft_rung should be between 1 and 5!")

        if self.job_type.lower() == "opt":
            myrem["geom_opt_max_cycles"] = self.geom_opt_max_cycles

        if self.pcm_dielectric is not None and self.smd_solvent is not None:
            raise ValueError("Only one of pcm or smd may be used for solvation.")

        if self.pcm_dielectric is not None:
            mypcm = pcm_defaults
            mysolvent["dielectric"] = self.pcm_dielectric
            myrem["solvent_method"] = "pcm"

        if self.smd_solvent is not None:
            if self.smd_solvent == "custom":
                mysmx["solvent"] = "other"
            else:
                mysmx["solvent"] = self.smd_solvent
            myrem["solvent_method"] = "smd"
            myrem["ideriv"] = "1"
            if self.smd_solvent == "custom" or self.smd_solvent == "other":
                if self.custom_smd is None:
                    raise ValueError(
                        "A user-defined SMD requires passing custom_smd, a string"
                        + " of seven comma separated values in the following order:"
                        + " dielectric, refractive index, acidity, basicity, surface"
                        + " tension, aromaticity, electronegative halogenicity"
                    )

        if self.plot_cubes:
            myplots = plots_defaults
            myrem["plots"] = "true"
            myrem["make_cube_files"] = "true"

        # user-supplied per-section overrides take precedence over all defaults
        if self.overwrite_inputs:
            for sec, sec_dict in self.overwrite_inputs.items():
                if sec == "rem":
                    temp_rem = lower_and_check_unique(sec_dict)
                    for k, v in temp_rem.items():
                        myrem[k] = v
                if sec == "pcm":
                    temp_pcm = lower_and_check_unique(sec_dict)
                    for k, v in temp_pcm.items():
                        mypcm[k] = v
                if sec == "solvent":
                    temp_solvent = lower_and_check_unique(sec_dict)
                    for k, v in temp_solvent.items():
                        mysolvent[k] = v
                if sec == "smx":
                    temp_smx = lower_and_check_unique(sec_dict)
                    for k, v in temp_smx.items():
                        mysmx[k] = v
                if sec == "plots":
                    temp_plots = lower_and_check_unique(sec_dict)
                    for k, v in temp_plots.items():
                        myplots[k] = v

        super().__init__(self.molecule, rem=myrem, pcm=mypcm, solvent=mysolvent, smx=mysmx, plots=myplots)

    def write(self, input_file):
        """
        Write the input set to ``input_file``. For a user-defined SMD
        solvent, also write the "solvent_data" file QChem expects next
        to the input file.

        Args:
            input_file (str): Filename
        """
        self.write_file(input_file)
        if self.smd_solvent == "custom" or self.smd_solvent == "other":
            with zopen(os.path.join(os.path.dirname(input_file), "solvent_data"), "wt") as f:
                f.write(self.custom_smd)
class OptSet(QChemDictSet):
    """
    QChemDictSet for a geometry optimization
    """

    def __init__(
        self,
        molecule,
        dft_rung=3,
        basis_set="def2-tzvppd",
        pcm_dielectric=None,
        smd_solvent=None,
        custom_smd=None,
        scf_algorithm="diis",
        max_scf_cycles=200,
        geom_opt_max_cycles=200,
        plot_cubes=False,
        overwrite_inputs=None,
    ):
        """
        Args:
            molecule (Pymatgen molecule object)
            dft_rung (int): functional selector; see QChemDictSet
            basis_set (str)
            pcm_dielectric (str)
            smd_solvent (str)
            custom_smd (str)
            scf_algorithm (str)
            max_scf_cycles (int)
            geom_opt_max_cycles (int)
            plot_cubes (bool)
            overwrite_inputs (dict): per-section overrides; see QChemDictSet
        """
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        self.geom_opt_max_cycles = geom_opt_max_cycles
        super().__init__(
            molecule=molecule,
            job_type="opt",
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            smd_solvent=smd_solvent,
            custom_smd=custom_smd,
            basis_set=self.basis_set,
            scf_algorithm=self.scf_algorithm,
            max_scf_cycles=self.max_scf_cycles,
            geom_opt_max_cycles=self.geom_opt_max_cycles,
            plot_cubes=plot_cubes,
            overwrite_inputs=overwrite_inputs,
        )
class SinglePointSet(QChemDictSet):
    """
    QChemDictSet for a single point calculation
    """

    def __init__(
        self,
        molecule,
        dft_rung=3,
        basis_set="def2-tzvppd",
        pcm_dielectric=None,
        smd_solvent=None,
        custom_smd=None,
        scf_algorithm="diis",
        max_scf_cycles=200,
        plot_cubes=False,
        overwrite_inputs=None,
    ):
        """
        Args:
            molecule (Pymatgen molecule object)
            dft_rung (int): functional selector; see QChemDictSet
            basis_set (str)
            pcm_dielectric (str)
            smd_solvent (str)
            custom_smd (str)
            scf_algorithm (str)
            max_scf_cycles (int)
            plot_cubes (bool)
            overwrite_inputs (dict): per-section overrides; see QChemDictSet
        """
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        super().__init__(
            molecule=molecule,
            job_type="sp",
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            smd_solvent=smd_solvent,
            custom_smd=custom_smd,
            basis_set=self.basis_set,
            scf_algorithm=self.scf_algorithm,
            max_scf_cycles=self.max_scf_cycles,
            plot_cubes=plot_cubes,
            overwrite_inputs=overwrite_inputs,
        )
class FreqSet(QChemDictSet):
    """
    QChemDictSet for a frequency calculation
    """

    def __init__(
        self,
        molecule,
        dft_rung=3,
        basis_set="def2-tzvppd",
        pcm_dielectric=None,
        smd_solvent=None,
        custom_smd=None,
        scf_algorithm="diis",
        max_scf_cycles=200,
        plot_cubes=False,
        overwrite_inputs=None,
    ):
        """
        Args:
            molecule (Pymatgen molecule object)
            dft_rung (int): functional selector; see QChemDictSet
            basis_set (str)
            pcm_dielectric (str)
            smd_solvent (str)
            custom_smd (str)
            scf_algorithm (str)
            max_scf_cycles (int)
            plot_cubes (bool)
            overwrite_inputs (dict): per-section overrides; see QChemDictSet
        """
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        super().__init__(
            molecule=molecule,
            job_type="freq",
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            smd_solvent=smd_solvent,
            custom_smd=custom_smd,
            basis_set=self.basis_set,
            scf_algorithm=self.scf_algorithm,
            max_scf_cycles=self.max_scf_cycles,
            plot_cubes=plot_cubes,
            overwrite_inputs=overwrite_inputs,
        )
| davidwaroquiers/pymatgen | pymatgen/io/qchem/sets.py | Python | mit | 10,509 | [
"pymatgen"
] | 58adf9c7451f8eea544f82d4ce213593f596b61f5e022fcbe55e6630a27eb979 |
"""
This module contain utilities for the source finding routines
"""
import numpy
import math
import scipy.integrate
from tkp.sourcefinder.gaussian import gaussian
from tkp.utility import coordinates
def generate_subthresholds(min_value, max_value, num_thresholds):
    """
    Generate ``num_thresholds`` logarithmically spaced values strictly
    between ``min_value`` and ``max_value`` (both endpoints excluded).
    """
    # Build a log-spaced sequence on (1, max - min + 1] -- i.e. exp(0)
    # up to (but excluding) (max - min + 1) -- then shift it down by
    # (min_value - 1) so the values span (min_value, max_value).
    shifted = numpy.logspace(
        0.0,
        numpy.log(max_value + 1 - min_value),
        num=num_thresholds + 1,  # the first entry corresponds to min_value
        base=numpy.e,
        endpoint=False,          # do not include max_value
    )
    return shifted[1:] + (min_value - 1)
def get_error_radius(wcs, x_value, x_error, y_value, y_error):
    """
    Estimate an absolute angular error on the pixel position
    (x_value, y_value) given its per-axis errors.

    This is a pessimistic estimate: the maximum angular separation
    between the centre and any corner of the error box is returned.
    A tighter bound would project the errors onto the major/minor axes
    of the elliptical fit, but this suffices for now.
    """
    radius = 0
    try:
        centre_ra, centre_dec = wcs.p2s([x_value, y_value])
        # Probe every corner of the error box in case the WCS is nonlinear.
        corners = [
            (x_value + dx, y_value + dy)
            for dx in (x_error, -x_error)
            for dy in (y_error, -y_error)
        ]
        for corner in corners:
            corner_ra, corner_dec = wcs.p2s(corner)
            separation = coordinates.angsep(
                centre_ra, centre_dec, corner_ra, corner_dec)
            radius = max(radius, separation)
    except RuntimeError:
        # wcs.p2s raises RuntimeError when the error bounds fall outside
        # the image; treat the angular uncertainty as unbounded.
        radius = float('inf')
    return radius
def circular_mask(xdim, ydim, radius):
    """
    Return a boolean numpy array of shape (xdim, ydim) which is False (0)
    at points within ``radius`` of the array centre and True (1)
    everywhere else.
    """
    centre_row = (xdim - 1) / 2.0
    centre_col = (ydim - 1) / 2.0
    rows, cols = numpy.ogrid[-centre_row:xdim - centre_row,
                             -centre_col:ydim - centre_col]
    distance_sq = rows * rows + cols * cols
    return distance_sq >= radius * radius
def generate_result_maps(data, sourcelist):
    """Return a source and residual image

    Given a data array (image) and list of sources, return two images, one
    showing the sources themselves and the other the residual after the
    sources have been removed from the input data.
    """
    residual_map = numpy.array(data) # array constructor copies by default
    gaussian_map = numpy.zeros(residual_map.shape)
    for src in sourcelist:
        # Include everything with 6 times the std deviation along the major
        # axis. Should be very very close to 100% of the flux.
        box_size = 6 * src.smaj.value / math.sqrt(2 * math.log(2))

        # clip the bounding box to the image; positions are converted from
        # 1-based source coordinates to 0-based array indices (the "- 1")
        lower_bound_x = max(0, int(src.x.value - 1 - box_size))
        upper_bound_x = min(residual_map.shape[0], int(src.x.value - 1 + box_size))
        lower_bound_y = max(0, int(src.y.value - 1 - box_size))
        upper_bound_y = min(residual_map.shape[1], int(src.y.value - 1 + box_size))

        # Evaluate the fitted Gaussian over just the bounding box around
        # the source (much cheaper than evaluating the whole image).
        local_gaussian = gaussian(
            src.peak.value,
            src.x.value,
            src.y.value,
            src.smaj.value,
            src.smin.value,
            src.theta.value
        )(
            numpy.indices(residual_map.shape)[0, lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y],
            numpy.indices(residual_map.shape)[1, lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y]
        )
        # add the model to the source map and subtract it from the residuals
        gaussian_map[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y] += local_gaussian
        residual_map[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y] -= local_gaussian

    return gaussian_map, residual_map
def calculate_correlation_lengths(semimajor, semiminor):
    """Calculate the Condon correlation lengths (theta_B, theta_b).

    The error formulae of Condon (1997, PASP 109, 116C) assume a circular
    correlation area of diameter theta_N (in pixels); Hopkins et al.
    (2003, AJ 125, 465) generalised this to non-axisymmetric areas with
    theta_N^2 = theta_B * theta_b. Good estimates in general are
    theta_B = 2 * semimajor and theta_b = 2 * semiminor.
    """
    theta_B = 2.0 * semimajor
    theta_b = 2.0 * semiminor
    return theta_B, theta_b
def calculate_beamsize(semimajor, semiminor):
    """Return the beam area, pi * semimajor * semiminor."""
    area = numpy.pi * semimajor * semiminor
    return area
def fudge_max_pix(semimajor, semiminor, theta):
    """Estimate the peak flux correction at the pixel of maximum flux.

    The maximum-pixel method always underestimates the Gaussian peak,
    since the true peak is never exactly at a pixel centre. Rather than
    adopting Rengelink's fixed WENSS correction of 1.06
    (1997A&AS..124..259R), assume the true peak is uniformly distributed
    over the peak pixel and average the correction over all positions.
    Because this uses the beam shape, it is strictly accurate only for
    unresolved sources.
    """
    log2 = numpy.log(2.0)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)

    def beam_profile(y, x):
        # Inverse of the elliptical Gaussian beam at offset (x, y),
        # rotated by theta.
        minor_term = ((cos_t * x + sin_t * y) / semiminor) ** 2
        major_term = ((cos_t * y - sin_t * x) / semimajor) ** 2
        return numpy.exp(log2 * (minor_term + major_term))

    # Average beam_profile over the unit pixel [-0.5, 0.5] x [-0.5, 0.5]
    # (pixel area is 1, so the integral is already the mean).
    correction, _abserr = scipy.integrate.dblquad(
        beam_profile, -0.5, 0.5, lambda _ymin: -0.5, lambda _ymax: 0.5)
    return correction
def maximum_pixel_method_variance(semimajor, semiminor, theta):
    """Estimate the variance of the maximum-pixel peak flux estimate.

    With the fudge_max_pix correction applied there is no bias (unless
    detection thresholds favour peaks near pixel centres). Disregarding
    that effect and noise, the variance is obtained by integrating
    (true flux - mean correction)^2 over the pixel and dividing by the
    pixel area (= 1): that is E[f^2] - fudge_max_pix^2 for a peak
    uniformly distributed over the pixel.
    """
    log2 = numpy.log(2.0)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)

    def squared_profile(y, x):
        # square of the inverse beam profile at offset (x, y)
        minor_term = ((cos_t * x + sin_t * y) / semiminor) ** 2
        major_term = ((cos_t * y - sin_t * x) / semimajor) ** 2
        return numpy.exp(2.0 * log2 * (minor_term + major_term))

    second_moment, _abserr = scipy.integrate.dblquad(
        squared_profile, -0.5, 0.5, lambda _ymin: -0.5, lambda _ymax: 0.5)
    return second_moment - fudge_max_pix(semimajor, semiminor, theta) ** 2
def flatten(nested_list):
    """Flatten an arbitrarily nested list.

    Nested lists are produced by the deblending algorithm. Yields the
    leaf elements depth-first, left to right; nesting may mix tuples,
    lists and numpy arrays. The output is a generator, so turn it into
    a list when needed::

        flattened = list(flatten(nested))
    """
    # Iterative depth-first traversal with an explicit stack of iterators
    # (equivalent to the classic recursive generator formulation).
    stack = [iter(nested_list)]
    while stack:
        try:
            element = next(stack[-1])
        except StopIteration:
            stack.pop()
            continue
        if isinstance(element, (tuple, list, numpy.ndarray)):
            stack.append(iter(element))
        else:
            yield element
| mkuiack/tkp | tkp/sourcefinder/utils.py | Python | bsd-2-clause | 8,748 | [
"Gaussian"
] | def1a81206a32f6eb7a9c58933a8e019fb3ff5bd476709a1d3d70ae7795aec76 |
#!/usr/bin/env python
import pysam
import argparse
import multiprocessing as mp
import subprocess
import logging
logger = logging.getLogger(__name__)
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.INFO)
from math import log10
import pandas as pd
import scipy.stats as ss
class Segment:
    """A genomic interval together with its coverage in CPM."""

    def __init__(self, chrom, start, end, cpm):
        self.chrom = chrom   # chromosome / reference name
        self.start = start   # interval start coordinate
        self.end = end       # interval end coordinate
        self.cpm = cpm       # per-base coverage in counts-per-million

    def __lt__(self, other):
        # Order by chromosome name first, then by start coordinate.
        if self.chrom == other.chrom:
            return self.start < other.start
        return self.chrom < other.chrom

    def __str__(self):
        # Render as a BED-like tab-separated record.
        return '%s\t%d\t%d\t%f' % (self.chrom, self.start, self.end, self.cpm)
def cpm(chrom, start, end, bamfn):
    """Return the mean per-base coverage of region chrom:start-end in the
    indexed BAM ``bamfn``, normalised to counts per million mapped reads
    (CPM).
    """
    bam = pysam.AlignmentFile(bamfn, 'rb')
    # total mapped reads in millions, used as the CPM normalisation factor
    n = bam.mapped / float(1e6)
    count = 0
    for read in bam.fetch(chrom, start, end):
        # count only primary alignments with mapping quality above 10
        if not read.is_secondary and read.mapq > 10: count += 1
    try:
        # reads-per-million, averaged over the region length
        return (count / n) / (end-start)
    except ZeroDivisionError:
        # empty BAM (n == 0) or zero-length region
        return 0.0
def calc_seg(chrom, binstart, binend, bam):
    """Build a Segment for [binstart, binend) with its CPM coverage."""
    coverage = cpm(chrom, binstart, binend, bam)
    return Segment(chrom, binstart, binend, coverage)
def main(args):
    """Scan the BAM in fixed-size bins, write per-bin CPM to
    <bam-prefix>.cpm.mask.txt, then rewrite that file keeping only the
    high-coverage bins (z-score above the cutoff) as the mask.
    """
    binsize = int(args.binsize)
    pool = mp.Pool(processes=int(args.procs))

    # tile every chromosome listed in the .fai with binsize windows
    # (the last window is truncated at the chromosome end)
    reslist = []
    with open(args.fai) as fai:
        for line in fai:
            chrom, chrlen = line.strip().split()[:2]
            chrlen = int(chrlen)
            for binstart in range(0, chrlen, binsize):
                binend = min(binstart + binsize, chrlen)
                res = pool.apply_async(calc_seg, [chrom, binstart, binend, args.bam])
                reslist.append(res)

    cn_segs = [res.get() for res in reslist]

    outfile = '.'.join(args.bam.split('.')[:-1]) + '.cpm.mask.txt'
    with open(outfile, 'w') as out:
        out.write('#Chrom\tStart\tEnd\tCPM\n')
        for seg in sorted(cn_segs):
            out.write('%s\n' % str(seg))

    # BUGFIX: pandas.DataFrame.from_csv was deprecated in 0.21 and removed
    # in 1.0; pandas.read_csv with equivalent arguments is the supported
    # replacement.
    data = pd.read_csv(outfile, sep='\t', header=0, index_col=None)
    data['z'] = ss.zscore(data['CPM'])
    highcov = data.loc[data['z'] > float(args.z)]
    highcov.to_csv(outfile, sep='\t', index=False)
if __name__ == '__main__':
    # command-line interface
    parser = argparse.ArgumentParser(description='Build mask from BAM based on CPM')
    parser.add_argument('--bam', required=True, help='indexed BAM')
    parser.add_argument('-f', '--fai', required=True, help='fasta index (.fai)')
    parser.add_argument('--binsize', required=True, help='bin size')
    parser.add_argument('-p', '--procs', default=1)
    parser.add_argument('-z', default=2.0, help='z-score cutoff (default = 2.0)')
    args = parser.parse_args()
    main(args)
| adamewing/tebreak | scripts/bam_cpm_mask.py | Python | mit | 2,815 | [
"pysam"
] | 0814a066ed2b81a95996d928b9f1bb28504992d574ad592c2beac2066653fa92 |
'''OpenAnything: a kind and thoughtful library for HTTP web services
This program is part of 'Dive Into Python', a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
'''
__author__ = 'Mark Pilgrim (mark@diveintopython.org)'
__version__ = '$Revision: 1.6 $'[11:-2]
__date__ = '$Date: 2004/04/16 21:16:24 $'
__copyright__ = 'Copyright (c) 2004 Mark Pilgrim'
__license__ = 'Python'
import sys
import urllib
import urllib2, urlparse, gzip, httplib, mimetypes
from StringIO import StringIO
from mapConst import *
#from django.template.defaultfilters import urlencode
#USER_AGENT = 'OpenAnything/%s +http://diveintopython.org/http_web_services/' % __version__
USER_AGENT = '%s/%s +%s' % (NAME, VERSION, WEB_ADDRESS)
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that records the redirect status code.

    The base handler follows 301/302 redirects transparently; this
    subclass additionally stores the original HTTP status on the result
    so callers can tell a redirect happened (and, on a 301, update their
    stored URL).
    """
    def http_error_301(self, req, fp, code, msg, headers):
        # follow the permanent redirect, then remember the 301 status
        result = urllib2.HTTPRedirectHandler.http_error_301(
            self, req, fp, code, msg, headers)
        result.status = code
        return result

    def http_error_302(self, req, fp, code, msg, headers):
        # follow the temporary redirect, then remember the 302 status
        result = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        result.status = code
        return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    """Error handler that returns the error response instead of raising.

    Wraps any HTTP error into an HTTPError carrying the response body
    and records the status code on it, so callers can inspect e.g. 304
    Not Modified responses like ordinary results.
    """
    def http_error_default(self, req, fp, code, msg, headers):
        result = urllib2.HTTPError(
            req.get_full_url(), code, msg, headers, fp)
        result.status = code
        return result
def encode_post_data_dict( post_data ):
    """URL-encode a dict of POST fields into a 'k1=v1&k2=v2' query string.

    BUGFIX: the original body called the undefined name ``urlencode``
    (the django import that provided it is commented out at the top of
    this module), so any call raised NameError. urllib.quote is used
    instead to percent-encode each key and value.
    """
    data = []
    for key in post_data.keys():
        data.append( urllib.quote(key) + '=' + urllib.quote(post_data[key]) )
    return '&'.join(data)
def encode_post_data( post_data ):
    """URL-encode a sequence of (key, value) pairs into a query string.

    BUGFIX: the original body called the undefined name ``urlencode``
    (its django import is commented out at the top of this module), so
    any call raised NameError. urllib.quote is used instead.
    """
    data = []
    for key, value in post_data:
        data.append( urllib.quote(key) + '=' + urllib.quote(value) )
    return '&'.join(data)
def openAnything( source, etag=None, lastmodified=None, agent=USER_AGENT, post_data=None, files=None ):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the lastmodified argument is supplied, it must be a formatted
    date/time string in GMT (as returned in the Last-Modified header of
    a previous request).  The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If post_data is supplied (a dict or a sequence of (key, value)
    pairs), the request is sent as a POST; when files (a sequence of
    (fieldname, filename) pairs) is also given, the body is encoded as
    multipart/form-data.
    """
    if hasattr(source, 'read'):
        return source

    # NOTE(review): 'sys' is used here but not imported by this module's
    # visible import list -- presumably re-exported via 'from mapConst
    # import *'; confirm.
    if source == '-':
        return sys.stdin

    # normalise dict-style post data to a list of (key, value) pairs
    if isinstance(post_data, dict):
        post_data_dict = post_data
        post_data = []
        for key in post_data_dict.keys():
            post_data.append( (key, post_data_dict[key]) )

    protocol = urlparse.urlparse(source)[0]
    if protocol=='http' or protocol=='https':
        # open URL with urllib2
        request = urllib2.Request(source)
        request.add_header('User-Agent', agent)
        if lastmodified:
            request.add_header('If-Modified-Since', lastmodified)
        if etag:
            request.add_header('If-None-Match', etag)
        if post_data and files:
            # multipart upload: regular fields plus file contents
            content_type, body = encode_multipart_formdata( post_data, files )
            request.add_header('Content-Type', content_type)
            request.add_data(body)
        elif post_data:
            # simple urlencoded POST body
            request.add_data( encode_post_data( post_data ) )
        request.add_header('Accept-encoding', 'gzip')
        opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
        return opener.open(request)

    # try to open with native open function (if source is a filename)
    try:
        return open(source)
    except (IOError, OSError):
        pass

    # treat source as string
    return StringIO(str(source))
def fetch(source, etag=None, lastmodified=None, agent=USER_AGENT, post_data=None, files=None):
    '''Fetch data and metadata from a URL, file, stream, or string

    Returns a dict with keys:
      data         -- the response body (gunzipped when the server sent
                      Content-Encoding: gzip)
      etag         -- value of the ETag response header, if present
      lastmodified -- value of the Last-Modified response header, if present
      url          -- the final URL after redirects, when available
      status       -- HTTP status code (200 assumed for non-HTTP sources)
    '''
    result = {}
    f = openAnything(source, etag, lastmodified, agent, post_data, files)
    result['data'] = f.read()
    if hasattr(f, 'headers'):
        # save ETag, if the server sent one
        result['etag'] = f.headers.get('ETag')
        # save Last-Modified header, if the server sent one
        result['lastmodified'] = f.headers.get('Last-Modified')
        if f.headers.get('content-encoding') == 'gzip':
            # data came back gzip-compressed, decompress it
            result['data'] = gzip.GzipFile(fileobj=StringIO(result['data'])).read()
    if hasattr(f, 'url'):
        result['url'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        # set by Smart/Default handlers: redirect or error status code
        result['status'] = f.status
    f.close()
    return result
def encode_multipart_formdata(fields, files):
    """
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename) elements for data to be uploaded
    as files; the file contents are read from disk here.
    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    for (key, filename) in files:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
        L.append('Content-Type: %s' % get_content_type(filename))
        L.append('')
        # read the file contents straight from disk
        L.append(open(filename,'rb').read())
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    #print '--== encode_multipart_formdata:body ==--'
    return content_type, body
def get_content_type(filename):
    """Guess the MIME type for *filename*; unknown extensions map to a
    generic binary type."""
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    return 'application/octet-stream'
| pacoqueen/cican | utils/gmapcatcher/gmapcatcher/openanything.py | Python | gpl-3.0 | 6,282 | [
"VisIt"
] | c8c4665675a0e9de665f9955f1f3d87efa3b9161a0415b0af5e7b1a2b0b417f7 |
#!/usr/bin/python
# (C) 2014, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import os
import sys
import subprocess
# Expected location of the rts2saf sources relative to the current directory.
local_dir = './rts2saf'
def frostedDirectory():
    """Exit with status 1 unless ./rts2saf exists in the current directory."""
    if not os.path.isdir(local_dir):
        print 'rts2saf_frosted.py: {} not found in local directory, change to ~/rts-2/scripts/rts2saf, exiting'.format(local_dir)
        sys.exit(1)
def executeFrosted():
    """Run the 'frosted' linter over the rts2saf sources and print output."""
    cmdL = ['frosted', '*py', 'rts2saf/*py', './unittest/*py']
    # Join into one string and run with shell=True so the shell expands
    # the glob patterns.
    cmd = ' '.join(cmdL)
    stdo, stde = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()
    # ToDo
    # NOTE(review): 1471 is a magic byte count -- presumably the length of
    # the known/accepted baseline output; confirm before changing.
    if len(stdo) > 1471:
        print stdo
    if stde:
        print stde
if __name__ == '__main__':
    # Sanity check: make sure the 'frosted' binary is installed and on PATH
    # before attempting the real run.
    try:
        subprocess.Popen(['frosted', '/dev/null'])
    except Exception, e:
        print 'frosted not found, do: sudo pip install frosted, exiting'
        sys.exit(1)
    executeFrosted()
    print 'DONE'
"VisIt"
] | b46aa19e9a50c6e2395ce55649da76a7dff7e150c203e45f30379c37937ee53f |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Gary Burton
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Manages the main window and the pluggable views
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from collections import defaultdict
import os
import time
import datetime
from io import StringIO
import posixpath
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.cli.grampscli import CLIManager
from .user import User
from .plug import tool
from gramps.gen.plug import START
from gramps.gen.plug import REPORT
from gramps.gen.plug.report._constants import standalone_categories
from .plug import (PluginWindows, ReportPluginDialog, ToolPluginDialog)
from .plug.report import report, BookSelector
from .utils import AvailableUpdates
from .pluginmanager import GuiPluginManager
from gramps.gen.relationship import get_relationship_calculator
from .displaystate import DisplayState, RecentDocsMenu
from gramps.gen.const import (HOME_DIR, ICON, URL_BUGTRACKER, URL_HOMEPAGE,
URL_MAILINGLIST, URL_MANUAL_PAGE, URL_WIKISTRING,
WIKI_EXTRAPLUGINS, URL_BUGHOME)
from gramps.gen.constfunc import is_quartz
from gramps.gen.config import config
from gramps.gen.errors import WindowActiveError
from .dialog import ErrorDialog, WarningDialog, QuestionDialog2, InfoDialog
from .widgets import Statusbar
from .undohistory import UndoHistory
from gramps.gen.utils.file import media_path_full
from .dbloader import DbLoader
from .display import display_help, display_url
from .configure import GrampsPreferences
from .aboutdialog import GrampsAboutDialog
from .navigator import Navigator
from .views.tags import Tags
from .actiongroup import ActionGroup
from gramps.gen.db.exceptions import DbWriteFailure
from .managedwindow import ManagedWindow
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
if is_quartz():
try:
from gi.repository import GtkosxApplication as QuartzApp
_GTKOSXAPPLICATION = True
except:
print("Failed to import gtk_osxapplication")
_GTKOSXAPPLICATION = False
else:
_GTKOSXAPPLICATION = False
_UNSUPPORTED = ("Unsupported", _("Unsupported"))
UIDEFAULT = '''<ui>
<menubar name="MenuBar">
<menu action="FileMenu">
<menuitem action="Open"/>
<menu action="OpenRecent">
</menu>
<menuitem action="Close"/>
<separator/>
<menuitem action="Import"/>
<menuitem action="Export"/>
<placeholder name="LocalExport"/>
<menuitem action="Backup"/>
<separator/>
<menuitem action="Abandon"/>
<menuitem action="Quit"/>
</menu>
<menu action="EditMenu">
<menuitem action="Undo"/>
<menuitem action="Redo"/>
<menuitem action="UndoHistory"/>
<separator/>
<placeholder name="CommonEdit"/>
<separator/>
<placeholder name="TagMenu"/>
<separator/>
<menuitem action="Clipboard"/>
<separator/>
<menuitem action="Preferences"/>
</menu>
<menu action="ViewMenu">
<menuitem action="ConfigView"/>
<menuitem action="Navigator"/>
<menuitem action="Toolbar"/>
<placeholder name="Bars"/>
<menuitem action="Fullscreen"/>
<separator/>
<placeholder name="ViewsInCategory"/>
<separator/>
</menu>
<menu action="GoMenu">
<placeholder name="CommonGo"/>
<placeholder name="CommonHistory"/>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook"/>
<separator/>
<placeholder name="GoToBook"/>
</menu>
<menu action="ReportsMenu">
<menuitem action="Books"/>
<separator/>
<placeholder name="P_ReportsMenu"/>
</menu>
<menu action="ToolsMenu">
<placeholder name="P_ToolsMenu"/>
</menu>
<menu action="WindowsMenu">
<placeholder name="WinMenu"/>
</menu>
<menu action="HelpMenu">
<menuitem action="UserManual"/>
<menuitem action="FAQ"/>
<menuitem action="KeyBindings"/>
<menuitem action="TipOfDay"/>
<menuitem action="PluginStatus"/>
<separator/>
<menuitem action="HomePage"/>
<menuitem action="MailingLists"/>
<menuitem action="ReportBug"/>
<menuitem action="ExtraPlugins"/>
<separator/>
<menuitem action="About"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation"/>
<separator/>
<placeholder name="CommonEdit"/>
<placeholder name="TagTool"/>
<toolitem action="Clipboard"/>
<separator/>
<toolitem action="ConfigView"/>
<placeholder name="ViewsInCategory"/>
<separator/>
<toolitem action="Reports"/>
<toolitem action="Tools"/>
</toolbar>
<accelerator action="F2"/>
<accelerator action="F3"/>
<accelerator action="F4"/>
<accelerator action="F5"/>
<accelerator action="F6"/>
<accelerator action="F7"/>
<accelerator action="F8"/>
<accelerator action="F9"/>
<accelerator action="F11"/>
<accelerator action="F12"/>
<accelerator action="<PRIMARY>1"/>
<accelerator action="<PRIMARY>2"/>
<accelerator action="<PRIMARY>3"/>
<accelerator action="<PRIMARY>4"/>
<accelerator action="<PRIMARY>5"/>
<accelerator action="<PRIMARY>6"/>
<accelerator action="<PRIMARY>7"/>
<accelerator action="<PRIMARY>8"/>
<accelerator action="<PRIMARY>9"/>
<accelerator action="<PRIMARY>0"/>
<accelerator action="<PRIMARY>BackSpace"/>
<accelerator action="<PRIMARY>J"/>
<accelerator action="<PRIMARY>N"/>
<accelerator action="<PRIMARY>P"/>
</ui>
'''
WIKI_HELP_PAGE_FAQ = '%s_-_FAQ' % URL_MANUAL_PAGE
WIKI_HELP_PAGE_KEY = '%s_-_Keybindings' % URL_MANUAL_PAGE
WIKI_HELP_PAGE_MAN = '%s' % URL_MANUAL_PAGE
#-------------------------------------------------------------------------
#
# ViewManager
#
#-------------------------------------------------------------------------
class ViewManager(CLIManager):
"""
**Overview**
The ViewManager is the session manager of the program.
Specifically, it manages the main window of the program. It is closely tied
into the Gtk.UIManager to control all menus and actions.
The ViewManager controls the various Views within the Gramps programs.
Views are organised in categories. The categories can be accessed via
a sidebar. Within a category, the different views are accesible via the
toolbar of view menu.
A View is a particular way of looking a information in the Gramps main
window. Each view is separate from the others, and has no knowledge of
the others.
Examples of current views include:
- Person View
- Relationship View
- Family View
- Source View
The View Manager does not have to know the number of views, the type of
views, or any other details about the views. It simply provides the
method of containing each view, and has methods for creating, deleting and
switching between the views.
"""
    def __init__(self, dbstate, view_category_order, user=None):
        """
        The viewmanager is initialised with a dbstate on which GRAMPS is
        working, and a fixed view_category_order, which is the order in which
        the view categories are accessible in the sidebar.

        Note: the order of operations below matters -- the main window must
        exist (sets self.uistate) before the User object and plugin
        registration can be created/run.
        """
        CLIManager.__init__(self, dbstate, setloader=False, user=user)
        if _GTKOSXAPPLICATION:
            self.macapp = QuartzApp.Application()
        self.view_category_order = view_category_order
        #set pluginmanager to GUI one
        self._pmgr = GuiPluginManager.get_instance()
        self.merge_ids = []
        self.toolactions = None
        self.tool_menu_ui_id = None
        self.reportactions = None
        self.report_menu_ui_id = None
        self.active_page = None
        self.pages = []
        self.page_lookup = {}
        self.views = None
        self.current_views = [] # The current view in each category
        self.view_changing = False
        self.show_navigator = config.get('interface.view')
        self.show_toolbar = config.get('interface.toolbar-on')
        self.fullscreen = config.get('interface.fullscreen')
        self.__build_main_window() # sets self.uistate
        if self.user is None:
            # No user supplied by the caller: build a GUI user tied to the
            # main window and its progress bar.
            self.user = User(error=ErrorDialog,
                             parent=self.window,
                             callback=self.uistate.pulse_progressbar,
                             uistate=self.uistate,
                             dbstate=self.dbstate)
        self.__connect_signals()
        if _GTKOSXAPPLICATION:
            self.macapp.ready()
        self.do_reg_plugins(self.dbstate, self.uistate)
        #plugins loaded now set relationship class
        self.rel_class = get_relationship_calculator()
        self.uistate.set_relationship_class()
        # Need to call after plugins have been registered
        self.uistate.connect('update-available', self.process_updates)
        self.check_for_updates()
        # Set autobackup
        self.uistate.connect('autobackup', self.autobackup)
        self.uistate.set_backup_timer()
def check_for_updates(self):
"""
Check for add-on updates.
"""
howoften = config.get("behavior.check-for-addon-updates")
update = False
if howoften != 0: # update never if zero
year, mon, day = list(map(
int, config.get("behavior.last-check-for-addon-updates").split("/")))
days = (datetime.date.today() - datetime.date(year, mon, day)).days
if howoften == 1 and days >= 30: # once a month
update = True
elif howoften == 2 and days >= 7: # once a week
update = True
elif howoften == 3 and days >= 1: # once a day
update = True
elif howoften == 4: # always
update = True
if update:
AvailableUpdates(self.uistate).start()
    def process_updates(self, addon_update_list):
        """
        Called when add-on updates are available.

        Installs the updates via the plugin window, then re-registers
        plugins so the new versions are picked up immediately.
        """
        PluginWindows.UpdateAddons(self.uistate, [], addon_update_list)
        self.do_reg_plugins(self.dbstate, self.uistate)
    def _errordialog(self, title, errormessage):
        """
        Show the error.
        In the GUI, the error is shown, and a return happens
        """
        ErrorDialog(title, errormessage,
                    parent=self.uistate.window)
        # Non-zero value mirrors the CLI error handler's convention.
        return 1
    def __build_main_window(self):
        """
        Build the GTK interface: main window, navigator sidebar, notebook
        of view pages, menubar, toolbar and statusbar. Sets self.uistate.
        """
        # Restore saved geometry.
        width = config.get('interface.main-window-width')
        height = config.get('interface.main-window-height')
        horiz_position = config.get('interface.main-window-horiz-position')
        vert_position = config.get('interface.main-window-vert-position')
        self.window = Gtk.Window()
        self.window.set_icon_from_file(ICON)
        self.window.set_default_size(width, height)
        self.window.move(horiz_position, vert_position)
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.window.add(vbox)
        hpane = Gtk.Paned()
        self.ebox = Gtk.EventBox()
        self.navigator = Navigator(self)
        self.ebox.add(self.navigator.get_top())
        hpane.add1(self.ebox)
        hpane.show()
        # Notebook holds one page per view; tabs hidden because navigation
        # goes through the sidebar/menus.
        self.notebook = Gtk.Notebook()
        self.notebook.set_scrollable(True)
        self.notebook.set_show_tabs(False)
        self.notebook.show()
        self.__init_lists()
        self.__build_ui_manager()
        hpane.add2(self.notebook)
        self.menubar = self.uimanager.get_widget('/MenuBar')
        self.toolbar = self.uimanager.get_widget('/ToolBar')
        self.__attach_menubar(vbox)
        vbox.pack_start(self.toolbar, False, True, 0)
        vbox.pack_start(hpane, True, True, 0)
        self.statusbar = Statusbar()
        self.statusbar.show()
        vbox.pack_end(self.statusbar, False, True, 0)
        vbox.show()
        self.uistate = DisplayState(self.window, self.statusbar,
                                    self.uimanager, self)
        # Create history objects
        for nav_type in ('Person', 'Family', 'Event', 'Place', 'Source',
                         'Citation', 'Repository', 'Note', 'Media'):
            self.uistate.register(self.dbstate, nav_type, 0)
        self.dbstate.connect('database-changed', self.uistate.db_changed)
        self.tags = Tags(self.uistate, self.dbstate)
        self.sidebar_menu = self.uimanager.get_widget(
            '/MenuBar/ViewMenu/Sidebar/')
        # handle OPEN button, insert it into the toolbar. Unfortunately,
        # UIManager has no built in support for and Open Recent button
        openbtn = self.__build_open_button()
        self.uistate.set_open_widget(openbtn)
        self.toolbar.insert(openbtn, 0)
        self.recent_manager = RecentDocsMenu(
            self.uistate, self.dbstate, self._read_recent_file)
        self.recent_manager.build()
        self.db_loader = DbLoader(self.dbstate, self.uistate)
        self.__setup_navigator()
        # Apply saved visibility/fullscreen preferences.
        if self.show_toolbar:
            self.toolbar.show()
        else:
            self.toolbar.hide()
        if self.fullscreen:
            self.window.fullscreen()
        self.window.set_title("%s - Gramps" % _('No Family Tree'))
        self.window.show()
def __setup_navigator(self):
"""
If we have enabled te sidebar, show it, and turn off the tabs. If
disabled, hide the sidebar and turn on the tabs.
"""
if self.show_navigator:
self.ebox.show()
else:
self.ebox.hide()
def __build_open_button(self):
"""
Build the OPEN button. Since GTK's UIManager does not have support for
the Open Recent button, we must build in on our own.
"""
openbtn = Gtk.MenuToolButton()
openbtn.set_icon_name('gramps')
openbtn.connect('clicked', self.__open_activate)
openbtn.set_sensitive(False)
openbtn.set_tooltip_text(_("Connect to a recent database"))
openbtn.show()
return openbtn
    def __connect_signals(self):
        """
        Connects the signals needed: window close, notebook page switches
        and (on quartz/macOS) application termination.
        """
        self.window.connect('delete-event', self.quit)
        self.notebook.connect('switch-page', self.view_changed)
        if _GTKOSXAPPLICATION:
            self.macapp.connect('NSApplicationWillTerminate', self.quit)
def __init_lists(self):
"""
Initialize the actions lists for the UIManager
"""
self._file_action_list = [
('FileMenu', None, _('_Family Trees')),
('Open', 'gramps-db', _('_Manage Family Trees...'), "<PRIMARY>o",
_("Manage databases"), self.__open_activate),
('OpenRecent', None, _('Open _Recent'), None,
_("Open an existing database")),
('Quit', 'application-exit', _('_Quit'), "<PRIMARY>q", None,
self.quit),
('ViewMenu', None, _('_View')),
('EditMenu', None, _('_Edit')),
('Preferences', 'preferences-system', _('_Preferences...'), None,
None, self.preferences_activate),
('HelpMenu', None, _('_Help')),
('HomePage', None, _('Gramps _Home Page'), None, None,
home_page_activate),
('MailingLists', None, _('Gramps _Mailing Lists'), None, None,
mailing_lists_activate),
('ReportBug', None, _('_Report a Bug'), None, None,
report_bug_activate),
('ExtraPlugins', None, _('_Extra Reports/Tools'), None, None,
extra_plugins_activate),
('About', 'help-about', _('_About'), None, None,
self.display_about_box),
('PluginStatus', None, _('_Plugin Manager'), None, None,
self.__plugin_status),
('FAQ', None, _('_FAQ'), None, None, faq_activate),
('KeyBindings', None, _('_Key Bindings'), None, None, key_bindings),
('UserManual', 'help-browser', _('_User Manual'), 'F1', None,
manual_activate),
('TipOfDay', None, _('Tip of the Day'), None, None,
self.tip_of_day_activate),
]
self._readonly_action_list = [
('Close', None, _('_Close'), "<control>w",
_("Close the current database"), self.close_database),
('Export', 'gramps-export', _('_Export...'), "<PRIMARY>e", None,
self.export_data),
('Backup', None, _("Make Backup..."), None,
_("Make a Gramps XML backup of the database"), self.quick_backup),
('Abandon', 'document-revert',
_('_Abandon Changes and Quit'), None, None, self.abort),
('Reports', 'gramps-reports', _('_Reports'), None,
_("Open the reports dialog"), self.reports_clicked),
('GoMenu', None, _('_Go')),
('ReportsMenu', None, _('_Reports')),
('Books', None, _('Books...'), None, None, self.run_book),
('WindowsMenu', None, _('_Windows')),
('F2', None, 'F2', "F2", None, self.__keypress),
('F3', None, 'F3', "F3", None, self.__keypress),
('F4', None, 'F4', "F4", None, self.__keypress),
('F5', None, 'F5', "F5", None, self.__keypress),
('F6', None, 'F6', "F6", None, self.__keypress),
('F7', None, 'F7', "F7", None, self.__keypress),
('F8', None, 'F9', "F8", None, self.__keypress),
('F9', None, 'F9', "F9", None, self.__keypress),
('F11', None, 'F11', "F11", None, self.__keypress),
('<PRIMARY>1', None, '<PRIMARY>1', "<PRIMARY>1", None,
self.__gocat),
('<PRIMARY>2', None, '<PRIMARY>2', "<PRIMARY>2", None,
self.__gocat),
('<PRIMARY>3', None, '<PRIMARY>3', "<PRIMARY>3", None,
self.__gocat),
('<PRIMARY>4', None, '<PRIMARY>4', "<PRIMARY>4", None,
self.__gocat),
('<PRIMARY>5', None, '<PRIMARY>5', "<PRIMARY>5", None,
self.__gocat),
('<PRIMARY>6', None, '<PRIMARY>6', "<PRIMARY>6", None,
self.__gocat),
('<PRIMARY>7', None, '<PRIMARY>7', "<PRIMARY>7", None,
self.__gocat),
('<PRIMARY>8', None, '<PRIMARY>8', "<PRIMARY>8", None,
self.__gocat),
('<PRIMARY>9', None, '<PRIMARY>9', "<PRIMARY>9", None,
self.__gocat),
('<PRIMARY>0', None, '<PRIMARY>0', "<PRIMARY>0", None,
self.__gocat),
# NOTE: CTRL+ALT+NUMBER is set in src/plugins/sidebar/cat...py
('<PRIMARY>BackSpace', None, '<PRIMARY>BackSpace',
"<PRIMARY>BackSpace", None, self.__keypress),
('<PRIMARY>Delete', None, '<PRIMARY>Delete',
"<PRIMARY>Delete", None, self.__keypress),
('<PRIMARY>Insert', None, '<PRIMARY>Insert',
"<PRIMARY>Insert", None, self.__keypress),
('F12', None, 'F12', "F12", None, self.__keypress),
('<PRIMARY>J', None, '<PRIMARY>J',
"<PRIMARY>J", None, self.__keypress),
('<PRIMARY>N', None, '<PRIMARY>N', "<PRIMARY>N", None,
self.__next_view),
('<PRIMARY>P', None, '<PRIMARY>P', "<PRIMARY>P", None,
self.__prev_view),
]
self._action_action_list = [
('Clipboard', 'edit-paste', _('Clip_board'), "<PRIMARY>b",
_("Open the Clipboard dialog"), self.clipboard),
('Import', 'gramps-import', _('_Import...'), "<PRIMARY>i", None,
self.import_data),
('Tools', 'gramps-tools', _('_Tools'), None,
_("Open the tools dialog"), self.tools_clicked),
('BookMenu', None, _('_Bookmarks')),
('ToolsMenu', None, _('_Tools')),
('ConfigView', 'gramps-config', _('_Configure...'),
'<shift><PRIMARY>c', _('Configure the active view'),
self.config_view),
]
self._file_toggle_action_list = [
('Navigator', None, _('_Navigator'), "<PRIMARY>m", None,
self.navigator_toggle, self.show_navigator),
('Toolbar', None, _('_Toolbar'), None, None, self.toolbar_toggle,
self.show_toolbar),
('Fullscreen', None, _('F_ull Screen'), "F11", None,
self.fullscreen_toggle, self.fullscreen),
]
self._undo_action_list = [
('Undo', 'edit-undo', _('_Undo'), '<PRIMARY>z', None,
self.undo),
]
self._redo_action_list = [
('Redo', 'edit-redo', _('_Redo'), '<shift><PRIMARY>z', None,
self.redo),
]
self._undo_history_action_list = [
('UndoHistory', 'gramps-undo-history',
_('Undo History...'), "<PRIMARY>H", None, self.undo_history),
]
def run_book(self, action):
"""
Run a book.
"""
try:
BookSelector(self.dbstate, self.uistate)
except WindowActiveError:
return
    def __keypress(self, action):
        """
        Callback that is called on a keypress. It works by extracting the
        name of the associated action, and passes that to the active page
        (current view) so that it can take the associated action.
        """
        name = action.get_name()
        try:
            self.active_page.call_function(name)
        except Exception:
            # The active page has no handler bound to this key (or there
            # is no active page); tell the user instead of crashing.
            self.uistate.push_message(self.dbstate,
                                      _("Key %s is not bound") % name)
def __gocat(self, action):
"""
Callback that is called on ctrl+number press. It moves to the
requested category like __next_view/__prev_view. 0 is 10
"""
cat = int(action.get_name()[-1])
if cat == 0:
cat = 10
cat -= 1
if cat >= len(self.current_views):
#this view is not present
return False
self.goto_page(cat, None)
def __next_view(self, action):
"""
Callback that is called when the next category action is selected. It
selects the next category as the active category. If we reach the end,
we wrap around to the first.
"""
curpage = self.notebook.get_current_page()
#find cat and view of the current page
for key in self.page_lookup:
if self.page_lookup[key] == curpage:
cat_num, view_num = key
break
#now go to next category
if cat_num >= len(self.current_views)-1:
self.goto_page(0, None)
else:
self.goto_page(cat_num+1, None)
def __prev_view(self, action):
"""
Callback that is called when the previous category action is selected.
It selects the previous category as the active category. If we reach
the beginning of the list, we wrap around to the last.
"""
curpage = self.notebook.get_current_page()
#find cat and view of the current page
for key in self.page_lookup:
if self.page_lookup[key] == curpage:
cat_num, view_num = key
break
#now go to next category
if cat_num > 0:
self.goto_page(cat_num-1, None)
else:
self.goto_page(len(self.current_views)-1, None)
    def init_interface(self):
        """
        Initialize the interface: instantiate the view categories, build
        the report/tool menus and wire up plugin-reload handling.
        """
        self.views = self.get_available_views()
        defaults = views_to_show(self.views,
                                 config.get('preferences.use-last-view'))
        self.current_views = defaults[2]
        self.navigator.load_plugins(self.dbstate, self.uistate)
        self.goto_page(defaults[0], defaults[1])
        # Temporarily desensitize while menus are (re)built.
        self.fileactions.set_sensitive(False)
        self.__build_tools_menu(self._pmgr.get_reg_tools())
        self.__build_report_menu(self._pmgr.get_reg_reports())
        self._pmgr.connect('plugins-reloaded',
                           self.__rebuild_report_and_tool_menus)
        self.fileactions.set_sensitive(True)
        self.uistate.widget.set_sensitive(True)
        # With no tree loaded, grey out and hide everything that needs one.
        if not self.file_loaded:
            self.actiongroup.set_sensitive(False)
            self.readonlygroup.set_sensitive(False)
            self.undoactions.set_sensitive(False)
            self.redoactions.set_sensitive(False)
            self.undohistoryactions.set_sensitive(False)
            self.actiongroup.set_visible(False)
            self.readonlygroup.set_visible(False)
            self.undoactions.set_visible(False)
            self.redoactions.set_visible(False)
            self.undohistoryactions.set_visible(False)
        self.uimanager.ensure_update()
        config.connect("interface.statusbar", self.__statusbar_key_update)
    def __statusbar_key_update(self, client, cnxn_id, entry, data):
        """
        Callback function for statusbar key update; the signature is
        dictated by the config callback protocol (arguments unused here).
        """
        self.uistate.modify_statusbar(self.dbstate)
def post_init_interface(self, show_manager=True):
"""
Showing the main window is deferred so that
ArgHandler can work without it always shown
"""
self.window.show()
if not self.dbstate.is_open() and show_manager:
self.__open_activate(None)
    def do_reg_plugins(self, dbstate, uistate):
        """
        Register the plugins at initialization time. The plugin status window
        is opened on an error if the user has requested.
        """
        # registering plugins
        self.uistate.status_text(_('Registering plugins...'))
        error = CLIManager.do_reg_plugins(self, dbstate, uistate)
        # get to see if we need to open the plugin status window
        # (only when the user enabled the pop-up behaviour)
        if error and config.get('behavior.pop-plugin-status'):
            self.__plugin_status()
        self.uistate.push_message(self.dbstate, _('Ready'))
    def close_database(self, action=None, make_backup=True):
        """
        Close the database.

        NOTE(review): ``make_backup`` is accepted but never used in this
        body -- kept for caller compatibility; confirm before removing.
        """
        self.dbstate.no_database()
        self.post_close_db()
    def quit(self, *obj):
        """
        Close out the program: back up and close the database, let each
        page save its state, persist window geometry, then stop GTK.
        """
        # mark interface insensitive to prevent unexpected events
        self.uistate.set_sensitive(False)
        # backup data
        if config.get('database.backup-on-exit'):
            self.autobackup()
        # close the database
        if self.dbstate.is_open():
            self.dbstate.db.close(user=self.user)
        # have each page save anything, if they need to:
        self.__delete_pages()
        # save the current window size
        (width, height) = self.window.get_size()
        config.set('interface.main-window-width', width)
        config.set('interface.main-window-height', height)
        # save the current window position
        (horiz_position, vert_position) = self.window.get_position()
        config.set('interface.main-window-horiz-position', horiz_position)
        config.set('interface.main-window-vert-position', vert_position)
        config.save()
        Gtk.main_quit()
    def abort(self, obj=None):
        """
        Abandon changes and quit.

        Asks for confirmation, then undoes every change made in this
        session before quitting. When the undo limit was exceeded a full
        rollback is impossible, so only a warning is shown.
        """
        if self.dbstate.db.abort_possible:
            dialog = QuestionDialog2(
                _("Abort changes?"),
                _("Aborting changes will return the database to the state "
                  "it was before you started this editing session."),
                _("Abort changes"),
                _("Cancel"),
                parent=self.uistate.window)
            if dialog.run():
                # Signals off while unwinding, to avoid a UI refresh storm.
                self.dbstate.db.disable_signals()
                while self.dbstate.db.undo():
                    pass
                self.quit()
        else:
            WarningDialog(
                _("Cannot abandon session's changes"),
                _('Changes cannot be completely abandoned because the '
                  'number of changes made in the session exceeded the '
                  'limit.'), parent=self.uistate.window)
def __init_action_group(self, name, actions, sensitive=True, toggles=None):
"""
Initialize an action group for the UIManager
"""
new_group = ActionGroup(name=name)
new_group.add_actions(actions)
if toggles:
new_group.add_toggle_actions(toggles)
new_group.set_sensitive(sensitive)
self.uimanager.insert_action_group(new_group, 1)
return new_group
    def __build_ui_manager(self):
        """
        Builds the UIManager, and the associated action groups.

        One action group per sensitivity domain, so whole families of
        actions can be enabled/disabled together (see init_interface).
        """
        self.uimanager = Gtk.UIManager()
        accelgroup = self.uimanager.get_accel_group()
        self.actiongroup = self.__init_action_group(
            'MainWindow', self._action_action_list)
        self.readonlygroup = self.__init_action_group(
            'AllMainWindow', self._readonly_action_list)
        self.undohistoryactions = self.__init_action_group(
            'UndoHistory', self._undo_history_action_list)
        self.fileactions = self.__init_action_group(
            'FileWindow', self._file_action_list,
            toggles=self._file_toggle_action_list)
        # Undo/redo start insensitive until there is something to undo.
        self.undoactions = self.__init_action_group(
            'Undo', self._undo_action_list, sensitive=False)
        self.redoactions = self.__init_action_group(
            'Redo', self._redo_action_list, sensitive=False)
        self.window.add_accel_group(accelgroup)
        self.uimanager.add_ui_from_string(UIDEFAULT)
        self.uimanager.ensure_update()
def __attach_menubar(self, vbox):
"""
Attach the menubar
"""
vbox.pack_start(self.menubar, False, True, 0)
if _GTKOSXAPPLICATION:
self.menubar.hide()
quit_item = self.uimanager.get_widget("/MenuBar/FileMenu/Quit")
about_item = self.uimanager.get_widget("/MenuBar/HelpMenu/About")
prefs_item = self.uimanager.get_widget(
"/MenuBar/EditMenu/Preferences")
self.macapp.set_menu_bar(self.menubar)
self.macapp.insert_app_menu_item(about_item, 0)
self.macapp.insert_app_menu_item(prefs_item, 1)
def preferences_activate(self, obj):
"""
Open the preferences dialog.
"""
try:
GrampsPreferences(self.uistate, self.dbstate)
except WindowActiveError:
return
    def tip_of_day_activate(self, obj):
        """
        Display Tip of the day
        """
        # Deferred import: the dialog module is only needed on request.
        from .tipofday import TipOfDay
        TipOfDay(self.uistate)
    def __plugin_status(self, obj=None, data=None):
        """
        Display plugin status dialog; ignore the request when the dialog
        is already open.
        """
        try:
            PluginWindows.PluginStatus(self.dbstate, self.uistate, [])
        except WindowActiveError:
            pass
def navigator_toggle(self, obj, data=None):
"""
Set the sidebar based on the value of the toggle button. Save the
results in the configuration settings
"""
if obj.get_active():
self.ebox.show()
config.set('interface.view', True)
self.show_navigator = True
else:
self.ebox.hide()
config.set('interface.view', False)
self.show_navigator = False
config.save()
def toolbar_toggle(self, obj, data=None):
"""
Set the toolbar based on the value of the toggle button. Save the
results in the configuration settings
"""
if obj.get_active():
self.toolbar.show()
config.set('interface.toolbar-on', True)
else:
self.toolbar.hide()
config.set('interface.toolbar-on', False)
config.save()
def fullscreen_toggle(self, obj, data=None):
"""
Set the main Granps window fullscreen based on the value of the
toggle button. Save the setting in the config file.
"""
if obj.get_active():
self.window.fullscreen()
config.set('interface.fullscreen', True)
else:
self.window.unfullscreen()
config.set('interface.fullscreen', False)
config.save()
    def get_views(self):
        """
        Return the view definitions, grouped by category
        (as built by init_interface).
        """
        return self.views
    def goto_page(self, cat_num, view_num):
        """
        Create the page if it doesn't exist and make it the current page.

        A ``view_num`` of None means "the last view used in this
        category". Returns the page object.
        """
        if view_num is None:
            view_num = self.current_views[cat_num]
        else:
            self.current_views[cat_num] = view_num
        page_num = self.page_lookup.get((cat_num, view_num))
        if page_num is None:
            # Page not built yet: create it lazily and remember its index.
            page_def = self.views[cat_num][view_num]
            page_num = self.notebook.get_n_pages()
            self.page_lookup[(cat_num, view_num)] = page_num
            self.__create_page(page_def[0], page_def[1])
        self.notebook.set_current_page(page_num)
        return self.pages[page_num]
def get_category(self, cat_name):
"""
Return the category number from the given category name.
"""
for cat_num, cat_views in enumerate(self.views):
if cat_name == cat_views[0][0].category[1]:
return cat_num
return None
    def __create_dummy_page(self, pdata, error):
        """ Create a dummy page shown in place of a view that failed to
        load; *error* is the formatted traceback text. """
        # Deferred import: pageview is only needed on failure.
        from .views.pageview import DummyPage
        return DummyPage(pdata.name, pdata, self.dbstate, self.uistate,
                         _("View failed to load. Check error output."), error)
    def __create_page(self, pdata, page_def):
        """
        Create a new page and set it as the current page.

        Falls back to a dummy page when the view fails to instantiate or
        to build its display, so one broken plugin cannot take down the
        whole UI.
        """
        try:
            page = page_def(pdata, self.dbstate, self.uistate)
        except:
            # Bare except is deliberate: any plugin failure must not
            # propagate; log it and substitute a dummy page.
            import traceback
            LOG.warning("View '%s' failed to load.", pdata.id)
            traceback.print_exc()
            page = self.__create_dummy_page(pdata, traceback.format_exc())
        try:
            page_display = page.get_display()
        except:
            import traceback
            print("ERROR: '%s' failed to create view" % pdata.name)
            traceback.print_exc()
            page = self.__create_dummy_page(pdata, traceback.format_exc())
            page_display = page.get_display()
        page.define_actions()
        page.post()
        self.pages.append(page)
        # create icon/label for notebook tab (useful for debugging)
        hbox = Gtk.Box()
        image = Gtk.Image()
        image.set_from_icon_name(page.get_stock(), Gtk.IconSize.MENU)
        hbox.pack_start(image, False, True, 0)
        hbox.add(Gtk.Label(label=pdata.name))
        hbox.show_all()
        # NOTE(review): page_display is unused below; append_page calls
        # get_display() again -- presumably it returns the same widget,
        # confirm before consolidating.
        page_num = self.notebook.append_page(page.get_display(), hbox)
        if not self.file_loaded:
            self.actiongroup.set_sensitive(False)
            self.readonlygroup.set_sensitive(False)
            self.undoactions.set_sensitive(False)
            self.redoactions.set_sensitive(False)
            self.undohistoryactions.set_sensitive(False)
            self.actiongroup.set_visible(False)
            self.readonlygroup.set_visible(False)
            self.undoactions.set_visible(False)
            self.redoactions.set_visible(False)
            self.undohistoryactions.set_visible(False)
        self.uimanager.ensure_update()
        return page
def view_changed(self, notebook, page, page_num):
    """
    Callback run when the notebook switches to another page.

    Persists the selection in the configuration and activates the page.
    """
    if self.view_changing:
        return
    self.view_changing = True
    cat_num = view_num = None
    # Reverse-lookup the (category, view) pair behind this notebook page.
    for cat_view, number in self.page_lookup.items():
        if number == page_num:
            cat_num, view_num = cat_view
            break
    # Save last view in configuration
    view_id = self.views[cat_num][view_num][0].id
    config.set('preferences.last-view', view_id)
    last_views = config.get('preferences.last-views')
    if len(last_views) != len(self.views):
        # Category count changed since last run: reset the defaults.
        last_views = [''] * len(self.views)
    last_views[cat_num] = view_id
    config.set('preferences.last-views', last_views)
    config.save()
    self.navigator.view_changed(cat_num, view_num)
    self.__change_page(page_num)
    self.view_changing = False
def __change_page(self, page_num):
    """
    Activate the page at *page_num* and refresh the surrounding UI.
    """
    if not self.dbstate.is_open():
        return
    self.__disconnect_previous_page()
    self.active_page = self.pages[page_num]
    self.active_page.set_active()
    self.__connect_active_page(page_num)
    self.uimanager.ensure_update()
    if _GTKOSXAPPLICATION:
        self.macapp.sync_menubar()
    # Drain pending GTK events so the UI is consistent before the
    # page-change hook runs.
    while Gtk.events_pending():
        Gtk.main_iteration()
    self.active_page.change_page()
def __delete_pages(self):
    """Invoke on_delete() on every managed view page."""
    for view_page in self.pages:
        view_page.on_delete()
def __disconnect_previous_page(self):
    """
    Tear down the previously active page: remove its merged UI
    elements and drop its action groups from the UI manager.
    """
    for merge_id in self.merge_ids:
        self.uimanager.remove_ui(merge_id)
    previous = self.active_page
    if previous is not None:
        previous.set_inactive()
        for group in previous.get_actions():
            if group in self.uimanager.get_action_groups():
                self.uimanager.remove_action_group(group)
        self.active_page = None
def __connect_active_page(self, page_num):
    """
    Insert the active page's action groups and UI definitions into
    the UIManager and sync the Configure action's sensitivity.
    """
    page = self.active_page
    for group in page.get_actions():
        self.uimanager.insert_action_group(group, 1)
    self.merge_ids = [self.uimanager.add_ui_from_string(page.ui_definition())]
    for extra_ui in page.additional_ui_definitions():
        self.merge_ids.append(self.uimanager.add_ui_from_string(extra_ui))
    configaction = self.actiongroup.get_action('ConfigView')
    if page.can_configure():
        configaction.set_sensitive(True)
    else:
        configaction.set_sensitive(False)
def import_data(self, obj):
    """
    Import a file into the currently open family tree.
    """
    if not self.dbstate.is_open():
        return
    self.db_loader.import_file()
    infotxt = self.db_loader.import_info_text()
    if infotxt:
        InfoDialog(_('Import Statistics'), infotxt, parent=self.window)
    self.__post_load()
def __open_activate(self, obj):
    """
    Called when the Open button is clicked, opens the DbManager.

    If a tree was selected, the current database is closed and the new
    one is loaded; otherwise the main-window title may still be
    updated to reflect a rename made inside the manager.
    """
    from .dbman import DbManager
    dialog = DbManager(self.uistate, self.dbstate, self, self.window)
    value = dialog.run()
    if value:
        # A family tree was chosen: swap databases and reload the GUI.
        if self.dbstate.is_open():
            self.dbstate.db.close(user=self.user)
        (filename, title) = value
        self.db_loader.read_file(filename)
        self._post_load_newdb(filename, 'x-directory/normal', title)
    else:
        if dialog.after_change != "":
            # We change the title of the main window.
            old_title = self.uistate.window.get_title()
            if old_title:
                # Title format is "<name> - Gramps"; split at the dash.
                delim = old_title.find(' - ')
                tit1 = old_title[:delim]
                tit2 = old_title[delim:]
                new_title = dialog.after_change
                # NOTE(review): '<=' in the suffix appears to mark an
                # earlier rename whose chain is collapsed here — confirm.
                if '<=' in tit2:
                    ## delim2 = tit2.find('<=') + 3
                    ## tit3 = tit2[delim2:-1]
                    new_title += tit2.replace(']', '') + ' => ' + tit1 + ']'
                else:
                    new_title += tit2 + ' <= [' + tit1 + ']'
                self.uistate.window.set_title(new_title)
def __post_load(self):
    """
    Common UI bookkeeping after data arrives (new file or import):
    wire the undo/redo callbacks and reset their menu labels.
    """
    database = self.dbstate.db
    database.undo_callback = self.__change_undo_label
    database.redo_callback = self.__change_redo_label
    self.__change_undo_label(None)
    self.__change_redo_label(None)
    database.undo_history_callback = self.undo_history_update
    self.undo_history_close()
def _post_load_newdb(self, filename, filetype, title=None):
    """
    Run after a new database is loaded: first the CLI (non-GUI)
    post-load steps inherited from the base class, then the GUI ones.
    """
    self._post_load_newdb_nongui(filename, title)
    self._post_load_newdb_gui(filename, filetype, title)
def _post_load_newdb_gui(self, filename, filetype, title=None):
    """
    GUI-side work after a new database is loaded: window title,
    action-group visibility/sensitivity and the recent-files menu.
    """
    # GUI related post load db stuff
    # Update window title. endswith() is safe on an empty filename;
    # the previous `filename[-1]` indexing raised IndexError for "".
    if filename.endswith(os.path.sep):
        filename = filename[:-1]
    name = os.path.basename(filename)
    if title:
        name = title
    if self.dbstate.db.readonly:
        msg = "%s (%s) - Gramps" % (name, _('Read Only'))
        self.uistate.window.set_title(msg)
        self.actiongroup.set_sensitive(False)
    else:
        msg = "%s - Gramps" % name
        self.uistate.window.set_title(msg)
        self.actiongroup.set_sensitive(True)
    self.__change_page(self.notebook.get_current_page())
    # Re-enable and show all the action groups for the loaded tree.
    self.actiongroup.set_visible(True)
    self.readonlygroup.set_visible(True)
    self.undoactions.set_visible(True)
    self.redoactions.set_visible(True)
    self.undohistoryactions.set_visible(True)
    self.actiongroup.set_sensitive(True)
    self.readonlygroup.set_sensitive(True)
    self.undoactions.set_sensitive(True)
    self.redoactions.set_sensitive(True)
    self.undohistoryactions.set_sensitive(True)
    self.recent_manager.build()
    # Call common __post_load method for GUI update after a change
    self.__post_load()
def post_close_db(self):
    """
    GUI cleanup after a database has been closed: reset the title,
    hide/disable tree-specific actions and forget the recent file.
    """
    self.undo_history_close()
    self.uistate.window.set_title("%s - Gramps" % _('No Family Tree'))
    self.actiongroup.set_sensitive(False)
    self.readonlygroup.set_sensitive(False)
    self.undohistoryactions.set_sensitive(False)
    self.uistate.clear_filter_results()
    self.__disconnect_previous_page()
    for group in (self.actiongroup, self.readonlygroup, self.undoactions,
                  self.redoactions, self.undohistoryactions):
        group.set_visible(False)
    self.uimanager.ensure_update()
    config.set('paths.recent-file', '')
    config.save()
def __change_undo_label(self, label):
    """
    Rebuild the Undo action group with *label* (or the default text).
    """
    self.uimanager.remove_action_group(self.undoactions)
    self.undoactions = Gtk.ActionGroup(name='Undo')
    # Fall back to the generic translated label when none was supplied.
    text = label if label else _('_Undo')
    self.undoactions.add_actions([
        ('Undo', 'edit-undo', text, '<PRIMARY>z', None, self.undo)])
    self.undoactions.set_sensitive(False)
    self.uimanager.insert_action_group(self.undoactions, 1)
def __change_redo_label(self, label):
    """
    Rebuild the Redo action group with *label* (or the default text).
    """
    self.uimanager.remove_action_group(self.redoactions)
    self.redoactions = Gtk.ActionGroup(name='Redo')
    if label:
        self.redoactions.add_actions([
            ('Redo', 'edit-redo', label, '<shift><PRIMARY>z',
             None, self.redo)])
    else:
        # Bug fix: the fallback previously used the 'edit-undo' icon,
        # giving the Redo menu item an Undo arrow.
        self.redoactions.add_actions([
            ('Redo', 'edit-redo', _('_Redo'),
             '<shift><PRIMARY>z', None, self.redo)])
    self.redoactions.set_sensitive(False)
    self.uimanager.insert_action_group(self.redoactions, 1)
def undo_history_update(self):
    """
    Refresh the Undo History window, if one has been created.

    Also implicitly keeps the Undo History menu state in sync.
    """
    try:
        # EAFP: the window attribute only exists once the window opened.
        self.undo_history_window.update()
    except AttributeError:
        # No undo-history window yet; nothing to refresh.
        return
def undo_history_close(self):
    """
    Close the Undo History window if it exists and is open.
    """
    try:
        # EAFP: the window attribute only exists once the window opened.
        if self.undo_history_window.opened:
            self.undo_history_window.close()
    except AttributeError:
        # No undo-history window yet; nothing to close.
        return
def quick_backup(self, obj):
    """
    Open the quick XML backup dialog (ignored if already open).
    """
    try:
        QuickBackup(self.dbstate, self.uistate, self.user)
    except WindowActiveError:
        # A backup dialog is already showing; nothing to do.
        pass
def autobackup(self):
    """
    Automatically back up the family tree if it has unsaved changes.
    """
    database = self.dbstate.db
    if not (database.is_open() and database.has_changed):
        return
    self.uistate.set_busy_cursor(True)
    self.uistate.progress.show()
    self.uistate.push_message(self.dbstate, _("Autobackup..."))
    try:
        self.__backup()
    except DbWriteFailure:
        self.uistate.push_message(self.dbstate,
                                  _("Error saving backup data"))
    self.uistate.set_busy_cursor(False)
    self.uistate.progress.hide()
def __backup(self):
    """
    Write the database to a timestamped Gramps XML backup file in
    the configured backup directory.
    """
    from gramps.plugins.export.exportxml import XmlWriter
    backup_dir = config.get('database.backup-path')
    use_compression = config.get('database.compress-backup')
    writer = XmlWriter(self.dbstate.db, self.user, strip_photos=0,
                       compress=use_compression)
    stamp = '{0:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
    backup_name = "%s-%s.gramps" % (self.dbstate.db.get_dbname(), stamp)
    writer.write(os.path.join(backup_dir, backup_name))
def reports_clicked(self, obj):
    """
    Open the Reports plugin dialog (ignored if already open).
    """
    try:
        ReportPluginDialog(self.dbstate, self.uistate, [])
    except WindowActiveError:
        pass
def tools_clicked(self, obj):
    """
    Open the Tools plugin dialog (ignored if already open).
    """
    try:
        ToolPluginDialog(self.dbstate, self.uistate, [])
    except WindowActiveError:
        pass
def clipboard(self, obj):
    """
    Open the Gramps Clipboard window (ignored if already open).
    """
    from .clipboard import ClipboardWindow
    try:
        ClipboardWindow(self.dbstate, self.uistate)
    except WindowActiveError:
        pass
def config_view(self, obj):
    """
    Open the configuration dialog of the currently active view.
    """
    self.active_page.configure()
def undo(self, obj):
    """
    Undo the last database transaction, with a busy cursor shown.
    """
    self.uistate.set_busy_cursor(True)
    self.dbstate.db.undo()
    self.uistate.set_busy_cursor(False)
def redo(self, obj):
    """
    Redo the last undone database transaction, with a busy cursor.
    """
    self.uistate.set_busy_cursor(True)
    self.dbstate.db.redo()
    self.uistate.set_busy_cursor(False)
def undo_history(self, obj):
    """
    Open the Undo History window (ignored if already open).
    """
    try:
        self.undo_history_window = UndoHistory(self.dbstate, self.uistate)
    except WindowActiveError:
        pass
def export_data(self, obj):
    """
    Start the Export Assistant for the open family tree.
    """
    if not self.dbstate.is_open():
        return
    from .plug.export import ExportAssistant
    try:
        ExportAssistant(self.dbstate, self.uistate)
    except WindowActiveError:
        pass
def __rebuild_report_and_tool_menus(self):
    """
    Rebuild the Tools and Reports menus after plugin registrations
    changed, then refresh the relationship-calculator class.
    """
    self.__build_tools_menu(self._pmgr.get_reg_tools())
    self.__build_report_menu(self._pmgr.get_reg_reports())
    self.uistate.set_relationship_class()
def __build_tools_menu(self, tool_menu_list):
    """
    Replace the Tools menu with one generated from *tool_menu_list*.
    """
    state_uimanager = self.uistate.uimanager
    if self.toolactions:
        # Drop the previously merged menu before rebuilding.
        state_uimanager.remove_action_group(self.toolactions)
        state_uimanager.remove_ui(self.tool_menu_ui_id)
    self.toolactions = Gtk.ActionGroup(name='ToolWindow')
    uidef, actions = self.build_plugin_menu(
        'ToolsMenu', tool_menu_list, tool.tool_categories,
        make_plugin_callback)
    self.toolactions.add_actions(actions)
    self.tool_menu_ui_id = state_uimanager.add_ui_from_string(uidef)
    self.uimanager.insert_action_group(self.toolactions, 1)
    state_uimanager.ensure_update()
def __build_report_menu(self, report_menu_list):
    """
    Replace the Reports menu with one generated from *report_menu_list*.
    """
    state_uimanager = self.uistate.uimanager
    if self.reportactions:
        # Drop the previously merged menu before rebuilding.
        state_uimanager.remove_action_group(self.reportactions)
        state_uimanager.remove_ui(self.report_menu_ui_id)
    self.reportactions = Gtk.ActionGroup(name='ReportWindow')
    udef, actions = self.build_plugin_menu(
        'ReportsMenu', report_menu_list, standalone_categories,
        make_plugin_callback)
    self.reportactions.add_actions(actions)
    self.report_menu_ui_id = state_uimanager.add_ui_from_string(udef)
    self.uimanager.insert_action_group(self.reportactions, 1)
    state_uimanager.ensure_update()
def build_plugin_menu(self, text, item_list, categories, func):
    """
    Build an XML menu description and matching action list from plugin
    registration data.

    :param text: name of the menu to populate (e.g. 'ReportsMenu')
    :param item_list: iterable of plugin data objects
    :param categories: mapping of category id -> (name, translated name)
    :param func: factory producing the activation callback for a plugin
    :returns: tuple (ui definition XML string, list of action tuples)
    """
    actions = []
    ofile = StringIO()
    ofile.write('<ui><menubar name="MenuBar"><menu action="%s">'
                '<placeholder name="%s">' % (text, 'P_'+ text))

    def write_category(action_name, label, pdatas):
        """Emit one category submenu and its plugin items (shared by the
        supported and unsupported branches below)."""
        ofile.write('<menu action="%s">' % action_name)
        actions.append((action_name, None, label))
        for pdata in sorted(pdatas, key=lambda x: x.name):
            item_key = pdata.id.replace(' ', '-')
            ofile.write('<menuitem action="%s"/>' % item_key)
            actions.append((item_key, None, ("%s...") % pdata.name,
                            None, None,
                            func(pdata, self.dbstate, self.uistate)))
        ofile.write('</menu>')

    # Group the plugins by category; unsupported ones get their own bucket.
    # (An unused throwaway Gtk.Menu was also built here originally; removed.)
    hash_data = defaultdict(list)
    for pdata in item_list:
        if not pdata.supported:
            category = _UNSUPPORTED
        else:
            category = categories[pdata.category]
        hash_data[category].append(pdata)
    # Sort categories, skipping the unsupported
    for key in sorted(item for item in hash_data if item != _UNSUPPORTED):
        write_category(key[0].replace(' ', '-'), key[1], hash_data[key])
    # If there are any unsupported items we add separator
    # and the unsupported category at the end of the menu
    if _UNSUPPORTED in hash_data:
        ofile.write('<separator/>')
        write_category(_UNSUPPORTED[0], _UNSUPPORTED[1],
                       hash_data[_UNSUPPORTED])
    ofile.write('</placeholder></menu></menubar></ui>')
    return (ofile.getvalue(), actions)
def display_about_box(self, obj):
    """Show the Gramps About dialog modally, then dispose of it."""
    dialog = GrampsAboutDialog(self.uistate.window)
    dialog.run()
    dialog.destroy()
def get_available_views(self):
    """
    Query the registered views and determine which to show, in order.

    :returns: a list of lists of (plugin data, view class) tuples, one
        inner list per category
    """
    pmgr = GuiPluginManager.get_instance()
    view_list = pmgr.get_reg_views()
    viewstoshow = defaultdict(list)
    for pdata in view_list:
        mod = pmgr.load_plugin(pdata)
        if not mod or not hasattr(mod, pdata.viewclass):
            #import of plugin failed
            try:
                lasterror = pmgr.get_fail_list()[-1][1][1]
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt
                # is no longer swallowed here.
                lasterror = '*** No error found, '
                lasterror += 'probably error in gpr.py file ***'
            ErrorDialog(
                _('Failed Loading View'),
                _('The view %(name)s did not load and reported an error.'
                  '\n\n%(error_msg)s\n\n'
                  'If you are unable to fix the fault yourself then you '
                  'can submit a bug at %(gramps_bugtracker_url)s '
                  'or contact the view author (%(firstauthoremail)s).\n\n'
                  'If you do not want Gramps to try and load this view '
                  'again, you can hide it by using the Plugin Manager '
                  'on the Help menu.'
                 ) % {'name': pdata.name,
                      'gramps_bugtracker_url': URL_BUGHOME,
                      'firstauthoremail': pdata.authors_email[0]
                      if pdata.authors_email else '...',
                      'error_msg': lasterror},
                parent=self.uistate.window)
            continue
    viewclass = getattr(mod, pdata.viewclass)
    # pdata.category is (string, trans-string):
    if pdata.order == START:
        viewstoshow[pdata.category[0]].insert(0, (pdata, viewclass))
    else:
        viewstoshow[pdata.category[0]].append((pdata, viewclass))
    # First, get those in order defined, if exists:
    resultorder = [viewstoshow[cat]
                   for cat in config.get("interface.view-categories")
                   if cat in viewstoshow]
    # Next, get the rest in some order:
    resultorder.extend(viewstoshow[cat]
                       for cat in sorted(viewstoshow.keys())
                       if viewstoshow[cat] not in resultorder)
    return resultorder
def key_bindings(obj):
    """Open the wiki page describing the key bindings."""
    display_help(webpage=WIKI_HELP_PAGE_KEY)
def manual_activate(obj):
    """Open the GRAMPS user manual wiki page."""
    display_help(webpage=WIKI_HELP_PAGE_MAN)
def report_bug_activate(obj):
    """Open the bug tracker web site in a browser."""
    display_url(URL_BUGTRACKER)
def home_page_activate(obj):
    """Open the GRAMPS home page in a browser."""
    display_url(URL_HOMEPAGE)
def mailing_lists_activate(obj):
    """Open the mailing-list web page in a browser."""
    display_url(URL_MAILINGLIST)
def extra_plugins_activate(obj):
    """Open the wiki page listing third-party plugins."""
    display_url(URL_WIKISTRING+WIKI_EXTRAPLUGINS)
def faq_activate(obj):
    """Open the FAQ wiki page."""
    display_help(webpage=WIKI_HELP_PAGE_FAQ)
def run_plugin(pdata, dbstate, uistate):
    """
    Load the plugin described by *pdata* and run it.

    A REPORT plugin is launched through report(); anything else is
    started as a tool. On load failure an error dialog is shown and
    nothing is run.
    """
    pmgr = GuiPluginManager.get_instance()
    mod = pmgr.load_plugin(pdata)
    if not mod:
        # Import of the plugin failed: surface the last recorded error.
        fail_list = pmgr.get_fail_list()
        error_msg = fail_list[-1][1][1] if fail_list else "(no error message)"
        ErrorDialog(
            _('Failed Loading Plugin'),
            _('The plugin %(name)s did not load and reported an error.\n\n'
              '%(error_msg)s\n\n'
              'If you are unable to fix the fault yourself then you can '
              'submit a bug at %(gramps_bugtracker_url)s or contact '
              'the plugin author (%(firstauthoremail)s).\n\n'
              'If you do not want Gramps to try and load this plugin again, '
              'you can hide it by using the Plugin Manager on the '
              'Help menu.') % {'name' : pdata.name,
                               'gramps_bugtracker_url' : URL_BUGHOME,
                               'firstauthoremail' : pdata.authors_email[0]
                                                    if pdata.authors_email
                                                    else '...',
                               'error_msg' : error_msg},
            parent=uistate.window)
        return
    if pdata.ptype == REPORT:
        report(dbstate, uistate, uistate.get_active('Person'),
               getattr(mod, pdata.reportclass),
               getattr(mod, pdata.optionclass),
               pdata.name, pdata.id,
               pdata.category, pdata.require_active)
    else:
        tool.gui_tool(dbstate=dbstate, user=User(uistate=uistate),
                      tool_class=getattr(mod, pdata.toolclass),
                      options_class=getattr(mod, pdata.optionclass),
                      translated_name=pdata.name,
                      name=pdata.id,
                      category=pdata.category,
                      callback=dbstate.db.request_rebuild)
def make_plugin_callback(pdata, dbstate, uistate):
    """
    Return a menu-activation callback that runs the given plugin.
    """
    def activate(_widget):
        # The widget argument supplied by the menu item is ignored.
        return run_plugin(pdata, dbstate, uistate)
    return activate
def views_to_show(views, use_last=True):
    """
    Decide which category/view should be selected at startup.

    :param views: list of categories, each a list of (pdata, view class)
    :param use_last: restore the selection saved in the configuration
    :returns: tuple (current category index, current view index within
        that category, list of default view index per category)
    """
    current_cat = 0
    current_cat_view = 0
    default_cat_views = [0] * len(views)
    if use_last:
        current_page_id = config.get('preferences.last-view')
        default_page_ids = config.get('preferences.last-views')
        found = False
        for cat_index, cat_views in enumerate(views):
            for view_index, (pdata, _page_def) in enumerate(cat_views):
                if not found and pdata.id == current_page_id:
                    # This is the globally last-used view.
                    current_cat = cat_index
                    current_cat_view = view_index
                    default_cat_views[cat_index] = view_index
                    found = True
                    break
                if pdata.id in default_page_ids:
                    # Last-used view within this category.
                    default_cat_views[cat_index] = view_index
        if not found:
            current_cat = 0
            current_cat_view = 0
    return current_cat, current_cat_view, default_cat_views
class QuickBackup(ManagedWindow): # TODO move this class into its own module
    """Modal dialog that writes a quick Gramps XML (or .gpkg) backup."""
    def __init__(self, dbstate, uistate, user):
        """
        Make a quick XML back with or without media.

        :param dbstate: database state; the open database is backed up
        :param uistate: display state; provides the parent window
        :param user: user-interaction object handed to the export writers
        """
        self.dbstate = dbstate
        self.user = user
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        window = Gtk.Dialog('',
                            self.uistate.window,
                            Gtk.DialogFlags.DESTROY_WITH_PARENT, None)
        self.set_window(window, None, _("Gramps XML Backup"))
        self.setup_configs('interface.quick-backup', 500, 150)
        # The button objects are owned by the dialog; the locals are unused.
        close_button = window.add_button(_('_Close'),
                                         Gtk.ResponseType.CLOSE)
        ok_button = window.add_button(_('_OK'),
                                      Gtk.ResponseType.APPLY)
        vbox = window.get_content_area()
        # --- Row 1: backup directory path + file-chooser button ---
        hbox = Gtk.Box()
        label = Gtk.Label(label=_("Path:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        path_entry = Gtk.Entry()
        dirtext = config.get('paths.quick-backup-directory')
        path_entry.set_text(dirtext)
        hbox.pack_start(path_entry, True, True, 0)
        file_entry = Gtk.Entry()
        button = Gtk.Button()
        button.connect("clicked",
                       lambda widget:
                       self.select_backup_path(widget, path_entry))
        image = Gtk.Image()
        image.set_from_icon_name('document-open', Gtk.IconSize.BUTTON)
        image.show()
        button.add(image)
        hbox.pack_end(button, False, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        # --- Row 2: backup file name, pre-filled from the configured
        #     template with the database name and current timestamp ---
        hbox = Gtk.Box()
        label = Gtk.Label(label=_("File:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        struct_time = time.localtime()
        file_entry.set_text(
            config.get('paths.quick-backup-filename'
                      ) % {"filename": self.dbstate.db.get_dbname(),
                           "year": struct_time.tm_year,
                           "month": struct_time.tm_mon,
                           "day": struct_time.tm_mday,
                           "hour": struct_time.tm_hour,
                           "minutes": struct_time.tm_min,
                           "seconds": struct_time.tm_sec,
                           "extension": "gpkg"})
        hbox.pack_end(file_entry, True, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        # --- Row 3: include/exclude media radio buttons, with the total
        #     media size (in MB) shown on the Include label ---
        hbox = Gtk.Box()
        fbytes = 0
        mbytes = "0"
        for media in self.dbstate.db.iter_media():
            fullname = media_path_full(self.dbstate.db, media.get_path())
            try:
                fbytes += posixpath.getsize(fullname)
                length = len(str(fbytes))
                if fbytes <= 999999:
                    mbytes = "< 1"
                else:
                    # Crude byte->MB conversion by dropping six digits.
                    mbytes = str(fbytes)[:(length-6)]
            except OSError:
                # Missing/unreadable media file: just skip its size.
                pass
        label = Gtk.Label(label=_("Media:"))
        label.set_justify(Gtk.Justification.LEFT)
        label.set_size_request(90, -1)
        label.set_halign(Gtk.Align.START)
        hbox.pack_start(label, False, True, 0)
        include = Gtk.RadioButton.new_with_mnemonic_from_widget(
            None, "%s (%s %s)" % (_("Include"),
                                  mbytes, _("Megabyte|MB")))
        exclude = Gtk.RadioButton.new_with_mnemonic_from_widget(include,
                                                                _("Exclude"))
        # Toggling media inclusion also switches the file extension.
        include.connect("toggled", lambda widget: self.media_toggle(widget,
                                                                    file_entry))
        include_mode = config.get('preferences.quick-backup-include-mode')
        if include_mode:
            include.set_active(True)
        else:
            exclude.set_active(True)
        hbox.pack_start(include, False, True, 0)
        hbox.pack_end(exclude, False, True, 0)
        vbox.pack_start(hbox, False, True, 0)
        self.show()
        # Run the dialog modally and act on the response.
        dbackup = window.run()
        if dbackup == Gtk.ResponseType.APPLY:
            # if file exists, ask if overwrite; else abort
            basefile = file_entry.get_text()
            basefile = basefile.replace("/", r"-")
            filename = os.path.join(path_entry.get_text(), basefile)
            if os.path.exists(filename):
                question = QuestionDialog2(
                    _("Backup file already exists! Overwrite?"),
                    _("The file '%s' exists.") % filename,
                    _("Proceed and overwrite"),
                    _("Cancel the backup"),
                    parent=self.window)
                yes_no = question.run()
                if not yes_no:
                    # User cancelled: still remember a changed directory.
                    current_dir = path_entry.get_text()
                    if current_dir != dirtext:
                        config.set('paths.quick-backup-directory', current_dir)
                    self.close()
                    return
            position = self.window.get_position() # crock
            window.hide()
            self.window.move(position[0], position[1])
            self.uistate.set_busy_cursor(True)
            self.uistate.pulse_progressbar(0)
            self.uistate.progress.show()
            self.uistate.push_message(self.dbstate, _("Making backup..."))
            if include.get_active():
                # With media: write a .gpkg package.
                from gramps.plugins.export.exportpkg import PackageWriter
                writer = PackageWriter(self.dbstate.db, filename, self.user)
                writer.export()
            else:
                # Without media: write plain compressed Gramps XML.
                from gramps.plugins.export.exportxml import XmlWriter
                writer = XmlWriter(self.dbstate.db, self.user,
                                   strip_photos=0, compress=1)
                writer.write(filename)
            self.uistate.set_busy_cursor(False)
            self.uistate.progress.hide()
            self.uistate.push_message(self.dbstate,
                                      _("Backup saved to '%s'") % filename)
            config.set('paths.quick-backup-directory', path_entry.get_text())
        else:
            self.uistate.push_message(self.dbstate, _("Backup aborted"))
        self.close()
    def select_backup_path(self, widget, path_entry):
        """
        Choose a backup folder. Make sure there is one highlighted in
        right pane, otherwise FileChooserDialog will hang.

        :param widget: the button that triggered the chooser (unused)
        :param path_entry: entry widget updated with the chosen folder
        """
        fdialog = Gtk.FileChooserDialog(
            title=_("Select backup directory"),
            parent=self.window,
            action=Gtk.FileChooserAction.SELECT_FOLDER,
            buttons=(_('_Cancel'),
                     Gtk.ResponseType.CANCEL,
                     _('_Apply'),
                     Gtk.ResponseType.OK))
        mpath = path_entry.get_text()
        if not mpath:
            mpath = HOME_DIR
        fdialog.set_current_folder(os.path.dirname(mpath))
        # Appending "." pre-selects the folder itself in the right pane.
        fdialog.set_filename(os.path.join(mpath, "."))
        status = fdialog.run()
        if status == Gtk.ResponseType.OK:
            filename = fdialog.get_filename()
            if filename:
                path_entry.set_text(filename)
        fdialog.destroy()
        return True
    def media_toggle(self, widget, file_entry):
        """
        Toggles media include values in the quick backup dialog.

        Persists the choice and swaps the filename extension between
        .gpkg (with media) and .gramps (without).
        """
        include = widget.get_active()
        config.set('preferences.quick-backup-include-mode', include)
        extension = "gpkg" if include else "gramps"
        filename = file_entry.get_text()
        if "." in filename:
            base, ext = filename.rsplit(".", 1)
            file_entry.set_text("%s.%s" % (base, extension))
        else:
            file_entry.set_text("%s.%s" % (filename, extension))
| beernarrd/gramps | gramps/gui/viewmanager.py | Python | gpl-2.0 | 69,222 | [
"Brian"
] | 92bbe76f0d1816501e89a28861c283f7e3f5921769f408cf595719244958a3fe |
"""
Generating lines of code.
"""
from functools import partial, wraps
import sys
from typing import Collection, Iterator, List, Optional, Set, Union
from black.nodes import WHITESPACE, RARROW, STATEMENT, STANDALONE_COMMENT
from black.nodes import ASSIGNMENTS, OPENING_BRACKETS, CLOSING_BRACKETS
from black.nodes import Visitor, syms, is_arith_like, ensure_visible
from black.nodes import is_docstring, is_empty_tuple, is_one_tuple, is_one_tuple_between
from black.nodes import is_name_token, is_lpar_token, is_rpar_token
from black.nodes import is_walrus_assignment, is_yield, is_vararg, is_multiline_string
from black.nodes import is_stub_suite, is_stub_body, is_atom_with_invisible_parens
from black.nodes import wrap_in_parentheses
from black.brackets import max_delimiter_priority_in_atom
from black.brackets import DOT_PRIORITY, COMMA_PRIORITY
from black.lines import Line, line_to_string, is_line_short_enough
from black.lines import can_omit_invisible_parens, can_be_split, append_leaves
from black.comments import generate_comments, list_comments, FMT_OFF
from black.numerics import normalize_numeric_literal
from black.strings import get_string_prefix, fix_docstring
from black.strings import normalize_string_prefix, normalize_string_quotes
from black.trans import Transformer, CannotTransform, StringMerger, StringSplitter
from black.trans import StringParenWrapper, StringParenStripper, hug_power_op
from black.mode import Mode, Feature, Preview
from blib2to3.pytree import Node, Leaf
from blib2to3.pgen2 import token
# types
LeafID = int
LN = Union[Leaf, Node]
class CannotSplit(CannotTransform):
    """A readable split that fits the allotted line length is impossible.

    Subclasses CannotTransform so the generic transformer machinery can
    catch it alongside other transform failures.
    """
# This isn't a dataclass because @dataclass + Generic breaks mypyc.
# See also https://github.com/mypyc/mypyc/issues/827.
class LineGenerator(Visitor[Line]):
"""Generates reformatted Line objects. Empty lines are not emitted.
Note: destroys the tree it's visiting by mutating prefixes of its leaves
in ways that will no longer stringify to valid Python code on the tree.
"""
def __init__(self, mode: Mode) -> None:
    """Store the formatting *mode* and set up the first empty line."""
    self.mode = mode
    # Declared here for type checkers; actually created in __post_init__().
    self.current_line: Line
    self.__post_init__()
def line(self, indent: int = 0) -> Iterator[Line]:
    """Finish the line under construction and start a fresh one.

    An empty current line is never emitted; a requested *indent*
    change is folded into it in place instead.
    """
    if not self.current_line:
        # Nothing collected yet: just adjust the depth in place.
        self.current_line.depth += indent
        return
    finished = self.current_line
    self.current_line = Line(mode=self.mode, depth=finished.depth + indent)
    yield finished
def visit_default(self, node: LN) -> Iterator[Line]:
    """Default `visit_*()` implementation. Recurses to children of `node`.

    For leaves, this also routes any comments found in the prefix and
    applies string/number normalization before appending the leaf to
    the current line.
    """
    if isinstance(node, Leaf):
        any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
        for comment in generate_comments(node):
            if any_open_brackets:
                # any comment within brackets is subject to splitting
                self.current_line.append(comment)
            elif comment.type == token.COMMENT:
                # regular trailing comment
                self.current_line.append(comment)
                yield from self.line()
            else:
                # regular standalone comment
                yield from self.line()
                self.current_line.append(comment)
                yield from self.line()
        normalize_prefix(node, inside_brackets=any_open_brackets)
        # Optional string normalization (quotes and prefixes).
        if self.mode.string_normalization and node.type == token.STRING:
            node.value = normalize_string_prefix(node.value)
            node.value = normalize_string_quotes(node.value)
        if node.type == token.NUMBER:
            normalize_numeric_literal(node)
        if node.type not in WHITESPACE:
            self.current_line.append(node)
    yield from super().visit_default(node)
def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
    """Increase indentation level, maybe yield a line."""
    # In blib2to3 INDENT never holds comments.
    yield from self.line(+1)
    # The token itself still receives default handling.
    yield from self.visit_default(node)
def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
    """Decrease indentation level, maybe yield a line."""
    # The current line might still wait for trailing comments. At DEDENT time
    # there won't be any (they would be prefixes on the preceding NEWLINE).
    # Emit the line then.
    yield from self.line()
    # While DEDENT has no value, its prefix may contain standalone comments
    # that belong to the current indentation level. Get 'em.
    yield from self.visit_default(node)
    # Finally, emit the dedent.
    yield from self.line(-1)
def visit_stmt(
    self, node: Node, keywords: Set[str], parens: Set[str]
) -> Iterator[Line]:
    """Visit a compound or simple statement.

    Shared by `if`, `while`, `for`, `try`, `except`, `def`, `with`,
    `class`, `assert` and assignments. Each keyword NAME leaf listed
    in *keywords* starts a fresh output line; *parens* names the leaf
    values after which invisible parentheses are inserted.
    """
    normalize_invisible_parens(node, parens_after=parens)
    for part in node.children:
        # A statement keyword begins a new logical line.
        if is_name_token(part) and part.value in keywords:
            yield from self.line()
        yield from self.visit(part)
def visit_match_case(self, node: Node) -> Iterator[Line]:
    """Start a fresh line for a `match` or `case` statement, then recurse."""
    normalize_invisible_parens(node, parens_after=set())
    yield from self.line()
    for part in node.children:
        yield from self.visit(part)
def visit_suite(self, node: Node) -> Iterator[Line]:
    """Visit a suite, collapsing trivial stub suites in .pyi files."""
    if self.mode.is_pyi and is_stub_suite(node):
        # Stub files: skip the NEWLINE/INDENT and visit the body directly.
        yield from self.visit(node.children[2])
        return
    yield from self.visit_default(node)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
    """Visit a statement without nested statements."""
    prev_type: Optional[int] = None
    for child in node.children:
        # Wrap a bare arithmetic expression at the start of a (sub)statement
        # in invisible parens so it can be split when too long.
        if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):
            wrap_in_parentheses(node, child, visible=False)
        prev_type = child.type
    is_suite_like = node.parent and node.parent.type in STATEMENT
    if is_suite_like:
        if self.mode.is_pyi and is_stub_body(node):
            # Stub bodies (e.g. `...`) stay on the same line in .pyi files.
            yield from self.visit_default(node)
        else:
            # Indent the body one level relative to its parent statement.
            yield from self.line(+1)
            yield from self.visit_default(node)
            yield from self.line(-1)
    else:
        if (
            not self.mode.is_pyi
            or not node.parent
            or not is_stub_suite(node.parent)
        ):
            yield from self.line()
        yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
    """Visit `async def`, `async for`, `async with`."""
    yield from self.line()
    children = iter(node.children)
    for child in children:
        yield from self.visit(child)
        if child.type == token.ASYNC:
            # Stop right after the ASYNC keyword; the wrapped statement
            # is consumed below via the same (shared) iterator.
            break
    internal_stmt = next(children)
    for child in internal_stmt.children:
        yield from self.visit(child)
def visit_decorators(self, node: Node) -> Iterator[Line]:
    """Emit each decorator on its own line."""
    for decorator in node.children:
        # Finish any pending line before the decorator starts.
        yield from self.line()
        yield from self.visit(decorator)
def visit_power(self, node: Node) -> Iterator[Line]:
    """Parenthesize decimal number literals followed by an attribute
    trailer so the result stays syntactically valid and readable."""
    for leaf, successor in zip(node.children, node.children[1:]):
        if not isinstance(leaf, Leaf):
            continue
        text = leaf.value.lower()
        if (
            leaf.type == token.NUMBER
            and successor.type == syms.trailer
            # Ensure that we are in an attribute trailer
            and successor.children[0].type == token.DOT
            # It shouldn't wrap hexadecimal, binary and octal literals
            and not text.startswith(("0x", "0b", "0o"))
            # It shouldn't wrap complex literals
            and "j" not in text
        ):
            wrap_in_parentheses(node, leaf)
    yield from self.visit_default(node)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
    """Remove a semicolon and put the other statement on a separate line."""
    # The SEMI leaf itself is deliberately never appended to a line.
    yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
    """End of file. Process outstanding comments and end with a newline."""
    yield from self.visit_default(leaf)
    # Flush whatever is still buffered as the final line.
    yield from self.line()
def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
    """Emit a standalone comment, first flushing the pending line —
    unless we are inside brackets, where it stays attached so the
    bracket splitter can place it."""
    if self.current_line.bracket_tracker.any_open_brackets():
        yield from self.visit_default(leaf)
        return
    yield from self.line()
    yield from self.visit_default(leaf)
def visit_factor(self, node: Node) -> Iterator[Line]:
    """Force parentheses between a unary op and a binary power:

    -2 ** 8 -> -(2 ** 8)
    """
    _operator, operand = node.children
    is_binary_power = (
        operand.type == syms.power
        and len(operand.children) == 3
        and operand.children[1].type == token.DOUBLESTAR
    )
    if is_binary_power:
        index = operand.remove() or 0
        wrapped = Node(
            syms.atom,
            [Leaf(token.LPAR, "("), operand, Leaf(token.RPAR, ")")],
        )
        node.insert_child(index, wrapped)
    yield from self.visit_default(node)
    def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
        """Normalize docstrings: string prefix, indentation, quotes, and padding."""
        if is_docstring(leaf) and "\\\n" not in leaf.value:
            # We're ignoring docstrings with backslash newline escapes because changing
            # indentation of those changes the AST representation of the code.
            docstring = normalize_string_prefix(leaf.value)
            prefix = get_string_prefix(docstring)
            docstring = docstring[len(prefix) :]  # Remove the prefix
            quote_char = docstring[0]
            # A natural way to remove the outer quotes is to do:
            #   docstring = docstring.strip(quote_char)
            # but that breaks on """""x""" (which is '""x').
            # So we actually need to remove the first character and the next two
            # characters but only if they are the same as the first.
            quote_len = 1 if docstring[1] != quote_char else 3
            docstring = docstring[quote_len:-quote_len]
            # Remember whether the docstring was empty before normalization so we
            # can distinguish "" from whitespace-only content below.
            docstring_started_empty = not docstring

            if is_multiline_string(leaf):
                indent = " " * 4 * self.current_line.depth
                docstring = fix_docstring(docstring, indent)
            else:
                docstring = docstring.strip()

            if docstring:
                # Add some padding if the docstring starts / ends with a quote mark.
                if docstring[0] == quote_char:
                    docstring = " " + docstring
                if docstring[-1] == quote_char:
                    docstring += " "
                if docstring[-1] == "\\":
                    backslash_count = len(docstring) - len(docstring.rstrip("\\"))
                    if backslash_count % 2:
                        # Odd number of tailing backslashes, add some padding to
                        # avoid escaping the closing string quote.
                        docstring += " "
            elif not docstring_started_empty:
                # Whitespace-only docstring: keep one space so the quotes don't
                # collapse together.
                docstring = " "

            # We could enforce triple quotes at this point.
            quote = quote_char * quote_len
            leaf.value = prefix + quote + docstring + quote

        yield from self.visit_default(leaf)
def __post_init__(self) -> None:
"""You are in a twisty little maze of passages."""
self.current_line = Line(mode=self.mode)
v = self.visit_stmt
Ø: Set[str] = set()
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
self.visit_if_stmt = partial(
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
)
self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
self.visit_try_stmt = partial(
v, keywords={"try", "except", "else", "finally"}, parens=Ø
)
self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
self.visit_async_funcdef = self.visit_async_stmt
self.visit_decorated = self.visit_decorators
# PEP 634
self.visit_match_stmt = self.visit_match_case
self.visit_case_block = self.visit_match_case
def transform_line(
    line: Line, mode: Mode, features: Collection[Feature] = ()
) -> Iterator[Line]:
    """Transform a `line`, potentially splitting it into many lines.

    They should fit in the allotted `line_length` but might not be able to.

    `features` are syntactical features that may be used in the output.
    """
    if line.is_comment:
        yield line
        return

    line_str = line_to_string(line)

    # Shorthands used to construct the string transformers below.
    ll = mode.line_length
    sn = mode.string_normalization
    string_merge = StringMerger(ll, sn)
    string_paren_strip = StringParenStripper(ll, sn)
    string_split = StringSplitter(ll, sn)
    string_paren_wrap = StringParenWrapper(ll, sn)

    transformers: List[Transformer]
    # Fast path: the line already fits (or carries an unsplittable
    # `# type: ignore`) and nothing forces a split.
    if (
        not line.contains_uncollapsable_type_comments()
        and not line.should_split_rhs
        and not line.magic_trailing_comma
        and (
            is_line_short_enough(line, line_length=mode.line_length, line_str=line_str)
            or line.contains_unsplittable_type_ignore()
        )
        and not (line.inside_brackets and line.contains_standalone_comments())
    ):
        # Only apply basic string preprocessing, since lines shouldn't be split here.
        if Preview.string_processing in mode:
            transformers = [string_merge, string_paren_strip]
        else:
            transformers = []
    elif line.is_def:
        transformers = [left_hand_split]
    else:

        def _rhs(
            self: object, line: Line, features: Collection[Feature]
        ) -> Iterator[Line]:
            """Wraps calls to `right_hand_split`.

            The calls increasingly `omit` right-hand trailers (bracket pairs with
            content), meaning the trailers get glued together to split on another
            bracket pair instead.
            """
            for omit in generate_trailers_to_omit(line, mode.line_length):
                lines = list(
                    right_hand_split(line, mode.line_length, features, omit=omit)
                )
                # Note: this check is only able to figure out if the first line of the
                # *current* transformation fits in the line length. This is true only
                # for simple cases. All others require running more transforms via
                # `transform_line()`. This check doesn't know if those would succeed.
                if is_line_short_enough(lines[0], line_length=mode.line_length):
                    yield from lines
                    return

            # All splits failed, best effort split with no omits.
            # This mostly happens to multiline strings that are by definition
            # reported as not fitting a single line, as well as lines that contain
            # trailing commas (those have to be exploded).
            yield from right_hand_split(
                line, line_length=mode.line_length, features=features
            )

        # HACK: nested functions (like _rhs) compiled by mypyc don't retain their
        # __name__ attribute which is needed in `run_transformer` further down.
        # Unfortunately a nested class breaks mypyc too. So a class must be created
        # via type ... https://github.com/mypyc/mypyc/issues/884
        rhs = type("rhs", (), {"__call__": _rhs})()

        if Preview.string_processing in mode:
            if line.inside_brackets:
                transformers = [
                    string_merge,
                    string_paren_strip,
                    string_split,
                    delimiter_split,
                    standalone_comment_split,
                    string_paren_wrap,
                    rhs,
                ]
            else:
                transformers = [
                    string_merge,
                    string_paren_strip,
                    string_split,
                    string_paren_wrap,
                    rhs,
                ]
        else:
            if line.inside_brackets:
                transformers = [delimiter_split, standalone_comment_split, rhs]
            else:
                transformers = [rhs]
    # It's always safe to attempt hugging of power operations and pretty much every line
    # could match.
    transformers.append(hug_power_op)

    # Try each transformer in order; the first one that applies wins, and the
    # untouched line is yielded when none of them do.
    for transform in transformers:
        # We are accumulating lines in `result` because we might want to abort
        # mission and return the original line in the end, or attempt a different
        # split altogether.
        try:
            result = run_transformer(line, transform, mode, features, line_str=line_str)
        except CannotTransform:
            continue
        else:
            yield from result
            break
    else:
        yield line
def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]:
    """Split line into many lines, starting with the first matching bracket pair.

    Note: this usually looks weird, only use this for function definitions.
    Prefer RHS otherwise. This is why this function is not symmetrical with
    :func:`right_hand_split` which also handles optional parentheses.
    """
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    current_leaves = head_leaves
    matching_bracket: Optional[Leaf] = None
    # Single left-to-right pass: head up to and including the first opening
    # bracket, body until its matching closing bracket, tail afterwards.
    for leaf in line.leaves:
        if (
            current_leaves is body_leaves
            and leaf.type in CLOSING_BRACKETS
            and leaf.opening_bracket is matching_bracket
        ):
            # An empty body means the closing bracket still belongs to the head.
            current_leaves = tail_leaves if body_leaves else head_leaves
        current_leaves.append(leaf)
        if current_leaves is head_leaves:
            if leaf.type in OPENING_BRACKETS:
                matching_bracket = leaf
                current_leaves = body_leaves
    if not matching_bracket:
        raise CannotSplit("No brackets found")

    head = bracket_split_build_line(head_leaves, line, matching_bracket)
    body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
    tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
    bracket_split_succeeded_or_raise(head, body, tail)
    for result in (head, body, tail):
        if result:
            yield result
def right_hand_split(
    line: Line,
    line_length: int,
    features: Collection[Feature] = (),
    omit: Collection[LeafID] = (),
) -> Iterator[Line]:
    """Split line into many lines, starting with the last matching bracket pair.

    If the split was by optional parentheses, attempt splitting without them, too.
    `omit` is a collection of closing bracket IDs that shouldn't be considered for
    this split.

    Note: running this function modifies `bracket_depth` on the leaves of `line`.
    """
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    current_leaves = tail_leaves
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
    # Scan right to left: tail until the last eligible closing bracket, then
    # body until the matching opening bracket, then head.
    for leaf in reversed(line.leaves):
        if current_leaves is body_leaves:
            if leaf is opening_bracket:
                current_leaves = head_leaves if body_leaves else tail_leaves
        current_leaves.append(leaf)
        if current_leaves is tail_leaves:
            if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
                opening_bracket = leaf.opening_bracket
                closing_bracket = leaf
                current_leaves = body_leaves
    if not (opening_bracket and closing_bracket and head_leaves):
        # If there is no opening or closing_bracket that means the split failed and
        # all content is in the tail. Otherwise, if `head_leaves` are empty, it means
        # the matching `opening_bracket` wasn't available on `line` anymore.
        raise CannotSplit("No brackets found")

    # The leaves were collected while walking backwards, so restore order.
    tail_leaves.reverse()
    body_leaves.reverse()
    head_leaves.reverse()
    head = bracket_split_build_line(head_leaves, line, opening_bracket)
    body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
    tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
    bracket_split_succeeded_or_raise(head, body, tail)
    if (
        Feature.FORCE_OPTIONAL_PARENTHESES not in features
        # the opening bracket is an optional paren
        and opening_bracket.type == token.LPAR
        and not opening_bracket.value
        # the closing bracket is an optional paren
        and closing_bracket.type == token.RPAR
        and not closing_bracket.value
        # it's not an import (optional parens are the only thing we can split on
        # in this case; attempting a split without them is a waste of time)
        and not line.is_import
        # there are no standalone comments in the body
        and not body.contains_standalone_comments(0)
        # and we can actually remove the parens
        and can_omit_invisible_parens(body, line_length)
    ):
        omit = {id(closing_bracket), *omit}
        try:
            # Retry the split with this optional pair of parentheses omitted.
            yield from right_hand_split(line, line_length, features=features, omit=omit)
            return

        except CannotSplit as e:
            if not (
                can_be_split(body)
                or is_line_short_enough(body, line_length=line_length)
            ):
                raise CannotSplit(
                    "Splitting failed, body is still too long and can't be split."
                ) from e

            elif head.contains_multiline_strings() or tail.contains_multiline_strings():
                raise CannotSplit(
                    "The current optional pair of parentheses is bound to fail to"
                    " satisfy the splitting algorithm because the head or the tail"
                    " contains multiline strings which by definition never fit one"
                    " line."
                ) from e

    ensure_visible(opening_bracket)
    ensure_visible(closing_bracket)
    for result in (head, body, tail):
        if result:
            yield result
def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
    """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.

    Do nothing otherwise.

    A bracket split divides a line into `head` (up to and including the opening
    bracket), `body` (between the brackets) and `tail` (from the closing bracket
    on).  The split counts as failed when all content stayed in `head` (the
    stripped tail is empty) or when the body came out empty and the tail is so
    short that exploding the brackets isn't worth it.
    """
    if body:
        return

    tail_len = len(str(tail).strip())
    if tail_len == 0:
        raise CannotSplit("Splitting brackets produced the same line")

    if tail_len < 3:
        raise CannotSplit(
            f"Splitting brackets on an empty body to save {tail_len} characters is"
            " not worth it"
        )
def bracket_split_build_line(
    leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
) -> Line:
    """Return a new line with given `leaves` and respective comments from `original`.

    If `is_body` is True, the result line is one-indented inside brackets and as such
    has its first leaf's prefix normalized and a trailing comma added when expected.
    """
    result = Line(mode=original.mode, depth=original.depth)
    if is_body:
        result.inside_brackets = True
        result.depth += 1
        if leaves:
            # Since body is a new indent level, remove spurious leading whitespace.
            normalize_prefix(leaves[0], inside_brackets=True)
            # Ensure a trailing comma for imports and standalone function arguments, but
            # be careful not to add one after any comments or within type annotations.
            no_commas = (
                original.is_def
                and opening_bracket.value == "("
                and not any(leaf.type == token.COMMA for leaf in leaves)
                # In particular, don't add one within a parenthesized return annotation.
                # Unfortunately the indicator we're in a return annotation (RARROW) may
                # be defined directly in the parent node, the parent of the parent ...
                # and so on depending on how complex the return annotation is.
                # This isn't perfect and there's some false negatives but they are in
                # contexts were a comma is actually fine.
                and not any(
                    node.prev_sibling.type == RARROW
                    for node in (
                        leaves[0].parent,
                        getattr(leaves[0].parent, "parent", None),
                    )
                    if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf)
                )
            )

            if original.is_import or no_commas:
                # Walk backwards over trailing standalone comments to find the
                # spot where the trailing comma belongs.
                for i in range(len(leaves) - 1, -1, -1):
                    if leaves[i].type == STANDALONE_COMMENT:
                        continue

                    if leaves[i].type != token.COMMA:
                        new_comma = Leaf(token.COMMA, ",")
                        leaves.insert(i + 1, new_comma)
                    break

    # Populate the line
    for leaf in leaves:
        result.append(leaf, preformatted=True)
        for comment_after in original.comments_after(leaf):
            result.append(comment_after, preformatted=True)
    if is_body and should_split_line(result, opening_bracket):
        result.should_split_rhs = True
    return result
def dont_increase_indentation(split_func: Transformer) -> Transformer:
    """Normalize prefix of the first leaf in every line returned by `split_func`.

    This is a decorator over relevant split functions.
    """

    @wraps(split_func)
    def wrapped(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
        for split_line in split_func(line, features):
            normalize_prefix(split_line.leaves[0], inside_brackets=True)
            yield split_line

    return wrapped
@dont_increase_indentation
def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
    """Split according to delimiters of the highest priority.

    If the appropriate Features are given, the split will add trailing commas
    also in function signatures and calls that contain `*` and `**`.
    """
    try:
        last_leaf = line.leaves[-1]
    except IndexError:
        raise CannotSplit("Line empty") from None

    bt = line.bracket_tracker
    try:
        delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
    except ValueError:
        raise CannotSplit("No delimiters found") from None

    if delimiter_priority == DOT_PRIORITY:
        if bt.delimiter_count_with_priority(delimiter_priority) == 1:
            raise CannotSplit("Splitting a single attribute from its owner looks wrong")

    current_line = Line(
        mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
    )
    lowest_depth = sys.maxsize
    trailing_comma_safe = True

    def append_to_line(leaf: Leaf) -> Iterator[Line]:
        """Append `leaf` to current line or to new line if appending impossible."""
        nonlocal current_line
        try:
            current_line.append_safe(leaf, preformatted=True)
        except ValueError:
            yield current_line
            current_line = Line(
                mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
            )
            current_line.append(leaf)

    for leaf in line.leaves:
        yield from append_to_line(leaf)

        for comment_after in line.comments_after(leaf):
            yield from append_to_line(comment_after)

        lowest_depth = min(lowest_depth, leaf.bracket_depth)
        if leaf.bracket_depth == lowest_depth:
            # A top-level vararg (`*`/`**`) makes a trailing comma unsafe unless
            # the corresponding Feature says the target versions support it.
            if is_vararg(leaf, within={syms.typedargslist}):
                trailing_comma_safe = (
                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
                )
            elif is_vararg(leaf, within={syms.arglist, syms.argument}):
                trailing_comma_safe = (
                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
                )

        leaf_priority = bt.delimiters.get(id(leaf))
        if leaf_priority == delimiter_priority:
            # Break the line after every delimiter of the chosen priority.
            yield current_line

            current_line = Line(
                mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
            )
    if current_line:
        if (
            trailing_comma_safe
            and delimiter_priority == COMMA_PRIORITY
            and current_line.leaves[-1].type != token.COMMA
            and current_line.leaves[-1].type != STANDALONE_COMMENT
        ):
            new_comma = Leaf(token.COMMA, ",")
            current_line.append(new_comma)
        yield current_line
@dont_increase_indentation
def standalone_comment_split(
    line: Line, features: Collection[Feature] = ()
) -> Iterator[Line]:
    """Split standalone comments from the rest of the line.

    Raises :exc:`CannotSplit` when `line` contains no standalone comments.
    """
    if not line.contains_standalone_comments(0):
        raise CannotSplit("Line does not have any standalone comments")

    current_line = Line(
        mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
    )

    def append_to_line(leaf: Leaf) -> Iterator[Line]:
        """Append `leaf` to current line or to new line if appending impossible."""
        nonlocal current_line
        try:
            current_line.append_safe(leaf, preformatted=True)
        except ValueError:
            yield current_line
            # `mode` passed by keyword for consistency with every other Line
            # construction in this module (was positional here).
            current_line = Line(
                mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
            )
            current_line.append(leaf)

    for leaf in line.leaves:
        yield from append_to_line(leaf)

        for comment_after in line.comments_after(leaf):
            yield from append_to_line(comment_after)

    if current_line:
        yield current_line
def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
    """Normalize the whitespace prefix of `leaf` in place.

    Outside brackets, keep the author's extra newlines (unless the pre-comment
    part of the prefix contains a backslash continuation).  Inside brackets, or
    when a backslash is present, drop the prefix entirely.

    Note: don't use backslashes for formatting or you'll lose your voting rights.
    """
    if not inside_brackets:
        before_comment, *comment_parts = leaf.prefix.split("#")
        if "\\" not in before_comment:
            trailing = comment_parts[-1] if comment_parts else before_comment
            newline_count = trailing.count("\n")
            if comment_parts:
                # One newline terminates the comment's own line; don't count it.
                newline_count -= 1
            leaf.prefix = "\n" * newline_count
            return

    leaf.prefix = ""
def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
    """Make existing optional parentheses invisible or create new ones.

    `parens_after` is a set of string leaf values immediately after which parens
    should be put.

    Standardizes on visible parentheses for single-element tuples, and keeps
    existing visible parentheses for other tuples and generator expressions.
    """
    for pc in list_comments(node.prefix, is_endmarker=False):
        if pc.value in FMT_OFF:
            # This `node` has a prefix with `# fmt: off`, don't mess with parens.
            return
    check_lpar = False
    # Iterate over a copy: wrap_in_parentheses / insert_child mutate children.
    for index, child in enumerate(list(node.children)):
        # Fixes a bug where invisible parens are not properly stripped from
        # assignment statements that contain type annotations.
        if isinstance(child, Node) and child.type == syms.annassign:
            normalize_invisible_parens(child, parens_after=parens_after)

        # Add parentheses around long tuple unpacking in assignments.
        if (
            index == 0
            and isinstance(child, Node)
            and child.type == syms.testlist_star_expr
        ):
            check_lpar = True

        if check_lpar:
            if child.type == syms.atom:
                if maybe_make_parens_invisible_in_atom(child, parent=node):
                    wrap_in_parentheses(node, child, visible=False)
            elif is_one_tuple(child):
                wrap_in_parentheses(node, child, visible=True)
            elif node.type == syms.import_from:
                # "import from" nodes store parentheses directly as part of
                # the statement
                if is_lpar_token(child):
                    assert is_rpar_token(node.children[-1])
                    # make parentheses invisible
                    child.value = ""
                    node.children[-1].value = ""
                elif child.type != token.STAR:
                    # insert invisible parentheses
                    node.insert_child(index, Leaf(token.LPAR, ""))
                    node.append_child(Leaf(token.RPAR, ""))
                break

            elif not (isinstance(child, Leaf) and is_multiline_string(child)):
                wrap_in_parentheses(node, child, visible=False)

        check_lpar = isinstance(child, Leaf) and child.value in parens_after
def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
    """If it's safe, make the parens in the atom `node` invisible, recursively.

    Additionally, remove repeated, adjacent invisible parens from the atom `node`
    as they are redundant.

    Returns whether the node should itself be wrapped in invisible parentheses.
    """
    # Parens stay as-is for empty/one-tuples, bare yields outside expression
    # statements, and atoms holding comma-level delimiters.
    if (
        node.type != syms.atom
        or is_empty_tuple(node)
        or is_one_tuple(node)
        or (is_yield(node) and parent.type != syms.expr_stmt)
        or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
    ):
        return False

    if is_walrus_assignment(node):
        if parent.type in [
            syms.annassign,
            syms.expr_stmt,
            syms.assert_stmt,
            syms.return_stmt,
            # these ones aren't useful to end users, but they do please fuzzers
            syms.for_stmt,
            syms.del_stmt,
        ]:
            return False

    first = node.children[0]
    last = node.children[-1]
    if is_lpar_token(first) and is_rpar_token(last):
        middle = node.children[1]
        # make parentheses invisible
        first.value = ""
        last.value = ""
        maybe_make_parens_invisible_in_atom(middle, parent=parent)

        if is_atom_with_invisible_parens(middle):
            # Strip the invisible parens from `middle` by replacing
            # it with the child in-between the invisible parens
            middle.replace(middle.children[1])

        return False

    return True
def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
    """Should `line` be immediately split with `delimiter_split()` after RHS?"""

    if not (opening_bracket.parent and opening_bracket.value in "[{("):
        return False

    # We're essentially checking if the body is delimited by commas and there's more
    # than one of them (we're excluding the trailing comma and if the delimiter priority
    # is still commas, that means there's more).
    exclude = set()
    trailing_comma = False
    try:
        last_leaf = line.leaves[-1]
        if last_leaf.type == token.COMMA:
            trailing_comma = True
            exclude.add(id(last_leaf))
        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
    except (IndexError, ValueError):
        # Empty line or no delimiters at all: nothing to split on.
        return False

    return max_priority == COMMA_PRIORITY and (
        (line.mode.magic_trailing_comma and trailing_comma)
        # always explode imports
        or opening_bracket.parent.type in {syms.atom, syms.import_from}
    )
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
    """Generate sets of closing bracket IDs that should be omitted in a RHS.

    Brackets can be omitted if the entire trailer up to and including
    a preceding closing bracket fits in one line.

    Yielded sets are cumulative (contain results of previous yields, too). First
    set is empty, unless the line should explode, in which case bracket pairs until
    the one that needs to explode are omitted.
    """

    omit: Set[LeafID] = set()
    if not line.magic_trailing_comma:
        yield omit

    length = 4 * line.depth
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
    inner_brackets: Set[LeafID] = set()
    # Walk the line right to left, accumulating rendered length; stop as soon
    # as the accumulated trailer no longer fits.
    for index, leaf, leaf_length in line.enumerate_with_length(reversed=True):
        length += leaf_length
        if length > line_length:
            break

        has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
        if leaf.type == STANDALONE_COMMENT or has_inline_comment:
            break

        if opening_bracket:
            if leaf is opening_bracket:
                opening_bracket = None

            elif leaf.type in CLOSING_BRACKETS:
                prev = line.leaves[index - 1] if index > 0 else None
                if (
                    prev
                    and prev.type == token.COMMA
                    and leaf.opening_bracket is not None
                    and not is_one_tuple_between(
                        leaf.opening_bracket, leaf, line.leaves
                    )
                ):
                    # Never omit bracket pairs with trailing commas.
                    # We need to explode on those.
                    break

                inner_brackets.add(id(leaf))
        elif leaf.type in CLOSING_BRACKETS:
            prev = line.leaves[index - 1] if index > 0 else None
            if prev and prev.type in OPENING_BRACKETS:
                # Empty brackets would fail a split so treat them as "inner"
                # brackets (e.g. only add them to the `omit` set if another
                # pair of brackets was good enough.
                inner_brackets.add(id(leaf))
                continue

            if closing_bracket:
                # Commit the previously seen pair (and any inner pairs) before
                # considering this one.
                omit.add(id(closing_bracket))
                omit.update(inner_brackets)
                inner_brackets.clear()
                yield omit

            if (
                prev
                and prev.type == token.COMMA
                and leaf.opening_bracket is not None
                and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves)
            ):
                # Never omit bracket pairs with trailing commas.
                # We need to explode on those.
                break

            if leaf.value:
                opening_bracket = leaf.opening_bracket
                closing_bracket = leaf
def run_transformer(
    line: Line,
    transform: Transformer,
    mode: Mode,
    features: Collection[Feature],
    *,
    line_str: str = "",
) -> List[Line]:
    """Apply `transform` to `line`, recursively re-transforming every produced line.

    Propagates CannotTransform from `transform` when it doesn't apply.  For an
    "rhs" transform on a line whose brackets are all invisible, a second pass is
    attempted with Feature.FORCE_OPTIONAL_PARENTHESES; that result wins when all
    of its lines fit the configured line length.
    """
    if not line_str:
        line_str = line_to_string(line)
    result: List[Line] = []
    for transformed_line in transform(line, features):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")

        result.extend(transform_line(transformed_line, mode=mode, features=features))

    # Conditions under which the first result is returned as-is (no second
    # opinion with forced optional parentheses is needed or possible).
    if (
        transform.__class__.__name__ != "rhs"
        or not line.bracket_tracker.invisible
        or any(bracket.value for bracket in line.bracket_tracker.invisible)
        or line.contains_multiline_strings()
        or result[0].contains_uncollapsable_type_comments()
        or result[0].contains_unsplittable_type_ignore()
        or is_line_short_enough(result[0], line_length=mode.line_length)
        # If any leaves have no parents (which _can_ occur since
        # `transform(line)` potentially destroys the line's underlying node
        # structure), then we can't proceed. Doing so would cause the below
        # call to `append_leaves()` to fail.
        or any(leaf.parent is None for leaf in line.leaves)
    ):
        return result

    line_copy = line.clone()
    append_leaves(line_copy, line, line.leaves)
    features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES}
    second_opinion = run_transformer(
        line_copy, transform, mode, features_fop, line_str=line_str
    )
    if all(
        is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion
    ):
        result = second_opinion
    return result
| psf/black | src/black/linegen.py | Python | mit | 42,162 | [
"VisIt"
] | f4b1a5395b8d9fbd938551ad4d9e2672fa3756b93e44476947de1cb037050050 |
import fnmatch
import os
import posixpath
import re
import sys
from ast import AST, Attribute, iter_fields, Name
# Py2/Py3 compatibility: tuple of "string-like" types for isinstance checks.
if sys.version_info[0] == 3:
    string_types = (str,)
else:
    string_types = (basestring,) # noqa
# Directory name patterns (fnmatch syntax) that are never descended into.
IGNORED_DIRS = [
    '*.egg-info',
    '.bzr',
    '.cache',
    '.git',
    '.hg',
    '.idea',
    '.svn',
    '.tox',
    '__pycache__',
    '_build',
    'bower_components',
    'CVS',
    'htmlcov',
    'node_modules',
    'var',
    'venv*',
    'static',
]
# Regexps matched against the POSIX-style path of a directory; matches are pruned.
IGNORED_PATH_REGEXPS = [
    '^.*shuup/admin/static/shuup_admin.*',  # autogenerated
    '^.*shuup/xtheme/static/*',  # autogenerated
    '^build/',  # built files
]
# File name patterns (fnmatch syntax) to skip entirely (binaries, build
# artifacts, minified assets, ...).
IGNORED_PATTERNS = [
    '*-bundle.js',
    '*.bat',
    '*.bz2',
    '*.csv',
    '*.dat',
    '*.doctree',
    '*.eot',
    '*.gif',
    '*.gz',
    '*.ico',
    '*.inv',
    '*.jpg',
    '*.map',  # source map
    '*.min.js',
    '*.mo',
    '*.otf',
    '*.pickle',
    '*.png',
    '*.pyc',
    '*.sqlite3',
    '*.svg',
    '*.ttf',
    '*.woff',
    '*.woff2',
    '*.zip',
    '*.xls',
    '*.xlsx',
    '.coverage',
    '_version.py',
    'coverage.xml',
    'Makefile',
    'vendor.js',
]
def find_files(
        roots, generated_resources=None,
        ignored_patterns=IGNORED_PATTERNS,
        ignored_dirs=IGNORED_DIRS,
        ignored_path_regexps=IGNORED_PATH_REGEXPS,
        allowed_extensions=None
):
    """
    Find files in `roots` with ignores, `generated_resources` handling etc.

    :param roots: Root directory or an iterable of root directories
    :type roots: str|Iterable[str]
    :param generated_resources:
      Output set of generated resources (mutated during find_files)
    :type generated_resources: set
    :param ignored_patterns: fnmatch file patterns to ignore
    :type ignored_patterns: Iterable[str]
    :param ignored_dirs: fnmatch directory patterns to ignore
    :type ignored_dirs: Iterable[str]
    :param ignored_path_regexps: Path regexps to ignore
    :type ignored_path_regexps: Iterable[str]
    :param allowed_extensions:
      Extensions (really filename suffixes) to allow (optional)
    :type allowed_extensions: Iterable[str]|None
    :return: Iterable of file paths
    :rtype: Iterable[str]
    """
    if generated_resources is None:
        generated_resources = set()
    if isinstance(roots, string_types):
        roots = [roots]
    if isinstance(allowed_extensions, string_types):
        allowed_extensions = set([allowed_extensions])
    for root in roots:
        for (path, dirs, files) in os.walk(root):
            # Normalize to POSIX-style separators so patterns and regexps
            # behave identically on Windows and Unix.
            path = posixpath.normpath(path.replace(os.sep, "/"))
            # Mutating `dirs` in place prunes os.walk's descent.
            _remove_ignored_directories(path, dirs, ignored_dirs, ignored_path_regexps)
            for filename in files:
                filepath = posixpath.join(path, filename)
                if filename == "generated_resources.txt":
                    _process_generated_resources(path, filepath, generated_resources)
                    continue
                # Simplified from `not all(not fnmatch(...))` double negation.
                if any(fnmatch.fnmatch(filename, pattern) for pattern in ignored_patterns):
                    continue
                if not _check_allowed_extension(filepath, allowed_extensions):
                    continue
                yield filepath
def _check_allowed_extension(filepath, allowed_extensions):
if allowed_extensions is None:
return True
return any(filepath.endswith(extension) for extension in allowed_extensions)
def _process_generated_resources(path, manifest_filename, generated_resources):
with open(manifest_filename, "r") as generated_resources_manifest:
for line in generated_resources_manifest:
line = line.strip()
if line:
generated_resources.add(posixpath.join(path, line))
def _remove_ignored_directories(path, dirs, ignored_dirs, ignored_path_regexps):
matches = set()
for ignored_dir in ignored_dirs:
matches.update(set(dir for dir in dirs if fnmatch.fnmatch(dir, ignored_dir)))
for ignore_re in ignored_path_regexps:
matches.update(dir for dir in dirs if re.match(ignore_re, posixpath.join(path, dir)))
for ignored_dir in matches:
dirs.remove(ignored_dir)
class XNodeVisitor(object):
    """AST visitor that also passes the chain of ancestor nodes to handlers."""

    def visit(self, node, parents=None):
        """Dispatch `node` to `visit_<ClassName>`, falling back to generic_visit."""
        handler = getattr(
            self, 'visit_' + node.__class__.__name__, self.generic_visit
        )
        return handler(node=node, parents=parents)

    def generic_visit(self, node, parents=None):
        """Recurse into child AST nodes with `node` appended to the parent chain."""
        child_parents = (parents or []) + [node]
        for _field, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value, child_parents)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item, child_parents)
def dotify_ast_name(name):
    """Render a Name/Attribute AST chain as a dotted string (e.g. "a.b.c").

    Other node types render as "<ClassName>".
    """
    if isinstance(name, Name):
        return name.id
    if isinstance(name, Attribute):
        return "%s.%s" % (dotify_ast_name(name.value), name.attr)
    return "<%s>" % name.__class__.__name__
def get_assign_first_target(assign):
    """Return the id of the first simple (Name) target of `assign`, else None."""
    for target in assign.targets:
        target_id = getattr(target, "id", None)
        if target_id:
            return target_id
| suutari-ai/shoop | _misc/sanity_utils.py | Python | agpl-3.0 | 5,137 | [
"VisIt"
] | 9792af562b33e081a62f295373accd9225c0752de22c2150e9ca2cb46421df2a |
import numpy as np
from operator import attrgetter
from scipy.ndimage.morphology import distance_transform_edt as scipy_edt
from scipy.spatial.distance import euclidean
import glob
import os
import sys
# http://www.gaussian.com/g_tech/g_ur/k_constants.htm
angstrom_per_bohr = 0.5291772086
AXES = ['x', 'y', 'z']
class GridError(Exception):
    """Raised when a cube's grid specification is inconsistent or unsupported."""
    pass
class InputFormatError(Exception):
    """Raised when an input file or value array is malformed (e.g. contains NaN)."""
    pass
def _check_for_nans(values):
try:
values = values.flat
except AttributeError:
pass
# http://stackoverflow.com/a/6736970
if np.isnan(np.sum(values)):
raise InputFormatError("Values contain NANs!")
class Cube(object):
    """Parser for a Gaussian-style cube file.

    Reads the two leading comment lines, the grid header, the atom list and
    the volumetric data, exposing them as `self.molecule` and `self.field`.
    """

    # Maps the first 18 characters of the cube's title line to a short type tag.
    title_to_type = {
        ' Electrostatic pot': 'esp',
        ' Electron density ': 'ed',
        # Cube file generated by the ``bader`` program from Henkelman's group
        ' Bader charge': 'bader',
    }

    def __init__(self, cube_fn, coords_in_bohr=True):
        """Parse the cube file `cube_fn`.

        `coords_in_bohr` states whether header coordinates are in bohr (they
        then get converted to angstrom) or already in angstrom.
        """
        self.cube_fn = cube_fn
        with open(cube_fn, 'r') as f:
            # Lines 1-2: free-form comments; by Gaussian convention the first
            # echoes the input and the second identifies the field type.
            self.gaussian_input = f.readline().rstrip('\n')
            self.title = f.readline().rstrip('\n')

            try:
                self.cube_type = Cube.title_to_type[self.title[:18]]
            except KeyError:
                self.cube_type = "unrecognized"
                # TODO: Cubes created by this program are currently not
                # recognized. When fixing this look for another use of the word
                # 'unrecognized' in this file.
                # raise NotImplementedError(
                #     "Cube title '" + self.title + "' is not associated "
                #     "with a known cube type.")

            # Line 3: atom count, origin coordinates and (usually) NVal.
            line = f.readline().split()
            if len(line) == 5:
                self.atom_count, *origin_coords, nval = line
            elif len(line) == 4 and self.cube_type == 'bader':
                # Bader cubes omit the NVal field; assume one value per point.
                self.atom_count, *origin_coords = line
                nval = 1
            else:
                raise InputFormatError(
                    "Cube file incorrectly formatted! Expected five fields "
                    "(atom count, 3*origin coordinates, NVal) on line 3, found"
                    " {0}.".format(len(line)))
            if float(nval) != 1:
                raise GridError('NVal in the cube is different than 1. Not '
                                'sure what it means in practice.')
            self.atom_count = int(self.atom_count)

            # Lines 4-6: per-axis point counts and axis vectors.
            grid = Grid([f.readline().split() for i in range(3)],
                        coords_in_bohr)
            grid.origin_coords = [float(coord) for coord in origin_coords]
            if coords_in_bohr:
                grid.origin_coords = [angstrom_per_bohr*coord for coord in
                                      grid.origin_coords]

            self.molecule = Molecule(self)
            # The atoms will be added to the Molecule in the order of
            # occurrence in the input, which is assumed to correspond to
            # Gaussian labels.
            for label in range(self.atom_count):
                atom_temp = f.readline().split()
                for index in range(4):
                    atom_temp[index+1] = float(atom_temp[index+1])
                # Atom line fields: atomic number, charge, x, y, z.
                new_atom = Atom(int(label)+1, int(atom_temp[0]), atom_temp[2:],
                                coords_in_bohr)
                new_atom.charges['cube'] = atom_temp[1]
                self.molecule.append(new_atom)

            # TODO: this may be unfeasible for very large cubes
            field = f.read().split()

            self.field = GridField(Cube.field_from_raw(field, grid), grid,
                                   self.cube_type, 'input')

    @staticmethod
    def field_from_raw(raw_field, grid):
        """Convert the raw token list into a float ndarray shaped like `grid`.

        Raises GridError when the token count disagrees with the grid header.
        """
        field = np.array(list(map(float, raw_field)))
        if len(field) != np.prod(grid.points_on_axes):
            raise GridError('The number of points in the cube {0} is not equal'
                            ' to the product of number of points in the XYZ '
                            'directions given in the cube header: {1}.'
                            .format(len(field), grid.points_on_axes))

        field.resize(grid.points_on_axes)
        return field
class Atom(object):
    """A single atom: label, element identity, coordinates and charges.

    Coordinates given in bohr are converted to angstrom on construction
    (using the module-level `angstrom_per_bohr` factor).
    """
    # http://www.science.co.il/PTelements.asp
    # TODO: this should be handled by a library
    periodic = [('H', 'Hydrogen'),
                ('He', 'Helium'),
                ('Li', 'Lithium'),
                ('Be', 'Beryllium'),
                ('B', 'Boron'),
                ('C', 'Carbon'),
                ('N', 'Nitrogen'),
                ('O', 'Oxygen'),
                ('F', 'Fluorine'),
                ('Ne', 'Neon'),
                ('Na', 'Sodium'),
                ('Mg', 'Magnesium'),
                ('Al', 'Aluminum'),
                ('Si', 'Silicon'),
                ('P', 'Phosphorus'),
                ('S', 'Sulfur'),
                ('Cl', 'Chlorine'),
                ('Ar', 'Argon'),
                ('XX', 'Unrecognized')]
    # Inverse look-up: element symbol -> atomic number
    inv_periodic = {v[0]: i+1 for i, v in enumerate(periodic)}

    def __init__(self, label, atomic_no, coords=None, coords_in_bohr=None):
        """Create an atom.

        :param label: integer label (Gaussian atom numbering)
        :param atomic_no: atomic number; unknown elements fall back to the
            stringified number as identity
        :param coords: optional XYZ coordinates
        :param coords_in_bohr: must be given when `coords` is given; True
            converts the coordinates from bohr to angstrom
        :raises ValueError: when `coords` is given without `coords_in_bohr`
        """
        self.label = label
        self.atomic_no = atomic_no
        try:
            self.identity = Atom.periodic[atomic_no-1][0]
        except IndexError:
            print('WARNING: Element of atomic number {0} not implemented. '
                  'Setting its identity to atomic number'.format(atomic_no))
            self.identity = str(atomic_no)
        # Mapping from charge type (e.g. 'cube', 'bader') to charge value
        self.charges = {}
        self.coords = coords
        if coords is not None:
            if coords_in_bohr is None:
                raise ValueError("When creating an Atom with coordinates, the "
                                 "units must be specified through the "
                                 "`coords_in_bohr` parameter.")
            if coords_in_bohr:
                self.coords = [angstrom_per_bohr*coord for coord in coords]

    def print_with_charge(self, charge_type, f=sys.stdout):
        """Print the atom together with one of its charges to stream `f`."""
        print(self, ', charge: {0: .4f}'.format(self.charges[charge_type]),
              sep='', file=f)

    def __str__(self):
        return 'Atom {0:2}: {1:2}'.format(self.label, self.identity)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # BUGFIX: comparing with a non-Atom used to raise AttributeError;
        # return NotImplemented so Python falls back to the default protocol.
        if not isinstance(other, Atom):
            return NotImplemented
        result = self.atomic_no == other.atomic_no
        # Coordinates are only compared when both atoms carry them.
        if self.coords is not None and other.coords is not None:
            result = result and self.coords == other.coords
        return result
class Molecule(list):
    """A list of atoms with extra functionalities."""

    def __init__(self, parent_cube, *args):
        """Create the atom list.

        :param parent_cube: the Cube this molecule was read from (may be None)
        :param args: optional iterable of atoms forwarded to `list`
        """
        list.__init__(self, *args)
        self.parent_cube = parent_cube

    def verbose_compare(self, other):
        """Print a human-readable comparison with another molecule."""
        if self == other:
            print("The molecules are the same.")
            return
        # Otherwise:
        print("The molecules differ at the following atoms:")
        for atom, other_atom in zip(self, other):
            if atom != other_atom:
                print("{0} != {1}".format(atom, other_atom))
        if len(self) != len(other):
            which = self if len(self) > len(other) else other
            which_str = 'first' if len(self) > len(other) else 'second'
            print("The {0} molecule has {1} more atoms:".format(
                which_str, abs(len(other) - len(self))))
            # Print the surplus atoms of the longer molecule.
            for atom in which[min(len(self), len(other)):]:
                print(atom)

    def extract_qtaim_basins(self, grid, path):
        """Extract QTAIM basins from Henkelman group's ``bader`` program

        The ``bader`` command needed to generate input cube files is::

            bader -p all_atom -vac off density.cube

        Assigning low density points to vacuum needs to be switched off in
        order to allow the basins to extend to infinity.

        This method returns a field with atomic labels indicating which basin
        each point belongs to.
        """
        output_files = glob.glob(path + 'BvAt*.cube')
        expected = [path + 'BvAt{0:04}.cube'.format(i+1) for i in
                    range(len(self))]
        if sorted(output_files) != expected:
            if not output_files:
                msg = "No ``bader`` output cube files found!"
            else:
                # BUGFIX: `msg` used to be appended to before being
                # initialized (UnboundLocalError), and the whole `expected`
                # list (not the single missing file) was passed to
                # os.path.basename.
                msg = "Unexpected set of ``bader`` output cube files."
                for output_file, expected_file in zip(sorted(output_files),
                                                      expected):
                    if output_file != expected_file:
                        msg = ("Missing expected ``bader`` output cube file: "
                               + os.path.basename(expected_file))
                        break
            raise InputFormatError(msg + " To generate the files use the "
                                   "command: ``bader -p all_atom -vac off "
                                   "density.cube``")
        cubes = [Cube(expected_file) for expected_file in expected]
        # Compare grids with that provided. TODO: Would be better to use the
        # function field_comparison._check_grids, but can't import that module
        # here and won't be able to pass a special message. All that requires
        # some refactoring but is a sensible thing to do.
        for i, cube in enumerate(cubes):
            if cube.field.grid != grid:
                raise GridError("The grid of `bader' cube number {0} is "
                                "different from that of the molecule "
                                "requesting extraction.".format(i+1))
        result = []
        # Iterate all the cubes element-wise and produce a field with atomic
        # labels indicating which basin each point belongs to.
        # (This probably isn't the numpy way of doing this. It operates on
        # iterators though, so should be memory efficient.)
        for point in zip(*[cube.field.values.flat for cube in cubes]):
            point_bool = [True if elem else False for elem in point]
            if sum(point_bool) == 0:
                raise InputFormatError("Found point not assigned to any atom "
                                       "by the ``bader`` program. Maybe the "
                                       "``-vac off`` option was not set?")
            elif sum(point_bool) > 1:
                raise InputFormatError("Found point assigned to many atoms "
                                       "by the ``bader`` program. Possible "
                                       "numerical inconsistency in algorithm.")
            result.append(point_bool.index(True)+1)
        result = np.array(result)
        result.resize(self.parent_cube.field.grid.points_on_axes)
        return GridField(result, self.parent_cube.field.grid, 'parent_atom',
                         ['qtaim'])
class Field(object):
    """A set of values of some physical quantity (ESP, ED, distances, ...)."""

    def __init__(self, values, field_type, field_info, check_nans):
        """Create a field.

        :param values: the field values (validated for NaNs when requested)
        :param field_type: e.g. 'esp', 'ed', 'rep_esp', 'dist', 'diff',
            'parent_atom' or 'unrecognized'
        :param field_info: type-specific extra information (list)
        :param check_nans: when True, every assignment to `values` is passed
            through `_check_for_nans`
        """
        # `check_nans` must be set first: __setattr__ consults it when
        # `values` is assigned below.
        self.check_nans = check_nans
        self.values = values
        self.field_type = field_type
        self.field_info = field_info

    def __setattr__(self, name, value):
        # Validate on every (re)assignment of the values, not just in __init__
        if name == 'values' and self.check_nans:
            _check_for_nans(value)
        super().__setattr__(name, value)

    def lookup_name(self):
        """Return free-form name

        The main purpose will probably be labelling axes when plotting.
        """
        if self.field_type in ['esp', 'ed']:
            if self.field_type == 'esp':
                result = "ESP value"
            elif self.field_type == 'ed':
                result = "ED value"
            if self.field_info[0] == 'input':
                result += " from input cube file"
        elif self.field_type == 'rep_esp':
            result = "Reproduced ESP value"
            if self.field_info[0]:
                result += " from {0} charges".format(self.field_info[0]
                                                     .upper())
        elif self.field_type == 'dist':
            result = "Distance"
            if self.field_info[0] == 'ed':
                result += " from ED isosurface {0}".format(self.field_info[1])
            elif self.field_info[0] == 'Voronoi':
                result += " from closest atom"
            elif self.field_info[0] == 'qtaim':
                # BUGFIX: this branch previously repeated the 'Voronoi'
                # comparison and was hence unreachable.
                # This is not currently implemented
                result += " from QTAIM atom"
        elif self.field_type == 'diff':
            result = "difference"
            # BUGFIX: the qualifiers used to be appended without a separator,
            # producing e.g. "Differenceabsolute"; prepend them instead.
            if 'abs' in self.field_info[0]:
                result = 'absolute ' + result
            if 'rel' in self.field_info[0]:
                result = 'relative ' + result
            result = result.capitalize()
            if len(self.field_info[1]) == 2:
                result += " between {0} and\n {1}".format(*self.field_info[1])
        elif self.field_type == 'parent_atom':
            result = "Parent atom"
            # BUGFIX: a space was missing before "of ..." in both branches.
            if self.field_info[0] == 'Voronoi':
                result += " of Voronoi basin"
            elif self.field_info[0] == 'qtaim':
                result += " of QTAIM basin"
        elif self.field_type == "unrecognized":
            result = "Unrecognized"
        else:
            raise NotImplementedError("Free-form name not implemented for "
                                      "Field of type '{0}' and info '{1}'"
                                      .format(self.field_type,
                                              self.field_info))
        return result
class GridField(Field):
    """A Field whose values sit on a regular 3D grid (as read from a cube)."""

    def __init__(self, values, grid, field_type, field_info=None,
                 check_nans=True):
        # The grid describing where each value sits; set before the base-class
        # init, which assigns `values` (triggering __setattr__ validation).
        self.grid = grid
        super().__init__(values, field_type, field_info, check_nans)

    def distance_transform(self, isovalue):
        """This should only be applied to the electron density cube."""
        if self.field_type != 'ed':
            print("WARNING: Distance transform should only be applied to "
                  "electron density fields, attempted on field type: '{0}'."
                  .format(self.field_type))
        if not self.grid.aligned_to_coord:
            raise GridError('Distance transform not implemented for grid not '
                            'aligned with the coordinate system.')
        # Select isosurface and its interior as a 3D solid of 0s.
        select_iso = lambda x: 1 if x < isovalue else 0
        field = np.vectorize(select_iso)(self.values)
        # Euclidean distance transform, scaled by the physical grid spacing
        dist = scipy_edt(field, sampling=self.grid.dir_intervals)
        return GridField(dist, self.grid, 'dist', ['ed', isovalue])

    def write_cube(self, output_fn, molecule, charge_type=None,
                   write_coords_in_bohr=True):
        """Write the field as a Gaussian cube file.

        Raises FileExistsError when the file exists.

        :param output_fn: path of the cube file to create
        :param molecule: iterable of atoms to write in the header
        :param charge_type: which of each atom's charges to write in the
            charge column; None writes the atomic number instead
        :param write_coords_in_bohr: convert coordinates back to bohr
            (internal storage is angstrom)
        """
        # Mode 'x' raises FileExistsError instead of overwriting.
        with open(output_fn, 'x') as f:
            f.write(' Cube file generated by repESP.\n')
            f.write(' Cube file for field of type {0}.\n'.format(
                self.field_type))
            origin_coords = self.grid.origin_coords
            if write_coords_in_bohr:
                origin_coords = [elem/angstrom_per_bohr for elem in
                                 origin_coords]
            f.write(' {0:4} {1: .6f} {2: .6f} {3: .6f} 1\n'.format(
                len(molecule), *origin_coords))
            # One header line per grid axis: point count + step vector
            for axis in self.grid.axes:
                axis_intervals = axis.intervals
                if write_coords_in_bohr:
                    axis_intervals = [elem/angstrom_per_bohr for elem in
                                      axis_intervals]
                f.write(' {0:4} {1: .6f} {2: .6f} {3: .6f}\n'.format(
                    axis.point_count, *axis_intervals))
            # One line per atom: atomic number, charge, coordinates
            for atom in molecule:
                if charge_type is None:
                    charge = atom.atomic_no
                else:
                    charge = atom.charges[charge_type]
                atom_coords = atom.coords
                if write_coords_in_bohr:
                    atom_coords = [coord/angstrom_per_bohr for coord in
                                   atom_coords]
                f.write(' {0:4} {1: .6f} {2: .6f} {3: .6f} {4: .6f}\n'
                        .format(atom.atomic_no, charge, *atom_coords))
            # Values: at most six per line, with a line break after every
            # completed z-row (cube convention). `i` counts values within
            # the current z-row.
            i = 1
            for value in self.values.flatten():
                f.write(' {0: .5E}'.format(value))
                if not i % 6:
                    f.write('\n')
                if not i % self.grid.axes[2].point_count:
                    # End of a z-row: break the line and restart the counter
                    f.write('\n')
                    i = 1
                else:
                    i += 1

    def get_values(self):
        # Flattened copy of the values (last axis varies fastest, C order)
        return list(self.values.flat)

    def get_points(self):
        # Cartesian coordinates of the grid points, in the same order as
        # get_values()
        return self.grid.get_points()
class Grid(object):
    """Geometry of a cube file's regular grid: axes, spacing and origin."""

    def __init__(self, grid_input, coords_in_bohr):
        """Build the grid from the three axis lines of a cube header.

        :param grid_input: 3x4 nested list (point count + 3 interval
            components per axis)
        :param coords_in_bohr: whether the intervals need conversion from
            bohr to angstrom
        :raises GridError: on malformed input or a grid not aligned with the
            coordinate system
        """
        self.origin_coords = None
        if np.shape(grid_input) != (3, 4):
            raise GridError('Incorrect grid formatting. Expected a list of '
                            'shape 3x4, instead got: ' + str(grid_input))
        self.axes = [GridAxis(label) for label in AXES]
        # The grid is aligned only when every single axis is aligned.
        alignment_flags = [
            self._add_axis(axis_index, axis_spec, coords_in_bohr)
            for axis_index, axis_spec in enumerate(grid_input)
        ]
        self.aligned_to_coord = all(alignment_flags)
        self.dir_intervals = []
        if not self.aligned_to_coord:
            raise GridError('The cube is not aligned with coordinate system.')
        self.dir_intervals = [axis.dir_interval for axis in self.axes]
        self.points_on_axes = [axis.point_count for axis in self.axes]

    def __eq__(self, other):
        return vars(self) == vars(other)

    def _add_axis(self, axis_number, input_axis, coords_in_bohr):
        """Fill one GridAxis; return whether it is aligned to its axis."""
        axis = self.axes[axis_number]
        # The first field is the point count, the remaining three are the
        # interval (step vector) components.
        axis.set_point_count(input_axis.pop(0))
        return axis.set_intervals(input_axis, coords_in_bohr)

    def get_points(self):
        """Return the coordinates of all grid points (z varies fastest)."""
        x0, y0, z0 = self.origin_coords
        dx, dy, dz = self.dir_intervals
        nx, ny, nz = self.points_on_axes
        return [[x0 + i*dx, y0 + j*dy, z0 + k*dz]
                for i in range(nx)
                for j in range(ny)
                for k in range(nz)]
class GridAxis(object):
    """One axis of a cube grid: its point count and step vector."""

    def __init__(self, label):
        self.label = label
        self.point_count = None
        self.intervals = []  # xyz components of this axis's step vector
        self.dir_interval = None  # Interval in its 'own' direction

    def __eq__(self, other):
        return vars(self) == vars(other)

    def set_point_count(self, point_count):
        """Store the number of points, rejecting non-integral values."""
        if int(point_count) != float(point_count):
            raise GridError('Number of points in direction {0} is not an '
                            'integer: {1}'.format(self.label, point_count))
        self.point_count = int(point_count)

    def set_intervals(self, intervals, coords_in_bohr):
        """Store the step vector; return whether the axis is aligned.

        An axis is aligned to its coordinate axis when all off-axis
        components of its step vector are zero.
        """
        aligned = True
        for direction, raw_interval in enumerate(intervals):
            interval = float(raw_interval)
            if coords_in_bohr:
                interval *= angstrom_per_bohr
            self.intervals.append(interval)
            if AXES[direction] == self.label:
                self.dir_interval = interval
            elif interval != 0:
                aligned = False
        if not aligned:
            print('INFO: Cube axis {0} is not aligned to its coordinate'
                  ' axis: The intervals are: {1}'.format(self.label,
                                                         intervals))
        return aligned
| jszopi/repESP | repESP_old/cube_helpers.py | Python | gpl-3.0 | 19,741 | [
"Gaussian"
] | 45214023bbef6fc35ddb1a0ecbc74882c53672fb4cb7a088cd70cc990b33286f |
""" ARC Computing Element
Using the ARC API now
**Configuration Parameters**
Configuration for the ARCComputingElement submission can be done via the configuration system.
XRSLExtraString:
Default additional string for ARC submit files. Should be written in the following format::
(key = "value")
XRSLMPExtraString:
Default additional string for ARC submit files for multi-processor jobs. Should be written in the following format::
(key = "value")
Host:
The host for the ARC CE, used to overwrite the CE name.
WorkingDirectory:
Directory where the pilot log files are stored locally. For instance::
/opt/dirac/pro/runit/WorkloadManagement/SiteDirectorArc
EndpointType:
Name of the protocol to use to interact with ARC services: Emies and Gridftp are supported.
Gridftp communicates with gridftpd services providing authentication and encryption for file transfers.
ARC developers are going to drop it in the future.
Emies is another protocol that allows to interact with A-REX services that provide additional features
(support of OIDC tokens).
Preamble:
Line that should be executed just before the executable file.
**Code Documentation**
"""
import six
import os
import stat
import arc # Has to work if this module is called #pylint: disable=import-error
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCESiteMapping
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Resources.Computing.PilotBundle import writeScript
from DIRAC.WorkloadManagementSystem.Client import PilotStatus
# Uncomment the following 5 lines for getting verbose ARC api output (debugging)
# import sys
# logstdout = arc.LogStream(sys.stdout)
# logstdout.setFormat(arc.ShortFormat)
# arc.Logger_getRootLogger().addDestination(logstdout)
# arc.Logger_getRootLogger().setThreshold(arc.VERBOSE)
# Internal name of this computing-element type
CE_NAME = "ARC"
MANDATORY_PARAMETERS = ["Queue"]  # Mandatory for ARC CEs in GLUE2?
# Mapping from ARC general job states to DIRAC pilot states
# (consumed in getJobStatus via self.mapStates)
STATES_MAP = {
    "Accepted": PilotStatus.WAITING,
    "Preparing": PilotStatus.WAITING,
    "Submitting": PilotStatus.WAITING,
    "Queuing": PilotStatus.WAITING,
    "Undefined": PilotStatus.UNKNOWN,
    "Running": PilotStatus.RUNNING,
    "Finishing": PilotStatus.RUNNING,
    "Deleted": PilotStatus.ABORTED,
    "Killed": PilotStatus.ABORTED,
    "Failed": PilotStatus.FAILED,
    "Hold": PilotStatus.FAILED,
    "Finished": PilotStatus.DONE,
    "Other": PilotStatus.DONE,
}
class ARCComputingElement(ComputingElement):
#############################################################################
    def __init__(self, ceUniqueID):
        """Standard constructor.

        :param str ceUniqueID: unique name of the CE
        """
        super(ARCComputingElement, self).__init__(ceUniqueID)

        self.ceType = CE_NAME
        self.submittedJobs = 0
        self.mandatoryParameters = MANDATORY_PARAMETERS
        self.pilotProxy = ""
        self.queue = ""
        self.gridEnv = ""
        self.ceHost = self.ceName
        # Protocol used to talk to the CE; may be overridden in _reset()
        self.endpointType = "Gridftp"
        # ARC user configuration (carries proxy path and timeouts)
        self.usercfg = arc.common.UserConfig()
        # Commands executed just before the pilot executable (CS "Preamble")
        self.preamble = ""

        # set the timeout to the default 20 seconds in case the UserConfig constructor did not
        self.usercfg.Timeout(20)  # pylint: disable=pointless-statement
        # The CS "Host" option takes precedence over the CE name
        self.ceHost = self.ceParameters.get("Host", self.ceName)
        self.gridEnv = self.ceParameters.get("GridEnv", self.gridEnv)

        # Used in getJobStatus
        self.mapStates = STATES_MAP

        # Do these after all other initialisations, in case something barks
        self.xrslExtraString = self.__getXRSLExtraString()
        self.xrslMPExtraString = self.__getXRSLExtraString(multiprocessor=True)
#############################################################################
    def __getARCJob(self, jobID):
        """Create an ARC Job with all the needed / possible parameters defined.
        By the time we come here, the environment variable X509_USER_PROXY should already be set

        :param jobID: the ARC job reference (a URL-like string)
        :return: an `arc.Job` ready for status/management operations
        """
        j = arc.Job()
        j.JobID = str(jobID)
        j.IDFromEndpoint = os.path.basename(j.JobID)

        if self.endpointType == "Gridftp":
            # Gridftp CEs: job status is queried via LDAP, management goes
            # through the gsiftp job interface.
            statURL = "ldap://%s:2135/Mds-Vo-Name=local,o=grid??sub?(nordugrid-job-globalid=%s)" % (self.ceHost, jobID)
            j.JobStatusURL = arc.URL(str(statURL))
            j.JobStatusInterfaceName = "org.nordugrid.ldapng"

            mangURL = "gsiftp://%s:2811/jobs/" % (self.ceHost)
            j.JobManagementURL = arc.URL(str(mangURL))
            j.JobManagementInterfaceName = "org.nordugrid.gridftpjob"

            j.ServiceInformationURL = j.JobManagementURL
            j.ServiceInformationInterfaceName = "org.nordugrid.ldapng"
        else:
            # Emies: a single A-REX endpoint serves status, management and
            # service information.
            commonURL = "https://%s:8443/arex" % self.ceHost
            j.JobStatusURL = arc.URL(str(commonURL))
            j.JobStatusInterfaceName = "org.ogf.glue.emies.activitymanagement"

            j.JobManagementURL = arc.URL(str(commonURL))
            j.JobManagementInterfaceName = "org.ogf.glue.emies.activitymanagement"

            j.ServiceInformationURL = arc.URL(str(commonURL))
            j.ServiceInformationInterfaceName = "org.ogf.glue.emies.resourceinfo"

        # Attach the user configuration (proxy, timeouts) to the job
        j.PrepareHandler(self.usercfg)
        return j
def __getXRSLExtraString(self, multiprocessor=False):
# For the XRSL additional string from configuration - only done at initialisation time
# If this string changes, the corresponding (ARC) site directors have to be restarted
#
# Variable = XRSLExtraString (or XRSLMPExtraString for multi processor mode)
# Default value = ''
# If you give a value, I think it should be of the form
# (aaa = "xxx")
# Otherwise the ARC job description parser will have a fit
# Locations searched in order :
# Top priority : Resources/Sites/<Grid>/<Site>/CEs/<CE>/XRSLExtraString
# Second priority : Resources/Sites/<Grid>/<Site>/XRSLExtraString
# Default : Resources/Computing/CEDefaults/XRSLExtraString
#
xrslExtraString = "" # Start with the default value
result = getCESiteMapping(self.ceHost)
if not result["OK"] or not result["Value"]:
self.log.error("Unknown CE ...")
return
self.site = result["Value"][self.ceHost]
# Now we know the site. Get the grid
grid = self.site.split(".")[0]
# The different possibilities that we have agreed upon
if multiprocessor:
xtraVariable = "XRSLMPExtraString"
else:
xtraVariable = "XRSLExtraString"
firstOption = "Resources/Sites/%s/%s/CEs/%s/%s" % (grid, self.site, self.ceHost, xtraVariable)
secondOption = "Resources/Sites/%s/%s/%s" % (grid, self.site, xtraVariable)
defaultOption = "Resources/Computing/CEDefaults/%s" % xtraVariable
# Now go about getting the string in the agreed order
self.log.debug("Trying to get %s : first option %s" % (xtraVariable, firstOption))
result = gConfig.getValue(firstOption, defaultValue="")
if result != "":
xrslExtraString = result
self.log.debug("Found %s : %s" % (xtraVariable, xrslExtraString))
else:
self.log.debug("Trying to get %s : second option %s" % (xtraVariable, secondOption))
result = gConfig.getValue(secondOption, defaultValue="")
if result != "":
xrslExtraString = result
self.log.debug("Found %s : %s" % (xtraVariable, xrslExtraString))
else:
self.log.debug("Trying to get %s : default option %s" % (xtraVariable, defaultOption))
result = gConfig.getValue(defaultOption, defaultValue="")
if result != "":
xrslExtraString = result
self.log.debug("Found %s : %s" % (xtraVariable, xrslExtraString))
if xrslExtraString:
self.log.always("%s : %s" % (xtraVariable, xrslExtraString))
self.log.always(" --- to be added to pilots going to CE : %s" % self.ceHost)
return xrslExtraString
#############################################################################
    def _addCEConfigDefaults(self):
        """Method to make sure all necessary Configuration Parameters are defined"""
        # First assure that any global parameters are loaded
        ComputingElement._addCEConfigDefaults(self)
        # No ARC-specific defaults are added beyond the base class ones.
#############################################################################
def __writeXRSL(self, executableFile, inputs=None, outputs=None, executables=None):
"""Create the JDL for submission
:param str executableFile: executable to wrap in a XRSL file
:param str/list inputs: path of the dependencies to include along with the executable
:param str/list outputs: path of the outputs that we want to get at the end of the execution
:param str/list executables: path to inputs that should have execution mode on the remote worker node
"""
diracStamp = makeGuid()[:8]
# Evaluate the number of processors to allocate
nProcessors = self.ceParameters.get("NumberOfProcessors", 1)
xrslMPAdditions = ""
if nProcessors and nProcessors > 1:
xrslMPAdditions = """
(count = %(processors)u)
(countpernode = %(processorsPerNode)u)
%(xrslMPExtraString)s
""" % {
"processors": nProcessors,
"processorsPerNode": nProcessors, # This basically says that we want all processors on the same node
"xrslMPExtraString": self.xrslMPExtraString,
}
# Files that would need execution rights on the remote worker node
xrslExecutables = ""
if executables:
if not isinstance(executables, list):
executables = [executables]
xrslExecutables = "(executables=%s)" % " ".join(map(os.path.basename, executables))
# Add them to the inputFiles
if not inputs:
inputs = []
if not isinstance(inputs, list):
inputs = [inputs]
inputs += executables
# Dependencies that have to be embedded along with the executable
xrslInputs = ""
if inputs:
if not isinstance(inputs, list):
inputs = [inputs]
for inputFile in inputs:
xrslInputs += '(%s "%s")' % (os.path.basename(inputFile), inputFile)
# Output files to retrieve once the execution is complete
xrslOutputs = '("%s.out" "") ("%s.err" "")' % (diracStamp, diracStamp)
if outputs:
if not isinstance(outputs, list):
outputs = [outputs]
for outputFile in outputs:
xrslOutputs += '(%s "")' % (outputFile)
xrsl = """
&(executable="%(executable)s")
(inputFiles=(%(executable)s "%(executableFile)s") %(xrslInputAdditions)s)
(stdout="%(diracStamp)s.out")
(stderr="%(diracStamp)s.err")
(outputFiles=%(xrslOutputFiles)s)
(queue=%(queue)s)
%(xrslMPAdditions)s
%(xrslExecutables)s
%(xrslExtraString)s
""" % {
"executableFile": executableFile,
"executable": os.path.basename(executableFile),
"xrslInputAdditions": xrslInputs,
"diracStamp": diracStamp,
"queue": self.arcQueue,
"xrslOutputFiles": xrslOutputs,
"xrslMPAdditions": xrslMPAdditions,
"xrslExecutables": xrslExecutables,
"xrslExtraString": self.xrslExtraString,
}
return xrsl, diracStamp
def _bundlePreamble(self, executableFile):
"""Bundle the preamble with the executable file"""
wrapperContent = "%s\n./%s" % (self.preamble, executableFile)
return writeScript(wrapperContent, os.getcwd())
#############################################################################
def _reset(self):
self.queue = self.ceParameters.get("CEQueueName", self.ceParameters["Queue"])
if "GridEnv" in self.ceParameters:
self.gridEnv = self.ceParameters["GridEnv"]
self.preamble = self.ceParameters.get("Preamble", self.preamble)
# ARC endpoint types (Gridftp, Emies)
endpointType = self.ceParameters.get("EndpointType", self.endpointType)
if endpointType not in ["Gridftp", "Emies"]:
self.log.warn("Unknown ARC endpoint, change to default", self.endpointType)
else:
self.endpointType = endpointType
return S_OK()
#############################################################################
def submitJob(self, executableFile, proxy, numberOfJobs=1, inputs=None, outputs=None):
"""Method to submit job"""
# Assume that the ARC queues are always of the format nordugrid-<batchSystem>-<queue>
# And none of our supported batch systems have a "-" in their name
self.arcQueue = self.queue.split("-", 2)[2]
result = self._prepareProxy()
if not result["OK"]:
self.log.error("ARCComputingElement: failed to set up proxy", result["Message"])
return result
self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])
self.log.verbose("Executable file path: %s" % executableFile)
if not os.access(executableFile, 5):
os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH + stat.S_IXOTH)
executables = None
if self.preamble:
executables = [executableFile]
executableFile = self._bundlePreamble(executableFile)
batchIDList = []
stampDict = {}
if self.endpointType == "Gridftp":
endpoint = arc.Endpoint(str(self.ceHost + ":2811/jobs"), arc.Endpoint.JOBSUBMIT, "org.nordugrid.gridftpjob")
else:
endpoint = arc.Endpoint(
str("https://" + self.ceHost + ":8443/arex"),
arc.Endpoint.JOBSUBMIT,
"org.ogf.glue.emies.activitycreation",
)
# Submit jobs iteratively for now. Tentatively easier than mucking around with the JobSupervisor class
for __i in range(numberOfJobs):
# The basic job description
jobdescs = arc.JobDescriptionList()
# Get the job into the ARC way
xrslString, diracStamp = self.__writeXRSL(executableFile, inputs, outputs, executables)
self.log.debug("XRSL string submitted : %s" % xrslString)
self.log.debug("DIRAC stamp for job : %s" % diracStamp)
# The arc bindings don't accept unicode objects in Python 2 so xrslString must be explicitly cast
result = arc.JobDescription_Parse(str(xrslString), jobdescs)
if not result:
self.log.error("Invalid job description", "%r, message=%s" % (xrslString, result.str()))
break
# Submit the job
jobs = arc.JobList() # filled by the submit process
submitter = arc.Submitter(self.usercfg)
result = submitter.Submit(endpoint, jobdescs, jobs)
# Save info or else ..else.
if result == arc.SubmissionStatus.NONE:
# Job successfully submitted
pilotJobReference = jobs[0].JobID
batchIDList.append(pilotJobReference)
stampDict[pilotJobReference] = diracStamp
self.log.debug("Successfully submitted job %s to CE %s" % (pilotJobReference, self.ceHost))
else:
message = "Failed to submit job because "
if result.isSet(arc.SubmissionStatus.NOT_IMPLEMENTED): # pylint: disable=no-member
self.log.warn("%s feature not implemented on CE? (weird I know - complain to site admins" % message)
if result.isSet(arc.SubmissionStatus.NO_SERVICES): # pylint: disable=no-member
self.log.warn("%s no services are running on CE? (open GGUS ticket to site admins" % message)
if result.isSet(arc.SubmissionStatus.ENDPOINT_NOT_QUERIED): # pylint: disable=no-member
self.log.warn("%s endpoint was not even queried. (network ..?)" % message)
if result.isSet(arc.SubmissionStatus.BROKER_PLUGIN_NOT_LOADED): # pylint: disable=no-member
self.log.warn("%s BROKER_PLUGIN_NOT_LOADED : ARC library installation problem?" % message)
if result.isSet(arc.SubmissionStatus.DESCRIPTION_NOT_SUBMITTED): # pylint: disable=no-member
self.log.warn(
"%s Job not submitted - incorrect job description? (missing field in XRSL string?)" % message
)
if result.isSet(arc.SubmissionStatus.SUBMITTER_PLUGIN_NOT_LOADED): # pylint: disable=no-member
self.log.warn("%s SUBMITTER_PLUGIN_NOT_LOADED : ARC library installation problem?" % message)
if result.isSet(arc.SubmissionStatus.AUTHENTICATION_ERROR): # pylint: disable=no-member
self.log.warn(
"%s authentication error - screwed up / expired proxy? Renew / upload pilot proxy on machine?"
% message
)
if result.isSet(arc.SubmissionStatus.ERROR_FROM_ENDPOINT): # pylint: disable=no-member
self.log.warn("%s some error from the CE - possibly CE problems?" % message)
self.log.warn("%s ... maybe above messages will give a hint." % message)
break # Boo hoo *sniff*
if self.preamble:
os.unlink(executableFile)
if batchIDList:
result = S_OK(batchIDList)
result["PilotStampDict"] = stampDict
else:
result = S_ERROR("No pilot references obtained from the ARC job submission")
return result
#############################################################################
def killJob(self, jobIDList):
"""Kill the specified jobs"""
result = self._prepareProxy()
if not result["OK"]:
self.log.error("ARCComputingElement: failed to set up proxy", result["Message"])
return result
self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])
jobList = list(jobIDList)
if isinstance(jobIDList, six.string_types):
jobList = [jobIDList]
self.log.debug("Killing jobs %s" % jobIDList)
jobs = []
for jobID in jobList:
jobs.append(self.__getARCJob(jobID))
# JobSupervisor is able to aggregate jobs to perform bulk operations and thus minimizes the communication overhead
# We still need to create chunks to avoid timeout in the case there are too many jobs to supervise
for chunk in breakListIntoChunks(jobs, 100):
job_supervisor = arc.JobSupervisor(self.usercfg, chunk)
if not job_supervisor.Cancel():
errorString = " - ".join(jobList).strip()
return S_ERROR("Failed to kill at least one of these jobs: %s. CE(?) not reachable?" % errorString)
return S_OK()
#############################################################################
    def getCEStatus(self):
        """Method to return information on running and pending jobs.
        We hope to satisfy both instances that use robot proxies and those which use proper configurations.

        :return: S_OK with 'SubmittedJobs', 'RunningJobs' and 'WaitingJobs'
            entries, or S_ERROR on failure
        """
        result = self._prepareProxy()
        if not result["OK"]:
            self.log.error("ARCComputingElement: failed to set up proxy", result["Message"])
            return result
        self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])

        # Try to find out which VO we are running for.
        vo = ""
        res = getVOfromProxyGroup()
        if res["OK"]:
            vo = res["Value"]

        result = S_OK()
        result["SubmittedJobs"] = 0
        if not vo:
            # Presumably the really proper way forward once the infosys-discuss WG comes up with a solution
            # and it is implemented. Needed for DIRAC instances which use robot certificates for pilots.
            if self.endpointType == "Gridftp":
                endpoints = [
                    arc.Endpoint(
                        str("ldap://" + self.ceHost + "/MDS-Vo-name=local,o=grid"),
                        arc.Endpoint.COMPUTINGINFO,
                        "org.nordugrid.ldapng",
                    )
                ]
            else:
                endpoints = [
                    arc.Endpoint(
                        str("https://" + self.ceHost + ":8443/arex"),
                        arc.Endpoint.COMPUTINGINFO,
                        "org.ogf.glue.emies.resourceinfo",
                    )
                ]

            retriever = arc.ComputingServiceRetriever(self.usercfg, endpoints)
            retriever.wait()  # Takes a bit of time to get and parse the ldap information
            targets = retriever.GetExecutionTargets()
            # Take the stats from the first execution target found
            ceStats = targets[0].ComputingShare
            self.log.debug("Running jobs for CE %s : %s" % (self.ceHost, ceStats.RunningJobs))
            self.log.debug("Waiting jobs for CE %s : %s" % (self.ceHost, ceStats.WaitingJobs))
            result["RunningJobs"] = ceStats.RunningJobs
            result["WaitingJobs"] = ceStats.WaitingJobs
        else:
            # The system which works properly at present for ARC CEs that are configured correctly.
            # But for this we need the VO to be known - ask me (Raja) for the whole story if interested.
            # cmd = 'ldapsearch -x -LLL -H ldap://%s:2135 -b mds-vo-name=resource,o=grid "(GlueVOViewLocalID=%s)"' % (
            #     self.ceHost, vo.lower())
            if not self.queue:
                self.log.error("ARCComputingElement: No queue ...")
                res = S_ERROR("Unknown queue (%s) failure for site %s" % (self.queue, self.ceHost))
                return res
            # Shell pipeline against the GLUE2 ldap endpoint:
            # cmd1-2: find the mapping-policy entries for our VO,
            # cmd3:   keep only the share matching our queue,
            # cmd4-5: follow the share foreign key to the share entry itself,
            # cmd6:   extract the running/waiting job counters.
            cmd1 = "ldapsearch -x -o ldif-wrap=no -LLL -h %s:2135 -b 'o=glue' " % self.ceHost
            cmd2 = '"(&(objectClass=GLUE2MappingPolicy)(GLUE2PolicyRule=vo:%s))"' % vo.lower()
            cmd3 = " | grep GLUE2MappingPolicyShareForeignKey | grep %s" % (self.queue.split("-")[-1])
            cmd4 = " | sed 's/GLUE2MappingPolicyShareForeignKey: /GLUE2ShareID=/' "
            cmd5 = " | xargs -L1 ldapsearch -x -o ldif-wrap=no -LLL -h %s:2135 -b 'o=glue' " % self.ceHost
            cmd6 = " | egrep '(ShareWaiting|ShareRunning)'"
            res = shellCall(0, cmd1 + cmd2 + cmd3 + cmd4 + cmd5 + cmd6)
            if not res["OK"]:
                self.log.debug("Could not query CE %s - is it down?" % self.ceHost)
                return res

            try:
                # res["Value"][1] is the stdout of the shell pipeline
                ldapValues = res["Value"][1].split("\n")
                running = [lValue for lValue in ldapValues if "GLUE2ComputingShareRunningJobs" in lValue]
                waiting = [lValue for lValue in ldapValues if "GLUE2ComputingShareWaitingJobs" in lValue]
                result["RunningJobs"] = int(running[0].split(":")[1])
                result["WaitingJobs"] = int(waiting[0].split(":")[1])
            except IndexError:
                res = S_ERROR("Unknown ldap failure for site %s" % self.ceHost)
                return res

        return result
#############################################################################
def getJobStatus(self, jobIDList):
    """Get the status information for the given list of jobs.

    :param jobIDList: pilot job reference(s); a single string or a list,
                      each possibly carrying a ":::stamp" DIRAC suffix
    :return: S_OK({jobReference: pilotStatus}) or S_ERROR
    """
    result = self._prepareProxy()
    if not result["OK"]:
        self.log.error("ARCComputingElement: failed to set up proxy", result["Message"])
        return result
    self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])

    # Accept a single reference as well as a list of references
    jobTmpList = list(jobIDList)
    if isinstance(jobIDList, six.string_types):
        jobTmpList = [jobIDList]

    # Pilots are stored with a DIRAC stamp (":::XXXXX") appended; strip it
    jobList = []
    for j in jobTmpList:
        if ":::" in j:
            job = j.split(":::")[0]
        else:
            job = j
        jobList.append(job)

    jobs = []
    for jobID in jobList:
        jobs.append(self.__getARCJob(jobID))

    # JobSupervisor is able to aggregate jobs to perform bulk operations and thus minimizes the communication overhead
    # We still need to create chunks to avoid timeout in the case there are too many jobs to supervise
    jobsUpdated = []
    for chunk in breakListIntoChunks(jobs, 100):
        job_supervisor = arc.JobSupervisor(self.usercfg, chunk)
        job_supervisor.Update()
        jobsUpdated.extend(job_supervisor.GetAllJobs())

    resultDict = {}
    jobsToRenew = []
    jobsToCancel = []
    for job in jobsUpdated:
        jobID = job.JobID
        self.log.debug("Retrieving status for job %s" % jobID)
        arcState = job.State.GetGeneralState()
        self.log.debug("ARC status for job %s is %s" % (jobID, arcState))
        if arcState:  # Meaning arcState is filled. Is this good python?
            resultDict[jobID] = self.mapStates[arcState]
            # Renew proxy only of jobs which are running or queuing
            if arcState in ("Running", "Queuing"):
                nearExpiry = arc.Time() + arc.Period(10000)  # 2 hours, 46 minutes and 40 seconds
                if job.ProxyExpirationTime < nearExpiry:
                    # Jobs to renew are aggregated to perform bulk operations
                    jobsToRenew.append(job)
                    self.log.debug(
                        "Renewing proxy for job %s whose proxy expires at %s" % (jobID, job.ProxyExpirationTime)
                    )
            if arcState == "Hold":
                # Jobs to cancel are aggregated to perform bulk operations
                # Cancel held jobs so they don't sit in the queue forever
                jobsToCancel.append(job)
                self.log.debug("Killing held job %s" % jobID)
        else:
            resultDict[jobID] = PilotStatus.UNKNOWN
        # If done - is it really done? Check the exit code
        if resultDict[jobID] == PilotStatus.DONE:
            exitCode = int(job.ExitCode)
            if exitCode:
                resultDict[jobID] = PilotStatus.FAILED
        self.log.debug("DIRAC status for job %s is %s" % (jobID, resultDict[jobID]))

    # Bulk-renew and bulk-cancel in chunks, for the same timeout reason as above
    for chunk in breakListIntoChunks(jobsToRenew, 100):
        job_supervisor_renew = arc.JobSupervisor(self.usercfg, chunk)
        if not job_supervisor_renew.Renew():
            self.log.warn("At least one of the jobs failed to renew its credentials")

    for chunk in breakListIntoChunks(jobsToCancel, 100):
        job_supervisor_cancel = arc.JobSupervisor(self.usercfg, chunk)
        if not job_supervisor_cancel.Cancel():
            self.log.warn("At least one of the jobs failed to be cancelled")

    if not resultDict:
        return S_ERROR("No job statuses returned")
    return S_OK(resultDict)
#############################################################################
def getJobOutput(self, jobID, workingDirectory=None):
    """Fetch the standard output and error of the specified pilot job.

    Standard output and error are returned as strings; any further
    outputs retrieved by ARC remain in workingDirectory.

    :param str jobID: pilot reference, expected to carry a ":::stamp" suffix
    :param str workingDirectory: optional directory for the retrieved files
    :return: S_OK((stdout, stderr)) or S_ERROR
    """
    result = self._prepareProxy()
    if not result["OK"]:
        self.log.error("ARCComputingElement: failed to set up proxy", result["Message"])
        return result
    self.usercfg.ProxyPath(os.environ["X509_USER_PROXY"])

    # Split the DIRAC stamp off the pilot reference; it is mandatory
    # because it names the local stdout/stderr files.
    pilotRef = jobID
    stamp = ""
    if ":::" in jobID:
        pilotRef, stamp = jobID.split(":::")
    if not stamp:
        return S_ERROR("Pilot stamp not defined for %s" % pilotRef)

    job = self.__getARCJob(pilotRef)

    arcID = os.path.basename(pilotRef)
    self.log.debug("Retrieving pilot logs for %s" % pilotRef)
    if not workingDirectory:
        if "WorkingDirectory" in self.ceParameters:
            workingDirectory = os.path.join(self.ceParameters["WorkingDirectory"], arcID)
        else:
            workingDirectory = arcID
    outFileName = os.path.join(workingDirectory, "%s.out" % stamp)
    errFileName = os.path.join(workingDirectory, "%s.err" % stamp)
    self.log.debug("Working directory for pilot output %s" % workingDirectory)

    # Last argument allows downloading the outputs even if workingDirectory already exists
    retrievedOK = job.Retrieve(self.usercfg, arc.URL(str(workingDirectory)), True)
    if not retrievedOK:
        job.Update()
        arcState = job.State.GetGeneralState()
        if arcState != "Undefined":
            return S_ERROR(
                "Failed to retrieve output for %s as job is not finished (maybe not started yet)" % jobID
            )
        self.log.debug(
            "Could not retrieve pilot output for %s - either permission / proxy error or could not connect to CE"
            % pilotRef
        )
        return S_ERROR("Failed to retrieve output for %s" % jobID)

    output = None
    error = None
    try:
        with open(outFileName, "r") as outFile:
            output = outFile.read()
        os.unlink(outFileName)
        with open(errFileName, "r") as errFile:
            error = errFile.read()
        os.unlink(errFileName)
    except IOError as e:
        self.log.error("Error downloading outputs", repr(e).replace(",)", ")"))
        return S_ERROR("Error downloading outputs")
    self.log.debug("Pilot output = %s" % output)
    self.log.debug("Pilot error = %s" % error)
    return S_OK((output, error))
| DIRACGrid/DIRAC | src/DIRAC/Resources/Computing/ARCComputingElement.py | Python | gpl-3.0 | 30,607 | [
"DIRAC"
] | 2195489d793c6d13e71d732de2594ee6021b239dadf1f07fe423405158ed9905 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest
import os
import warnings
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
    """Exercises BorgQueen assimilation and JSON (re)loading."""

    def test_get_data(self):
        # VASP parsing is noisy with deprecation warnings; silence them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            vasp_drone = VaspToComputedEntryDrone()
            self.queen = BorgQueen(vasp_drone, test_dir, 1)
            entries = self.queen.get_data()
            self.assertEqual(len(entries), 7)

    def test_load_data(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            vasp_drone = VaspToComputedEntryDrone()
            queen = BorgQueen(vasp_drone)
            queen.load_data(os.path.join(test_dir, "assimilated.json"))
            self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| czhengsci/pymatgen | pymatgen/apps/borg/tests/test_queen.py | Python | mit | 1,372 | [
"pymatgen"
] | e841a3e0c8cfd99931bc932925c5538436edcfaf29019b0f45a6e925b6c65082 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides templates which allow variable sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
__all__ = ["make_template"]
def make_template(name_, func_, create_scope_now_=False, unique_name_=None,
                  custom_getter_=None, **kwargs):
  """Wrap `func_` so that repeated calls share the variables it creates.

  The returned callable is a `Template`: its first invocation enters a
  fresh variable scope and runs `func_`, creating its variables; every
  later invocation re-enters that scope with reuse enabled, so the same
  variables are picked up again. For this to work, `func_` must:

  * create all trainable/shared variables via `tf.get_variable` — a
    trainable variable created with `tf.Variable` raises a `ValueError`
    after the first call (locals may use `tf.Variable(..., trainable=False)`);
  * not capture outside variables via `tf.global_variables`;
  * keep its internal scope/variable names independent of arguments that
    are not supplied through `make_template` (otherwise a reuse
    `ValueError` is raised).

  In the following example both `z` and `w` are scaled by one shared `y`:

  ```python
  def my_op(x, scalar_name):
    var1 = tf.get_variable(scalar_name,
                           shape=[],
                           initializer=tf.constant_initializer(1))
    return x * var1

  scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')

  z = scale_by_y(input1)
  w = scale_by_y(input2)
  ```

  Two distinct templates always receive unique scopes — unless the caller
  re-enters an existing template's scope (e.g. `tf.variable_scope(vs,
  reuse=True)`) and redefines a template of the same name there, in which
  case the new template shares the first one's variables.

  Depending on `create_scope_now_`, the full variable scope is captured
  either at construction or at the first call. When True, Tensors created
  by repeated calls carry an extra trailing `_N+1` in their names, since
  the scope is first entered (empty) in the constructor.

  Note: `name_`, `func_` and `create_scope_now_` end with an underscore to
  reduce the likelihood of collisions with `kwargs`.

  Args:
    name_: A name for the scope created by this template. If necessary, the
      name will be made unique by appending `_N` to the name.
    func_: The function to wrap.
    create_scope_now_: Boolean controlling whether the scope is created at
      construction time or at first call. Defaults to False.
    unique_name_: When used, it overrides `name_` and is not made unique. If
      a template of the same scope/unique_name already exists and reuse is
      false, an error is raised. Defaults to None.
    custom_getter_: Optional custom getter for variables used in `func_`. See
      the @{tf.get_variable} `custom_getter` documentation for more
      information.
    **kwargs: Keyword arguments partially applied to `func_`.

  Returns:
    A `Template` callable that creates variables the first time it is called
    and reuses them on every subsequent call.

  Raises:
    ValueError: if the name is None.
  """
  wrapped = functools.partial(func_, **kwargs) if kwargs else func_
  return Template(name_,
                  wrapped,
                  create_scope_now=create_scope_now_,
                  unique_name=unique_name_,
                  custom_getter=custom_getter_)
def _skip_common_stack_elements(stacktrace, base_case):
"""Skips items that the target stacktrace shares with the base stacktrace."""
for i, (trace, base) in enumerate(zip(stacktrace, base_case)):
if trace != base:
return stacktrace[i:]
return stacktrace[-1:]
class Template(object):
  """Wrap a function to aid in variable sharing.

  Templates are functions that create variables the first time they are called
  and reuse them thereafter. See `make_template` for full documentation.

  Note: By default, the full variable scope is captured at the time of first
  call. If `create_scope_now_` is passed as True to the constructor, the full
  scope will be captured there, but no variables will created until the first
  call.
  """

  def __init__(self, name, func, create_scope_now=False, unique_name=None,
               custom_getter=None):
    """Creates a template for the given function.

    Args:
      name: A name for the scope created by this template. The
        name will be made unique by appending `_N` to the it (see how
        `tf.variable_scope` treats the `default_name` for details).
      func: The function to apply each time.
      create_scope_now: Whether to create the scope at Template construction
        time, rather than first call. Defaults to false. Creating the scope at
        construction time may be more convenient if the template is to passed
        through much lower level code, and you want to be sure of the scope
        name without knowing exactly where it will be first called. If set to
        True, the scope will be created in the constructor, and all subsequent
        times in __call__, leading to a trailing numeral being added to the
        names of all created Tensors. If set to False, the scope will be created
        at the first call location.
      unique_name: When used, it overrides name_ and is not made unique. If a
        template of the same scope/unique_name already exists and reuse is
        false, an error is raised. Defaults to None.
      custom_getter: optional custom getter to pass to variable_scope()

    Raises:
      ValueError: if the name is None.
    """
    self._func = func
    # Stack trace at construction time, minus the Template-internal frames;
    # it is appended to exception messages so errors point at the definition
    # site of the template (see _call_func).
    self._stacktrace = traceback.format_stack()[:-2]
    self._name = name
    self._unique_name = unique_name
    self._custom_getter = custom_getter
    if name is None:
      raise ValueError("name cannot be None.")
    if create_scope_now:
      with variable_scope._pure_variable_scope(  # pylint:disable=protected-access
          (self._unique_name or
           variable_scope._get_unique_variable_scope(self._name)),  # pylint:disable=protected-access
          custom_getter=self._custom_getter) as vs:
        self._variable_scope = vs
    else:
      self._variable_scope = None
    # This variable keeps track of whether the template has been called yet,
    # which is not the same as whether the scope has been created.
    self._variables_created = False

  def _call_func(self, args, kwargs, check_for_new_variables):
    """Runs `self._func` and sanity-checks the variables it created."""
    try:
      vars_at_start = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
      trainable_at_start = len(
          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

      result = self._func(*args, **kwargs)
      if check_for_new_variables:
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        # If a variable that we intend to train is created as a side effect
        # of creating a template, then that is almost certainly an error.
        if trainable_at_start != len(trainable_variables):
          raise ValueError("Trainable variable created when calling a template "
                           "after the first time, perhaps you used tf.Variable "
                           "when you meant tf.get_variable: %s" %
                           (trainable_variables[trainable_at_start:],))

        # Non-trainable tracking variables are a legitimate reason why a new
        # variable would be created, but it is a relatively advanced use-case,
        # so log it.
        variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        if vars_at_start != len(variables):
          logging.info("New variables created when calling a template after "
                       "the first time, perhaps you used tf.Variable when you "
                       "meant tf.get_variable: %s",
                       variables[vars_at_start:])
      return result
    except Exception as exc:
      # Reraise the exception, but append the original definition to the
      # trace.
      args = exc.args
      if not args:
        arg0 = ""
      else:
        arg0 = args[0]
      trace = "".join(_skip_common_stack_elements(self._stacktrace,
                                                  traceback.format_stack()))
      arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
      new_args = [arg0]
      new_args.extend(args[1:])
      exc.args = tuple(new_args)
      raise

  def __call__(self, *args, **kwargs):
    # Enter the template's variable scope — with reuse on every call after
    # the first — and delegate the actual work to _call_func.
    if self._variable_scope:
      if self._variables_created:
        # This is not the first visit to __call__, so variables have already
        # been created, and we want to reuse them.
        with variable_scope.variable_scope(self._variable_scope, reuse=True):
          return self._call_func(args, kwargs, check_for_new_variables=True)
      else:
        # This is the first visit to __call__, but the scope has already been
        # created in the constructor. Set _variables_created so that subsequent
        # calls take the if branch above.
        self._variables_created = True
        with variable_scope.variable_scope(self._variable_scope):
          return self._call_func(args, kwargs, check_for_new_variables=False)
    else:
      # The scope was not created at construction time, so create it here.
      # Subsequent calls should reuse variables.
      self._variables_created = True
      with variable_scope.variable_scope(
          self._unique_name, self._name,
          custom_getter=self._custom_getter) as vs:
        self._variable_scope = vs
        return self._call_func(args, kwargs, check_for_new_variables=False)

  @property
  def variable_scope(self):
    """Returns the variable scope object created by this Template."""
    return self._variable_scope

  @property
  @deprecated(
      "2017-02-21", "The .var_scope property is deprecated. Please change your "
      "code to use the .variable_scope property")
  def var_scope(self):
    """Returns the variable scope object created by this Template."""
    return self._variable_scope
| strint/tensorflow | tensorflow/python/ops/template.py | Python | apache-2.0 | 12,917 | [
"VisIt"
] | 3437cae55a86f29652b9a92eaf7809974a937e49bd5122e67d9aa20d108babef |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file tls_vissimXML2SUMOnet_update.py
@author Lukas Grohmann <Lukas.Grohmann@ait.ac.at>
@author Gerald Richter <Gerald.Richter@ait.ac.at>
@date Jun 11 2015
@version $Id: tls_vissimXML2SUMOnet_update.py 22608 2017-01-17 06:28:54Z behrisch $
Converts a VISSIM-tls-description into a SUMO-tls-description and writes
the appended information to a copy of the given sumo.net file
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
# TODO: usage doc.ref
from __future__ import print_function
from __future__ import absolute_import
import os
import argparse
from xml.dom import minidom
from copy import deepcopy
import numpy as np
def dict_from_node_attributes(node):
    """Return all XML attributes of *node* as a plain {name: value} dict."""
    return {attr_name: node.getAttribute(attr_name)
            for attr_name in node.attributes.keys()}
def nparr_from_dict_list(dicl_tab, col_ns, col_ts):
    """Convert a list of row-dicts into a numpy structured array.

    `col_ns` lists the field names, `col_ts` the matching numpy type
    codes; keys missing from a row default to the string '0'.
    """
    row_dtype = np.dtype(list(zip(col_ns, col_ts)))
    rows = [tuple(row.get(name, '0') for name in col_ns) for row in dicl_tab]
    return np.array(rows, dtype=row_dtype)
def get_conn_verb_rel(conn_tab, from_to_tab):
    """Map each VISSIM Verbinder (connector) id to its SUMO connection ids.

    Matches every non-internal SUMO connection (whose 'from' edge does not
    contain ':') against the Verbinder end-point table by from/to edge and
    collects the connection 'via' ids per Verbinder. Connections without a
    matching Verbinder are only reported on stdout.
    """
    conn_link_d = {}  # key = verbinder.id, value = list<connection.id>
    for conn in conn_tab:
        if ':' in conn['from']:
            continue
        from_edge = conn['from'].split("[")[0]
        to_edge = conn['to'].split("[")[0]
        link = from_to_tab[(from_to_tab['f_link'] == from_edge)
                           & (from_to_tab['t_link'] == to_edge)]
        if len(link) > 0:
            conn_link_d.setdefault(link["_link"][0], []).append(conn['via'])
        else:
            print("from: " + conn['from'] + "to: " + conn['to'])
    return conn_link_d
def parse_vissim_net_data(inpxdoc):
    """Extract link and connector-endpoint data from a parsed VISSIM .inpx DOM.

    Returns two numpy structured arrays:
      link_tab    -- one row per <link> with fields 'no' and 'name'
      from_to_tab -- one row per connector link, with the connector id
                     ('_link') and its from/to edge and lane references
    """
    link_tab = []
    from_to_tab = []
    for lin in inpxdoc.getElementsByTagName('link'):
        link_d = dict_from_node_attributes(lin)
        link_tab.append(link_d)
        if lin.hasChildNodes():
            lep_d = {}  # link end point dict
            for ept in ('fromLinkEndPt', 'toLinkEndPt'):
                lep_nd = lin.getElementsByTagName(ept)
                ch0 = ept[0]  # identifier 'f'rom / 't'o
                if len(lep_nd) > 0:
                    dic = dict_from_node_attributes(lep_nd.item(0))
                    # 'lane' holds "link lane"; split into separate fields
                    dic['link'], dic['lane'] = dic['lane'].split(' ')
                    lep_d.update(dict((ch0 + '_' + key, value)
                                      for key, value in dic.items()))
            lep_d.update({'_link': link_d['no'], })
            from_to_tab.append(lep_d)
    # which columns to pick ?
    link_tab = nparr_from_dict_list(link_tab, 'no name'.split(), ('O',) * 2)
    # lane_tab = just the number of lanes and width
    from_to_tab = nparr_from_dict_list(
        from_to_tab,
        '_link f_link f_lane t_link t_lane'.split(),
        'O O i O i'.split())
    return link_tab, from_to_tab
def parse_sumo_net_data(sumodoc):
    """Extract junction and connection records from a parsed sumo.net DOM.

    Returns two numpy structured tables: one row per <junction> and one
    row per <connection>, keeping only the listed attributes.
    """
    junction_cols = ('id', 'type', 'x', 'y', 'incLanes', 'intLanes')
    junction_dicts = [dict_from_node_attributes(node)
                      for node in sumodoc.getElementsByTagName('junction')]
    junc_tab = nparr_from_dict_list(junction_dicts, junction_cols,
                                    ('O',) * len(junction_cols))

    connection_cols = ('from', 'to', 'fromLane', 'toLane', 'via')
    connection_dicts = [dict_from_node_attributes(node)
                        for node in sumodoc.getElementsByTagName('connection')]
    conn_tab = nparr_from_dict_list(connection_dicts, connection_cols,
                                    ('O',) * len(connection_cols))
    return junc_tab, conn_tab
def compute_signal_tables(disp_name_id_d, signal_state_d, prog_list):
    """
    Completes the signal tables with all duration and begin times.
    (In the VISSIM .sig files only the begin times of the red and green
    phases and the durations of the other phases are given; the missing
    entries, preset to -1, are derived here modulo the cycle time.)
    Each per-group table row is [display_id, begin, duration]; the result
    is stored, sorted by begin time, as sig_group["signal_table"].
    """
    for key, program in signal_state_d.items():
        cycletime = int([prog for prog in prog_list
                         if prog["id"] == key][0]["cycletime"])
        for sig_group in program.values():
            sig_seq = sig_group["signal_sequence"]
            sig_tab = deepcopy(disp_name_id_d[sig_seq])
            # fill in the known phase durations
            for state in sig_group["durations"]:
                itemindex = np.where(sig_tab == int(state["display"]))
                sig_tab[itemindex[0][0]][itemindex[1][0] + 2] \
                    = int(state["duration"])
            # fill in the known phase begin times
            for cmd in sig_group["begins"]:
                itemindex = np.where(sig_tab == int(cmd["display"]))
                sig_tab[itemindex[0][0]][itemindex[1][0] + 1] \
                    = int(cmd["begin"])
            # derive the missing begin/duration entries, walking backwards
            # starting from an arbitrary row with a known begin time
            i = itemindex[0][0]
            check = 0
            while check != len(sig_tab):
                if sig_tab[i - 1][1] == -1:  # duration known, begin missing
                    # wrap-around over the cycle time
                    if (sig_tab[i][1] - sig_tab[i - 1][2]) < 0:
                        sig_tab[i - 1][1] = cycletime - \
                            (sig_tab[i - 1][2] - sig_tab[i][1])
                    else:
                        sig_tab[i - 1][1] = sig_tab[i][1] - sig_tab[i - 1][2]
                elif sig_tab[i - 1][2] == -1:  # begin known, duration missing
                    if sig_tab[i - 1][1] > sig_tab[i][1]:  # cycle wrap-around
                        sig_tab[i - 1][2] = \
                            (cycletime - sig_tab[i - 1][1]) + sig_tab[i][1]
                    else:
                        sig_tab[i - 1][2] = sig_tab[i][1] - sig_tab[i - 1][1]
                i -= 1
                check += 1
            # split a phase that crosses the cycle boundary into two rows:
            # the tail until cycle end and a new row starting at time 0
            i = 0
            while i < len(sig_tab):
                if (sig_tab[i][1] + sig_tab[i][2]) > cycletime:
                    diff = cycletime - sig_tab[i][1]
                    dur = sig_tab[i][2]
                    sig_tab[i][2] = diff
                    sig_tab = np.insert(
                        sig_tab, i, np.array(
                            (sig_tab[i][0], 0, dur - diff)), 0)
                    break
                i += 1
            sig_tab = sig_tab[np.argsort(sig_tab[:, 1])]
            sig_group["signal_table"] = sig_tab
def sigtable_split_time(signal_state_d, prog_list):
    """For each signal program, collect the sorted unique phase begin times
    over all of its signal groups plus the duration of each resulting
    interval within the program's cycle time.

    Returns {program_id: {"begin": int array, "duration": int array}}.
    """
    reference_time_d = {}
    for prog_id, program in signal_state_d.items():
        matching = [prog for prog in prog_list if prog["id"] == prog_id]
        cycletime = int(matching[0]["cycletime"])

        begins = np.array([], dtype="int")
        for group in program.values():
            begins = np.append(begins, group["signal_table"][:, 1])
        begins = np.unique(begins)

        durations = np.array([], dtype="int")
        for k, begin_time in enumerate(begins):
            # the last interval runs to the end of the cycle
            next_time = cycletime if k == len(begins) - 1 else begins[k + 1]
            durations = np.append(durations, next_time - begin_time)

        reference_time_d[prog_id] = {"begin": begins, "duration": durations}
    return reference_time_d
def compute_sumo_signal_tables(reference_time_d,
                               signal_state_d,
                               sig_disp_list,
                               tls_state_vissim2SUMO):
    """Build the SUMO state string for every program / signal group.

    For each global switch time in reference_time_d, the display that is
    active at that time in the group's signal_table ([display, begin,
    duration] rows) is looked up, its 'state' is translated through
    tls_state_vissim2SUMO, and the fragments are concatenated into
    sg["sumo_signal_table"]. Times not covered by any row are silently
    skipped (no fragment is emitted for them).
    """
    for key, program in signal_state_d.items():
        for sg in program.values():
            state = sg["signal_table"]
            ref_time = reference_time_d[key]["begin"]
            sumo_tab = ""
            for time in ref_time:
                i = 0
                while i < len(state):
                    # row is active if begin <= time < begin + duration
                    if state[i][1] <= time < state[i][1] + state[i][2]:
                        sumo_state = tls_state_vissim2SUMO[
                            [sig for sig in sig_disp_list if
                             sig["id"] == str(state[i][0])][0]["state"]]
                        sumo_tab = "".join([sumo_tab, sumo_state])
                        break
                    i += 1
            sg["sumo_signal_table"] = sumo_tab
def get_sigcon_junc_relation(sig_con_tab, sig_group_conn_d, junc_tab):
    """
    allocates the VISSIM signalcontrollers to SUMO junctions

    A controller is matched to the first junction whose internal lanes
    ('intLanes') overlap the controller's connection ids; controllers
    without any match are skipped. Returns {controller_no: junction_id}.
    """
    sigCon_junc_d = {}
    for sig_con in sig_con_tab:
        # gather all SUMO connection ids controlled by this controller
        conn_l = []
        for sg in sig_con["_sgs"]:
            if sg["_sg"] in sig_group_conn_d:
                conn_l += sig_group_conn_d[sg["_sg"]]
            else:
                continue
        # intersection of controller connections with junction intLanes
        junction = [
            junc for junc in junc_tab if len(
                set(conn_l).intersection(
                    junc['intLanes'].split(" "))) > 0]
        if len(junction) > 0:
            junction = junction[0]
        else:
            continue
        sigCon_junc_d[sig_con["no"]] = junction["id"]
    return sigCon_junc_d
def get_sigseq_id_list(sig_seq_tab, sig_disp_list):
    """Resolve every signal sequence's dash-separated display names into
    display ids.

    Returns {sequence_id: N x 3 int array}, one row [display_id, -1, -1]
    per display in the sequence; the -1 columns are placeholders for the
    begin time and duration filled in later.
    """
    disp_name_id_d = {}
    for seq in sig_seq_tab:
        rows = []
        for disp_name in seq["name"].split("-"):
            matches = [disp for disp in sig_disp_list
                       if disp["name"] == disp_name]
            rows.append([matches[0]["id"], -1, -1])
        disp_name_id_d[seq["id"]] = np.array(rows, dtype="int")
    return disp_name_id_d
def get_sg_connection_data(
        conn_tab,
        sig_con_tab,
        sig_head_d,
        edge_list,
        conn_link_d):
    """Collect, per VISSIM signal group, the SUMO connection ids ('via')
    that its signal heads control.

    Returns {"<controller_no> <group_no>": [via, ...]}. Groups without a
    signal head are only reported on stdout.

    NOTE(review): this function reads the module-level global
    `is_verbinder_d`, which is not passed as a parameter — it must be
    populated (via is_verbinder()) before calling; verify at the call site.
    """
    sig_group_conn_d = {}  # dic [sigCon ID] = List <[conn via]>
    for con in sig_con_tab:
        for sg in con['_sgs']:
            # check if a signalHead exists for the given signalGroup
            if sg['_sg'] in sig_head_d:
                for signal in sig_head_d[sg['_sg']]:
                    link = signal['link']
                    # VISSIM lanes are 1-based, SUMO lanes 0-based
                    lane = str(int(signal['lane']) - 1)
                    # tls on normal edge or verbinder?
                    if is_verbinder_d[link] == False:
                        if link in edge_list:
                            connection = conn_tab[
                                (conn_tab["from"] == link) & (
                                    conn_tab["fromLane"] == lane)]
                        else:
                            # find the highest split suffix "link[N]" present
                            # in edge_list; the loop stops at the first N that
                            # is NOT an edge.
                            # NOTE(review): the lookup below then uses that
                            # non-existing name — looks off by one; confirm.
                            check = True
                            split_len = 0
                            while check:
                                if "".join(
                                        [link, "[", str(split_len), "]"]) \
                                        in edge_list:
                                    split_len += 1
                                else:
                                    check = False
                            print("".join([link, "[", str(split_len), "]"]))
                            # NOTE(review): trailing [0] yields a single
                            # record here, unlike the other branches which
                            # keep an array; iterating it below is suspect.
                            connection = conn_tab[(conn_tab["from"] == "".join(
                                [link, "[", str(split_len), "]"]))
                                & (conn_tab["fromLane"] == lane)][0]
                    else:
                        connection = conn_tab[
                            (conn_tab["via"] ==
                             [conn for conn in
                              conn_link_d[link] if conn[-1] == lane])]
                    if sg['_sg'] in sig_group_conn_d:
                        for conn in connection:
                            sig_group_conn_d[sg['_sg']].append(conn["via"])
                    else:
                        sig_group_conn_d[sg['_sg']] = []
                        for conn in connection:
                            sig_group_conn_d[sg['_sg']].append(conn["via"])
            else:
                print(sg['_sg'])
    return sig_group_conn_d
def parse_sig_file(sig_file):
    """Parse a VISSIM .sig signal-controller file.

    Returns a 4-tuple:
      sig_seq_tab    -- list of signal-sequence attribute dicts
      signal_state_d -- {prog_id: {sg_id: {"signal_sequence": ...,
                        "begins": [...], "durations": [...]}}}
      sig_disp_list  -- list of display attribute dicts (with '_sc_id')
      prog_list      -- list of program attribute dicts, including a
                        synthetic default program with id '0'
    """
    xmldoc = minidom.parse(sig_file)
    print('\n---\n\n* loading signal file:\n\t', sig_file)
    # just getting single head node
    sc_node = xmldoc.getElementsByTagName('sc').item(0)
    sc_id = sc_node.getAttribute('id')
    # get the signal displays; should be just 1 node
    sig_disp_nodes = sc_node.getElementsByTagName('signaldisplays')
    display_nodes = sig_disp_nodes.item(0).getElementsByTagName('display')
    # build for single current signal
    sig_disp_list = [dict_from_node_attributes(disp) for disp in display_nodes]
    [sd.update({'_sc_id': sc_id}) for sd in sig_disp_list]
    # signalsequences
    sig_seq_tab = []
    # sigStat_tab needed for default program
    sigStat_tab = []
    for sig_seq in sc_node.getElementsByTagName('signalsequence'):
        sig_seq_d = dict_from_node_attributes(sig_seq)
        sig_seq_tab.append(sig_seq_d)
        sig_state_l = [dict_from_node_attributes(sst) for
                       sst in sig_seq.getElementsByTagName('state')]
        [sst.update({'_sigSeq_id': sig_seq_d['id']}) for sst in sig_state_l]
        sigStat_tab.extend(sig_state_l)
    sgroup_list = []
    # holds defaultDurations, fixedstates, cmds
    prog_list = []
    # dict[prog_id][signal_id]
    # <signal_sequence>
    # <begins>
    # <durations>
    signal_state_d = {}
    # reading default program; should be just 1 node
    sgs_list = sc_node.getElementsByTagName('sgs')
    prog_id = '0'  # unsaved
    prog_d = dict((('id', prog_id), ))
    prog_list.append(prog_d)
    # read the default program's signal groups
    for sg in sgs_list.item(0).getElementsByTagName('sg'):
        sg_d = dict_from_node_attributes(sg)
        sg_d.update({'_prog_id': prog_id, })
        sgroup_list.append(sg_d)
    # read the remaining signal programs
    progs_node = sc_node.getElementsByTagName('progs').item(0)
    for prog_node in progs_node.getElementsByTagName('prog'):
        prog_d = dict_from_node_attributes(prog_node)
        prog_list.append(prog_d)
        prog_id = prog_d['id']
        signal_state_d[prog_id] = {}
        sg_nl = prog_node.getElementsByTagName(
            'sgs').item(0).getElementsByTagName('sg')
        for sg in sg_nl:
            sg_d = dict_from_node_attributes(sg)
            signal_state_d[prog_id][sg_d["sg_id"]] = {}
            signal_state_d[prog_id][sg_d["sg_id"]][
                "signal_sequence"] = sg_d["signal_sequence"]
            signal_state_d[prog_id][sg_d["sg_id"]]["begins"] = []
            signal_state_d[prog_id][sg_d["sg_id"]]["durations"] = []
            sg_d.update({'_prog_id': prog_id, })
            sgroup_list.append(sg_d)
            # fixedstates
            for fixStat in sg.getElementsByTagName('fixedstates').item(0).\
                    getElementsByTagName('fixedstate'):
                fixst = dict_from_node_attributes(fixStat)
                signal_state_d[prog_id][sg_d["sg_id"]][
                    "durations"].append(fixst)
            # cmds
            for cmd_node in sg.getElementsByTagName('cmds').item(0).\
                    getElementsByTagName('cmd'):
                cmd_d = dict_from_node_attributes(cmd_node)
                signal_state_d[prog_id][sg_d["sg_id"]]["begins"].append(cmd_d)
    return sig_seq_tab, signal_state_d, sig_disp_list, prog_list
def parse_inpx_sig_data(xmldoc):
    """parses the signal data from the .inpx file

    Returns:
      sig_controller_tab -- list of <signalController> attribute dicts,
                            each with '_sgs': its signal groups, keyed by
                            the combined "<controller_no> <group_no>" id
      sig_head_d         -- {"<controller_no> <group_no>": [signalHead
                            dicts]} with the 'lane' attribute split into
                            separate 'link' and 'lane' fields
    """
    sig_controller_tab = []
    sig_head_d = dict()
    for controller in xmldoc.getElementsByTagName('signalController'):
        controller_d = dict_from_node_attributes(controller)
        sgs_l = [dict_from_node_attributes(sgn) for
                 sgn in controller.getElementsByTagName('signalGroup')]
        for sg in sgs_l:
            # combined key "<controller_no> <group_no>"
            sg['_sg'] = " ".join([controller.getAttribute('no'), sg['no']])
        controller_d['_sgs'] = sgs_l
        sig_controller_tab.append(controller_d)
    # parse signalHeads
    for s_head_item in xmldoc.getElementsByTagName('signalHead'):
        sig_head = dict_from_node_attributes(s_head_item)
        # the 'lane' attribute holds "link lane"
        sig_head['link'], sig_head['lane'] = sig_head['lane'].split(" ")
        # temp = sHead.getAttribute('lane').split(" ")  # "link lane"
        if sig_head['sg'] in sig_head_d:
            sig_head_d[sig_head['sg']].append(sig_head)
        else:
            sig_head_d[sig_head['sg']] = [sig_head]
    return sig_controller_tab, sig_head_d
def edit_connections(conn_l, sumodoc, junc_id):
    """Attach traffic-light attributes to SUMO <connection> elements.

    conn_l is a list of lists of connection 'via' ids, one inner list per
    tls link index; every matching connection gets state "o", its link
    index, and the controlling tls id. Raises IndexError if a via id has
    no matching <connection> element.
    """
    # minidom re-traverses the tree on every getElementsByTagName call;
    # fetch the node list once since we only modify attributes.
    connections = sumodoc.getElementsByTagName("connection")
    for link_index, via_ids in enumerate(conn_l):
        for via in via_ids:
            connection = [conn for conn in connections
                          if conn.getAttribute("via") == via][0]
            connection.setAttribute("state", "o")  # CHECK
            connection.setAttribute("linkIndex", str(link_index))
            connection.setAttribute("tl", junc_id)
def is_verbinder(xmldoc):
    """Classify every VISSIM link as connector ("Verbinder") or normal edge.

    A link is a Verbinder iff it contains a <fromLinkEndPt> child.
    Returns {link_no: bool}.
    """
    return {
        link.getAttribute("no"): bool(link.getElementsByTagName("fromLinkEndPt"))
        for link in xmldoc.getElementsByTagName("link")
    }
def generate_xml_doc(
        sumo_tls_d, sigCon_junc_d,
        sig_con_tab, reference_time_d,
        sumodoc, prog_list_d, sig_group_conn_d):
    """Write tlLogic / WAUT / wautJunction elements for every signal
    controller into the SUMO net DOM (sumodoc is modified in place).

    sumo_tls_d -- [tls id][program id][signal group id] signal tables
    sigCon_junc_d -- signal controller id -> SUMO junction id
    sig_con_tab -- list of signal controller attribute dicts
    reference_time_d -- per tls/program phase durations
    sumodoc -- SUMO net DOM document
    prog_list_d -- per tls id: list of program dicts (with id/name)
    sig_group_conn_d -- "tls_id sg_id" -> list of connection via-ids
    """
    for tls_id, programs in sumo_tls_d.items():
        junc_id = sigCon_junc_d[tls_id]
        # program that is active by default at this controller
        default_prog_id = [
            sig for sig in sig_con_tab if sig["no"] == tls_id][0]["progNo"]
        for prog_id, program in programs.items():
            signal_table = []
            for sg_id, sg in program.items():
                # keep only groups that actually control a connection
                if " ".join([tls_id, sg_id]) in sig_group_conn_d:
                    signal_table.append([sg_id, sg["sumo_signal_table"]])
            signal_table = np.array(signal_table)
            # sort rows numerically by signal group id
            signal_table = signal_table[
                signal_table[:, 0].astype("int").argsort()]
            sg_id_l = signal_table[:, 0]
            conn_l = []
            for s_id in sg_id_l:
                conn_l.append(sig_group_conn_d[" ".join([tls_id, s_id])])
            # drop the id column, leaving the per-group state strings
            signal_table = np.delete(signal_table, 0, 1)
            signal_table = np.ravel(signal_table)
            # transpose: build one state string per phase from the
            # per-group state strings
            state_l = []
            i = 0
            while i < len(signal_table[0]):
                j = 0
                duration = []
                while j < len(signal_table):
                    duration.append(signal_table[j][i])
                    j += 1
                state_l.append("".join(duration))
                i += 1
            duration_l = reference_time_d[tls_id][
                prog_id]["duration"]
            # edit net file
            junction = [junc for junc in sumodoc.getElementsByTagName(
                "junction") if junc.getAttribute("id") == junc_id][0]
            junction.setAttribute("type", "traffic_light")
            net = sumodoc.getElementsByTagName("net")[0]
            edit_connections(conn_l, sumodoc, junc_id)
            tl_logic = sumodoc.createElement("tlLogic")
            tl_logic.setAttribute("id", junc_id)
            tl_logic.setAttribute("type", "static")
            tl_logic.setAttribute("programID",
                                  [prog for prog in prog_list_d[tls_id]
                                   if prog["id"] == prog_id][0]["name"])
            tl_logic.setAttribute("offset", "0.00")
            net.insertBefore(tl_logic, junction)
            for state, duration in zip(state_l, duration_l):
                phase = sumodoc.createElement("phase")
                # duration appears to be in milliseconds, converted to
                # seconds here -- TODO confirm against reference_time_d
                phase.setAttribute("duration", str(duration / 1000))
                phase.setAttribute("state", state)
                tl_logic.appendChild(phase)
        # create WAUT
        # NOTE(review): 'net' and 'junction' are bound inside the
        # program loop above, so the WAUT elements are inserted
        # relative to the junction of the last iterated program
        waut = sumodoc.createElement("WAUT")
        waut.setAttribute("startProg",
                          [prog for prog in prog_list_d[tls_id]
                           if prog["id"] == default_prog_id][0]["name"])
        waut.setAttribute("refTime", "100")
        waut.setAttribute("id", "".join(["w", tls_id]))
        # root.appendChild(WAUT)
        net.insertBefore(waut, junction)
        # create waut junction
        waut_junc = sumodoc.createElement("wautJunction")
        waut_junc.setAttribute("junctionID", junc_id)
        waut_junc.setAttribute("wautID", "".join(["w", tls_id]))
        # root.appendChild(wautJunction)
        net.insertBefore(waut_junc, junction)
# global signal color translation definition:
# maps VISSIM signal state names to SUMO single-character link states
tls_state_vissim2SUMO = {'RED': 'r',
                         'REDAMBER': 'u',
                         'GREEN': 'g',
                         'AMBER': 'y',
                         # this should be different: like in SUMO 'o', 'O'
                         'FLASHING_GREEN': 'g',
                         'OFF': 'O'}
# MAIN
# Pipeline: load VISSIM .inpx net + SUMO net, relate signal groups to
# SUMO connections/junctions, parse the controllers' .sig files and
# write the resulting tlLogic/WAUT elements into a new SUMO net file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='TLS conversion utility (VISSIM.inpx to SUMO)')
    parser.add_argument('--vissim-input',
                        '-V', type=str,
                        help='VISSIM inpx file path')
    parser.add_argument('--SUMO-net', '-S', type=str,
                        help='SUMO net file path')
    parser.add_argument('--output-file', '-o', type=str,
                        help='output file name')
    args = parser.parse_args()
    print("\n", args, "\n")
    print('\n---\n\n* loading VISSIM net:\n\t', args.vissim_input)
    xmldoc = minidom.parse(args.vissim_input)
    print('\n---\n\n* loading SUMO net:\n\t', args.SUMO_net,)
    sumodoc = minidom.parse(args.SUMO_net)
    edge_list = []
    for edge in sumodoc.getElementsByTagName('edge'):
        # is it a normal edge ? (internal edges carry a "function"
        # attribute and are skipped)
        if not edge.hasAttribute("function"):
            edge_list.append(edge.getAttribute("id"))
    # INPX read
    sig_con_tab, sig_head_d = parse_inpx_sig_data(xmldoc)
    link_tab, from_to_tab = parse_vissim_net_data(xmldoc)
    is_verbinder_d = is_verbinder(xmldoc)
    # SUMO NET read
    junc_tab, conn_tab = parse_sumo_net_data(sumodoc)
    conn_link_d = get_conn_verb_rel(conn_tab, from_to_tab)
    # get the connections for every signal group
    sig_group_conn_d = get_sg_connection_data(conn_tab,
                                              sig_con_tab,
                                              sig_head_d,
                                              edge_list,
                                              conn_link_d)
    # related junction id for a given Signal Controller
    sigCon_junc_d = get_sigcon_junc_relation(sig_con_tab,
                                             sig_group_conn_d,
                                             junc_tab)
    # pick all the .sig files from the signalControllers
    sig_files = set(sc[att] for sc in sig_con_tab for att in
                    sc.keys() if 'supplyFile' in att and '.sig' in sc[att])
    # sig_files = ['TestsiteGraz_v01301.sig'] # DEBUG, just 1 file
    reference_time_d = {}
    sumo_tls_d = {}
    prog_list_d = {}
    for sig_file in sig_files:
        # .sig files are referenced relative to the .inpx location
        sig_file = os.path.join(os.path.dirname(args.vissim_input), sig_file)
        sig_seq_tab = []
        signal_state_d = {}
        sig_disp_list = []
        disp_name_id_d = {}
        # parse .sig files
        # NOTE: tuple targets are assigned left to right, so
        # sig_disp_list already holds the parsed value by the time the
        # prog_list_d subscript key is evaluated
        sig_seq_tab, signal_state_d, sig_disp_list, \
            prog_list_d[sig_disp_list[0]["_sc_id"]] = parse_sig_file(sig_file)
        tls_id = sig_disp_list[0]["_sc_id"]
        # returns a numpy array with the reference signal Sequence table
        # format: display_id || begin_time || duration
        disp_name_id_d = get_sigseq_id_list(sig_seq_tab, sig_disp_list)
        compute_signal_tables(
            disp_name_id_d, signal_state_d, prog_list_d[tls_id])
        # reference time and duration for every signal program
        # times need to be split, to convert the sig table from VISSIM to SUMO
        reference_time_d[tls_id] = sigtable_split_time(
            signal_state_d, prog_list_d[tls_id])
        compute_sumo_signal_tables(reference_time_d[tls_id],
                                   signal_state_d,
                                   sig_disp_list,
                                   tls_state_vissim2SUMO)
        # Format: [tls id][signal program id][signal group index]
        sumo_tls_d[tls_id] = signal_state_d
    generate_xml_doc(
        sumo_tls_d, sigCon_junc_d, sig_con_tab,
        reference_time_d, sumodoc, prog_list_d, sig_group_conn_d)
    with open("%s.net.xml" % args.output_file, "w") as ofh:
        sumodoc.writexml(ofh, addindent='    ', newl='\n')
        # NOTE(review): close() is redundant inside the with block
        ofh.close()
| 702nADOS/sumo | tools/import/vissim/tls_vissimXML2SUMOnet_update.py | Python | gpl-3.0 | 24,957 | [
"Amber"
] | 268427fe5ac6748b2ba8b5a73081368119b577a071022770f034758ab234f9ec |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#******************************************************************************
#*
#* Copyright (C) 2015 Kiran Karra <kiran.karra@gmail.com>
#*
#* This program is free software: you can redistribute it and/or modify
#* it under the terms of the GNU General Public License as published by
#* the Free Software Foundation, either version 3 of the License, or
#* (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program. If not, see <http://www.gnu.org/licenses/>.
#******************************************************************************
import math
import numpy as np
from scipy.interpolate import interp1d
"""
e_cdf.py contains routines which help perform empirical CDF Estimation.
"""
def ecdf(x_i, npoints):
    """ Generates an Empirical CDF using the indicator function.

    Inputs:
    x_i -- the input data set, should be a numpy array (1-D, or a
           column vector which is flattened)
    npoints -- the number of desired points in the empirical CDF estimate

    Outputs:
    (x, y) -- the evaluation grid and the empirical CDF on that grid,
              both 1-D arrays of length npoints
    """
    # evaluation grid spanning the data range; np.min/np.max yield
    # scalars even for column-vector input, keeping the output 1-D
    x = np.linspace(float(np.min(x_i)), float(np.max(x_i)), npoints)
    n = float(x_i.size)
    # count of samples <= x[k] via binary search on the sorted data:
    # O((n+m) log n) instead of the original O(n*m) indicator loop
    sorted_x_i = np.sort(x_i, axis=None)
    y = np.searchsorted(sorted_x_i, x, side='right') / n
    return (x, y)
def kde_integral(kde):
    """ Generates a "smoother" Empirical CDF by integrating the KDE.

    The caller first builds the kernel density estimate (see kde.py)
    and passes it here; the running sum of the density, normalized by
    its total mass, is the smoothed CDF estimate.

    Inputs:
    kde -- the kernel density estimate

    Outputs:
    y -- the smoothed CDF estimate
    """
    total_mass = sum(kde)
    running_sum = np.cumsum(kde)
    return running_sum / total_mass
def probability_integral_transform(X):
    """Map each column of X to (approximately) uniform marginals.

    For every column an empirical CDF F is estimated and the column is
    replaced by U = F(X), the probability integral transform.

    Inputs:
    X -- data array of dimension [M x N] (M samples, N variables)

    Outputs:
    U -- array of the same shape with transformed columns
    """
    n_samples, n_vars = X.shape[0], X.shape[1]
    U = np.empty(X.shape)
    for col in range(n_vars):
        sample = X[:, col]
        # empirical CDF evaluated on a grid spanning this column
        grid, cdf_vals = ecdf(sample, n_samples)
        # interpolate the CDF and evaluate it at the sample points
        # (linear interpolation; other kinds could be explored)
        to_uniform = interp1d(grid, cdf_vals)
        U[:, col] = to_uniform(sample)
    return U
if __name__=='__main__':
    # Demo / visual test: plot KDE, histogram, empirical CDF and the
    # smoothed CDF for a two-component Gaussian mixture, then check the
    # probability integral transform on Gaussian and exponential samples.
    import matplotlib.pyplot as plt
    import sys
    import kde
    from scipy.stats import norm
    from scipy.stats import expon
    # test the E_CDF estimation
    N1 = 100    # number of data in data set 1
    m1 = -1     # mean value
    s1 = 0.1    # % variance
    N2 = 500    # number of data in data set 2
    m2 = 2      # mean value
    s2 = 0.5    # variance
    h = 0.1     # bandwidth
    npoints = 100   # number of abscis points in kde
    x1 = math.sqrt(s1)*np.random.randn(N1,1) + m1
    x2 = math.sqrt(s2)*np.random.randn(N2,1) + m2
    x = np.concatenate((x1,x2),axis=0)
    # Kernel Density Estimate
    (xx,kde_estimate) = kde.kde(x,'Gaussian',h, npoints)
    plt.plot(xx,kde_estimate, 'r', label='Kernel Density Estimate')
    # the histogram of the data
    n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75, label='Histogram')
    # empirical CDF
    (xx,pp) = ecdf(x, npoints)
    plt.plot(xx,pp, 'k', label='Empirical CDF')
    # Smooth Empirical CDF (KDE Integral)
    # NOTE(review): this rebinding shadows the kde_integral() function;
    # a second call to it from this scope would fail
    kde_integral = kde_integral(kde_estimate)
    plt.plot(xx,kde_integral, 'm', label='Smooth Empirical CDF')
    plt.legend(loc='upper left')
    plt.show()
    # test the probability integral transform
    M = 100
    N = 2
    X = np.empty((M,N))
    X[:,0] = norm.rvs(size=M)
    X[:,1] = expon.rvs(size=M)
    U = probability_integral_transform(X)
    f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.hist(X[:,0])
    ax1.set_title('Guassian RV')
    ax2.hist(U[:,0])
    ax2.set_title('Gaussian Transformed to Uniform')
    ax3.hist(X[:,1])
    ax3.set_title('Exponential RV')
    ax4.hist(U[:,1])
    ax4.set_title('Exponential Transformed to Uniform')
    plt.show()
| kkarrancsu/copula-bayesian-networks | ecdf.py | Python | gpl-3.0 | 4,661 | [
"Gaussian"
] | 5641a778d02b56bccf3731212e4acc6501f7afd3b67cd3a5c790e7d1979e20c6 |
from __future__ import division
from pylab import *
import utils
utils.backup(__file__)
from stats import AbstractStat
from stats import HistoryStat
from stats import _getvar
from common.sources import TrialSource
from utils.lstsq_reg import lstsq_reg
import cPickle as pickle
import gzip
def load_source(name, c):
    """Load a pickled source object from the log directory.

    First tries the plain "<name>.pickle" file; if it is absent
    (cluster runs append the varied parameter to the file name), falls
    back to "<name>_<param>_<value>.pickle".  TrialSource wrappers are
    unwrapped so the inner source is returned.
    """
    try:
        handle = gzip.open(c.logfilepath + name + ".pickle", "r")
    except IOError:
        # Cluster: file name carries the varied parameter and its value
        cluster_suffix = "_%s_%.3f.pickle" % (c.cluster.vary_param,
                                              c.cluster.current_param)
        handle = gzip.open(c.logfilepath + name + cluster_suffix, "r")
    loaded = pickle.load(handle)
    if isinstance(loaded, TrialSource):
        loaded = loaded.source
    return loaded
class CounterStat(AbstractStat):
    """Counts how many simulation steps were executed.

    Stored as a float and reported as an array because the collection
    machinery reduces numeric arrays across jobs.
    """
    def __init__(self):
        self.name = 'num_steps'
        self.collection = "reduce"

    def start(self, c, obj):
        # float, not int -- see class docstring
        c[self.name] = 0.0

    def add(self, c, obj):
        c[self.name] = c[self.name] + 1

    def report(self, c, obj):
        # wrap in an array for the reduce collection
        return array(c[self.name])
# By making CounterStat a little longer we can make ClearCounterStat a
# lot shorter
class ClearCounterStat(CounterStat):
    """A step counter that restarts on clear() instead of start().

    The tuple assignment below swaps the two inherited bound methods:
    afterwards clear() zeroes the counter (CounterStat.start) while
    start() runs whatever clear() did in the base class (defined in
    AbstractStat, not visible here).
    """
    def __init__(self):
        self.name = 'counter'
        self.collection = "ignore"
        # swap the inherited bound methods (see class docstring)
        (self.clear,self.start) = (self.start,self.clear)
class PopulationVariance(AbstractStat):
    """Histogram over the number of simultaneously active units.

    Bin i counts the steps in which exactly i units were active.
    """
    def __init__(self):
        self.name = 'pop_var'
        self.collection = 'reduce'

    def clear(self, c, obj):
        # one bin per possible activity count: 0..N_e inclusive
        c.pop_var = zeros(obj.c.N_e + 1)

    def add(self, c, obj):
        active_count = sum(obj.x)
        c.pop_var[active_count] += 1.0

    def report(self, c, obj):
        return c.pop_var
class ActivityStat(AbstractStat):
    """
    Gathers the mean activity (fraction of active units) of the network
    at each step.

    If the parameter only_last is set, a buffer of size 2*only_last is
    used instead of one entry per step: the first half subsamples the
    whole run evenly, the second half stores the final only_last steps
    verbatim.
    """
    def __init__(self):
        self.name = 'activity'
        self.collection = 'gather'
    def clear(self,c,sorn):
        if sorn.c.stats.has_key('only_last'):
            # two halves: subsampled run + last only_last steps
            c.activity = zeros(sorn.c.stats.only_last\
                               +sorn.c.stats.only_last)
        else:
            c.activity = zeros(sorn.c.N_steps)
        # own step counter; incremented on every add() call
        self.step = 0
    def add(self,c,sorn):
        if sorn.c.stats.has_key('only_last'):
            # position relative to the start of the final window
            new_step = self.step - (sorn.c.N_steps\
                                    -sorn.c.stats.only_last)
            if new_step >= 0:
                # second half of the buffer: the last steps verbatim
                c.activity[new_step+sorn.c.stats.only_last] \
                    = sum(sorn.x)/sorn.c.N_e
            elif self.step % (sorn.c.N_steps\
                              //sorn.c.stats.only_last) == 0:
                # first half: every (N_steps//only_last)-th step
                c.activity[self.step//(sorn.c.N_steps\
                           //sorn.c.stats.only_last)] = sum(sorn.x)/sorn.c.N_e
        else:
            c.activity[self.step] = sum(sorn.x)/sorn.c.N_e
        self.step += 1
    def report(self,c,sorn):
        return c.activity
class InputIndexStat(AbstractStat):
    """
    Gathers the index of the input at each step.

    Uses the same buffer layout as ActivityStat: with only_last set,
    the first half of a 2*only_last buffer evenly subsamples the run
    and the second half stores the final only_last steps verbatim.
    """
    def __init__(self):
        self.name = 'InputIndex'
        self.collection = 'gather'
    def clear(self,c,sorn):
        if sorn.c.stats.has_key('only_last'):
            # two halves: subsampled run + last only_last steps
            c.inputindex = zeros(sorn.c.stats.only_last\
                                 +sorn.c.stats.only_last)
        else:
            c.inputindex = zeros(sorn.c.N_steps)
        # own step counter; incremented on every add() call
        self.step = 0
    def add(self,c,sorn):
        if sorn.c.stats.has_key('only_last'):
            # position relative to the start of the final window
            new_step = self.step - (sorn.c.N_steps\
                                    -sorn.c.stats.only_last)
            if new_step >= 0:
                # second half of the buffer: the last steps verbatim
                c.inputindex[new_step+sorn.c.stats.only_last] \
                    = sorn.source.global_index()
            elif self.step % (sorn.c.N_steps\
                              //sorn.c.stats.only_last) == 0:
                # first half: every (N_steps//only_last)-th step
                c.inputindex[self.step//(sorn.c.N_steps\
                             //sorn.c.stats.only_last)] = sorn.source.global_index()
        else:
            c.inputindex[self.step] = sorn.source.global_index()
        self.step += 1
    def report(self,c,sorn):
        return c.inputindex
class WordListStat(AbstractStat):
    """Reports the word list from the configuration.

    OLD! use pickle of source instead!
    """
    def __init__(self):
        self.name = 'WordList'
        self.collection = 'gather'

    def report(self, c, sorn):
        word_list = sorn.c.words
        return word_list
class InputUnitsStat(AbstractStat):
    """Reports a fixed-size 0/1 mask over units that receive input."""
    def __init__(self):
        self.name = 'InputUnits'
        self.collection = 'gather'

    def report(self, c, sorn):
        # units with at least one incoming input synapse
        receiving = where(sum(sorn.W_eu.get_synapses(), 1) > 0)[0]
        # boolean mask of length N_e so all reports are equal in size
        mask = array([unit in receiving for unit in arange(sorn.c.N_e)])
        return mask + 0  # cast bool mask to numeric
class NormLastStat(AbstractStat):
    '''
    This is a helper Stat that computes the normalized last spikes
    and input indices.

    Reads c.spikes and c.inputindex (gathered by other stats) and
    stores c.norm_last_input_spikes / c.norm_last_input_index /
    c.maxindex / c.N_comparison on c for downstream stats such as
    SpontPatternStat and BayesStat.  Returns N_comparison as a
    1-element array.
    '''
    def __init__(self):
        self.name = 'NormLast'
        self.collection = 'gather'
    def report(self,c,sorn):
        steps_plastic = sorn.c.steps_plastic
        steps_noplastic_train = sorn.c.steps_noplastic_train
        steps_noplastic_test = sorn.c.steps_noplastic_test
        plastic_train = steps_plastic+steps_noplastic_train
        # input-driven phase: plastic + noplastic training steps
        input_spikes = c.spikes[:,steps_plastic:plastic_train]
        input_index = c.inputindex[steps_plastic:plastic_train]
        # Filter out empty states
        input_spikes = input_spikes[:,input_index != -1]
        input_index = input_index[input_index != -1]
        if sorn.c.stats.has_key('only_last'):
            N_comparison = sorn.c.stats.only_last
        else:
            N_comparison = 2500
        assert(N_comparison > 0)
        assert(N_comparison <= steps_noplastic_test \
               and N_comparison <= steps_noplastic_train)
        maxindex = int(max(input_index))
        # Only use spikes that occured at the end of learning and spont
        last_input_spikes = input_spikes[:,-N_comparison:]
        last_input_index = input_index[-N_comparison:]
        # Get the minimal occurence of an index in the last steps
        min_letter_count = inf
        for i in range(maxindex+1):
            tmp = sum(last_input_index == i)
            if min_letter_count > tmp:
                min_letter_count = tmp
        # For each index, take the same number of states from the
        # end phase of learning to avoid a bias in comparing states
        norm_last_input_spikes = np.zeros((shape(last_input_spikes)[0],\
                                           min_letter_count*(maxindex+1)))
        norm_last_input_index = np.zeros(min_letter_count*(maxindex+1))
        for i in range(maxindex+1):
            indices = find(last_input_index == i)
            norm_last_input_spikes[:,min_letter_count*i\
                                   : min_letter_count*(i+1)]\
                = last_input_spikes[:, indices[-min_letter_count:]]
            norm_last_input_index[min_letter_count*i\
                                  : min_letter_count*(i+1)]\
                = last_input_index[indices[-min_letter_count:]]
        # Shuffle to avoid argmin-problem of selecting only first match
        indices = arange(shape(norm_last_input_index)[0])
        shuffle(indices)
        norm_last_input_index = norm_last_input_index[indices]
        norm_last_input_spikes = norm_last_input_spikes[:,indices]
        # stash results on c for downstream stats
        c.norm_last_input_index = norm_last_input_index
        c.norm_last_input_spikes = norm_last_input_spikes
        c.maxindex = maxindex
        c.N_comparison = N_comparison
        to_return = array([float(N_comparison)])
        return to_return
class SpontPatternStat(AbstractStat):
    """
    Computes the frequency of each pattern in the spontaneous activity.

    Requires the fields stored on c by NormLastStat.  Each spontaneous
    state is matched to the evoked state with the smallest Hamming
    distance; row 0 of the result counts single-index matches, row 1
    counts full forward/reversed word sequences.  Also stores the
    matched index sequence as c.similar_input for downstream stats.
    """
    def __init__(self):
        self.name = 'SpontPattern'
        self.collection = 'gather'
    def report(self,c,sorn):
        source_plastic = load_source("source_plastic",sorn.c)
        steps_noplastic_test = sorn.c.steps_noplastic_test
        spont_spikes = c.spikes[:,-steps_noplastic_test:]
        norm_last_input_index = c.norm_last_input_index
        norm_last_input_spikes = c.norm_last_input_spikes
        maxindex = c.maxindex
        N_comparison = c.N_comparison
        last_spont_spikes = spont_spikes[:,-N_comparison:]
        # Remove silent periods from spontspikes
        last_spont_spikes = last_spont_spikes[:,sum(last_spont_spikes,0)>0]
        N_comp_spont = shape(last_spont_spikes)[1]
        # Find for each spontaneous state the evoked state with the
        # smallest hamming distance and store the corresponding index
        similar_input = zeros(N_comp_spont)
        for i in xrange(N_comp_spont):
            most_similar = argmin(sum(abs(norm_last_input_spikes.T\
                                     -last_spont_spikes[:,i]),axis=1))
            similar_input[i] = norm_last_input_index[most_similar]
        # Count the number of spontaneous states for each index and plot
        index = range(maxindex+1)
        if self.collection == 'gatherv':
            adding = 2
        else:
            adding = 1
        pattern_freqs = zeros((2,maxindex+adding))
        # NOTE(review): barcolor is never used
        barcolor = []
        for i in index:
            pattern_freqs[0,i] = sum(similar_input==index[i])
        # Compare patterns
        # Forward patterns ([0,1,2,3],[4,5,6,7],...)
        patterns = array([arange(len(w))+source_plastic.glob_ind[i] \
                         for (i,w) in enumerate(source_plastic.words)])
        rev_patterns = array([x[::-1] for x in patterns])
        maxlen = max([len(x) for x in patterns])
        # Also get the reversed patterns
        if maxlen>1: # Single letters can't be reversed
            allpatterns = array(patterns.tolist()+rev_patterns.tolist())
        else:
            allpatterns = array(patterns.tolist())
        for (i,p) in enumerate(allpatterns):
            patternlen = len(p)
            for j in xrange(N_comp_spont-maxlen):
                if all(similar_input[j:j+patternlen] == p):
                    pattern_freqs[1,i] += 1
        # Marker for end of freqs
        if self.collection == 'gatherv':
            pattern_freqs[:,-1] = -1
        # stash for SpontTransitionStat / SpontIndexStat
        c.similar_input = similar_input
        return(pattern_freqs)
class SpontTransitionStat(AbstractStat):
    """Transition-count matrix between successive spontaneous states.

    Relies on c.similar_input / c.maxindex from SpontPatternStat, so
    that stat must have run before this one.  Entry [to, from] counts
    how often state 'from' was followed by state 'to'.
    """
    def __init__(self):
        self.name = 'SpontTransition'
        self.collection = 'gather'

    def report(self, c, sorn):
        state_seq = c.similar_input  # computed by SpontPatternStat
        n_states = c.maxindex + 1
        counts = np.zeros((n_states, n_states))
        for (state_from, state_to) in zip(state_seq[:-1], state_seq[1:]):
            counts[state_to, state_from] += 1
        return counts
class SpontIndexStat(AbstractStat):
    """Reports the per-step evoked-state index of spontaneous activity."""
    def __init__(self):
        self.name = 'SpontIndex'
        self.collection = 'gather'

    def report(self, c, sorn):
        # c.similar_input is computed by SpontPatternStat
        return c.similar_input
class BayesStat(AbstractStat):
    """Trains a regularized least-squares readout that predicts the
    upcoming word ending (word A vs word B) from the network state and
    reports per-condition readout gains, their std and the decisions.

    pred_pos (constructor arg): number of steps before the word-final
    position at which the prediction is read out (0 = last letter).
    Side effects: stores pred_pos, Xtest, raw_predictions, inputi_test,
    letters_for_frac, classifier and noinput_units on c for downstream
    stats (SpontBayesStat, TrialBayesStat, ...).
    """
    def __init__(self,pred_pos = 0):
        self.name = 'Bayes'
        self.collection = 'gather'
        self.pred_pos = pred_pos # steps before M/N
    def clear(self,c,sorn):
        pass
        # If raw_prediction is input to M/N neurons, this is needed
        #~ self.M_neurons = where(sorn.W_eu.W[:,
        #~                        sorn.source.source.lookup['M']]==1)[0]
        #~ self.N_neurons = where(sorn.W_eu.W[:,
        #~                        sorn.source.source.lookup['N']]==1)[0]
    def report(self,c,sorn):
        ### Prepare spike train matrices for training and testing
        # Separate training and test data according to steps
        source_plastic = load_source("source_plastic",sorn.c)
        steps_plastic = sorn.c.steps_plastic
        N_train_steps = sorn.c.steps_noplastic_train
        N_inputtrain_steps = steps_plastic + N_train_steps
        N_test_steps = sorn.c.steps_noplastic_test
        # discard the first steps after plasticity is frozen
        burnin = 3000
        # Transpose because this is the way they are in test_bayes.py
        Xtrain = c.spikes[:,steps_plastic+burnin:N_inputtrain_steps].T
        Xtest = c.spikes[:,N_inputtrain_steps:].T
        assert(shape(Xtest)[0] == N_test_steps)
        inputi_train = c.inputindex[steps_plastic+burnin
                                    :N_inputtrain_steps]
        assert(shape(Xtrain)[0] == shape(inputi_train)[0])
        inputi_test = c.inputindex[N_inputtrain_steps:]
        assert(shape(inputi_test)[0]== N_test_steps)
        N_fracs = len(sorn.c.frac_A)
        # Filter out empty states
        if isinstance(sorn.source,TrialSource): # if TrialSource
            source = sorn.source.source
        else:
            source = sorn.source
        Xtrain = Xtrain[inputi_train != -1,:]
        inputi_train = inputi_train[inputi_train != -1]
        Xtest = Xtest[inputi_test != -1,:]
        inputi_test = inputi_test[inputi_test != -1]
        # Following snipplet modified from sorn_stats spont_stat
        # Get the minimal occurence of an index in the last steps
        maxindex = int(max(inputi_train))
        min_letter_count = inf
        for i in range(maxindex+1):
            tmp = sum(inputi_train == i)
            if min_letter_count > tmp:
                min_letter_count = tmp
        # For each index, take the same number of states from the
        # end phase of learning to avoid a bias in comparing states
        norm_Xtrain = np.zeros((min_letter_count*(maxindex+1),
                                shape(Xtrain)[1]))
        norm_inputi_train = np.zeros(min_letter_count*(maxindex+1))
        for i in range(maxindex+1):
            indices = find(inputi_train == i)
            norm_Xtrain[min_letter_count*i
                        : min_letter_count*(i+1), :]\
                = Xtrain[indices[-min_letter_count:],:]
            norm_inputi_train[min_letter_count*i
                              : min_letter_count*(i+1)]\
                = inputi_train[indices[-min_letter_count:]]
        Xtrain = norm_Xtrain
        inputi_train = norm_inputi_train
        # optionally restrict the readout to units without direct input
        noinput_units = where(sum(sorn.W_eu.W,1)==0)[0]
        if sorn.c.stats.bayes_noinput:
            Xtrain_noinput = Xtrain[:,noinput_units]
            Xtest_noinput = Xtest[:,noinput_units]
        else:
            Xtrain_noinput = Xtrain
            Xtest_noinput = Xtest
        assert(source_plastic.words[0][0]=="A" and
               source_plastic.words[1][0]=="B")
        A_index = source_plastic.glob_ind[0] # start of first word
        B_index = source_plastic.glob_ind[1] # start of second word
        # position from which to predict end of word
        pred_pos = len(source_plastic.words[0])-1-self.pred_pos
        assert(pred_pos>=0
               and pred_pos <= source_plastic.global_range())
        # two-row one-hot targets: row 0 = state at prediction position
        # of word A, row 1 = same for word B
        R = np.zeros((2,shape(inputi_train)[0]))
        R[0,:] = inputi_train == A_index+pred_pos
        R[1,:] = inputi_train == B_index+pred_pos
        if sorn.c.stats.relevant_readout:
            # train only on the states at the prediction positions
            Xtrain_relevant = Xtrain_noinput[((inputi_train ==
                A_index+pred_pos) +
                (inputi_train == B_index+pred_pos))>0,:]
            R_relevant = R[:,((inputi_train == A_index+pred_pos) +
                           (inputi_train == B_index+pred_pos))>0]
            classifier = lstsq_reg(Xtrain_relevant,R_relevant.T,
                                   sorn.c.stats.lstsq_mue)
        else:
            classifier = lstsq_reg(Xtrain_noinput,R.T,
                                   sorn.c.stats.lstsq_mue)
        #~ # No real difference between LogReg, BayesRidge and my thing
        #~ # If you do this, comment out raw_predictions further down
        #~ from sklearn import linear_model
        #~ clf0 = linear_model.LogisticRegression(C=1)#BayesianRidge()
        #~ clf1 = linear_model.LogisticRegression(C=1)#BayesianRidge()
        #~ clf0.fit(Xtrain_noinput,R.T[:,0])
        #~ clf1.fit(Xtrain_noinput,R.T[:,1])
        #~ raw_predictions = vstack((clf0.predict_proba(Xtest_noinput)[:,1]
        #~                     ,clf1.predict_proba(Xtest_noinput)[:,1])).T
        # predict
        #~ raw_predictions = Xtest.dot(classifier)
        #~ # comment this out if you use sklearn
        raw_predictions = Xtest_noinput.dot(classifier)
        #~ # Historical stuff
        #~ # Raw predictions = total synaptic input to M/N neurons
        #~ raw_predictions[1:,0] = sum((sorn.W_ee*Xtest[:-1].T)[
        #~                                          self.M_neurons],0)
        #~ raw_predictions[1:,1] = sum((sorn.W_ee*Xtest[:-1].T)[
        #~                                          self.N_neurons],0)
        #~ # Raw predictions = total activation of M/N neurons
        #~ raw_predictions[:,0] = sum(Xtest.T[self.M_neurons],0)
        #~ raw_predictions[:,1] = sum(Xtest.T[self.N_neurons],0)
        #~ # for testing: sum(raw_predictions[indices,0])>indices+-1,2,3
        letters_for_frac = ['B']
        # Because alphabet is sorted alphabetically, this list will
        # have the letters corresponding to the list frac_A
        for l in source.alphabet:
            if not ((l=='A') or (l=='B') or (l=='M') or (l=='N')
                    or (l=='X') or (l=='_')):
                letters_for_frac.append(l)
        letters_for_frac.append('A')
        output_drive = np.zeros((N_fracs,2))
        output_std = np.zeros((N_fracs,2))
        decisions = np.zeros((N_fracs,2))
        denom = np.zeros(N_fracs)
        for (s_word,s_index) in zip(source.words,source.glob_ind):
            # condition index of this word's cue letter
            i = ''.join(letters_for_frac).find(s_word[0])
            indices = find(inputi_test==s_index+pred_pos)
            # A predicted
            output_drive[i,0] += mean(raw_predictions[indices,0])
            # B predicted
            output_drive[i,1] += mean(raw_predictions[indices,1])
            decisions[i,0] += mean(raw_predictions[indices,0]>\
                                   raw_predictions[indices,1])
            decisions[i,1] += mean(raw_predictions[indices,1]>=\
                                   raw_predictions[indices,0])
            output_std[i,0] += std(raw_predictions[indices,0])
            output_std[i,1] += std(raw_predictions[indices,1])
            denom[i] += 1
        # Some words occur more than once
        output_drive[:,0] /= denom
        output_drive[:,1] /= denom
        output_std[:,0] /= denom
        output_std[:,1] /= denom
        decisions[:,0] /= denom
        decisions[:,1] /= denom
        # for other stats (e.g. SpontBayesStat)
        c.pred_pos = pred_pos
        c.Xtest = Xtest
        c.raw_predictions = raw_predictions
        c.inputi_test = inputi_test
        c.letters_for_frac = letters_for_frac
        c.classifier = classifier
        c.noinput_units = noinput_units
        to_return = hstack((output_drive,output_std,decisions))
        return to_return
class AttractorDynamicsStat(AbstractStat):
    """
    This stat tracks the distance between output gains during the
    input presentation to determine whether the decision is based on
    attractor dynamics.

    Temporarily re-runs the BayesStat readout at every position of the
    word and restores its original pred_pos (and the fields it stores
    on c) afterwards.
    """
    def __init__(self):
        self.name = 'AttractorDynamics'
        self.collection = 'gather'
    def report(self,c,sorn):
        # Read stuff in
        letters_for_frac = c.letters_for_frac
        if isinstance(sorn.source,TrialSource): # if TrialSource
            source = sorn.source.source
        else:
            source = sorn.source
        word_length = min([len(x) for x in source.words])
        N_words = len(source.words)
        N_fracs = len(sorn.c.frac_A)
        # locate the BayesStat instance among the registered stats
        bayes_stat = None
        for stat in sorn.stats.methods:
            # NOTE(review): identity comparison relies on string
            # interning; should be == for robustness
            if stat.name is 'Bayes':
                bayes_stat = stat
                break
        assert(bayes_stat is not None)
        pred_pos_old = bayes_stat.pred_pos
        #output_dist = np.zeros((word_length-1,N_fracs))
        output_dist = np.zeros((word_length,N_fracs))
        # smallest number of presentations over all indices
        min_trials = inf
        for i in range(int(max(c.inputi_test))+1):
            tmp = sum(c.inputi_test == i)
            if min_trials > tmp:
                min_trials = tmp
        decisions = np.zeros((N_words,word_length,min_trials),\
                             dtype=np.bool)
        seq_count = np.zeros((N_words,4))
        # evaluate the readout at every position within the word
        for (p,pp) in enumerate(arange(0,word_length)):
            bayes_stat.pred_pos = pp
            bayes_stat.report(c,sorn)
            pred_pos = c.pred_pos
            raw_predictions = c.raw_predictions
            inputi_test = c.inputi_test
            #~ summed = abs(raw_predictions[:,0])+abs(raw_predictions[:,1])
            #~ summed[summed<1e-10] = 1 # if predicted 0, leave at 0
            #~ raw_predictions[:,0] /= summed
            #~ raw_predictions[:,1] /= summed
            denom = np.zeros((N_fracs))
            for (w,(s_word,s_index)) in enumerate(zip(source.words,
                                                  source.glob_ind)):
                i = ''.join(letters_for_frac).find(s_word[0])
                indices = find(inputi_test==s_index+pred_pos)
                # gap between the two output gains at this position
                tmp = abs(raw_predictions[indices,0]-
                          raw_predictions[indices,1])
                output_dist[p,i] += mean(tmp)
                decisions[w,p,:] = raw_predictions[
                                    indices[-min_trials:],0]>\
                                    raw_predictions[indices[-min_trials:],1]
                denom[i] += 1
            output_dist[p,:] /= denom
        for i in range(N_words):
            # Full-length 1s to be expected
            seq_count[i,0] = ((sum(decisions[i])/(1.*min_trials*
                            word_length))**(word_length))*min_trials
            # Actual 1-series
            seq_count[i,1] = sum(sum(decisions[i],0)==word_length)
            # Same for 0-series
            seq_count[i,2] = ((1-(sum(decisions[i])/(1.*min_trials*
                            word_length)))**(word_length))*min_trials
            seq_count[i,3] = sum(sum(decisions[i],0)==0)
        # restore the BayesStat state (it stores results on c)
        bayes_stat.pred_pos = pred_pos_old
        bayes_stat.report(c,sorn)
        return output_dist
class OutputDistStat(AbstractStat):
    """
    This stat reports the distance between output gains as an indicator
    for whether the decision is based on chance or on attractor dynamics.

    Requires the fields stored on c by BayesStat.  Returns a 2xN_fracs
    array: row 0 the mean normalized gain distance per condition,
    row 1 its standard deviation.
    """
    def __init__(self):
        self.name = 'OutputDist'
        self.collection = 'gather'
    def report(self,c,sorn):
        # Read stuff in
        letters_for_frac = c.letters_for_frac
        raw_predictions = c.raw_predictions
        inputi_test = c.inputi_test
        pred_pos = c.pred_pos
        if isinstance(sorn.source,TrialSource): # if TrialSource
            source = sorn.source.source
        else:
            source = sorn.source
        N_fracs = len(sorn.c.frac_A)
        # normalize the two gains to unit L1 mass per step
        # NOTE(review): this modifies c.raw_predictions in place, so
        # stats running afterwards see the normalized values
        summed = abs(raw_predictions[:,0])+abs(raw_predictions[:,1])
        summed[summed<1e-10] = 1 # if predicted 0, leave at 0
        raw_predictions[:,0] /= summed
        raw_predictions[:,1] /= summed
        output_dist = np.zeros((N_fracs))
        output_std = np.zeros((N_fracs))
        denom = np.zeros((N_fracs))
        for (s_word,s_index) in zip(source.words,source.glob_ind):
            i = ''.join(letters_for_frac).find(s_word[0])
            indices = find(inputi_test==s_index+pred_pos)
            tmp = abs(raw_predictions[indices,0]-
                      raw_predictions[indices,1])
            output_dist[i] += mean(tmp)
            output_std[i] += std(tmp)
            denom[i] += 1
        # average over words that share a condition
        output_dist /= denom
        output_std /= denom
        to_return = vstack((output_dist,output_std))
        return to_return
class TrialBayesStat(AbstractStat):
    """
    This stat looks at the interaction of spontaneous activity before
    stimulus onset with the final prediction.

    index: int
        Word index (global) for which prediction is done

    Trains least-squares readouts on the spontaneous states in the
    STA_window steps before word onset and reports, for each offset,
    how well they agree with the actual decision, together with a
    shuffled baseline (2 x STA_window array).
    Requires the fields stored on c by BayesStat.
    """
    def __init__(self):
        self.name = 'TrialBayes'
        self.collection = 'gather'
    def report(self,c,sorn):
        # Read stuff in
        STA_window = 50
        pred_pos = c.pred_pos
        classifier_old = c.classifier
        noinput_units = c.noinput_units
        steps_plastic = sorn.c.steps_plastic
        N_train_steps = sorn.c.steps_noplastic_train
        N_inputtrain_steps = steps_plastic + N_train_steps
        N_test_steps = sorn.c.steps_noplastic_test
        # Transpose because this is the way they are in test_bayes.py
        # Use all neurons because we're predicting from spont activity
        Xtest = c.spikes[:,N_inputtrain_steps:].T
        inputi_test = c.inputindex[N_inputtrain_steps:]
        N_exc = shape(Xtest)[1]
        if isinstance(sorn.source,TrialSource): # if TrialSource
            source = sorn.source.source
        else:
            raise NotImplementedError
        # select middle word
        index = source.glob_ind[1+(shape(source.glob_ind)[0]-3)//2]
        forward_pred = sorn.c.stats.forward_pred
        start_indices = find(inputi_test==index)
        # * is element-wise AND
        start_indices = start_indices[(start_indices>STA_window) *
                       ((start_indices+pred_pos+forward_pred)<shape(inputi_test)[0])]
        N_samples = shape(start_indices)[0]
        pred_indices = find(inputi_test==(index+pred_pos))
        pred_indices = pred_indices[(pred_indices>=start_indices[0])*
                      ((pred_indices+forward_pred)<shape(inputi_test)[0])]
        assert(N_samples == shape(pred_indices)[0])
        # decision of the trained readout at the prediction position
        if sorn.c.stats.bayes_noinput:
            raw_predictions = Xtest[:,noinput_units].dot(classifier_old)
        else:
            raw_predictions = Xtest.dot(classifier_old)
        predictions = raw_predictions[pred_indices,:]
        # Two different baselines
        #~ test_base = ones((shape(Xtest)[0],1))
        test_base = Xtest.copy()
        shuffle(test_base) # without shuffle, identical predictions
        test_base = hstack((test_base,ones((shape(Xtest)[0],1))))
        # Add bias term to exclude effects of varability
        N_exc += 1
        Xtest = hstack((Xtest,ones((shape(Xtest)[0],1))))
        # Divide into train and test set
        predictions_train = predictions[:N_samples//2]
        predictions_test = predictions[N_samples//2:]
        train_A = predictions_train[:,0]>predictions_train[:,1]
        train_B = train_A==False
        train_A = find(train_A==True)
        train_B = find(train_B==True)
        # This case is filtered out during plotting
        if not(shape(train_A)[0]>0 and shape(train_B)[0]>0):
            return np.ones((2,STA_window))*-1
        agreement_lstsq = np.zeros(STA_window)
        agreement_base = np.zeros(STA_window)
        # This maps 0/1 spikes to -1/1 spikes for later * comparison
        predtrain_lstsq = (predictions_train[:,0]>\
                           predictions_train[:,1])*2-1
        predtest_lstsq = (predictions_test[:,0]>\
                          predictions_test[:,1])*2-1
        # Prediction with spontaneous activity
        for i in range(-STA_window,0):
            classifier_lstsq = lstsq_reg(Xtest[\
                start_indices[:N_samples//2]+i+forward_pred,:],\
                predtrain_lstsq,sorn.c.stats.lstsq_mue)
            predictions_lstsq = (Xtest[start_indices[N_samples//2:]+i\
                +forward_pred,:]).dot(classifier_lstsq)
            # this is where the -1/1 comes in
            agreement_lstsq[i] = sum((predictions_lstsq*predtest_lstsq)\
                                     >0)/(1.*N_samples//2)
        # Baseline prediction (loop is unnecessary and for similarity)
        for i in range(-STA_window,0):
            classifier_base = lstsq_reg(test_base[\
                start_indices[:N_samples//2]+i+forward_pred,:],\
                predtrain_lstsq,sorn.c.stats.lstsq_mue)
            predictions_base = (test_base[start_indices[N_samples//2:]+i\
                +forward_pred,:]).dot(classifier_base)
            agreement_base[i] = sum((predictions_base*predtest_lstsq)\
                                    >0)/(1.*N_samples//2)
        # STA - not used
        trials = np.zeros((N_samples,STA_window,N_exc))
        for i in range(N_samples):
            trials[i,:,:] = Xtest[start_indices[i]-STA_window\
                            +forward_pred:start_indices[i]+forward_pred,:]
        STA_A = mean(trials[train_A,:,:],0)
        STA_B = mean(trials[train_B,:,:],0)
        N_test = N_samples-N_samples//2
        overlap_A = np.zeros((N_test,STA_window,N_exc))
        overlap_B = np.zeros((N_test,STA_window,N_exc))
        for i in range(N_samples//2,N_samples):
            overlap_A[i-N_samples//2] = trials[i]*STA_A
            overlap_B[i-N_samples//2] = trials[i]*STA_B
        agreement = np.zeros(STA_window)
        pred_gain_A = predictions_test[:,0]>predictions_test[:,1]
        for i in range(STA_window):
            pred_STA_A = sum(overlap_A[:,i,:],1)>sum(overlap_B[:,i,:],1)
            agreement[i] = sum(pred_gain_A == pred_STA_A)
        agreement /= float(shape(pred_gain_A)[0])
        return vstack((agreement_base, agreement_lstsq))
class SpontBayesStat(AbstractStat):
    """Relate cue-time input-unit activation to later readout gains.

    For every condition (indexed by the word's first letter), the number
    of active 'A' and 'B' input units at cue presentation and the raw
    readout predictions pred_pos steps later are collected.  Returns an
    array of shape (n_conditions, minlen, 4): columns 0-1 hold the cue
    activations, columns 2-3 the prediction gains.
    """
    def __init__(self):
        self.name = 'SpontBayes'
        self.collection = 'gather'
    def report(self,c,sorn):
        # Read stuff in
        pred_pos = c.pred_pos
        inputi_test = c.inputi_test
        raw_predictions = c.raw_predictions
        Xtest = c.Xtest
        # Filter out empty states
        if isinstance(sorn.source,TrialSource): # if TrialSource
            source = sorn.source.source
        else:
            source = sorn.source
        Xtest = Xtest[inputi_test != -1,:]
        inputi_test = inputi_test[inputi_test != -1]
        letters_for_frac = c.letters_for_frac
        # Results will first be saved in dict for simplicity and later
        # subsampled to an array
        cue_act = {}
        pred_gain = {}
        minlen = inf
        for (s_word,s_index) in zip(source.words,source.glob_ind):
            # Condition number = position of the word's first letter
            i = ''.join(letters_for_frac).find(s_word[0])
            # Indices that point to the presentation of the cue relative
            # to the readout
            cue_indices = find(inputi_test==s_index)
            pred_indices = cue_indices+pred_pos
            # Drop readout positions that fall beyond the recorded data
            pred_indices = pred_indices[pred_indices
                                        <shape(inputi_test)[0]]
            # Get x-states at cue_indices and figure out the number of
            # active input units for A and B
            tmp_cue = Xtest[cue_indices]
            tmp_cue = vstack((
                      sum(tmp_cue[:,1==sorn.W_eu.W[:,
                                    source.lookup['A']]],1),
                      sum(tmp_cue[:,1==sorn.W_eu.W[:,
                                    source.lookup['B']]],1))).T
            tmp_gain = raw_predictions[pred_indices,:]
            if cue_act.has_key(i):
                cue_act[i] = np.append(cue_act[i],tmp_cue,axis=0)
                pred_gain[i] = np.append(pred_gain[i],tmp_gain,axis=0)
            else:
                cue_act[i] = tmp_cue
                pred_gain[i] = tmp_gain
            if shape(cue_act[i])[0]<minlen:
                minlen = shape(cue_act[i])[0]
        # TODO super ugly - try to make prettier
        minlen = 18 # hack for cluster - otherwise variable minlen
        # subsample to make suitable for array
        n_conditions = max(cue_act.keys())+1
        to_return = np.zeros((n_conditions,minlen,4))
        for i in range(n_conditions):
            # Keep only the last minlen samples of each condition
            to_return[i,:,:2] = cue_act[i][-minlen:]
            to_return[i,:,2:] = pred_gain[i][-minlen:]
        return to_return
class EvokedPredStat(AbstractStat):
    """
    This stat predicts evoked activity from spontaneous activity

    traintimes is an interval of training data
    testtimes is an interval of testing data

    For each word and each offset t in the prediction window, a
    regularized least-squares readout is trained to predict the state
    at word-onset+t from the state one step before onset.  A baseline
    readout is trained on row-shuffled pre-onset states, which destroys
    the state-to-trial correspondence.
    """
    def __init__(self,traintimes,testtimes,traintest):
        self.name = 'EvokedPred'
        self.collection = 'gather'
        self.traintimes = traintimes
        self.testtimes = testtimes
        self.traintest = traintest
    def report(self,c,sorn):
        # Read data
        traintimes = self.traintimes
        testtimes = self.testtimes
        Xtrain = c.spikes[:,traintimes[0]:traintimes[1]].T
        Xtest = c.spikes[:,testtimes[0]:testtimes[1]].T
        inputi_train = c.inputindex[traintimes[0]:traintimes[1]]
        inputi_test = c.inputindex[testtimes[0]:testtimes[1]]
        # Determine word length
        source = load_source("source_%s"%self.traintest,sorn.c)
        N_words = len(source.words)
        max_word_length = int(max([len(x) for x in source.words]))
        max_spont_length = int(sorn.c['wait_min_%s'%self.traintest]
                               +sorn.c['wait_var_%s'%self.traintest])
        pred_window = max_word_length + max_spont_length+max_word_length
        correlations = zeros((N_words,pred_window,2))
        import scipy.stats as stats
        # Convert 0/1 spike trains to -1/1 spike trains if needed
        if sorn.c.stats.match:
            Xtrain *= 2
            Xtrain -= 1
            Xtest *= 2
            Xtest -= 1
        word_length = 0
        for (w,word) in enumerate(source.words):
            # Word-onset indices, clipped so the prediction window fits
            word_starts_train = find(inputi_train==(word_length))
            word_starts_train = word_starts_train[(word_starts_train>0)\
                       *(word_starts_train<(shape(Xtrain)[0]-pred_window))]
            word_starts_test = find(inputi_test==(word_length))
            word_starts_test = word_starts_test[word_starts_test<\
                                           (shape(Xtest)[0]-pred_window)]
            bias_train = ones((shape(word_starts_train)[0],1))
            bias_test = ones((shape(word_starts_test)[0],1))
            # Baseline: pre-onset states with trials shuffled
            base_train = Xtrain[word_starts_train-1,:].copy()
            base_test = Xtest[word_starts_test-1,:].copy()
            shuffle(base_train)
            shuffle(base_test)
            base_train = hstack((bias_train,base_train))
            base_test = hstack((bias_test,base_test))
            # Spontaneous predictor: state one step before word onset
            sp_train = hstack((bias_train,Xtrain[word_starts_train-1,:]))
            sp_test = hstack((bias_test,Xtest[word_starts_test-1,:]))
            for t in range(pred_window):
                # First do a least-squares fit
                Xt_train = Xtrain[word_starts_train+t,:]
                Xt_test = Xtest[word_starts_test+t,:]
                # regularize with mue to avoid problems when #samples <
                # #neurons
                classifier = lstsq_reg(sp_train,Xt_train,
                                       sorn.c.stats.lstsq_mue)
                classifier_base = lstsq_reg(base_train,Xt_train,
                                            sorn.c.stats.lstsq_mue)
                Xt_pred = sp_test.dot(classifier)
                # Bugfix: the baseline prediction previously applied the
                # spontaneous classifier; classifier_base was computed
                # but never used
                base_pred = base_test.dot(classifier_base)
                def match(x,y):
                    # Fraction of units with identical sign (-1/1 data)
                    assert(shape(x) == shape(y))
                    x = x>0
                    y = y>0
                    return sum(x==y)/(1.0*shape(x)[0])
                if not sorn.c.stats.match:
                    correlations[w,t,0] = stats.pearsonr(
                                 Xt_pred.flatten(),Xt_test.flatten())[0]
                    correlations[w,t,1] = stats.pearsonr(
                                base_pred.flatten(),Xt_test.flatten())[0]
                else:
                    correlations[w,t,0] = match(Xt_pred.flatten(),
                                                Xt_test.flatten())
                    correlations[w,t,1] = match(base_pred.flatten(),
                                                Xt_test.flatten())
            word_length += len(word)
        # Correlations are sorted like the words:
        # A B C D E ... B = 0*A C = 0.1*A, D=0.2*A ...
        return correlations
class SpikesStat(AbstractStat):
    """Record the spike raster of the excitatory (or inhibitory) units.

    With c.stats.only_last set, only 2*only_last columns are stored:
    a subsampled history of the early phase followed by the final
    only_last steps at full resolution.
    """
    def __init__(self,inhibitory = False):
        if inhibitory:
            self.name = 'SpikesInh'
            self.sattr = 'spikes_inh'
        else:
            self.name = 'Spikes'
            self.sattr = 'spikes'
        self.collection = 'gather'
        self.inh = inhibitory
    def clear(self,c,sorn):
        if self.inh:
            self.neurons = sorn.c.N_i
        else:
            self.neurons = sorn.c.N_e
        if sorn.c.stats.has_key('only_last'):
            # First only_last columns: subsampled history;
            # second only_last columns: final steps
            steps = sorn.c.stats.only_last+sorn.c.stats.only_last
            c[self.sattr] = zeros((self.neurons,steps))
        else:
            c[self.sattr] = zeros((self.neurons,sorn.c.N_steps))
        self.step = 0
    def add(self,c,sorn):
        if self.inh:
            spikes = sorn.y
        else:
            spikes = sorn.x
        if sorn.c.stats.has_key('only_last'):
            new_step = self.step - (sorn.c.N_steps\
                                    -sorn.c.stats.only_last)
            if new_step >= 0:
                # Inside the final phase: record every step
                c[self.sattr][:,new_step+sorn.c.stats.only_last] \
                                                            = spikes
            elif self.step % (sorn.c.N_steps\
                              //sorn.c.stats.only_last) == 0:
                # Earlier phase: one column per subsampling interval
                c[self.sattr][:,self.step//(sorn.c.N_steps\
                                    //sorn.c.stats.only_last)] = spikes
        else:
            c[self.sattr][:,self.step] = spikes
        self.step += 1
    def report(self,c,sorn):
        if sorn.c.stats.save_spikes:
            return c[self.sattr]
        else:
            # Spikes were only collected for other stats - save space
            return zeros(0)
class CondProbStat(AbstractStat):
    """Estimate conditional spike probabilities between neuron pairs."""
    def __init__(self):
        self.name='CondProb'
        self.collection='gather'
    def clear(self,c,sorn):
        pass
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        # return a matrix with M_ij = frequency of a spike in i following
        # a spike in j
        # Look at test instead of training to get more diverse data
        steps = sorn.c.steps_noplastic_test
        spikes = c.spikes[:,-steps:]
        N = shape(spikes)[0] # number of neurons
        condspikes = np.zeros((N,N))
        for t in xrange(1,steps):
            # Count co-occurrences: spikes at t (rows) after spikes
            # at t-1 (columns)
            condspikes[spikes[:,t]==1,:] += spikes[:,t-1]
        spike_sum = sum(spikes,1)
        for i in xrange(N):
            # Normalize column j by neuron j's total spike count
            # NOTE(review): divides by zero for neurons that never
            # spike in the test phase - confirm this cannot occur
            condspikes[i,:] /= spike_sum
        return condspikes
class BalancedStat(AbstractStat):
    """Track excitatory drive, inhibitory drive and thresholds per step.

    The three quantities are stacked into a single (3*N_e, N_steps)
    array so the excitation/inhibition balance can be inspected later.
    """
    def __init__(self):
        self.name='Balanced'
        self.collection='gather'
    def clear(self,c,sorn):
        # One column per step; rows are [W_ee*x; W_ei*y; T_e]
        self.N_e = sorn.c.N_e
        self.step = 0
        c.balanced = zeros((3*self.N_e,sorn.c.N_steps))
    def add(self,c,sorn):
        n = self.N_e
        column = c.balanced[:,self.step]
        column[:n] = sorn.W_ee*sorn.x       # recurrent excitatory input
        column[n:2*n] = sorn.W_ei*sorn.y    # inhibitory input
        column[2*n:] = sorn.T_e             # current thresholds
        self.step += 1
    def report(self,c,sorn):
        return c.balanced
class RateStat(AbstractStat):
    """Report each neuron's mean firing rate, tiled into an (N,N) matrix
    so that row i holds the rates of all (presynaptic) neurons.
    """
    def __init__(self):
        self.name = 'Rate'
        self.collection='gather'
    def clear(self,c,sorn):
        pass
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        # Same plasticity-free interval as CondProbStat
        n_steps = sorn.c.steps_noplastic_test
        test_spikes = c.spikes[:,-n_steps:]
        n_neurons = shape(test_spikes)[0]
        rates = mean(test_spikes,1)
        # One identical row of rates per neuron
        return array([rates]*n_neurons)
class InputStat(AbstractStat):
    """Record the external drive W_eu*u arriving at each excitatory
    neuron, one column per simulation step.
    """
    def __init__(self):
        self.name = 'Input'
        self.collection = 'gather'
    def clear(self,c,sorn):
        self.step = 0
        c.inputs = zeros((sorn.c.N_e,sorn.c.N_steps))
    def add(self,c,sorn):
        # Projected input drive for this step
        drive = sorn.W_eu*sorn.u
        c.inputs[:,self.step] = drive
        self.step += 1
    def report(self,c,sorn):
        return c.inputs
class FullEndWeightStat(AbstractStat):
    """Report the full (N_e+N_i) x (N_e+N_i) weight matrix at the end of
    the run, laid out as [[W_ee, W_ei], [W_ie, 0]] - there are no
    inhibitory-to-inhibitory connections.
    """
    def __init__(self):
        self.name = 'FullEndWeight'
        self.collection = 'gather'
    def clear(self,c,sorn):
        pass
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        # Left block: projections from excitatory neurons
        exc_block = np.vstack((sorn.W_ee.get_synapses(),
                               sorn.W_ie.get_synapses()))
        # Right block: projections from inhibitory neurons (no I->I)
        inh_block = np.vstack((sorn.W_ei.get_synapses(),
                               np.zeros((sorn.c.N_i,sorn.c.N_i))))
        return np.array(hstack((exc_block,inh_block)))
class EndWeightStat(AbstractStat):
    """Report a dense copy of the final E->E weight matrix, masked to
    the synapses that actually exist.
    """
    def __init__(self):
        self.name = 'endweight'
        self.collection = 'gather'
    def clear(self,c,sorn):
        pass
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        if not sorn.c.W_ee.use_sparse:
            # Zero out entries without a synapse in the mask M
            return sorn.W_ee.W*(sorn.W_ee.M==1)
        return np.array(sorn.W_ee.W.todense())
class ISIsStat(AbstractStat):
    """Histogram the inter-spike intervals (0..99 steps) of the
    excitatory neurons that receive no external input.
    """
    def __init__(self,interval=[]):
        self.name = 'ISIs'
        self.collection = 'gather'
        self.interval = interval
    def clear(self,c,sorn):
        # Observe only neurons without any input synapse
        self.mask = sum(sorn.W_eu.get_synapses(),1)==0
        self.N_noinput = sum(self.mask)
        # Row per observed neuron; column k counts ISIs of length k
        self.ISIs = zeros((self.N_noinput,100))
        # Steps elapsed since each observed neuron last spiked
        self.isis = zeros(self.N_noinput)
        self.step = 0
        if self.interval == []:
            self.interval = [0,sorn.c.N_steps]
    def add(self,c,sorn):
        if ((self.step > self.interval[0] and
             self.step < self.interval[1]) and
             ((not sorn.c.stats.has_key('only_last')) \
               or (self.step > sorn.c.stats.only_last))):
            spikes = sorn.x[self.mask]
            # Silent neurons: current interval grows by one step
            self.isis[spikes==0] += 1
            # Spiking neurons: bin the finished interval if it fits
            spiked = where(spikes==1)[0]
            finished = self.isis[spikes==1]
            # Bugfix: filter neuron indices and interval lengths
            # together.  The original filtered only the lengths
            # (isis_tmp<100) before zipping with the unfiltered index
            # list, so zip silently mispaired neurons with intervals
            # whenever any interval reached 100.
            fits = finished < 100
            for pair in zip(spiked[fits],finished[fits].astype(int)):
                self.ISIs[pair] += 1
            self.isis[spikes==1] = 0
        self.step += 1
    def report(self,c,sorn):
        return self.ISIs
class SynapseFractionStat(AbstractStat):
    """Report the fraction of realized E->E synapses out of the N_e^2
    possible ones.
    """
    def __init__(self):
        self.name = 'SynapseFraction'
        self.collection = 'reduce'
    def report(self,c,sorn):
        possible = sorn.c.N_e*sorn.c.N_e
        if sorn.c.W_ee.use_sparse:
            # +0.0 promotes the boolean count to float before dividing
            present = sum((sorn.W_ee.W.data>0)+0.0)
        else:
            present = sum(sorn.W_ee.M)
        return array(present/possible)
class ConnectionFractionStat(AbstractStat):
    """Track the fraction of active E->E connections over time.

    With c.stats.only_last set, only 2*only_last values are kept: a
    subsampled early history followed by the final only_last steps
    (the same layout SpikesStat uses).
    """
    def __init__(self):
        self.name = 'ConnectionFraction'
        self.collection = 'gather'
    def _connection_fraction(self,sorn):
        # Fraction of nonzero synapses out of all N_e*N_e possible ones.
        # The original computed this identical expression in three
        # places; factored out here unchanged.
        # NOTE(review): both branches divide integer counts by an
        # integer - under Python 2 classic division this floors; kept
        # as-is to preserve the recorded values (cf. +0.0 used in
        # SynapseFractionStat).
        if sorn.c.W_ee.use_sparse:
            return sum((sorn.W_ee.W.data>0)+0)/(sorn.c.N_e*sorn.c.N_e)
        else:
            return sum(sorn.W_ee.M)/(sorn.c.N_e*sorn.c.N_e)
    def clear(self,c,sorn):
        self.step = 0
        if sorn.c.stats.has_key('only_last'):
            # First half: subsampled history; second half: final steps
            self.cf = zeros(2*sorn.c.stats.only_last)
        else:
            self.cf = zeros(sorn.c.N_steps)
    def add(self,c,sorn):
        if sorn.c.stats.has_key('only_last'):
            only_last = sorn.c.stats.only_last
            new_step = self.step - (sorn.c.N_steps-only_last)
            if new_step >= 0:
                # Final phase: record every step
                self.cf[new_step+only_last] = \
                                        self._connection_fraction(sorn)
            elif self.step%(sorn.c.N_steps//only_last) == 0:
                # Early phase: record once per subsampling interval
                self.cf[self.step//(sorn.c.N_steps//only_last)] = \
                                        self._connection_fraction(sorn)
        else:
            self.cf[self.step] = self._connection_fraction(sorn)
        self.step += 1
    def report(self,c,sorn):
        return self.cf
class WeightLifetimeStat(AbstractStat):
    """Track the lifetime (in recorded steps) of every E->E synapse.

    Whenever a synapse disappears, its accumulated lifetime is appended
    to self.diedat; surviving synapses keep aging.
    """
    def __init__(self):
        self.name = 'WeightLifetime'
        self.collection = 'gather'
    def clear(self,c,sorn):
        if sorn.c.W_ee.use_sparse:
            self.last_M_ee = np.array(sorn.W_ee.W.todense())>0
        else:
            self.last_M_ee = sorn.W_ee.M.copy()
        # Per-synapse age counter; 0 means "no synapse"
        self.lifetimes = zeros((sorn.c.N_e,sorn.c.N_e))
        self.diedat = np.zeros((1,0))
    def add(self,c,sorn):
        if sorn.c.W_ee.use_sparse:
            new_M_ee = np.array(sorn.W_ee.W.todense())>0
        else:
            new_M_ee = sorn.W_ee.M
        # Synapses going 1 -> 0 just died: record their final lifetime
        self.diedat = append(self.diedat, \
                        self.lifetimes[(new_M_ee+0-self.last_M_ee+0)==-1])
        # remove dead synapses
        self.lifetimes *= new_M_ee+0
        #increase lifetime of existing ones
        self.lifetimes += (self.lifetimes>0)+0
        #add new ones
        self.lifetimes += ((new_M_ee+0-self.last_M_ee+0)==1)+0
        self.last_M_ee = new_M_ee.copy()
    def report(self,c,sorn):
        # Pad with -1 so every worker returns an equally sized vector
        padding = (-1)*np.ones(2*sorn.c.N_steps\
                    +shape(self.last_M_ee)[0]**2-self.diedat.size)
        return np.append(self.diedat,padding)
class WeightChangeStat(AbstractStat):
    """Record absolute and relative E->E weight changes between two
    fixed steps (self.start and self.end).
    """
    def __init__(self):
        self.name = 'WeightChange'
        self.collection = 'gather'
    def clear(self,c,sorn):
        self.step = 0
        # Hard-coded comparison interval [start, end]
        self.start = 2999
        self.end = 5999
        self.save_W_ee = []
        self.abschange = []
        self.relchange = []
        self.weights = []
    def add(self,c,sorn):
        if(self.step == self.start):
            # Snapshot the weight matrix at the start step
            if sorn.c.W_ee.use_sparse:
                self.save_W_ee = np.array(sorn.W_ee.W.todense())
            else:
                self.save_W_ee = sorn.W_ee.W.copy()
        if(self.step == self.end):
            if sorn.c.W_ee.use_sparse:
                diff = np.array(sorn.W_ee.W.todense())-self.save_W_ee
            else:
                diff = sorn.W_ee.W-self.save_W_ee
            # Only synapses that actually changed enter the statistics
            self.weights = self.save_W_ee[diff!=0]
            self.abschange = (diff[diff!=0])
            seterr(divide='ignore')
            # Some weights become 0 and thereby elicit division by 0
            # and try except RuntimeWarning didn't work
            self.relchange = self.abschange/self.weights*100
            seterr(divide='warn')
            # append zeros to always have the same size
            tmp_zeros = np.zeros(shape(self.save_W_ee)[0]**2\
                                 -self.weights.size)
            self.weights = np.append(self.weights,tmp_zeros)
            self.abschange = np.append(self.abschange,tmp_zeros)
            self.relchange = np.append(self.relchange,tmp_zeros)
        self.step += 1
    def report(self,c,sorn):
        # Rows: initial weights, absolute changes, relative changes (%)
        stacked = np.vstack((self.weights, self.abschange,\
                             self.relchange))
        return stacked
class WeightChangeRumpelStat(AbstractStat):
    """Track weight changes of persistent synapses at imaging intervals.

    Mimics chronic-imaging experiments: W_ee is snapshotted every
    imaging_interval steps starting at self.start, and only synapses
    present in every snapshot ("constant weights") enter the change
    statistics reported at the end.
    """
    def __init__(self):
        self.name = 'WeightChangeRumpel'
        self.collection = 'gather'
    def clear(self,c,sorn):
        self.step = 0
        self.interval = 0
        self.start = 50001
        self.started = False
        self.imaging_interval = 50000
        self.N_intervals = (sorn.c.N_steps-self.start)\
                            //self.imaging_interval+1
        self.save_W_ees = np.zeros((self.N_intervals,sorn.c.N_e,\
                                                        sorn.c.N_e))
        self.constant_weights = []
        self.abschange = []
        self.relchange = []
        self.weights = []
    def add(self,c,sorn):
        if(self.step%self.imaging_interval == 0 and self.started):
            self.save_W_ees[self.interval,:,:] \
                                            = sorn.W_ee.get_synapses()
            # A synapse stays "constant" only while it is present in
            # every snapshot taken so far
            self.constant_weights *= (self.save_W_ees[self.interval,\
                                                            :,:]>0)
            self.interval += 1
        if(self.step == self.start):
            # First snapshot starts the imaging series
            self.save_W_ees[self.interval,:,:] \
                                            = sorn.W_ee.get_synapses()
            self.constant_weights \
                         = (self.save_W_ees[self.interval,:,:].copy()>0)
            self.interval = 1
            self.started = True
        self.step += 1
    def report(self,c,sorn):
        # Changes between consecutive snapshots, restricted to synapses
        # that persisted through all snapshots.
        # Bugfix: removed a leftover "import pdb; pdb.set_trace()"
        # debugging breakpoint that halted every run here.
        diffs = self.save_W_ees[1:,:,:] - self.save_W_ees[:-1,:,:]
        diffs *= self.constant_weights
        self.abschange = (diffs[diffs!=0])
        self.weights = self.save_W_ees[:-1,:,:][diffs!=0]
        self.relchange = self.abschange/self.weights*100
        # append zeros to always have the same size
        tmp_zeros = np.zeros((self.N_intervals-1)\
                     *shape(self.save_W_ees)[1]**2-self.weights.size)
        self.weights = np.append(self.weights,tmp_zeros)
        self.abschange = np.append(self.abschange,tmp_zeros)
        self.relchange = np.append(self.relchange,tmp_zeros)
        stacked = np.vstack((self.weights, self.abschange,\
                             self.relchange))
        return stacked
class SmallWorldStat(AbstractStat):
    """Compute small-world measures (gamma, lambda, S_w) of the E->E graph.

    The clustering coefficient and characteristic path length of the
    thresholded weight matrix are compared against averages over
    c.stats.rand_networks random graphs of equal edge density.
    """
    def __init__(self):
        self.name = 'smallworld'
        self.collection = 'gather'
    def clear(self,c,sorn):
        pass
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        if sorn.c.stats.rand_networks <= 0:
            # Comparison networks disabled - nothing to report
            return np.array([])
        if sorn.c.W_ee.use_sparse:
            weights = np.array(sorn.W_ee.W.todense())
        else:
            weights = sorn.W_ee.W*(sorn.W_ee.M==1)
        # Threshold to a binary adjacency matrix
        tmp = weights>0.0+0.0
        binary_connections = tmp+0.0
        def all_pairs_shortest_path(graph_matrix):
            # adapted Floyd-Warshall Algorithm (O(N^3))
            N = shape(graph_matrix)[0]
            distances = graph_matrix.copy()
            #Set missing connections to max length
            distances[distances==0] += N*N
            for k in range(N):
                for i in range(N):
                    for j in range(N):
                        if i==j:
                            distances[i,j] = 0
                        else:
                            distances[i,j] = min(distances[i,j],
                                                 distances[i,k]
                                                  +distances[k,j])
            return distances
        def characteristic_path_length(graph_matrix):
            N = shape(graph_matrix)[0]
            distances = all_pairs_shortest_path(graph_matrix.T)
            if any(distances == N*N):
                print 'Disconnected elements in char. path len calc.'
            # ignore disconnected elements
            distances[distances==N*N] = 0
            average_length = sum(distances[distances>0]*1.0)\
                             /sum(graph_matrix[distances>0]*1.0)
            return average_length
        def cluster_coefficient(graph_matrix):
            # From Fagiolo, 2007 and Gerhard, 2011
            N = shape(graph_matrix)[0]
            in_degree = sum(graph_matrix,1)
            out_degree = sum(graph_matrix,0)
            k = in_degree+out_degree
            A = graph_matrix
            A_T = A.transpose()
            A_A_T = A + A_T
            A_2 = np.dot(A,A)
            nominator = np.dot(A_A_T,np.dot(A_A_T,A_A_T))
            single_coeff = np.zeros(N)
            for i in range(N):
                single_coeff[i] = nominator[i,i]/(2.0*(k[i]*(k[i]-1)\
                                  -2.0*(A_2[i,i])))
                if(np.isnan(single_coeff[i])):
                    # if total degree <= 1, the formula divides by 0
                    single_coeff[i] = 0
            return 1.0*sum(single_coeff)/(N*1.0)
        L = characteristic_path_length(binary_connections)
        C = cluster_coefficient(binary_connections)
        # Average over some random networks
        N = shape(binary_connections)[0]
        edge_density = sum(binary_connections)/(1.0*N*N-N)
        num_rand = sorn.c.stats.rand_networks
        L_rand = np.zeros(num_rand)
        C_rand = np.zeros(num_rand)
        delete_diagonal = np.ones((N,N))
        for i in range(N):
            delete_diagonal[i,i] = 0
        for i in range(num_rand):
            sys.stdout.write('\rRand Graph No.%3i of %3i'%(i+1,\
                                                           num_rand))
            sys.stdout.flush()
            # Random directed graph with matching density, no self-loops
            tmp = np.random.rand(N,N)<edge_density
            rand_graph = tmp*delete_diagonal
            L_rand[i] = characteristic_path_length(rand_graph)
            C_rand[i] = cluster_coefficient(rand_graph)
        sys.stdout.write('\rAll %i Graphs Done           '%num_rand)
        sys.stdout.flush()
        # gamma > 1 with lambda ~ 1 (S_w > 1) indicates small-worldness
        L_r = sum(L_rand)*1.0/(num_rand*1.0)
        C_r = sum(C_rand)*1.0/(num_rand*1.0)
        gamma = C/C_r
        lam = L/L_r
        S_w = gamma/lam
        return np.array([gamma, lam, S_w])
class ParamTrackerStat(AbstractStat):
    """Report the current value of the parameter varied by the cluster
    run (identified by the dotted path in c.cluster.vary_param).
    """
    def __init__(self):
        self.name = 'paramtracker'
        self.collection = 'gather'
    def clear(self,c,sorn):
        pass
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        # Walk the dotted path (e.g. 'W_ee.lamb') through the config
        value = sorn.c
        for key in sorn.c.cluster.vary_param.split('.'):
            value = value[key]
        return np.array([value*1.0])
class InputWeightStat(AbstractStat):
    """Snapshot the input weight matrix W_eu at regular intervals
    (2*only_last snapshots over the whole run).
    """
    def __init__(self):
        self.name = 'InputWeight'
        self.collection = 'gather'
    def clear(self,c,sorn):
        self.step = 0
        self.weights = np.zeros((sorn.c.N_e,sorn.c.N_u_e,\
                                 sorn.c.stats.only_last*2))
    def add(self,c,sorn):
        # Record one snapshot per subsampling interval
        interval = sorn.c.N_steps//sorn.c.stats.only_last
        if self.step % interval == 0:
            self.weights[:,:,self.step//interval] = \
                                               sorn.W_eu.get_synapses()
        self.step += 1
    def report(self,c,sorn):
        return self.weights
class SVDStat(AbstractStat):
    """Track the singular value decomposition of W_ee during plasticity.

    Every nth step the SVD of the synapse matrix is stored: singular
    values plus U and V with a deterministic sign convention.
    """
    def __init__(self,nth = 200):
        self.name = 'SVD'
        self.collection = 'gather'
        self.nth = nth
    def clear(self,c,sorn):
        self.step = 0
        # Quick hack - there must be a prettier solution
        if sorn.c.steps_plastic % self.nth == 0:
            add1 = 0
        else:
            add1 = 1
        c.SVD_singulars = np.zeros((sorn.c.steps_plastic//self.nth+add1
                                    ,sorn.c.N_e))
        c.SVD_U = np.zeros((sorn.c.steps_plastic//self.nth+add1,
                            sorn.c.N_e,sorn.c.N_e))
        c.SVD_V = np.zeros((sorn.c.steps_plastic//self.nth+add1,
                            sorn.c.N_e,sorn.c.N_e))
    def add(self,c,sorn):
        if self.step < sorn.c.steps_plastic and self.step%self.nth == 0:
            # Time intensive!
            synapses = sorn.W_ee.get_synapses()
            U,s,V = linalg.svd(synapses)
            c.SVD_singulars[self.step//self.nth,:] = s
            step = self.step//self.nth
            c.SVD_U[step] = U
            # this returns the real V
            # see http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html
            c.SVD_V[step] = V.T
            # Resolve sign ambiguity
            # from http://www.models.life.ku.dk/signflipsvd
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf
            for i in range(sorn.c.N_e):
                tmp = synapses.T.dot(c.SVD_U[step,:,i])
                tmp = np.squeeze(asarray(tmp))
                s_left = sum(sign(tmp)*tmp**2)
                tmp = synapses.T.dot(c.SVD_V[step,:,i])
                tmp = np.squeeze(asarray(tmp))
                s_right = sum(sign(tmp)*tmp**2)
                if s_right*s_left < 0:
                    # Conflicting signs: flip the smaller contribution
                    if s_left < s_right:
                        s_left = -s_left
                    else:
                        s_right = -s_right
                c.SVD_U[step,:,i] *= sign(s_left)
                c.SVD_V[step,:,i] *= sign(s_right)
        self.step += 1
    def report(self,c,sorn):
        #~ figure() # combine same submatrices!
        #~ imshow(c.SVD_U[-1][:,0].dot(c.SVD_V[-1][:,0].T)\
        #~        *c.SVD_singulars[-1,0], interpolation='none')
        return c.SVD_singulars
class SVDStat_U(AbstractStat):
    """For each left singular vector recorded by SVDStat, report the
    input cluster whose normalized spike pattern it overlaps best.
    Requires c.SVD_U and the c.norm_last_input_* fields.
    """
    def __init__(self):
        self.name = 'SVD_U'
        self.collection = 'gather'
    def report(self,c,sorn):
        rec_steps = shape(c.SVD_U)[0]
        similar_input = zeros((rec_steps,sorn.c.N_e))
        N_indices = max(c.norm_last_input_index)+1
        indices = [where(c.norm_last_input_index==i)[0] for i in
                   range(int(N_indices))]
        for s in xrange(rec_steps):
            for i in xrange(sorn.c.N_e):
                # U transforms back to "spike space"
                # Check for best similarities
                # Convolution works best:
                #~ overlaps = c.norm_last_input_spikes.T.dot(
                #~                                     c.SVD_U[s,:,i])
                #~ index_overlap = np.zeros(N_indices)
                #~ for j in range(int(N_indices)):
                #~     index_overlap[j] = mean(overlaps[indices[j]])
                #~ similar_input[s,i] = argmax(index_overlap)
                # No big difference to this, but probably more robust
                max_overlap = argmax(c.norm_last_input_spikes.T.dot(
                                     c.SVD_U[s,:,i]))
                similar_input[s,i] = c.norm_last_input_index[
                                     max_overlap]
        c.SVD_U_sim = similar_input # for debugging
        return similar_input
class SVDStat_V(AbstractStat):
    """For each right singular vector recorded by SVDStat, report the
    input cluster whose spike pattern gives the largest projection
    (companion of SVDStat_U).
    """
    def __init__(self):
        self.name = 'SVD_V'
        self.collection = 'gather'
    def report(self,c,sorn):
        rec_steps = shape(c.SVD_V)[0]
        similar_input = zeros((rec_steps,sorn.c.N_e))
        N_indices = max(c.norm_last_input_index)+1
        indices = [where(c.norm_last_input_index==i)[0] for i in
                   range(int(N_indices))]
        for s in xrange(rec_steps):
            for i in xrange(sorn.c.N_e):
                # V transforms input by taking product
                # Do same here and look which spike vector works best
                #~ overlaps = c.norm_last_input_spikes.T.dot(
                #~                                     c.SVD_V[s,:,i])
                #~ index_overlap = np.zeros(N_indices)
                #~ for j in range(int(N_indices)):
                #~     index_overlap[j] = mean(overlaps[indices[j]])
                #~ similar_input[s,i] = argmax(index_overlap)
                # No big difference to this, but probably more robust
                max_overlap = argmax(c.norm_last_input_spikes.T.dot(
                                     c.SVD_V[s,:,i])) # euclidean norm w/o sqrt
                similar_input[s,i] = c.norm_last_input_index[
                                     max_overlap]
        '''
        # For testing purposes command line
        !i = 30
        !similar_input[:,i]
        !c.SVD_U_sim[:,i]
        !figure()
        !plot(c.SVD_V[-1,:,i])
        !max_overlap = argmax(c.norm_last_input_spikes.T.dot(c.SVD_V[s,:,i]))
        !plot(c.norm_last_input_spikes[:,max_overlap])
        !figure()
        !plot(c.SVD_U[-1,:,i])
        !max_overlap = argmax(c.norm_last_input_spikes.T.dot(c.SVD_U[s,:,i]))
        !plot(c.norm_last_input_spikes[:,max_overlap])
        !show()
        '''
        return similar_input
class MeanActivityStat(AbstractStat):
    """
    This stat returns the mean activity for each inputindex

    With LFP=True the summed synaptic drive (a crude LFP proxy) is
    averaged instead of the population spike count.
    """
    def __init__(self,start,stop,N_indices,LFP=False):
        self._start = start
        self._stop = stop
        self._N_indices = N_indices
        self.name = 'meanactivity'
        self.collection = 'gather'
        self.LFP = LFP
        # Buffers the recurrent-drive contribution of the previous
        # step; -1 marks "nothing buffered yet"
        self.tmp = -1
    def clear(self,c,sorn):
        self.means = zeros(self._N_indices)
        self.counter = zeros(self._N_indices)
        self.step = 0
        self.index = None
    def add(self,c,sorn):
        if self.step > self._start and self.step < self._stop\
           and self.step>0:
            # for proper assignment, blank(-1)->0, 0->1...
            self.index = sorn.source.global_index()+1
        if self.index is not None:
            if self.tmp >= 0:
                self.counter[self.index] += 1.
            if self.LFP:
                # save input at current step, but can only compute
                # input for next step!
                if self.tmp >= 0:
                    self.means[self.index] += self.tmp+sum(sorn.W_eu
                                                           *sorn.u)
                self.tmp = sum(sorn.W_ee*sorn.x)
            else:
                if self.tmp >= 0:
                    self.means[self.index] += sum(sorn.x)
                self.tmp = 0 # dummy value never used
        #~ # +1 due to -1 for blank trials
        #~ self.index = sorn.source.global_index()+1
        self.step += 1
    def report(self,c,sorn):
        # Average per input index (counter tracks occurrences)
        return self.means/self.counter
class MeanPatternStat(AbstractStat):
    """Accumulate the mean excitatory activity pattern per input index.

    Input index -1 (blank) is shifted into slot 0, letter i into i+1.
    """
    def __init__(self,start,stop,N_indices):
        self._start = start
        self._stop = stop
        self._N_indices = N_indices
        self.name = 'meanpattern'
        self.collection = 'gather'
    def clear(self,c,sorn):
        self.step = 0
        self.index = None
        self.means = zeros((self._N_indices,sorn.c.N_e))
        self.counter = zeros(self._N_indices)
    def add(self,c,sorn):
        in_window = self._start < self.step < self._stop
        if in_window and self.step>0:
            # Shift so that blank (-1) maps to 0, letter 0 to 1, ...
            self.index = sorn.source.global_index()+1
        if self.index is not None:
            self.counter[self.index] += 1.
            self.means[self.index] += sorn.x
        self.step += 1
    def report(self,c,sorn):
        # Normalize each row by how often its index occurred
        return self.means/self.counter[:,None]
class PatternProbabilityStat(AbstractStat):
    """
    This stat estimates the probability distribution of patterns
    for different time intervals

    Intervals: List of 2-entry lists
        [[start1,stop1],...,[startn,stopn]]
    zero_correction: Bool
        Correct estimates by adding one observation to each pattern
    subset: 1-D array
        List of neuron indices that create the pattern
    """
    def __init__(self,intervals,subset,zero_correction=True):
        self.N_intervals = len(intervals)
        self.intervals = intervals
        self.zero_correction = zero_correction
        self.N_nodes = len(subset)
        self.subset = subset
        self.name = 'patternprobability'
        self.collection = 'gather'
        # Weights that interpret a binary pattern as an integer
        # (most significant bit first)
        self.conversion_array = [2**x for x in range(self.N_nodes)][::-1]
        def convert(x):
            return np.dot(x,self.conversion_array)
        self.convert = convert
    def clear(self,c,sorn):
        # One histogram over all 2**N_nodes patterns per interval
        self.patterns = zeros((self.N_intervals,2**self.N_nodes))
        self.step = 0
    def add(self,c,sorn):
        for (i,(start,stop)) in enumerate(self.intervals):
            if self.step > start and self.step < stop:
                # Convert spiking pattern to integer by taking the
                # pattern as a binary number
                self.patterns[i,self.convert(sorn.x[self.subset])] += 1
        self.step += 1
    def report(self,c,sorn):
        if self.zero_correction:
            # Laplace smoothing: one pseudo-observation per pattern
            self.patterns += 1
        # Normalize to probabilities
        self.patterns /= self.patterns.sum(1)[:,None]
        return self.patterns
class WeeFailureStat(AbstractStat):
    """Record the per-step fraction of E->E synapses whose transmission
    failed (synaptic failure model; W_ee exposes 'mask'/'masked' for
    the synapses that actually transmitted).
    """
    def __init__(self):
        self.name = 'weefail'
        self.collection = 'gather'
    def clear(self,c,sorn):
        c.weefail = zeros(sorn.c.N_steps)
        self.step = 0
    def add(self,c,sorn):
        if sorn.c.W_ee.use_sparse:
            N_weights = sorn.W_ee.W.data.shape[0]
            N_fail = N_weights-sum(sorn.W_ee.mask)
        else:
            N_weights = sum(sorn.W_ee.get_synapses()>0)
            N_fail = N_weights-sum(sorn.W_ee.masked>0)
        # Force float division: both counts are integers, so under
        # Python 2 classic division the original N_fail/N_weights
        # floored every partial failure fraction to 0
        c.weefail[self.step] = N_fail/float(N_weights)
        self.step += 1
    def report(self,c,sorn):
        return c.weefail
class WeeFailureFuncStat(AbstractStat):
    """Sample the synaptic failure function fail_f on [0, 1]."""
    def __init__(self):
        self.name = 'weefailfunc'
        self.collection = 'gather'
    def clear(self,c,sorn):
        # Sample once at clear time on 1000 evenly spaced points
        self.sample_points = np.linspace(0,1,1000)
        self.sample_values = sorn.W_ee.fail_f(self.sample_points)
    def add(self,c,sorn):
        pass
    def report(self,c,sorn):
        # Row 0: sample points, row 1: function values
        return np.array([self.sample_points,self.sample_values])
# From Philip
class XClassifierStat(AbstractStat):
    """Train linear classifiers that decode the input identity from the
    network state (x) and/or the thresholded rates (r) at several
    temporal offsets, and report their accuracies.
    """
    def __init__(self,steps=None, classify_x=True, \
                 classify_r=False,detailed=False,**args):
        '''Steps is a list with the step sizes over which to predict.
        e.g.
        - a step of +1 means predict the next state
        - a step of 0 means identify the current state
        - a step of -1 means identify the previous state
        '''
        if steps is None:
            steps = [0]
        self.steps = steps
        self.classify_x = classify_x
        self.classify_r = classify_r
        self.detailed = detailed
    @property
    def name(self):
        # The reported names depend on which classifiers are enabled
        ans = []
        if self.classify_x:
            ans.append('xclassifier')
        if self.classify_r:
            ans.append('rclassifier')
        return ans
    def build_classifier(self,inp,out,offset):
        # Use the input to build a classifier of the output with an
        # offset
        N = inp.shape[0]
        inp_aug = hstack([inp, ones((N,1))])  # append bias column
        (ib,ie) = (max(-offset,0),min(N-offset,N))
        (ob,oe) = (max(+offset,0),min(N+offset,N))
        try:
            ans = linalg.lstsq(inp_aug[ib:ie,:],out[ob:oe,:])[0]
        except LinAlgError:
            # Singular system - fall back to the zero classifier
            ans = zeros( (inp.shape[1]+1,out.shape[1]) )
        return ans
    def use_classifier(self,inp,classifier,offset,correct):
        # Evaluate per-class hit counts (num) and totals (den)
        N = inp.shape[0]
        L = classifier.shape[1]
        inp_aug = hstack([inp, ones((N,1))])
        (ib,ie) = (max(-offset,0),min(N-offset,N))
        (ob,oe) = (max(+offset,0),min(N+offset,N))
        ind = argmax(inp_aug[ib:ie,:].dot(classifier),1)
        actual = argmax(correct,1)[ob:oe]
        num = zeros(L)
        den = zeros(L)
        for l in range(L):
            l_ind = actual==l
            num[l] = sum(actual[l_ind]==ind[l_ind])
            den[l] = sum(l_ind)
        return (num,den)
    def report(self,_,sorn):
        c = sorn.c
        #Disable plasticity when measuring network
        sorn.update = False
        #Don't track statistics when measuring either
        self.parent.disable = True
        #Build classifiers
        Nr = c.test_num_train
        Nt = c.test_num_test
        #~ (Xr,Rr,Ur) = sorn.simulation(Nr)
        dic = sorn.simulation(Nr,['X','R_x','U'])
        Xr = dic['X']
        Rr = dic['R_x']
        Ur = dic['U']
        #~ (Xt,Rt,Ut) = sorn.simulation(Nt)
        dic = sorn.simulation(Nt,['X','R_x','U'])
        Xt = dic['X']
        Rt = dic['R_x']
        Ut = dic['U']
        L = Ur.shape[1]
        # Threshold the rates to binary values
        Rr = (Rr >= 0.0)+0
        Rt = (Rt >= 0.0)+0
        r = []
        x = []
        detail_r=[]
        detail_x=[]
        for step in self.steps:
            if self.classify_x:
                classifier = self.build_classifier(Xr,Ur,step)
                (num,den) = self.use_classifier(Xt,classifier,step,Ut)
                ans = sum(num)/sum(den)
                x.append(ans)
                if self.detailed:
                    detail_x.append(num/(den+1e-20))
            if self.classify_r:
                classifier = self.build_classifier(Rr,Ur,step)
                (num,den) = self.use_classifier(Rt,classifier,step,Ut)
                ans = sum(num)/sum(den)
                r.append(ans)
                if self.detailed:
                    detail_r.append(num/(den+1e-20))
        # Report (name, collection, data) tuples for each classifier
        ans = []
        if self.classify_x:
            ans.append( ('xclassifier', 'reduce', array(x)) )
            if self.detailed:
                ans.append( ('x_detail_classifier%d'%L,'reduce',\
                             array(detail_x)) )
        if self.classify_r:
            ans.append( ('rclassifier', 'reduce', array(r)) )
            if self.detailed:
                ans.append( ('r_detail_classifier%d'%L,'reduce',\
                             array(detail_r)) )
        # Re-enable plasticity and stats tracking
        sorn.update = True
        self.parent.disable = False
        return ans
# From Philip
class XTotalsStat(AbstractStat):
    """Accumulate the total spike count of every excitatory unit."""
    def __init__(self):
        self.name = 'x_tot'
        self.collection = 'gather'
    def clear(self,c,obj):
        # One running counter per excitatory neuron
        c.x_tot = zeros(obj.c.N_e)
    def add(self,c,obj):
        c.x_tot += obj.x
    def report(self,c,obj):
        return c.x_tot
# From Philip
class YTotalsStat(AbstractStat):
    """Accumulate the total spike count of every inhibitory unit."""
    def __init__(self):
        self.name = 'y_tot'
        self.collection = 'gather'
    def clear(self,c,obj):
        # One running counter per inhibitory neuron
        c.y_tot = zeros(obj.c.N_i)
    def add(self,c,obj):
        c.y_tot += obj.y
    def report(self,c,obj):
        return c.y_tot
# From Philip
class SynapticDistributionStat(AbstractStat):
    """Collect all realized E->E synaptic strengths as a flat vector."""
    def __init__(self,collection='gatherv'):
        self.name = 'synaptic_strength'
        self.collection = collection
    def report(self,_,sorn):
        # Transposed so the memory layout groups each neuron's synapses
        # together before the boolean mask flattens them
        weights = sorn.W_ee.T
        mask = sorn.M_ee.T
        return weights[mask]
# From Philip
class SuccessiveStat(AbstractStat):
    """Histogram transitions of the population spike count:
    successive[a,b] counts steps with a spikes followed by b spikes.
    """
    def __init__(self):
        self.name = 'successive'
        self.collection = 'reduce'
    def clear(self,c,sorn):
        n = sorn.c.N_e
        c.successive = zeros( (n+1,n+1) )
        # Seed the "previous count" with the current state
        c.successive_prev = sum(sorn.x)
    def add(self, c, sorn):
        count = sum(sorn.x)
        c.successive[c.successive_prev,count] += 1.0
        c.successive_prev = count
    def report(self,c,sorn):
        return c.successive
# From Philip
class RClassifierStat(AbstractStat):
    """Evaluate up to three input decoders on fresh simulation data:
    0) lstsq on thresholded rates, 1) pseudo-inverse on thresholded
    rates, 2) lstsq predicting the next input from the current state x.
    'select' toggles which of the three are computed.
    """
    def __init__(self,select=None):
        if select is None:
            select = [True,True,True]
        self.name = 'classifier'
        self.collection = 'reduce'
        self.select = select
    def report(self,_,sorn):
        c = sorn.c
        # Freeze plasticity and stats while generating evaluation data
        sorn.update = False
        self.parent.disable = True
        #Build classifiers
        N = c.test_num_train
        #~ (X,R,U) = sorn.simulation(N)
        dic = sorn.simulation(N,['X','R_x','U'])
        X = dic['X']
        R = dic['R_x']
        U = dic['U']
        # Threshold rates and append a bias column
        R = hstack([R>=0,ones((N,1))])
        if self.select[0]:
            classifier0 = linalg.lstsq(R,U)[0]
        if self.select[1]:
            classifier1 = dot(linalg.pinv(R),U)
        if self.select[2]:
            X_aug = hstack([X, ones((N,1))])
            # Predict the next input from the current state
            classifier2 = linalg.lstsq(X_aug[:-1,:],U[1:,:])[0]
        #Now test classifiers
        N = c.test_num_test
        #~ (X,R,U) = sorn.simulation(N)
        dic = sorn.simulation(N,['X','R_x','U'])
        X = dic['X']
        R = dic['R_x']
        U = dic['U']
        R = hstack([R>=0,ones((N,1))])
        if self.select[0]:
            ind0 = argmax(dot(R,classifier0),1)
        if self.select[1]:
            ind1 = argmax(dot(R,classifier1),1)
        if self.select[2]:
            X_aug = hstack([X, ones((N,1))])
            ind2 = argmax(dot(X_aug[:-1,:],classifier2),1)
        actual = argmax(U,1)
        ans = []
        if self.select[0]:
            ans.append(mean(actual==ind0))
        if self.select[1]:
            ans.append(mean(actual==ind1))
        if self.select[2]:
            ans.append(mean(actual[1:]==ind2))
        # Restore plasticity and stats collection
        sorn.update = True
        self.parent.disable = False
        return array(ans)
class WeightHistoryStat(HistoryStat):
    """HistoryStat variant that snapshots a weight matrix via
    get_synapses() instead of recording the raw variable.
    """
    def add(self,c,obj):
        # Record every record_every_nth calls; always advance counter
        due = c.history[self.counter] % self.record_every_nth == 0
        if due:
            synapses = _getvar(obj,self.var).get_synapses()
            c.history[self.name].append(np.copy(synapses))
        c.history[self.counter] += 1
| Saran-nns/SORN | common/sorn_stats.py | Python | mit | 74,077 | [
"NEURON"
] | 20d4638cf3db4898f7c31d2c53c9c08ff3c5dd208b6af0cc07dffa31f92baec2 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Nick Hall
# Copyright (C) 2011-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Alex Roitman,
# largely based on the BaseDoc classes by Don Allingham
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from __future__ import print_function
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".Book")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from ...listmodel import ListModel
from gramps.gen.errors import FilterError, ReportError
from gramps.gen.constfunc import cuni
from ...pluginmanager import GuiPluginManager
from ...dialog import WarningDialog, ErrorDialog
from gramps.gen.plug.menu import PersonOption, FilterOption, FamilyOption
from gramps.gen.plug.docgen import StyleSheet
from ...managedwindow import ManagedWindow, set_titles
from ...glade import Glade
from ...utils import is_right_click, open_file_with_default_application
from ...user import User
from .. import make_gui_option
# Import from specific modules in ReportBase
from gramps.gen.plug.report import BookList, Book, BookItem, append_styles
from gramps.gen.plug.report import CATEGORY_BOOK, book_categories
from gramps.gen.plug.report._options import ReportOptions
from ._reportdialog import ReportDialog
from ._docreportdialog import DocReportDialog
from gramps.gen.display.name import displayer as _nd
#------------------------------------------------------------------------
#
# Private Constants
#
#------------------------------------------------------------------------
# Category label shown for plugins that are unsupported or unregistered.
_UNSUPPORTED = _("Unsupported")

# Key values used to treat Enter / keypad-Enter as "OK" in list dialogs.
_RETURN = Gdk.keyval_from_name("Return")
_KP_ENTER = Gdk.keyval_from_name("KP_Enter")
#------------------------------------------------------------------------
#
# Private Functions
#
#------------------------------------------------------------------------
def _initialize_options(options, dbstate, uistate):
"""
Validates all options by making sure that their values are consistent with
the database.
menu: The Menu class
dbase: the database the options will be applied to
"""
if not hasattr(options, "menu"):
return
dbase = dbstate.get_database()
menu = options.menu
for name in menu.get_all_option_names():
option = menu.get_option_by_name(name)
value = option.get_value()
if isinstance(option, PersonOption):
if not dbase.get_person_from_gramps_id(value):
person_handle = uistate.get_active('Person')
person = dbase.get_person_from_handle(person_handle)
option.set_value(person.get_gramps_id())
elif isinstance(option, FamilyOption):
if not dbase.get_family_from_gramps_id(value):
person_handle = uistate.get_active('Person')
person = dbase.get_person_from_handle(person_handle)
family_list = person.get_family_handle_list()
if family_list:
family_handle = family_list[0]
else:
try:
family_handle = next(dbase.iter_family_handles())
except StopIteration:
family_handle = None
if family_handle:
family = dbase.get_family_from_handle(family_handle)
option.set_value(family.get_gramps_id())
else:
print("No family specified for ", name)
def _get_subject(options, dbase):
"""
Attempts to determine the subject of a set of options. The subject would
likely be a person (using a PersonOption) or a filter (using a
FilterOption)
options: The ReportOptions class
dbase: the database for which it corresponds
"""
if not hasattr(options, "menu"):
return ""
menu = options.menu
option_names = menu.get_all_option_names()
if not option_names:
return _("Entire Database")
for name in option_names:
option = menu.get_option_by_name(name)
if isinstance(option, FilterOption):
return option.get_filter().get_name()
elif isinstance(option, PersonOption):
gid = option.get_value()
person = dbase.get_person_from_gramps_id(gid)
return _nd.display(person)
elif isinstance(option, FamilyOption):
family = dbase.get_family_from_gramps_id(option.get_value())
if not family:
return ""
family_id = family.get_gramps_id()
fhandle = family.get_father_handle()
mhandle = family.get_mother_handle()
if fhandle:
father = dbase.get_person_from_handle(fhandle)
father_name = _nd.display(father)
else:
father_name = _("unknown father")
if mhandle:
mother = dbase.get_person_from_handle(mhandle)
mother_name = _nd.display(mother)
else:
mother_name = _("unknown mother")
name = _("%(father)s and %(mother)s (%(id)s)") % {
'father' : father_name,
'mother' : mother_name,
'id' : family_id }
return name
return ""
#------------------------------------------------------------------------
#
# BookList Display class
#
#------------------------------------------------------------------------
class BookListDisplay(object):
    """
    Interface into a dialog with the list of available books.

    Allows the user to select and/or delete a book from the list.
    The dialog is modal: __init__ blocks in ``self.top.run()``.
    """
    def __init__(self, booklist, nodelete=0, dosave=0):
        """
        Create a BookListDisplay object that displays the books in BookList.

        booklist: books that are displayed
        nodelete: if not 0 then the Delete button is hidden
        dosave: if 1 then the book list is saved on hitting OK
        """
        self.booklist = booklist
        self.dosave = dosave
        self.xml = Glade('book.glade')
        self.top = self.xml.toplevel
        self.unsaved_changes = False
        set_titles(self.top, self.xml.get_object('title2'),
                   _('Available Books'))
        if nodelete:
            delete_button = self.xml.get_object("delete_button")
            delete_button.hide()
        # The glade file contains two toplevels; every signal of the other
        # dialog is routed to do_nothing to silence connect warnings.
        self.xml.connect_signals({
            "on_booklist_cancel_clicked" : self.on_booklist_cancel_clicked,
            "on_booklist_ok_clicked" : self.on_booklist_ok_clicked,
            "on_booklist_delete_clicked" : self.on_booklist_delete_clicked,
            "on_book_ok_clicked" : self.do_nothing,
            "destroy_passed_object" : self.do_nothing,
            "on_setup_clicked" : self.do_nothing,
            "on_down_clicked" : self.do_nothing,
            "on_up_clicked" : self.do_nothing,
            "on_remove_clicked" : self.do_nothing,
            "on_add_clicked" : self.do_nothing,
            "on_edit_clicked" : self.do_nothing,
            "on_open_clicked" : self.do_nothing,
            "on_save_clicked" : self.do_nothing,
            "on_clear_clicked" : self.do_nothing
            })
        self.guilistbooks = self.xml.get_object('list')
        self.guilistbooks.connect('button-press-event', self.on_button_press)
        self.guilistbooks.connect('key-press-event', self.on_key_pressed)
        self.blist = ListModel(self.guilistbooks, [('Name',-1,10)],)
        self.redraw()
        # ``selection`` is set by the OK handler; None means "no choice".
        self.selection = None
        self.top.run()

    def redraw(self):
        """Redraws the list of currently available books"""
        self.blist.model.clear()
        names = self.booklist.get_book_names()
        if not len(names):
            return
        for name in names:
            the_iter = self.blist.add([name])
        # Select the last added row so the list never shows no selection.
        if the_iter:
            self.blist.selection.select_iter(the_iter)

    def on_booklist_ok_clicked(self, obj):
        """Return selected book. Saves the current list into xml file."""
        store, the_iter = self.blist.get_selected()
        if the_iter:
            data = self.blist.get_data(the_iter, [0])
            self.selection = self.booklist.get_book(cuni(data[0]))
        if self.dosave:
            self.booklist.save()

    def on_booklist_delete_clicked(self, obj):
        """
        Deletes selected book from the list.

        This change is not final. OK button has to be clicked to save the list.
        """
        store, the_iter = self.blist.get_selected()
        if not the_iter:
            return
        data = self.blist.get_data(the_iter, [0])
        self.booklist.delete_book(cuni(data[0]))
        self.blist.remove(the_iter)
        self.unsaved_changes = True
        # Re-enter the dialog loop so the dialog stays open after Delete.
        self.top.run()

    def on_booklist_cancel_clicked(self, obj):
        # Warn about pending deletions before discarding them.
        if self.unsaved_changes:
            from ...dialog import QuestionDialog2
            q = QuestionDialog2(
                _('Discard Unsaved Changes'),
                _('You have made changes which have not been saved.'),
                _('Proceed'),
                _('Cancel'))
            if q.run():
                return
            else:
                # User cancelled the cancel: keep the dialog open.
                self.top.run()

    def on_button_press(self, obj, event):
        """
        Checks for a double click event. In the list, we want to
        treat a double click as if it was OK button press.
        """
        if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
            store, the_iter = self.blist.get_selected()
            if not the_iter:
                return False
            self.on_booklist_ok_clicked(obj)
            #emit OK response on dialog to close it automatically
            self.top.response(-5)
            return True
        return False

    def on_key_pressed(self, obj, event):
        """
        Handles the return key being pressed on list. If the key is pressed,
        the Edit button handler is called
        """
        if event.type == Gdk.EventType.KEY_PRESS:
            if event.keyval in (_RETURN, _KP_ENTER):
                self.on_booklist_ok_clicked(obj)
                #emit OK response on dialog to close it automatically
                self.top.response(-5)
                return True
        return False

    def do_nothing(self, object):
        # Sink for signals that belong to the other glade toplevel.
        pass
#------------------------------------------------------------------------
#
# Book Options
#
#------------------------------------------------------------------------
class BookOptions(ReportOptions):
    """
    Defines options and provides handling interface.

    The only book-specific option is the mandatory book name, which
    selects a book saved in books.xml.
    """
    def __init__(self, name, dbase):
        ReportOptions.__init__(self, name, dbase)
        # Options specific for this report
        self.options_dict = {
            'bookname' : '',
        }
        # Help tuple: (cli flag form, description, valid values, mandatory?)
        self.options_help = {
            'bookname' : ("=name",_("Name of the book. MANDATORY"),
                          BookList('books.xml',dbase).get_book_names(),
                          False),
        }
#-------------------------------------------------------------------------
#
# Book creation dialog
#
#-------------------------------------------------------------------------
class BookSelector(ManagedWindow):
    """
    Interface into a dialog setting up the book.

    Allows the user to add/remove/reorder/setup items for the current book
    and to clear/load/save/edit whole books.
    """
    def __init__(self, dbstate, uistate):
        self.db = dbstate.db
        self.dbstate = dbstate
        self.uistate = uistate
        self.title = _('Book')
        self.file = "books.xml"
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        self.xml = Glade('book.glade', toplevel="top")
        window = self.xml.toplevel
        title_label = self.xml.get_object('title')
        self.set_window(window, title_label, self.title)
        window.show()
        # NOTE(review): 'on_booklist_ok_clicked' appears three times below;
        # duplicate dict keys are harmless (last one wins) but redundant.
        self.xml.connect_signals({
            "on_add_clicked" : self.on_add_clicked,
            "on_remove_clicked" : self.on_remove_clicked,
            "on_up_clicked" : self.on_up_clicked,
            "on_down_clicked" : self.on_down_clicked,
            "on_setup_clicked" : self.on_setup_clicked,
            "on_clear_clicked" : self.on_clear_clicked,
            "on_save_clicked" : self.on_save_clicked,
            "on_open_clicked" : self.on_open_clicked,
            "on_edit_clicked" : self.on_edit_clicked,
            "on_book_ok_clicked" : self.on_book_ok_clicked,
            "destroy_passed_object" : self.close,
            # Insert dummy handlers for second top level in the glade file
            "on_booklist_ok_clicked" : lambda _:None,
            "on_booklist_delete_clicked": lambda _:None,
            "on_booklist_cancel_clicked": lambda _:None,
            "on_booklist_ok_clicked" : lambda _:None,
            "on_booklist_ok_clicked" : lambda _:None,
            })
        self.avail_tree = self.xml.get_object("avail_tree")
        self.book_tree = self.xml.get_object("book_tree")
        self.avail_tree.connect('button-press-event', self.avail_button_press)
        self.book_tree.connect('button-press-event', self.book_button_press)
        self.name_entry = self.xml.get_object("name_entry")
        self.name_entry.set_text(_('New Book'))
        avail_label = self.xml.get_object('avail_label')
        avail_label.set_text("<b>%s</b>" % _("_Available items"))
        avail_label.set_use_markup(True)
        avail_label.set_use_underline(True)
        book_label = self.xml.get_object('book_label')
        book_label.set_text("<b>%s</b>" % _("Current _book"))
        book_label.set_use_underline(True)
        book_label.set_use_markup(True)
        # Column layout: (title, sort column, width); -1 means unsorted.
        avail_titles = [ (_('Name'), 0, 230),
                         (_('Type'), 1, 80 ),
                         ( '' , -1, 0 ) ]
        book_titles = [ (_('Item name'), -1, 230),
                        (_('Type'), -1, 80 ),
                        ( '', -1, 0 ),
                        (_('Subject'), -1, 50 ) ]
        self.avail_nr_cols = len(avail_titles)
        self.book_nr_cols = len(book_titles)
        self.avail_model = ListModel(self.avail_tree, avail_titles)
        self.book_model = ListModel(self.book_tree, book_titles)
        self.draw_avail_list()
        self.book = Book()

    def build_menu_names(self, obj):
        return (_("Book selection list"), self.title)

    def draw_avail_list(self):
        """
        Draw the list with the selections available for the book.

        The selections are read from the book item registry.
        """
        pmgr = GuiPluginManager.get_instance()
        regbi = pmgr.get_reg_bookitems()
        if not regbi:
            return
        available_reports = []
        for pdata in regbi:
            category = _UNSUPPORTED
            if pdata.supported and pdata.category in book_categories:
                category = book_categories[pdata.category]
            available_reports.append([ pdata.name, category, pdata.id ])
        for data in sorted(available_reports):
            new_iter = self.avail_model.add(data)
        self.avail_model.connect_model()
        # Select and scroll to the last item added so the list shows content.
        if new_iter:
            self.avail_model.selection.select_iter(new_iter)
            path = self.avail_model.model.get_path(new_iter)
            col = self.avail_tree.get_column(0)
            self.avail_tree.scroll_to_cell(path, col, 1, 1, 0.0)

    def open_book(self, book):
        """
        Open the book: set the current set of selections to this book's items.

        book: the book object to load.
        """
        # Copy paper/format settings only where the saved book has them.
        if book.get_paper_name():
            self.book.set_paper_name(book.get_paper_name())
        if book.get_orientation() is not None: # 0 is legal
            self.book.set_orientation(book.get_orientation())
        if book.get_paper_metric() is not None: # 0 is legal
            self.book.set_paper_metric(book.get_paper_metric())
        if book.get_custom_paper_size():
            self.book.set_custom_paper_size(book.get_custom_paper_size())
        if book.get_margins():
            self.book.set_margins(book.get_margins())
        if book.get_format_name():
            self.book.set_format_name(book.get_format_name())
        if book.get_output():
            self.book.set_output(book.get_output())
        if book.get_dbname() == self.db.get_save_path():
            same_db = 1
        else:
            same_db = 0
            WarningDialog(_('Different database'), _(
                'This book was created with the references to database '
                '%s.\n\n This makes references to the central person '
                'saved in the book invalid.\n\n'
                'Therefore, the central person for each item is being set '
                'to the active person of the currently opened database.' )
                % book.get_dbname() )
        self.book.clear()
        self.book_model.clear()
        for saved_item in book.get_item_list():
            name = saved_item.get_name()
            item = BookItem(self.db, name)
            item.option_class = saved_item.option_class
            # The option values were loaded magically by the book parser.
            # But they still need to be applied to the menu options.
            opt_dict = item.option_class.handler.options_dict
            menu = item.option_class.menu
            for optname in opt_dict:
                menu_option = menu.get_option_by_name(optname)
                if menu_option:
                    menu_option.set_value(opt_dict[optname])
            _initialize_options(item.option_class, self.dbstate, self.uistate)
            item.set_style_name(saved_item.get_style_name())
            self.book.append_item(item)
            data = [ item.get_translated_name(),
                     item.get_category(), item.get_name() ]
            data[2] = _get_subject(item.option_class, self.db)
            self.book_model.add(data)

    def on_add_clicked(self, obj):
        """
        Add an item to the current selections.

        Use the selected available item to get the item's name in the registry.
        """
        store, the_iter = self.avail_model.get_selected()
        if not the_iter:
            return
        data = self.avail_model.get_data(the_iter, list(range(self.avail_nr_cols)))
        item = BookItem(self.db, data[2])
        _initialize_options(item.option_class, self.dbstate, self.uistate)
        # Replace the registry id column with the item's subject text.
        data[2] = _get_subject(item.option_class, self.db)
        self.book_model.add(data)
        self.book.append_item(item)

    def on_remove_clicked(self, obj):
        """
        Remove the item from the current list of selections.
        """
        store, the_iter = self.book_model.get_selected()
        if not the_iter:
            return
        row = self.book_model.get_selected_row()
        self.book.pop_item(row)
        self.book_model.remove(the_iter)

    def on_clear_clicked(self, obj):
        """
        Clear the whole current book.
        """
        self.book_model.clear()
        self.book.clear()

    def on_up_clicked(self, obj):
        """
        Move the currently selected item one row up in the selection list.
        """
        row = self.book_model.get_selected_row()
        # Row 0 ("not row") can't move further up.
        if not row or row == -1:
            return
        store, the_iter = self.book_model.get_selected()
        data = self.book_model.get_data(the_iter, list(range(self.book_nr_cols)))
        self.book_model.remove(the_iter)
        self.book_model.insert(row-1, data, None, 1)
        item = self.book.pop_item(row)
        self.book.insert_item(row-1, item)

    def on_down_clicked(self, obj):
        """
        Move the currently selected item one row down in the selection list.
        """
        row = self.book_model.get_selected_row()
        if row + 1 >= self.book_model.count or row == -1:
            return
        store, the_iter = self.book_model.get_selected()
        data = self.book_model.get_data(the_iter, list(range(self.book_nr_cols)))
        self.book_model.remove(the_iter)
        self.book_model.insert(row+1, data, None, 1)
        item = self.book.pop_item(row)
        self.book.insert_item(row+1, item)

    def on_setup_clicked(self, obj):
        """
        Configure currently selected item.
        """
        store, the_iter = self.book_model.get_selected()
        if not the_iter:
            WarningDialog(_('No selected book item'),
                          _('Please select a book item to configure.')
                         )
            return
        data = self.book_model.get_data(the_iter, list(range(self.book_nr_cols)))
        row = self.book_model.get_selected_row()
        item = self.book.get_item(row)
        option_class = item.option_class
        option_class.handler.set_default_stylesheet_name(item.get_style_name())
        item.is_from_saved_book = bool(self.book.get_name())
        item_dialog = BookItemDialog(self.dbstate, self.uistate,
                                     item, self.track)
        while True:
            response = item_dialog.window.run()
            if response == Gtk.ResponseType.OK:
                # dialog will be closed by connect, now continue work while
                # rest of dialog is unresponsive, release when finished
                style = option_class.handler.get_default_stylesheet_name()
                item.set_style_name(style)
                subject = _get_subject(option_class, self.db)
                self.book_model.model.set_value(the_iter, 2, subject)
                self.book.set_item(row, item)
                item_dialog.close()
                break
            elif response == Gtk.ResponseType.CANCEL:
                item_dialog.close()
                break
            elif response == Gtk.ResponseType.DELETE_EVENT:
                #just stop, in ManagedWindow, delete-event is already coupled to
                #correct action.
                break

    def book_button_press(self, obj, event):
        """
        Double-click on the current book selection is the same as setup.
        Right click evokes the context menu.
        """
        if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
            self.on_setup_clicked(obj)
        elif is_right_click(event):
            self.build_book_context_menu(event)

    def avail_button_press(self, obj, event):
        """
        Double-click on the available selection is the same as add.
        Right click evokes the context menu.
        """
        if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
            self.on_add_clicked(obj)
        elif is_right_click(event):
            self.build_avail_context_menu(event)

    def build_book_context_menu(self, event):
        """Builds the menu with item-centered and book-centered options."""
        store, the_iter = self.book_model.get_selected()
        if the_iter:
            sensitivity = 1
        else:
            sensitivity = 0
        # (label/stock id, callback, sensitive?) -- None label is a separator.
        entries = [
            (Gtk.STOCK_GO_UP, self.on_up_clicked, sensitivity),
            (Gtk.STOCK_GO_DOWN, self.on_down_clicked, sensitivity),
            (_("Setup"), self.on_setup_clicked, sensitivity),
            (Gtk.STOCK_REMOVE, self.on_remove_clicked, sensitivity),
            (None,None,0),
            (Gtk.STOCK_CLEAR, self.on_clear_clicked, 1),
            (Gtk.STOCK_SAVE, self.on_save_clicked, 1),
            (Gtk.STOCK_OPEN, self.on_open_clicked, 1),
            (_("Edit"), self.on_edit_clicked, 1),
        ]
        menu = Gtk.Menu()
        menu.set_title(_('Book Menu'))
        for stock_id, callback, sensitivity in entries:
            item = Gtk.ImageMenuItem(stock_id)
            if callback:
                item.connect("activate", callback)
            item.set_sensitive(sensitivity)
            item.show()
            menu.append(item)
        menu.popup(None, None, None, None, event.button, event.time)

    def build_avail_context_menu(self, event):
        """Builds the menu with the single Add option."""
        store, the_iter = self.avail_model.get_selected()
        if the_iter:
            sensitivity = 1
        else:
            sensitivity = 0
        entries = [
            (Gtk.STOCK_ADD, self.on_add_clicked, sensitivity),
        ]
        menu = Gtk.Menu()
        menu.set_title(_('Available Items Menu'))
        for stock_id, callback, sensitivity in entries:
            item = Gtk.ImageMenuItem(stock_id)
            if callback:
                item.connect("activate", callback)
            item.set_sensitive(sensitivity)
            item.show()
            menu.append(item)
        menu.popup(None, None, None, None, event.button, event.time)

    def on_book_ok_clicked(self, obj):
        """
        Run final BookDialog with the current book.
        """
        if self.book.item_list:
            BookDialog(self.dbstate, self.uistate,
                       self.book, BookOptions)
        else:
            WarningDialog(_('No items'), _('This book has no items.'))
            return
        self.close()

    def on_save_clicked(self, obj):
        """
        Save the current book in the xml booklist file.
        """
        self.book_list = BookList(self.file, self.db)
        name = cuni(self.name_entry.get_text())
        if not name:
            WarningDialog(_('No book name'), _(
                'You are about to save away a book with no name.\n\n'
                'Please give it a name before saving it away.')
            )
            return
        if name in self.book_list.get_book_names():
            # Confirm overwriting an existing book of the same name.
            from ...dialog import QuestionDialog2
            q = QuestionDialog2(
                _('Book name already exists'),
                _('You are about to save away a '
                  'book with a name which already exists.'
                 ),
                _('Proceed'),
                _('Cancel'))
            if q.run():
                self.book.set_name(name)
            else:
                return
        else:
            self.book.set_name(name)
        self.book.set_dbname(self.db.get_save_path())
        self.book_list.set_book(name, self.book)
        self.book_list.save()

    def on_open_clicked(self, obj):
        """
        Run the BookListDisplay dialog to present the choice of books to open.
        """
        self.book_list = BookList(self.file, self.db)
        booklistdisplay = BookListDisplay(self.book_list, 1, 0)
        booklistdisplay.top.destroy()
        book = booklistdisplay.selection
        if book:
            self.open_book(book)
            self.name_entry.set_text(book.get_name())
            self.book.set_name(book.get_name())

    def on_edit_clicked(self, obj):
        """
        Run the BookListDisplay dialog to present the choice of books to delete.
        """
        self.book_list = BookList(self.file, self.db)
        booklistdisplay = BookListDisplay(self.book_list, 0, 1)
        booklistdisplay.top.destroy()
        book = booklistdisplay.selection
        if book:
            self.open_book(book)
            self.name_entry.set_text(book.get_name())
            self.book.set_name(book.get_name())
#------------------------------------------------------------------------
#
# Book Item Options dialog
#
#------------------------------------------------------------------------
class BookItemDialog(ReportDialog):
    """
    This class overrides the interface methods common for different reports
    in a way specific for this report. This is a book item dialog.
    """
    def __init__(self, dbstate, uistate, item, track=[]):
        # NOTE(review): mutable default for 'track' is shared across calls;
        # kept as-is for interface compatibility (callers always pass it).
        option_class = item.option_class
        name = item.get_name()
        translated_name = item.get_translated_name()
        self.category = CATEGORY_BOOK
        self.database = dbstate.db
        self.option_class = option_class
        self.is_from_saved_book = item.is_from_saved_book
        ReportDialog.__init__(self, dbstate, uistate,
                              option_class, name, translated_name, track)

    def on_ok_clicked(self, obj):
        """The user is satisfied with the dialog choices. Parse all options
        and close the window."""
        # Preparation
        self.parse_style_frame()
        self.parse_user_options()
        self.options.handler.save_options()

    def setup_target_frame(self):
        """Target frame is not used."""
        pass

    def parse_target_frame(self):
        """Target frame is not used."""
        return 1

    def init_options(self, option_class):
        # option_class may be a class (instantiate it) or an already
        # constructed instance (issubclass raises TypeError -> use as-is).
        try:
            if issubclass(option_class, object):
                self.options = option_class(self.raw_name, self.db)
        except TypeError:
            self.options = option_class
        if not self.is_from_saved_book:
            self.options.load_previous_values()

    def add_user_options(self):
        """
        Generic method to add user options to the gui.
        """
        if not hasattr(self.options, "menu"):
            return
        menu = self.options.menu
        options_dict = self.options.options_dict
        for category in menu.get_categories():
            for name in menu.get_option_names(category):
                option = menu.get_option(category, name)
                # override option default with xml-saved value:
                if name in options_dict:
                    option.set_value(options_dict[name])
                widget, label = make_gui_option(option, self.dbstate,
                                                self.uistate, self.track,
                                                self.is_from_saved_book)
                if widget is not None:
                    if label:
                        self.add_frame_option(category,
                                              option.get_label(),
                                              widget)
                    else:
                        self.add_frame_option(category, "", widget)
#-------------------------------------------------------------------------
#
# _BookFormatComboBox
#
#-------------------------------------------------------------------------
class _BookFormatComboBox(Gtk.ComboBox):
    """Combo box listing docgen plugins that support both text and drawing.

    active: the file extension of the plugin to pre-select.
    """
    def __init__(self, active):
        GObject.GObject.__init__(self)
        pmgr = GuiPluginManager.get_instance()
        self.__bookdoc_plugins = []
        # A book can mix text and graphical reports, so only offer
        # plugins that support both.
        for plugin in pmgr.get_docgen_plugins():
            if plugin.get_text_support() and plugin.get_draw_support():
                self.__bookdoc_plugins.append(plugin)
        self.store = Gtk.ListStore(GObject.TYPE_STRING)
        self.set_model(self.store)
        cell = Gtk.CellRendererText()
        self.pack_start(cell, True)
        self.add_attribute(cell, 'text', 0)
        index = 0
        active_index = 0
        for plugin in self.__bookdoc_plugins:
            name = plugin.get_name()
            self.store.append(row=[name])
            if plugin.get_extension() == active:
                active_index = index
            index += 1
        self.set_active(active_index)

    def get_active_plugin(self):
        """
        Get the plugin represented by the currently active selection.
        """
        return self.__bookdoc_plugins[self.get_active()]
#------------------------------------------------------------------------
#
# The final dialog - paper, format, target, etc.
#
#------------------------------------------------------------------------
class BookDialog(DocReportDialog):
    """
    A usual Report.Dialog subclass.

    Create a dialog selecting target, format, and paper/HTML options,
    then (on OK) generate the whole book.
    """
    def __init__(self, dbstate, uistate, book, options):
        self.format_menu = None
        self.options = options
        self.is_from_saved_book = False
        self.page_html_added = False
        self.book = book
        DocReportDialog.__init__(self, dbstate, uistate, options,
                                 'book', _("Book"))
        self.options.options_dict['bookname'] = self.book.name
        self.database = dbstate.db
        response = self.window.run()
        if response == Gtk.ResponseType.OK:
            # Push any paper/format choices made in the dialog back into
            # the book object before generating it.
            handler = oh = self.options.handler
            if self.book.get_paper_name() != handler.get_paper_name():
                self.book.set_paper_name(handler.get_paper_name())
            if self.book.get_orientation() != handler.get_orientation():
                self.book.set_orientation(handler.get_orientation())
            if self.book.get_paper_metric() != handler.get_paper_metric():
                self.book.set_paper_metric(handler.get_paper_metric())
            if self.book.get_custom_paper_size() != oh.get_custom_paper_size():
                self.book.set_custom_paper_size(oh.get_custom_paper_size())
            if self.book.get_margins() != handler.get_margins():
                self.book.set_margins(handler.get_margins())
            if self.book.get_format_name() != handler.get_format_name():
                self.book.set_format_name(handler.get_format_name())
            if self.book.get_output() != self.options.get_output():
                self.book.set_output(self.options.get_output())
            try:
                self.make_book()
            except (IOError, OSError) as msg:
                ErrorDialog(str(msg))
        self.close()

    def setup_style_frame(self): pass
    def setup_other_frames(self): pass
    def parse_style_frame(self): pass

    def get_title(self):
        return _("Book")

    def get_header(self, name):
        return _("Gramps Book")

    def make_doc_menu(self, active=None):
        """Build a menu of document types that are appropriate for
        this text report. This menu will be generated based upon
        whether the document requires table support, etc."""
        self.format_menu = _BookFormatComboBox( active )

    def make_document(self):
        """Create a document of the type requested by the user."""
        user = User()
        self.rptlist = []
        selected_style = StyleSheet()
        pstyle = self.paper_frame.get_paper_style()
        self.doc = self.format(None, pstyle)
        for item in self.book.get_item_list():
            item.option_class.set_document(self.doc)
            report_class = item.get_write_item()
            # write_book_item returns None for items that failed; the
            # None entries are skipped in make_book().
            obj = write_book_item(self.database, report_class,
                                  item.option_class, user)
            self.rptlist.append(obj)
            append_styles(selected_style, item)
        self.doc.set_style_sheet(selected_style)
        self.doc.open(self.target_path)

    def make_book(self):
        """The actual book. Start it out, then go through the item list
        and call each item's write_book_item method."""
        self.doc.init()
        newpage = 0
        for rpt in self.rptlist:
            # Page break between items, but not before the first one.
            if newpage:
                self.doc.page_break()
            newpage = 1
            if rpt:
                rpt.begin_report()
                rpt.write_report()
        self.doc.close()
        if self.open_with_app.get_active():
            open_file_with_default_application(self.target_path)

    def init_options(self, option_class):
        # NOTE(review): ClassType is not imported in this module.  It is
        # unreachable in practice: issubclass(x, object) is True for any
        # class (short-circuiting the 'or') and raises TypeError for any
        # instance (caught below before ClassType is evaluated).
        try:
            if (issubclass(option_class, object) or     # New-style class
                isinstance(option_class, ClassType)):   # Old-style class
                self.options = option_class(self.raw_name, self.db)
        except TypeError:
            self.options = option_class
        if not self.is_from_saved_book:
            self.options.load_previous_values()
        # Seed the dialog's paper/format handler from the book's saved
        # settings where present.
        handler = self.options.handler
        if self.book.get_paper_name():
            handler.set_paper_name(self.book.get_paper_name())
        if self.book.get_orientation() is not None: # 0 is legal
            handler.set_orientation(self.book.get_orientation())
        if self.book.get_paper_metric() is not None: # 0 is legal
            handler.set_paper_metric(self.book.get_paper_metric())
        if self.book.get_custom_paper_size():
            handler.set_custom_paper_size(self.book.get_custom_paper_size())
        if self.book.get_margins():
            handler.set_margins(self.book.get_margins())
        if self.book.get_format_name():
            handler.set_format_name(self.book.get_format_name())
        if self.book.get_output():
            self.options.set_output(self.book.get_output())
#------------------------------------------------------------------------
#
# Generic task function for book
#
#------------------------------------------------------------------------
def write_book_item(database, report_class, options, user):
    """Write the report using options set.

    All user dialog has already been handled and the output file opened.

    database: the database to run the report against
    report_class: callable constructing the report item
    options: the item's option class instance
    user: a User instance for progress/feedback

    Returns the constructed report object, or None if construction failed
    (the book build is best-effort: a failed item is reported and skipped).
    """
    try:
        return report_class(database, options, user)
    except (ReportError, FilterError) as msg:
        # Both error types carry a (primary, secondary) message pair;
        # the two previously duplicated handlers are merged here.
        (m1, m2) = msg.messages()
        ErrorDialog(m1, m2)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; anything else is logged and the item skipped.
        log.error("Failed to write book item.", exc_info=True)
    return None
| pmghalvorsen/gramps_branch | gramps/gui/plug/report/_bookdialog.py | Python | gpl-2.0 | 39,233 | [
"Brian"
] | 0efce33f656cd49afc7d720e983932f13db3280d3a54d7dc3e83592138faa58a |
from django.utils.translation import ugettext as _
from django.conf import settings
import os, syslog
# ------------------------------------------- #
# CONFIGURATION
# ------------------------------------------- #
# Global
# Default tracker configuration.  Any key below may be overridden through
# the optional MANAGER mapping (see the loop at the end of this module).
conf = {
    'appdir': os.path.dirname(os.path.realpath(__file__)),  # application root
    'taskdir': os.path.dirname(os.path.realpath(__file__))+'/tasks',  # task scripts
    'python': '/bin/python3.6',   # interpreter used to run tasks
    'binary': '/bin/bash',        # shell used to run task scripts
    'backstart': '/bin/nohup',    # prefix to start a task in the background
    'backend': '&',               # suffix to detach a background task
    'checkext': '.sh',            # task script extension
    'syslog': False,              # enable syslog logging (see logmethis)
    'sysloglvl': 5,               # maximum syslog priority recorded
    'killscript': 3600,           # seconds before a running script is killed
    'host': 'localhost',
    'ip': '127.0.0.1',
    'export': 30,
    # NOTE(review): hard-coded secret; consider moving to deployment settings.
    'salt': 'y-;1n430^484ylwf$9@`4I1NZ.4xHK',
    'store': 'folvis',            # presumably the returning-visitor cookie name -- TODO confirm
    'first': 'firvis',            # presumably the first-visit cookie name -- TODO confirm
    'charset': 'utf-8',
    'maxage': 86400,              # seconds (one day)
    'ndatas': 50,
    'port': 27080,
    'connected': True,
    'pathConnected': None,
}
# Content Type
# HTTP Content-Type header values bound to the configured charset.
conf['contenttype_csv'] = 'text/csv; charset=%s' % conf['charset']
conf['contenttype_txt'] = 'text/plain; charset=%s' % conf['charset']
conf['contenttype_svg'] = 'image/svg+xml; charset=%s' % conf['charset']
conf['contenttype_js'] = 'application/javascript; charset=%s' % conf['charset']
# Tasks type
# (task identifier, translated label) choices for the known task kinds.
conf['tasks'] = (
    ('TRK_check_os', _('check(OS)')),
    ('TRK_sort_recurring', _('sort(Recurring)')),
    ('TRK_report_hourly', _('report(Hourly)')),
    ('TRK_report_daily', _('report(Daily)')),
    ('TRK_report_monthly', _('report(Monthly)')),
    ('TRK_report_annually', _('report(Annually)')),
    ('TRK_purge_visit', _('purge(Visit)')),
    ('TRK_purge_report', _('purge(Report)')),
    ('TRK_purge_task', _('purge(Task)')),
)
conf['subtasks'] = {
    'TRK_sort_recurring': ['addVisitors', 'addAllInfos', 'delTrackedSort', 'listConnected'],
}
# Deltas tasks
# Scheduling interval per task: seconds, or the literal strings
# 'Monthly' / 'Annually' for calendar-based runs.
conf['deltas'] = {
    'TRK_sort_recurring': 300,
    'TRK_report_hourly': 3600,
    'TRK_report_daily': 86400,
    'TRK_report_monthly': 'Monthly',
    'TRK_report_annually': 'Annually',
    'TRK_purge_visit': 300,
    'TRK_purge_report': 3600,
    'TRK_purge_task': 86400,
}
# Status
# (code, translated label) choices for task execution status.
conf['status'] = (
    (0, _('In error')),
    (1, _('Ordered')),
    (2, _('Started')),
    (3, _('Running')),
    (4, _('Complete')),
)
# Default datas
# Measurement keys accepted from the client-side tracker: visitor/page
# identity, W3C Navigation Timing fields, and screen/window metrics.
conf['datas'] = [
    'visitor',
    'url',
    'title',
    'route',
    'connectEnd',
    'connectStart',
    'secureConnectionStart',
    'domComplete',
    'domContentLoadedEventEnd',
    'domContentLoadedEventStart',
    'domInteractive',
    'domLoading',
    'domainLookupEnd',
    'domainLookupStart',
    'fetchStart',
    'loadEventEnd',
    'loadEventStart',
    'navigationStart',
    'redirectCount',
    'redirectEnd',
    'redirectStart',
    'requestStart',
    'responseEnd',
    'responseStart',
    'timing',
    'navigation',
    'performance',
    'type',
    'unloadEventEnd',
    'unloadEventStart',
    'colorDepth',
    'pixelDepth',
    'height',
    'width',
    'availHeight',
    'availWidth',
    'innerWidth',
    'innerHeight',
    'outerWidth',
    'outerHeight',
    'ResizeInnerWidth',
    'ResizeInnerHeight',
    'ResizeOuterWidth',
    'ResizeOuterHeight',
]
# Event keys accepted from the client-side tracker.
conf['events'] = [
    'visitor',
    'url',
    'title',
    'stay',
    'click',
    'scrolldown',
    'scrollup',
]
# Integration snippet shown to users; {0} and {1} presumably receive the
# JS and noscript tracker paths -- TODO confirm how they are substituted,
# since str.format() would trip over the literal JS braces below.
conf['example'] = """<!--
[--URL_STATIC--] -> Static URL hosting the javascript file
[--URL_TRACKER--] -> URL of the tracker | Leave empty if the host is the same
-->
<!-- Own host JS | better -->
<script src="[--URL_STATIC--]/visit.js"></script>
<!-- Direct host JS -->
<script src="[--URL_TRACKER--]{0}"></script>
<!-- Use example -->
<script>
(function() {
visit = new visit('[--URL_TRACKER--]');
visit.add('height', window.screen.height);
visit.add('width', window.screen.width);
visit.visit();
})();
</script>
<noscript><img width=0 height=0 src="[--URL_TRACKER--]{1}"></noscript>
"""
# Allow an optional MANAGER mapping (e.g. injected via Django settings) to
# override any default above; missing keys keep their defaults.
# Was ``for k, v in conf.items()`` with ``v`` unused; iterate keys only and
# snapshot them with list() since the loop assigns into conf.
for k in list(conf):
    try:
        conf[k] = MANAGER[k]
    except Exception:
        # NameError when MANAGER is not defined at all, KeyError when the
        # key has no override -- both mean "keep the default".
        pass
# ------------------------------------------- #
# LOGMETHIS
# ------------------------------------------- #
# System logging helper.
def logmethis(lvl, msg):
    """Forward *msg* to syslog at priority *lvl* when syslog logging is
    enabled and *lvl* is within the configured verbosity."""
    if conf['syslog'] is not True or conf['sysloglvl'] < lvl:
        return
    syslog.openlog(logoption=syslog.LOG_PID)
    syslog.syslog(lvl, msg)
    syslog.closelog()
"VisIt"
] | a82164c8b7c2fe40bb9b628e1101a7fab1415afe8d6f4b6380215c5be5614cc4 |
"""
=======================================================
Diffeomorphic Registration with binary and fuzzy images
=======================================================
This example demonstrates registration of a binary and a fuzzy image.
This could be seen as aligning a fuzzy (sensed) image to a binary
(e.g., template) image.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import draw, filters
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import SSDMetric
from dipy.viz import regtools
"""
Let's generate a sample template image as the combination of three ellipses.
We will generate the fuzzy (sensed) version of the image by smoothing
the reference image.
"""
def draw_ellipse(img, center, axis):
    """Rasterize a filled ellipse into *img* in place and return it."""
    rows, cols = draw.ellipse(center[0], center[1], axis[0], axis[1],
                              shape=img.shape)
    img[rows, cols] = 1
    return img
# Reference (template) image: the union of three ellipses on a 64x64 grid.
img_ref = np.zeros((64, 64))
img_ref = draw_ellipse(img_ref, (25, 15), (10, 5))
img_ref = draw_ellipse(img_ref, (20, 45), (15, 10))
img_ref = draw_ellipse(img_ref, (50, 40), (7, 15))
# Fuzzy (sensed) image: a Gaussian-smoothed copy of the reference.
img_in = filters.gaussian(img_ref, sigma=3)
"""
Let's define a small visualization function.
"""
def show_images(img_ref, img_warp, fig_name):
    """Plot the warped image over the reference contour plus their
    difference (SSD in the title) and save the figure as <fig_name>.png."""
    fig, (ax_overlay, ax_diff) = plt.subplots(ncols=2, figsize=(12, 5))

    ax_overlay.set_title('warped image & reference contour')
    ax_overlay.imshow(img_warp)
    ax_overlay.contour(img_ref, colors='r')

    ssd = np.sum((img_warp - img_ref) ** 2)
    ax_diff.set_title('difference, SSD=%.02f' % ssd)
    diff_artist = ax_diff.imshow(img_warp - img_ref)
    plt.colorbar(diff_artist)

    fig.tight_layout()
    fig.savefig(fig_name + '.png')
# Show the input pair before any registration.
show_images(img_ref, img_in, 'input')
"""
.. figure:: input.png
:align: center
Input images before alignment.
"""
"""
Let's use the general Registration function with some naive parameters,
such as set `step_length` as 1 assuming maximal step 1 pixel and a reasonably
small number of iterations since the deformation with already aligned images
should be minimal.
"""
# Symmetric diffeomorphic registration with an SSD metric; step_length=1
# assumes a maximal step of one pixel, and the small level_iters suffice
# for images that are already (nearly) aligned.
sdr = SymmetricDiffeomorphicRegistration(metric=SSDMetric(img_ref.ndim),
                                         step_length=1.0,
                                         level_iters=[50, 100],
                                         inv_iter=50,
                                         ss_sigma_factor=0.1,
                                         opt_tol=1.e-3)

"""
Perform the registration with equal images.
"""

# Sanity run: registering the reference to itself.
mapping = sdr.optimize(img_ref.astype(float), img_ref.astype(float))
img_warp = mapping.transform(img_ref, 'linear')
show_images(img_ref, img_warp, 'output-0')
regtools.plot_2d_diffeomorphic_map(mapping, 5, 'map-0.png')
"""
.. figure:: output-0.png
:align: center
.. figure:: map-0.png
:align: center
Registration results for default parameters and equal images.
"""
"""
Perform the registration with binary and fuzzy images.
"""
# Register the fuzzy (moving) image to the binary reference.
mapping = sdr.optimize(img_ref.astype(float), img_in.astype(float))
img_warp = mapping.transform(img_in, 'linear')
show_images(img_ref, img_warp, 'output-1')
regtools.plot_2d_diffeomorphic_map(mapping, 5, 'map-1.png')
"""
.. figure:: output-1.png
:align: center
.. figure:: map-1.png
:align: center
Registration results for a naive parameter configuration.
"""
"""
Note, we are still using a multi-scale approach which makes `step_length`
in the upper level multiplicatively larger.
What happens if we set `step_length` to a rather small value?
"""
# Repeat with a much smaller step length.
sdr.step_length = 0.1

"""
Perform the registration and examine the output.
"""

mapping = sdr.optimize(img_ref.astype(float), img_in.astype(float))
img_warp = mapping.transform(img_in, 'linear')
show_images(img_ref, img_warp, 'output-2')
regtools.plot_2d_diffeomorphic_map(mapping, 5, 'map-2.png')
"""
.. figure:: output-2.png
:align: center
.. figure:: map-2.png
:align: center
Registration results for decreased step size.
"""
"""
An alternative scenario is to use just a single-scale level.
Even though the warped image may look fine, the estimated deformations show
that it is off the mark.
"""
# Single-scale variant: only one resolution level (level_iters has one entry).
sdr = SymmetricDiffeomorphicRegistration(metric=SSDMetric(img_ref.ndim),
                                         step_length=1.0,
                                         level_iters=[100],
                                         inv_iter=50,
                                         ss_sigma_factor=0.1,
                                         opt_tol=1.e-3)

"""
Perform the registration.
"""

mapping = sdr.optimize(img_ref.astype(float), img_in.astype(float))
img_warp = mapping.transform(img_in, 'linear')
show_images(img_ref, img_warp, 'output-3')
regtools.plot_2d_diffeomorphic_map(mapping, 5, 'map-3.png')
"""
.. figure:: output-3.png
:align: center
.. figure:: map-3.png
:align: center
Registration results for single level.
"""
| FrancoisRheaultUS/dipy | doc/examples/register_binary_fuzzy.py | Python | bsd-3-clause | 4,831 | [
"Gaussian"
] | e4b87cd9e9983a93f167619c8e0ffab0e23294d81ce9d6865ad713837b3eb1c3 |
"""
=================================
Map data to a normal distribution
=================================
.. currentmodule:: sklearn.preprocessing
This example demonstrates the use of the Box-Cox and Yeo-Johnson transforms
through :class:`~PowerTransformer` to map data from various
distributions to a normal distribution.
The power transform is useful as a transformation in modeling problems where
homoscedasticity and normality are desired. Below are examples of Box-Cox and
Yeo-Johnson applied to six different probability distributions: Lognormal,
Chi-squared, Weibull, Gaussian, Uniform, and Bimodal.
Note that the transformations successfully map the data to a normal
distribution when applied to certain datasets, but are ineffective with others.
This highlights the importance of visualizing the data before and after
transformation.
Also note that even though Box-Cox seems to perform better than Yeo-Johnson for
lognormal and chi-squared distributions, keep in mind that Box-Cox does not
support inputs with negative values.
For comparison, we also add the output from
:class:`~QuantileTransformer`. It can force any arbitrary
distribution into a gaussian, provided that there are enough training samples
(thousands). Because it is a non-parametric method, it is harder to interpret
than the parametric ones (Box-Cox and Yeo-Johnson).
On "small" datasets (less than a few hundred points), the quantile transformer
is prone to overfitting. The use of the power transform is then recommended.
"""
# Author: Eric Chang <ericchang2017@u.northwestern.edu>
# Nicolas Hug <contact@nicolas-hug.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.model_selection import train_test_split
print(__doc__)

N_SAMPLES = 1000   # samples drawn per distribution
FONT_SIZE = 6
BINS = 30          # histogram bins

rng = np.random.RandomState(304)
bc = PowerTransformer(method='box-cox')
yj = PowerTransformer(method='yeo-johnson')
# n_quantiles is set to the training set size rather than the default value
# to avoid a warning being raised by this example
qt = QuantileTransformer(n_quantiles=500, output_distribution='normal',
                         random_state=rng)
size = (N_SAMPLES, 1)

# lognormal distribution
X_lognormal = rng.lognormal(size=size)

# chi-squared distribution
df = 3
X_chisq = rng.chisquare(df=df, size=size)

# weibull distribution
a = 50
X_weibull = rng.weibull(a=a, size=size)

# gaussian distribution
loc = 100
X_gaussian = rng.normal(loc=loc, size=size)

# uniform distribution
X_uniform = rng.uniform(low=0, high=1, size=size)

# bimodal distribution: mixture of two close Gaussians
loc_a, loc_b = 100, 105
X_a, X_b = rng.normal(loc=loc_a, size=size), rng.normal(loc=loc_b, size=size)
X_bimodal = np.concatenate([X_a, X_b], axis=0)

# create plots
distributions = [
    ('Lognormal', X_lognormal),
    ('Chi-squared', X_chisq),
    ('Weibull', X_weibull),
    ('Gaussian', X_gaussian),
    ('Uniform', X_uniform),
    ('Bimodal', X_bimodal)
]

colors = ['#D81B60', '#0188FF', '#FFC107',
          '#B7A2FF', '#000000', '#2EC5AC']

# 8x3 grid of axes: for each distribution, a column of four rows
# (original, Box-Cox, Yeo-Johnson, quantile transform), arranged in
# two banks of three distributions.
fig, axes = plt.subplots(nrows=8, ncols=3, figsize=plt.figaspect(2))
axes = axes.flatten()
axes_idxs = [(0, 3, 6, 9), (1, 4, 7, 10), (2, 5, 8, 11), (12, 15, 18, 21),
             (13, 16, 19, 22), (14, 17, 20, 23)]
axes_list = [(axes[i], axes[j], axes[k], axes[l])
             for (i, j, k, l) in axes_idxs]

for distribution, color, axes in zip(distributions, colors, axes_list):
    name, X = distribution
    # fit on one half, show the transform of the held-out half
    X_train, X_test = train_test_split(X, test_size=.5)

    # perform power transforms and quantile transform
    X_trans_bc = bc.fit(X_train).transform(X_test)
    lmbda_bc = round(bc.lambdas_[0], 2)
    X_trans_yj = yj.fit(X_train).transform(X_test)
    lmbda_yj = round(yj.lambdas_[0], 2)
    X_trans_qt = qt.fit(X_train).transform(X_test)

    ax_original, ax_bc, ax_yj, ax_qt = axes

    ax_original.hist(X_train, color=color, bins=BINS)
    ax_original.set_title(name, fontsize=FONT_SIZE)
    ax_original.tick_params(axis='both', which='major', labelsize=FONT_SIZE)

    for ax, X_trans, meth_name, lmbda in zip(
            (ax_bc, ax_yj, ax_qt),
            (X_trans_bc, X_trans_yj, X_trans_qt),
            ('Box-Cox', 'Yeo-Johnson', 'Quantile transform'),
            (lmbda_bc, lmbda_yj, None)):
        ax.hist(X_trans, color=color, bins=BINS)
        title = 'After {}'.format(meth_name)
        if lmbda is not None:
            # the power transforms expose the fitted lambda parameter
            title += '\n$\\lambda$ = {}'.format(lmbda)
        ax.set_title(title, fontsize=FONT_SIZE)
        ax.tick_params(axis='both', which='major', labelsize=FONT_SIZE)
        ax.set_xlim([-3.5, 3.5])

plt.tight_layout()
plt.show()
| glemaitre/scikit-learn | examples/preprocessing/plot_map_data_to_normal.py | Python | bsd-3-clause | 4,716 | [
"Gaussian"
] | b981315f216514de09b08e2131d44a572facaf82d104d98596a2bfe6226b9ebd |
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import numpy as np
import warnings
from ..base import BaseEstimator
from ..utils import check_random_state, deprecated
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points.  Each row corresponds
        to a single data point.

    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components
        Gaussians.  Each row corresponds to a single mean vector.

    covars : array_like
        List of n_components covariance parameters for each Gaussian.
        The shape depends on `covariance_type`:
            (n_components, n_features)              if 'spherical',
            (n_features, n_features)                if 'tied',
            (n_components, n_features)              if 'diag',
            (n_components, n_features, n_features)  if 'full'

    covariance_type : string
        Type of the covariance parameters.  Must be one of
        'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian
        distributions.
    """
    # Dispatch table; an unknown covariance_type raises KeyError.
    log_multivariate_normal_density_dict = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full}
    return log_multivariate_normal_density_dict[covariance_type](
        X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Draw random samples from a single Gaussian.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.

    covar : array_like, optional
        Covariance of the distribution.  The shape depends on
        `covariance_type`: scalar if 'spherical', (n_features,) if
        'diag', (n_features, n_features) if 'tied' or 'full'.

    covariance_type : string, optional
        One of 'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    n_samples : int, optional
        Number of samples to generate.  Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)

    # Start from standard-normal noise, then colour it with the covariance.
    noise = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        noise.shape = (n_dim,)

    if covariance_type == 'spherical':
        noise *= np.sqrt(covar)
    elif covariance_type == 'diag':
        noise = np.dot(np.diag(np.sqrt(covar)), noise)
    else:
        # 'tied' / 'full': matrix square root via SVD.
        from scipy import linalg
        U, s, V = linalg.svd(covar)
        sqrt_covar = np.dot(U, np.dot(np.diag(np.sqrt(s)), V))
        noise = np.dot(sqrt_covar, noise)

    return (noise.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
`weights_` : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`covars_` : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
             random_state=None, thresh=1e-2, min_covar=1e-3,
             n_iter=100, n_init=1, params='wmc', init_params='wmc'):
    """Store hyper-parameters and initialize uniform mixture weights.

    Raises ValueError for an unknown covariance_type or n_init < 1.
    """
    self.n_components = n_components
    self.covariance_type = covariance_type
    self.thresh = thresh
    self.min_covar = min_covar
    self.random_state = random_state
    self.n_iter = n_iter
    self.n_init = n_init
    self.params = params
    self.init_params = init_params

    # Idiomatic membership test (was ``not covariance_type in [...]``).
    if covariance_type not in ('spherical', 'tied', 'diag', 'full'):
        raise ValueError('Invalid value for covariance_type: %s' %
                         covariance_type)

    if n_init < 1:
        raise ValueError('GMM estimation requires at least one run')

    self.weights_ = np.ones(self.n_components) / self.n_components

    # flag to indicate exit status of fit() method: converged (True) or
    # n_iter reached (False)
    self.converged_ = False
def _get_covars(self):
    """Covariance parameters for each mixture component.

    The shape depends on `cvtype`::

        (`n_states`, 'n_features')                if 'spherical',
        (`n_features`, `n_features`)              if 'tied',
        (`n_states`, `n_features`)                if 'diag',
        (`n_states`, `n_features`, `n_features`)  if 'full'
    """
    if self.covariance_type == 'full':
        return self.covars_
    elif self.covariance_type == 'diag':
        # expand each per-component diagonal into a full matrix
        return [np.diag(cov) for cov in self.covars_]
    elif self.covariance_type == 'tied':
        # the single shared matrix, repeated per component
        return [self.covars_] * self.n_components
    elif self.covariance_type == 'spherical':
        return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
    """Provide values for covariance.

    Validates shape/positivity for the current covariance_type before
    storing; _validate_covars raises ValueError on bad input.
    """
    covars = np.asarray(covars)
    _validate_covars(covars, self.covariance_type, self.n_components)
    self.covars_ = covars
def eval(self, X):
    """Evaluate the model on data.

    Compute the log probability of X under the model and
    return the posterior distribution (responsibilities) of each
    mixture component for each element of X.

    Parameters
    ----------
    X: array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points.  Each row
        corresponds to a single data point.

    Returns
    -------
    logprob: array_like, shape (n_samples,)
        Log probabilities of each data point in X
    responsibilities: array_like, shape (n_samples, n_components)
        Posterior probabilities of each mixture component for each
        observation
    """
    X = np.asarray(X)
    if X.ndim == 1:
        X = X[:, np.newaxis]
    if X.size == 0:
        # empty input: empty logprob, (0, n_components) responsibilities
        return np.array([]), np.empty((0, self.n_components))
    if X.shape[1] != self.means_.shape[1]:
        raise ValueError('the shape of X is not compatible with self')

    # Joint log-density: log p(x | component) + log weight.
    lpr = (log_multivariate_normal_density(
        X, self.means_, self.covars_, self.covariance_type)
        + np.log(self.weights_))
    # Marginalize over components in a numerically stable way ...
    logprob = logsumexp(lpr, axis=1)
    # ... then normalize to get the posterior responsibilities.
    responsibilities = np.exp(lpr - logprob[:, np.newaxis])
    return logprob, responsibilities
@deprecated("""will be removed in v0.13;
use the score or predict method instead, depending on the question""")
def decode(self, X):
    """Find most likely mixture components for each point in X.

    DEPRECATED IN VERSION 0.11; WILL BE REMOVED IN VERSION 0.13.
    use the score or predict method instead, depending on the question.

    Parameters
    ----------
    X : array_like, shape (n, n_features)
        List of n_features-dimensional data points.  Each row
        corresponds to a single data point.

    Returns
    -------
    logprobs : array_like, shape (n_samples,)
        Log probability of each point in `obs` under the model.

    components : array_like, shape (n_samples,)
        Index of the most likely mixture component for each observation
    """
    logprob, posteriors = self.eval(X)
    return logprob, posteriors.argmax(axis=1)
def score(self, X):
    """Compute the per-sample log probability under the model.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points.  Each row
        corresponds to a single data point.

    Returns
    -------
    logprob : array_like, shape (n_samples,)
        Log probabilities of each data point in X
    """
    # eval() returns (logprob, responsibilities); only the first is needed.
    return self.eval(X)[0]
def predict(self, X):
    """Predict the most responsible mixture component for each sample.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = (n_samples,)
    """
    _, responsibilities = self.eval(X)
    return responsibilities.argmax(axis=1)
def predict_proba(self, X):
    """Predict the posterior probability of data under each Gaussian
    in the model.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    responsibilities : array-like, shape = (n_samples, n_components)
        Returns the probability of the sample for each Gaussian
        (state) in the model.
    """
    # eval() returns (logprob, responsibilities); keep the posteriors only.
    return self.eval(X)[1]
@deprecated("""will be removed in v0.13;
use the score or predict method instead, depending on the question""")
def rvs(self, n_samples=1, random_state=None):
    """Generate random samples from the model.

    DEPRECATED IN VERSION 0.11; WILL BE REMOVED IN VERSION 0.12
    use sample instead
    """
    # Thin deprecated alias for sample().
    return self.sample(n_samples, random_state)
def sample(self, n_samples=1, random_state=None):
    """Generate random samples from the model.

    Parameters
    ----------
    n_samples : int, optional
        Number of samples to generate.  Defaults to 1.

    Returns
    -------
    X : array_like, shape (n_samples, n_features)
        List of samples
    """
    if random_state is None:
        random_state = self.random_state
    random_state = check_random_state(random_state)
    # Inverse-CDF sampling over the mixture weights.
    weight_cdf = np.cumsum(self.weights_)

    X = np.empty((n_samples, self.means_.shape[1]))
    rand = random_state.rand(n_samples)
    # decide which component to use for each sample
    comps = weight_cdf.searchsorted(rand)
    # for each component, generate all needed samples
    for comp in xrange(self.n_components):  # NOTE: Python 2 ``xrange``
        # occurrences of current component in X
        comp_in_X = (comp == comps)
        # number of those occurrences
        num_comp_in_X = comp_in_X.sum()
        if num_comp_in_X > 0:
            if self.covariance_type == 'tied':
                cv = self.covars_
            elif self.covariance_type == 'spherical':
                cv = self.covars_[comp][0]
            else:
                cv = self.covars_[comp]
            X[comp_in_X] = sample_gaussian(
                self.means_[comp], cv, self.covariance_type,
                num_comp_in_X, random_state=random_state).T
    return X
def fit(self, X, **kwargs):
    """Estimate model parameters with the expectation-maximization
    algorithm.

    An initialization step is performed before entering the EM
    algorithm.  If you want to avoid this step, set the keyword
    argument init_params to the empty string '' when creating the
    GMM object.  Likewise, if you would like just to do an
    initialization, set n_iter=0.

    Parameters
    ----------
    X : array_like, shape (n, n_features)
        List of n_features-dimensional data points.  Each row
        corresponds to a single data point.
    """
    ## initialization step
    X = np.asarray(X, dtype=np.float)
    if X.ndim == 1:
        X = X[:, np.newaxis]
    if X.shape[0] < self.n_components:
        raise ValueError(
            'GMM estimation with %s components, but got only %s samples' %
            (self.n_components, X.shape[0]))
    # Legacy path: hyper-parameters passed to fit() instead of __init__.
    if kwargs:
        warnings.warn("Setting parameters in the 'fit' method is"
                      "deprecated and will be removed in 0.13. Set it on "
                      "initialization instead.", DeprecationWarning,
                      stacklevel=2)

        # initialisations for in case the user still adds parameters to fit
        # so things don't break
        if 'n_iter' in kwargs:
            self.n_iter = kwargs['n_iter']
        if 'n_init' in kwargs:
            if kwargs['n_init'] < 1:
                raise ValueError('GMM estimation requires n_init > 0.')
            else:
                self.n_init = kwargs['n_init']
        if 'params' in kwargs:
            self.params = kwargs['params']
        if 'init_params' in kwargs:
            self.init_params = kwargs['init_params']

    max_log_prob = -np.infty

    # Run EM n_init times from (re-)initialized parameters; keep the best.
    for _ in range(self.n_init):
        if 'm' in self.init_params or not hasattr(self, 'means_'):
            # k-means centroids as the initial component means
            self.means_ = cluster.KMeans(
                n_clusters=self.n_components).fit(X).cluster_centers_

        if 'w' in self.init_params or not hasattr(self, 'weights_'):
            self.weights_ = np.tile(1.0 / self.n_components,
                                    self.n_components)

        if 'c' in self.init_params or not hasattr(self, 'covars_'):
            # shared data covariance, regularized on the diagonal
            cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
            if not cv.shape:
                cv.shape = (1, 1)
            self.covars_ = \
                distribute_covar_matrix_to_match_covariance_type(
                    cv, self.covariance_type, self.n_components)

        # EM algorithms
        log_likelihood = []
        # reset self.converged_ to False
        self.converged_ = False
        for i in xrange(self.n_iter):  # NOTE: Python 2 ``xrange``
            # Expectation step
            curr_log_likelihood, responsibilities = self.eval(X)
            log_likelihood.append(curr_log_likelihood.sum())

            # Check for convergence.
            if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
                    self.thresh:
                self.converged_ = True
                break

            # Maximization step
            self._do_mstep(X, responsibilities, self.params,
                           self.min_covar)

        # if the results are better, keep it
        if self.n_iter:
            if log_likelihood[-1] > max_log_prob:
                max_log_prob = log_likelihood[-1]
                best_params = {'weights': self.weights_,
                               'means': self.means_,
                               'covars': self.covars_}
    # check the existence of an init param that was not subject to
    # likelihood computation issue.
    if np.isneginf(max_log_prob) and self.n_iter:
        raise RuntimeError(
            "EM algorithm was never able to compute a valid likelihood " +
            "given initial parameters. Try different init parameters " +
            "(or increasing n_init) or check for degenerate data.")
    # self.n_iter == 0 occurs when using GMM within HMM
    if self.n_iter:
        self.covars_ = best_params['covars']
        self.means_ = best_params['means']
        self.weights_ = best_params['weights']
    return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
    """Perform the M-step of the EM algorithm and return the class weights."""
    weights = responsibilities.sum(axis=0)
    weighted_X_sum = np.dot(responsibilities.T, X)
    # 10 * EPS guards against division by zero for (near-)empty components.
    inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)

    if 'w' in params:
        self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
    if 'm' in params:
        self.means_ = weighted_X_sum * inverse_weights
    if 'c' in params:
        # per-covariance-type update; _covar_mstep_funcs is defined later
        # in this module
        covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
        self.covars_ = covar_mstep_func(
            self, X, responsibilities, weighted_X_sum, inverse_weights,
            min_covar)
    return weights
def _n_parameters(self):
    """Return the number of free parameters in the model."""
    ndim = self.means_.shape[1]
    mean_params = ndim * self.n_components
    if self.covariance_type == 'spherical':
        cov_params = self.n_components
    elif self.covariance_type == 'tied':
        cov_params = ndim * (ndim + 1) / 2.
    elif self.covariance_type == 'diag':
        cov_params = self.n_components * ndim
    elif self.covariance_type == 'full':
        cov_params = self.n_components * ndim * (ndim + 1) / 2.
    # weights contribute n_components - 1 (they sum to one)
    return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
    """Bayesian information criterion for the current model fit
    and the proposed data

    Parameters
    ----------
    X : array of shape(n_samples, n_dimensions)

    Returns
    -------
    bic: float (the lower the better)
    """
    fit_term = -2 * self.score(X).sum()
    complexity_term = self._n_parameters() * np.log(X.shape[0])
    return fit_term + complexity_term
def aic(self, X):
    """Akaike information criterion for the current model fit
    and the proposed data

    Parameters
    ----------
    X : array of shape(n_samples, n_dimensions)

    Returns
    -------
    aic: float (the lower the better)
    """
    return 2 * self._n_parameters() - 2 * self.score(X).sum()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a spherical model.

    The scalar per-component variance is broadcast to the diagonal form
    and delegated to the 'diag' implementation.
    """
    cv = covars.copy()
    if covars.ndim == 1:
        cv = cv[:, np.newaxis]
    # BUGFIX: was ``covars.shape[1]``, which raises IndexError for the
    # 1-D covars handled just above; test the reshaped ``cv`` instead.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model (one shared
    covariance matrix for all components)."""
    from scipy import linalg
    n_samples, n_dim = X.shape
    # pseudo-inverse of the shared (symmetric) covariance
    icv = pinvh(covars)
    # NOTE(review): the ``+ 0.1`` inside the log-determinant term biases
    # the density; it looks like an ad-hoc regularization (or a bug) --
    # confirm against the reference implementation before changing.
    lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
                  + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
                  - 2 * np.dot(np.dot(X, icv), means.T)
                  + np.sum(means * np.dot(means, icv), 1))
    return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices.

    Uses each component's Cholesky factor for the log-determinant and a
    triangular solve for the Mahalanobis term.
    """
    from scipy import linalg
    import itertools
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    # NOTE: ``itertools.izip`` makes this module Python 2 only.
    for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; regularize the diagonal and retry.
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        # log|cv| = 2 * sum(log(diag(L))) for cv = L L^T
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + \
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape"
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Expand one template covariance matrix into per-component storage.

    Given a single (tied) covariance matrix, build the covariance
    container layout that the requested `covariance_type` expects.
    """
    if covariance_type == 'spherical':
        # One scalar variance per component, broadcast across dimensions.
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        # All components share the template matrix itself.
        return tied_cv
    if covariance_type == 'diag':
        # Keep only the diagonal, one row per component.
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        # One full copy of the template per component.
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """M step for spherical covariances: isotropic average of the diagonal.

    Delegates to the diagonal update, then replaces every dimension's
    variance with the per-component mean so each component stays
    spherical.
    """
    diag_cv = _covar_mstep_diag(*args)
    per_component = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(per_component, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    # Underflow errors in computing post * X.T are not important.
    # Hoisted out of the loop: calling seterr once has the same net effect
    # as calling it every iteration with the same argument.
    # NOTE(review): this permanently changes numpy's global error state;
    # preserved here because callers may rely on it.
    np.seterr(under='ignore')
    # range (not the Python-2-only xrange) so this runs on Python 2 and 3.
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        # Weighted scatter of the data under component c; the small EPS
        # term guards against division by a vanishing responsibility mass.
        avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
        mu = gmm.means_[c][np.newaxis]
        cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
# Dispatch table mapping a covariance_type string to its M-step update
# function; all entries share the same (gmm, X, responsibilities,
# weighted_X_sum, norm, min_covar) signature.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/mixture/gmm.py | Python | agpl-3.0 | 29,124 | [
"Gaussian"
] | d4964b9854390f323e01dd377d378d0b1640f3b805bd85cfb7d0eec8f3f509cb |
#####
# Author : David Stewart
# Date : April 14, 2016
# Problem : https://projecteuler.net/problem=29
# Brief    : How many distinct values does x^y take for 2 <= x, y <= 100?
# Comments : This one was a fun one. I thought of a couple different ways to approach it.
# First approach used a dictionary, simply adding a new key for each new number.
# After iterating through each number, I simply counted up the unique keys and got my answer.
# Average time for the dictionary: ~.042 seconds.
#
# Next approach was to use a list which I append each number to, then calling len(set(list))
# Calling set() first removed any duplicates which allowed len to count the unique elements.
# Average time for the list : ~.042 seconds
#
# Last approach used a set() instead of a list, which natively removes duplicates.
# Average time for set : ~.042 seconds
#
#            I found no discernible difference between the three approaches. The only way I could potentially speed this up is with multiple threads, which I will revisit at a later date.
#####
import math, time
#dictionary approach
def dict_trial(lo=2, hi=100):
    """Count distinct values of x**y for lo <= x, y <= hi using a dict.

    Duplicate powers collapse automatically as dictionary keys; the
    stored value is a throwaway placeholder. Returns the unique count
    (the original computed it and discarded it in a dead local, and
    shadowed the builtin `dict`).
    Note: math.pow returns floats, matching the original behavior.
    """
    seen = {}
    for x in range(lo, hi + 1):
        for y in range(lo, hi + 1):
            seen[math.pow(x, y)] = '.'
    return len(seen)
#list approach
def list_trial(lo=2, hi=100):
    """Count distinct values of x**y for lo <= x, y <= hi via a list.

    Appends every power to a list, then deduplicates with set() at the
    end. Returns the unique count (the original discarded the result in
    a dead local and shadowed the builtin `list`).
    """
    powers = []
    for x in range(lo, hi + 1):
        for y in range(lo, hi + 1):
            powers.append(math.pow(x, y))
    return len(set(powers))
#set approach
def set_trial(lo=2, hi=100):
    """Count distinct values of x**y for lo <= x, y <= hi via a set.

    A set removes duplicates natively; expressed as a set comprehension.
    Returns the unique count (the original discarded it in a dead local).
    """
    powers = {math.pow(x, y)
              for x in range(lo, hi + 1)
              for y in range(lo, hi + 1)}
    return len(powers)
# Time each approach once. NOTE(review): time.clock() is the Python 2
# process timer (removed in Python 3.8; time.perf_counter() replaces it),
# and these are Python 2 print statements.
start = time.clock()
dict_trial()
print time.clock() - start
# Brief pause between trials so the timings don't interfere.
time.sleep(.5)
start = time.clock()
list_trial()
print time.clock() - start
time.sleep(.5)
start = time.clock()
set_trial()
print time.clock() - start
| DavidOStewart/ProjectEuler | 29.py | Python | mit | 1,986 | [
"VisIt"
] | 4255afa82a350b897c0eb1e483ec2e9e2ab5c789f79c8f0d9b0a344f81a27dfc |
"""
@author: Yuhuang Hu
@contact: duguyue100@gmail.com
@note: setup experiment of supervised classification task.
"""
import sys;
sys.path.append("..");
import time;
import numpy as np;
from sacred import Experiment;
import conceptors.util as util;
import conceptors.net as net;
from conceptors.dataset import load_arlab_feature;
exp=Experiment("Classification Task");
@exp.config
def classify_exp_config():
  """Sacred config: default hyper-parameters for the classification task."""
  filename_train="";  # path to the training-feature file
  filename_test="";  # path to the test-feature file
  save_path="";  # file that per-trial result rows are appended to
  range_start=5;  # smallest network size (number of neurons) to try
  range_end=500;  # largest network size to try
  range_step=5;  # step between successive network sizes
  ap_N=10;  # number of candidate apertures tested per class
  num_inter_samples=900;  # interpolation samples for the aperture search
@exp.automain
def classify_experiment(filename_train,
filename_test,
save_path,
range_start,
range_end,
range_step,
ap_N,
num_inter_samples):
"""
Supervised Classification Task
@param filename_train: train data file
@param filename_test: test data file
@param save_path: result save path
@param range_start: number, start number of neuron range
@param range_end: number, end number of neuron range
@param range_step: number, neuron step
"""
# Load data
train_data, test_data=load_arlab_feature(filename_train, filename_test);
# Global paramete settings
num_classes=int(np.max(train_data[:,0])+1);
num_train=train_data.shape[0];
num_test=test_data.shape[0];
neuron_range=np.arange(range_start, range_end+range_step, range_step);
train_input, train_label, test_input, test_label=util.parse_arlab_feature(train_data, test_data);
print test_label[0:100];
train_input, test_input=util.normalize_arlab_feature(train_input, test_input);
train_input=train_input.T;
test_input=test_input.T;
_, tr_start_idx=np.unique(train_label, return_index=True);
#_, te_start_idx=np.unique(test_label, return_index=True);
print "[MESSAGE] Data is prepared.";
for trail in xrange(len(neuron_range)):
start_time=time.clock();
## parameter settings
save_file=open(save_path, "a+");
num_in=train_input.shape[0];
num_neuron=neuron_range[trail];
print "[MESSAGE] Trail %d --- Number of Neuron: %d" % ((trail+1), num_neuron);
## create network
network=net.ConceptorNetwork(num_in=num_in,
num_neuron=num_neuron,
sr=1.5,
in_scale=1.5,
bias_scale=0.2,
washout_length=0,
learn_length=1,
signal_plot_length=0,
tychonov_alpha_readout=0.01,
tychonov_alpha_readout_w=0.0001);
def calculate_block(block, num_classes):
if block==num_classes-1:
start_idx=tr_start_idx[block];
end_idx=train_input.shape[1];
else:
start_idx=tr_start_idx[block];
end_idx=tr_start_idx[block+1];
return start_idx, end_idx;
all_train_states=np.array([]);
for block in xrange(num_classes):
start_idx, end_idx=calculate_block(block, num_classes);
temp_train_states=network.drive_class(train_input[:, start_idx:end_idx]);
if not all_train_states.size:
all_train_states=temp_train_states;
else:
all_train_states=np.hstack((all_train_states,temp_train_states));
print "[MESSAGE] Train data driven"
R_all=all_train_states.dot(all_train_states.T);
C_poss=[]; C_negs=[]; R_poss=[]; R_others=[];
for block in xrange(num_classes):
start_idx, end_idx=calculate_block(block, num_classes);
C_pos_class, C_neg_class, R, R_other=network.compute_conceptor(all_train_states[:, start_idx:end_idx],
ap_N,
R_all,
(num_train-(end_idx-start_idx)));
C_poss.append(C_pos_class);
C_negs.append(C_neg_class);
R_poss.append(R);
R_others.append(R_other);
print "[MESSAGE] Conceptors Computed"
best_aps_poss=np.zeros(num_classes);
best_aps_negs=np.zeros(num_classes);
for i in xrange(num_classes):
best_aps_poss[i], best_aps_negs[i]=network.compute_aperture(C_poss[i],
C_negs[i],
ap_N,
num_inter_samples);
best_ap_pos=np.mean(best_aps_poss);
best_ap_neg=np.mean(best_aps_negs);
print "[MESSAGE] Best Positive Aperture: %.2f, Best Negative Aperture: %.2f" % (best_ap_pos, best_ap_neg);
C_pos_best=[]; C_neg_best=[];
for block in xrange(num_classes):
start_idx, end_idx=calculate_block(block, num_classes);
c_pos_best, c_neg_best=network.compute_best_conceptor(R_poss[block],
R_others[block],
best_ap_pos,
best_ap_neg,
end_idx-start_idx,
num_train-(end_idx-start_idx));
C_pos_best.append(c_pos_best);
C_neg_best.append(c_neg_best);
print "[MESSAGE] Best conceptors computed"
x_test=network.drive_class(test_input);
xTx=x_test.T.dot(x_test).diagonal();
pos_ev=np.zeros((num_classes, num_test));
neg_ev=np.zeros((num_classes, num_test));
comb_ev=np.zeros((num_classes, num_test));
for i in xrange(num_classes):
for j in xrange(num_test):
pos_ev[i,j]=x_test[:,j].dot(C_pos_best[i]).dot(x_test[:,j][None].T)/xTx[j];
neg_ev[i,j]=x_test[:,j].dot(C_neg_best[i]).dot(x_test[:,j][None].T)/xTx[j];
comb_ev[i,j]=pos_ev[i,j]+neg_ev[i,j];
print "[MESSAGE] %i class evidence is calculated" % (i+1);
output_label=np.argmax(comb_ev, axis=0);
pos_out_label=np.argmax(pos_ev, axis=0);
neg_out_label=np.argmax(neg_ev, axis=0);
accuracy=float(np.sum(output_label==test_label))/float(num_test);
pos_accuracy=float(np.sum(pos_out_label==test_label))/float(num_test);
neg_accuracy=float(np.sum(neg_out_label==test_label))/float(num_test);
print "[MESSAGE] Accuracy %.2f %%" % (accuracy*100);
end_time=time.clock();
print "[MESSAGE] Total for %.2fm" % ((end_time-start_time)/60);
info=np.column_stack((num_neuron, best_ap_pos, best_ap_pos, accuracy, pos_accuracy, neg_accuracy, ((end_time-start_time)/60)));
np.savetxt(save_file, info, delimiter=',',newline='\n');
save_file.close();
## remove variable
del output_label; del pos_out_label; del neg_out_label;
del pos_ev; del neg_ev; del comb_ev;
del xTx;
del network; | duguyue100/conceptors | scripts/classify_exp.py | Python | gpl-3.0 | 7,155 | [
"NEURON"
] | be35164e18730626bea2d0e7a3aa19892fad82547aa4fccd663ae8959a637c6f |
from sdssgaussfitter import gaussfit
import numpy as np
from scipy import interpolate
import os,sys
from util import utils
from util.readDict import readDict
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def aperture(startpx, startpy, radius=7):
    """Build a (46, 44) mask that is 0. inside a circular aperture, 1. outside.

    The aperture is centered on integer pixel (startpx, startpy) and
    includes every in-bounds pixel whose squared distance from the
    center is <= radius**2. Vectorized over the full detector grid;
    pixels outside the array bounds are excluded by construction.
    """
    yy, xx = np.mgrid[0:46, 0:44]
    inside = (xx - startpx) ** 2 + (yy - startpy) ** 2 <= radius ** 2
    mask = np.ones((46, 44))
    mask[inside] = 0.
    return mask
# Retired inline Gaussian model, kept for reference; gaussfit supplies it now.
#def gaussian(height, center_x, center_y, width_x, width_y,offset):
#    """Returns a gaussian function with the given parameters"""
#    width_x = float(width_x)
#    width_y = float(width_y)
#    return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset
#testy = np.array([[gaussian(2,10,10,3,3,5)(x,y) for y in range(46)] for x in range(44)])
#utils.plotArray(testy,cbar=True)
param = readDict()
#param.read_from_file('G158-100params.dict')
#param.read_from_file('pg0220params.dict')
#param.read_from_file('landolt9542params.dict')
#param.read_from_file('corot18params.dict')
if len(sys.argv)<2:
    print "Provide file name to fit. Syntax >>python fitPsf.py objectparams.dict [filenumber]"
    sys.exit(1)
#read in parameter file as command line argument
param.read_from_file(sys.argv[1])
#provide optional file number if the object in the param file has alternate .npz files to be specified individually
fileNum = None
if len(sys.argv)>2:
    fileNum = "_"+str(sys.argv[2])
npzLoadFile = param['npzLoadFile']
npzfitpsf = param['npzfitpsf']
giffitpsf = param['giffitpsf']
# Splice the optional file number into each file name, before the extension.
if fileNum != None:
    npzLoadFile = npzLoadFile.split('.')[0]+fileNum+'.'+npzLoadFile.split('.')[1]
    npzfitpsf = npzfitpsf.split('.')[0]+fileNum+'.'+npzfitpsf.split('.')[1]
    giffitpsf = giffitpsf.split('.')[0]+fileNum+'.'+giffitpsf.split('.')[1]
FramesPerFile = param['FramesPerFile']
#NumFiles = param['NumFiles']
#for filenum in range(len(NumFiles)):
#    if NumFiles[filenum] > 0:
#        NumFiles[filenum] = NumFiles[filenum]*FramesPerFile
#NumFrames = NumFiles
# NOTE(review): hard-coded frame count overrides the per-file bookkeeping
# above -- confirm 31 matches the loaded stack.
NumFrames = 31
print NumFrames
# Initial guess for the PSF centroid, from the parameter dictionary.
guessX = param['guessX'][0]
guessY = param['guessY'][0]
stackDict = np.load(npzLoadFile)
stack = stackDict['stack']
wvls = stackDict['wvls']
print len(wvls)
# Accumulators for the per-frame fit results.
paramsList = []
errorsList = []
fitImgList = []
chisqList = []
plt.ion()
# Fit a circular 2-D Gaussian PSF to every frame (wavelength slice) in the
# stack: interpolate over dead pixels, build an error map that down-weights
# bad data, then run gaussfit and collect parameters/chi-square per frame.
for iFrame in range(0,np.shape(stack)[0]):
    frame = stack[iFrame,:,:]
    #for interval in xrange(len(NumFrames)-1):
    #    if NumFrames[interval] != NumFrames[interval+1]:
    #        if NumFrames[interval] < iFrame <= NumFrames[interval+1]:
    #            guessX = guessX[interval]
    #            guessY = guessY[interval]
    #            print guessX, guessY
    #TRY SPLINE INTERPOLATION TO FILL IN BLANK SPACES AND HELP FIT
    # Build an index grid so griddata can interpolate from the nonzero pixels.
    grid = np.zeros((np.shape(frame)[0],np.shape(frame)[1],2),dtype = int)
    for i in xrange(np.shape(frame)[0]):
        for j in xrange(np.shape(frame)[1]):
            grid[i,j]= i,j
    #print interpFrame
    reshapeFrame =np.reshape(frame,(46*44))
    reshapeGrid = np.reshape(grid,(46*44,2))
    # Interpolate the zero (dead) pixels from their valid neighbours.
    interpFrame = interpolate.griddata(reshapeGrid[reshapeFrame!=0],reshapeFrame[reshapeFrame!=0],reshapeGrid)
    print np.reshape(grid,(46*44,2))
    interpFrame = np.reshape(interpFrame,(np.shape(frame)[0],np.shape(frame)[1]))
    print frame
    origFrame = frame
    print interpFrame
    frame = interpFrame
    nanMask = np.isnan(frame)
    apertureMask = aperture(guessX,guessY,radius=7)
    #err = np.sqrt(frame) #divide by 2 to constrain PSF fit even tighter to avoid fitting to wrong peak if PSF is divided by dead pixels
    # Error map: 1 for good pixels, 100 for faint ones, 1E6 to effectively
    # exclude zero/near-dead pixels from the fit.
    err = np.ones(np.shape(frame))
    err[frame<10] = 100
    frame[nanMask]=0#set to finite value that will be ignored
    #err[nanMask] = 1E6 #ignore these data points
    err[frame==0] = 1E6
    #err[apertureMask==1] = 1 #np.sqrt(frame[apertureMask==1]) #weight points closer to the expected psf higher
    nearDeadCutoff=1#100/15 cps for 4000-6000 angstroms
    err[frame<nearDeadCutoff] = 1E6
    entireMask = (err==1E6)
    maFrame = np.ma.masked_array(frame,entireMask)
    # Initial guesses for [height, amplitude, x, y, width].
    guessAmp = 120.
    guessHeight = 3.
    guessWidth=1.6
    guessParams = [guessHeight,guessAmp,guessX,guessY,guessWidth]
    limitedmin = 5*[True]
    limitedmax = 5*[True]
    #minpars = [0,0,0,0,.1] #default min pars, usually work fine
    minpars = [0,116,25,25,1] #tighter constraint on PSF width to avoid fitting wrong peak if PSF is divided by dead pixels
    maxpars = [3,200,40,40,10]
    #usemoments=[True,True,True,True,True] #doesn't use our guess values, default
    usemoments=[False,False,False,False,False]
    # Circular Gaussian fit; returns the mpfit result object and fit image.
    out = gaussfit(data=maFrame,err=err,params=guessParams,returnfitimage=True,quiet=True,limitedmin=limitedmin,limitedmax=limitedmax,minpars=minpars,maxpars=maxpars,circle=1,usemoments=usemoments,returnmp=True)
    mp = out[0]
    outparams = mp.params
    paramErrors = mp.perror
    chisq = mp.fnorm
    dof = mp.dof
    reducedChisq = chisq/dof
    print "reducedChisq =", reducedChisq
    fitimg = out[1]
    chisqList.append([chisq,dof])
    paramsList.append(outparams)
    errorsList.append(paramErrors)
    print "outparams = ", outparams
    print "paramErrors = ", paramErrors
    # Optional diagnostic plots (residuals and 3-D wireframe), disabled.
    #    expectedResiduals = np.ma.masked_array(np.sqrt(frame),mask=entireMask)
    #    residuals = np.ma.masked_array(np.abs(frame-fitimg),mask=entireMask)
    #    utils.plotArray(expectedResiduals,cbar=True)
    #    utils.plotArray(residuals,cbar=True)
    #    fig = plt.figure()
    #    ax = fig.add_subplot(111,projection='3d')
    #    x = np.arange(0,44)
    #    y = np.arange(0,46)
    #    X,Y = np.meshgrid(x,y)
    #    linearMask = np.ravel(entireMask==0)
    #    ax.plot_wireframe(X,Y,fitimg)
    #    ax.scatter(outparams[2],outparams[3],outparams[0]+outparams[1],c='black')
    #    ax.scatter(np.ravel(X)[linearMask],np.ravel(Y)[linearMask],np.ravel(frame)[linearMask],c='red')
    #
    fitimg[nanMask]=0
    #    print fitimg[np.isnan(fitimg)]
    fitImgList.append(fitimg)
    #    utils.plotArray(frame,cbar=True)
    #    utils.plotArray(maFrame,cbar=True)
    #    utils.plotArray(fitimg,cbar=True)
    #    plt.show()
    #    utils.confirm('Enter to continue.')
    #    plt.close()
    #    plt.close()
    #    plt.close()
    # Restore NaNs so downstream consumers can distinguish dead pixels.
    frame[nanMask]=np.nan
    #    fig = plt.figure()
    #    ax1=fig.add_subplot(211)
    #    ax2 = fig.add_subplot(212)
    #    for iRow in range(len(frame)):
    #        ax1.scatter(range(44),frame[iRow,:],c='red',marker='o',alpha=.5,label='data')
    #        ax1.scatter(range(44),fitimg[iRow,:],c='blue',marker='^',alpha=.5,label='fit')
    #        ax1.set_title('Fit seen along Cols')
    #    for iCol in range(np.shape(frame)[1]):
    #        ax2.scatter(range(46),frame[:,iCol],c='red',marker='o',alpha=.5,label='data')
    #        ax2.scatter(range(46),fitimg[:,iCol],c='blue',marker='^',alpha=.5,label='fit')
    #        ax2.set_title('Fit seen along Rows')
    #    plt.show()
    plt.close()
    print 'closed'
# Stack the per-frame results into arrays and save everything to .npz.
cube = np.array(fitImgList)
chisqs = np.array(chisqList)
params = np.array(paramsList)
errors = np.array(errorsList)
np.savez(npzfitpsf,fitImg=cube,params=params,errors=errors,chisqs=chisqs,wvls=wvls)
print 'saved'
#utils.makeMovie(fitImgList,frameTitles=wvls, cbar=True, outName=giffitpsf, normMin=0, normMax=50)
# Interactive display loop: shows the last interpolated/original frames
# until the user enters 'q'. NOTE(review): raw_input is Python 2 only.
stop = 'n'
while stop != 'q':
    plt.matshow(interpFrame,vmin=0,vmax = 100)
    plt.matshow(origFrame,vmin=0,vmax=100)
    plt.show()
    stop = raw_input(" q for stop ----> ")
| bmazin/ARCONS-pipeline | examples/Pal2012_landoltPhot/landoltFitPsf.py | Python | gpl-2.0 | 7,656 | [
"Gaussian"
] | 0bca95809ad3d0862d0328d47834e025cc75f4474dbda64f708431cdd7501c9e |
# -*- coding: utf-8 -*-
#
# gap_junctions_two_neurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Gap Junctions: Two neuron example
--------------------------------------
This script simulates two Hodgkin-Huxley neurons of type `hh_psc_alpha_gap`
connected by a gap junction. Both neurons receive a constant current of
100.0 pA. The neurons are initialized with different membrane potentials and
synchronize over time due to the gap-junction connection.
References
~~~~~~~~~~~
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import pylab as pl
import numpy
nest.ResetKernel()
###############################################################################
# First we set the resolution of the simulation, create two neurons and
# create a `voltmeter` for recording.
# Resolution is the simulation timestep (in ms, per the plot axis labels).
nest.SetKernelStatus({'resolution': 0.05})
neuron = nest.Create('hh_psc_alpha_gap', 2)
vm = nest.Create('voltmeter', params={'to_file': False,
                                      'withgid': True,
                                      'withtime': True,
                                      'interval': 0.1})
###############################################################################
# Then we set the constant current input, modify the inital membrane
# potential of one of the neurons and connect the neurons to the `voltmeter`.
nest.SetStatus(neuron, {'I_e': 100.})
# Only the first neuron starts depolarized, so the pair is initially
# desynchronized.
nest.SetStatus([neuron[0]], {'V_m': -10.})
nest.Connect(vm, neuron, 'all_to_all')
###############################################################################
# In order to create the `gap_junction` connection we employ the
# `all_to_all` connection rule: Gap junctions are bidirectional connections,
# therefore we need to connect `neuron[0]` to `neuron[1]` and `neuron[1]` to
# `neuron[0]`:
nest.Connect(neuron, neuron,
             {'rule': 'all_to_all', 'autapses': False},
             {'model': 'gap_junction', 'weight': 0.5})
###############################################################################
# Finally we start the simulation and plot the membrane potentials of both
# neurons.
nest.Simulate(351.)
senders = nest.GetStatus(vm, 'events')[0]['senders']
times = nest.GetStatus(vm, 'events')[0]['times']
V = nest.GetStatus(vm, 'events')[0]['V_m']
pl.figure(1)
# One trace per neuron (sender ids 1 and 2).
pl.plot(times[numpy.where(senders == 1)],
        V[numpy.where(senders == 1)], 'r-')
pl.plot(times[numpy.where(senders == 2)],
        V[numpy.where(senders == 2)], 'g-')
pl.xlabel('time (ms)')
pl.ylabel('membrane potential (mV)')
pl.show()
| terhorstd/nest-simulator | pynest/examples/gap_junctions_two_neurons.py | Python | gpl-2.0 | 3,140 | [
"NEURON"
] | d5ec08ebdfe9367020c0e850971b70cf1b542fa98fa1bf749a17d9b4d1a32552 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Multipliers for human-readable size suffixes (binary powers of 1024).
SIZE_RANGES = {
    'Y': 1 << 80,
    'Z': 1 << 70,
    'E': 1 << 60,
    'P': 1 << 50,
    'T': 1 << 40,
    'G': 1 << 30,
    'M': 1 << 20,
    'K': 1 << 10,
    'B': 1,
}
# Mapping of chattr/lsattr flag characters to human-readable attribute names.
FILE_ATTRIBUTES = {
    'A': 'noatime',
    'a': 'append',
    'c': 'compressed',
    'C': 'nocow',
    'd': 'nodump',
    'D': 'dirsync',
    'e': 'extents',
    'E': 'encrypted',
    'h': 'blocksize',
    'i': 'immutable',
    'I': 'indexed',
    'j': 'journalled',
    'N': 'inline',
    's': 'zero',
    'S': 'synchronous',
    't': 'notail',
    'T': 'blockroot',
    'u': 'undelete',
    'X': 'compressedraw',
    'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
src=dict(),
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
follow=dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content=dict(no_log=True),
backup=dict(),
force=dict(),
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
attributes=dict(aliases=['attr']),
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
def get_platform():
    '''Return the OS family name reported by the interpreter
    (e.g. 'Linux', 'Darwin', 'FreeBSD').'''
    system_name = platform.system()
    return system_name
def get_distribution():
    ''' return the distribution name

    Returns None on non-Linux platforms. NOTE(review): relies on
    platform.linux_distribution()/platform.dist(), which were removed in
    Python 3.8 -- on modern interpreters the fallback itself can raise.
    '''
    if platform.system() == 'Linux':
        try:
            supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
            distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                # Only normalize when the name came from /etc/system-release.
                # Previously this ran unconditionally, overwriting any
                # correctly detected distribution with 'OtherLinux'.
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            distribution = platform.dist()[0].capitalize()
    else:
        distribution = None
    return distribution
def get_distribution_version():
    ''' return the distribution version

    Returns None on non-Linux platforms. NOTE(review): relies on
    platform.linux_distribution()/platform.dist(), both removed in
    Python 3.8 -- the bare except masks that, and platform.dist() in the
    handler can itself raise there.
    '''
    if platform.system() == 'Linux':
        try:
            distribution_version = platform.linux_distribution()[1]
            # Fall back to /etc/system-release (e.g. Amazon Linux).
            if not distribution_version and os.path.isfile('/etc/system-release'):
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            distribution_version = platform.dist()[1]
    else:
        distribution_version = None
    return distribution_version
def get_all_subclasses(cls):
    '''
    Return every (direct and indirect) subclass of `cls`.

    Used by modules like the Hardware or Network fact classes.
    `type.__subclasses__` only returns direct subclasses, so this walks
    the whole class tree breadth-first.

    Bug fix: the previous implementation removed items from the list it
    was iterating over, which skips elements within a pass and only
    terminated correctly by accident of the enclosing while loop. A work
    queue avoids mutating a sequence during iteration.
    '''
    subclasses = []
    to_visit = deque(cls.__subclasses__())
    while to_visit:
        sc = to_visit.popleft()
        subclasses.append(sc)
        # Enqueue this subclass's own subclasses for later processing.
        to_visit.extend(sc.__subclasses__())
    return subclasses
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform. See User
    module for an example.

    Subclasses are expected to define `platform` and `distribution`
    class attributes; the most specific match (platform + distribution,
    then platform only) wins, falling back to `cls` itself.
    '''
    this_platform = get_platform()
    distribution = get_distribution()
    subclass = None
    # get the most specific superclass for this platform
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc
    # Fall back to a platform-only match when no distribution match exists.
    if subclass is None:
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc
    if subclass is None:
        subclass = cls
    # Allocate the chosen subclass; its __init__ runs via the normal
    # construction protocol after __new__ returns.
    return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to byte str

        Specialized for json return because this only handles lists,
        tuples, and dict container types (the containers that the json
        module returns). Any other value (numbers, None, already-bytes)
        is passed through unchanged.
    '''
    if isinstance(d, text_type):
        return to_bytes(d, encoding=encoding, errors=errors)
    elif isinstance(d, dict):
        # iteritems yields (key, value) pairs; map recurses into both,
        # with repeat() supplying the encoding/errors args each time.
        return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
    elif isinstance(d, list):
        return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
    elif isinstance(d, tuple):
        return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
    else:
        # Scalar leaf: nothing to convert.
        return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to byte str

    Specialized for json return because this only handles, lists, tuples,
    and dict container types (the containers that the json module returns)
    '''
    if isinstance(d, binary_type):
        # Warning, can traceback
        return to_text(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        # Keys and values are converted independently, recursing into
        # nested containers as needed.
        return dict((json_dict_bytes_to_unicode(k, encoding, errors),
                     json_dict_bytes_to_unicode(v, encoding, errors))
                    for k, v in iteritems(d))
    if isinstance(d, list):
        return [json_dict_bytes_to_unicode(item, encoding, errors) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_bytes_to_unicode(item, encoding, errors) for item in d)
    # Anything else (numbers, None, ...) passes through untouched
    return d
def return_values(obj):
    """ Return native stringified values from datastructures.

    For use with removing sensitive values pre-jsonification."""
    if isinstance(obj, (text_type, binary_type)):
        # Empty strings yield nothing
        if obj:
            yield to_native(obj, errors='surrogate_or_strict')
        return
    if isinstance(obj, SEQUENCETYPE):
        for item in obj:
            for inner in return_values(item):
                yield inner
    elif isinstance(obj, Mapping):
        # Only mapping *values* are inspected, never the keys
        for _key, val in obj.items():
            for inner in return_values(val):
                yield inner
    elif isinstance(obj, (bool, NoneType)):
        # This must come before NUMBERTYPES because bool is a subclass of int
        return
    elif isinstance(obj, NUMBERTYPES):
        yield to_native(obj, nonstring='simplerepr')
    else:
        raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
    """
    Helper function for :meth:`remove_values`.

    :arg value: The value to check for strings that need to be stripped
    :arg no_log_strings: set of strings which must be stripped out of any values
    :arg deferred_removals: List which holds information about nested
        containers that have to be iterated for removals.  It is passed into
        this function so that more entries can be added to it if value is
        a container type.  The format of each entry is a 2-tuple where the first
        element is the ``value`` parameter and the second value is a new
        container to copy the elements of ``value`` into once iterated.
    :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
        1. :class:`~datetime.datetime` objects which are changed into a string representation.
        2. objects which are in no_log_strings are replaced with a placeholder
           so that no sensitive data is leaked.
        If ``value`` is a container type, returns a new empty container.

    ``deferred_removals`` is added to as a side-effect of this function.

    .. warning:: It is up to the caller to make sure the order in which value
        is passed in is correct.  For instance, higher level containers need
        to be passed in before lower level containers.  For example, given
        ``{'level1': {'level2': 'level3': [True]} }`` first pass in the
        dictionary for ``level1``, then the dict for ``level2``, and finally
        the list for ``level3``.
    """
    if isinstance(value, (text_type, binary_type)):
        # Need native str type
        native_str_value = value
        if isinstance(value, text_type):
            value_is_text = True
            if PY2:
                native_str_value = to_bytes(value, errors='surrogate_or_strict')
        elif isinstance(value, binary_type):
            value_is_text = False
            if PY3:
                native_str_value = to_text(value, errors='surrogate_or_strict')
        # Exact match: replace the whole value with the placeholder
        if native_str_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        # Substring matches: mask each occurrence in place
        for omit_me in no_log_strings:
            native_str_value = native_str_value.replace(omit_me, '*' * 8)
        # Convert back to the str flavor (text vs bytes) the caller passed in
        if value_is_text and isinstance(native_str_value, binary_type):
            value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        elif not value_is_text and isinstance(native_str_value, text_type):
            value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        else:
            value = native_str_value
    elif isinstance(value, Sequence):
        if isinstance(value, MutableSequence):
            new_value = type(value)()
        else:
            new_value = []  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Set):
        if isinstance(value, MutableSet):
            new_value = type(value)()
        else:
            new_value = set()  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Mapping):
        if isinstance(value, MutableMapping):
            new_value = type(value)()
        else:
            new_value = {}  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
        # Numbers/booleans/None can still match a no_log string once
        # stringified (e.g. an all-digit password)
        stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    elif isinstance(value, datetime.datetime):
        value = value.isoformat()
    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
    return value
def remove_values(value, no_log_strings):
    """ Remove strings in no_log_strings from value.

    If value is a container type, then remove a lot more.  Containers are
    processed breadth-first via a work queue filled in by
    :func:`_remove_values_conditions`.
    """
    deferred_removals = deque()

    # Normalize the sensitive strings to the native str type once up front
    no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
    new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)

    while deferred_removals:
        old_data, new_data = deferred_removals.popleft()
        if isinstance(new_data, Mapping):
            for old_key, old_elem in old_data.items():
                new_data[old_key] = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
        else:
            for elem in old_data:
                new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
                if isinstance(new_data, MutableSequence):
                    new_data.append(new_elem)
                elif isinstance(new_data, MutableSet):
                    new_data.add(new_elem)
                else:
                    raise TypeError('Unknown container type encountered when removing private values from output')

    return new_value
def heuristic_log_sanitize(data, no_log_values=None):
    ''' Remove strings that look like passwords from log messages.

    Scans ``data`` right-to-left for ``user:pass@host`` shaped substrings and
    masks the password portion, then strips any explicit ``no_log_values``.
    '''
    # Currently filters:
    # user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds

    # begin: start of a passwd containing string
    # end: end of a passwd containing string
    # sep: char between user and passwd
    # prev_begin: where in the overall string to start a search for
    #   a passwd
    # sep_search_end: where in the string to end a search for the sep
    data = to_native(data)

    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break

        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password
                    # here.  Return the remaining data
                    output.insert(0, data[0:begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            # Pieces are inserted at the front because we scan right-to-left.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin

    output = ''.join(output)
    if no_log_values:
        output = remove_values(output, no_log_values)
    return output
def bytes_to_human(size, isbits=False, unit=None):
    '''Convert a number of bytes (or bits) into a human readable string.

    :arg size: the number to convert
    :kwarg isbits: when True, label with bit suffixes ('Kb', 'bits') instead
        of byte suffixes ('KB', 'Bytes')
    :kwarg unit: force a specific prefix (e.g. 'M') instead of auto-selecting
        the largest one that fits
    :returns: formatted string such as '1.50 KB'
    '''
    base = 'Bytes'
    if isbits:
        base = 'bits'
    suffix = ''

    # Walk the ranges from largest to smallest and stop at the first that
    # fits (or at the requested unit).  NOTE: ``suffix`` and ``limit``
    # deliberately leak out of the loop and are used below.
    for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
        if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
            break

    if limit != 1:
        suffix += base[0]
    else:
        suffix = base

    return '%.2f %s' % (float(size) / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
    '''
    Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument

    ex:
      human_to_bytes('10M') <=> human_to_bytes(10, 'M')

    :arg number: size expression such as '10M', '2.5 GB' or a bare number
    :kwarg default_unit: unit prefix assumed when ``number`` carries none
    :kwarg isbits: when True the suffix is validated as bits ('Mb') rather
        than bytes ('MB')
    :returns: int count of bytes (or bits)
    :raises ValueError: when the string cannot be parsed or the unit is unknown
    '''
    # FIX: raw string so '\s' and '\d' are regex escapes, not invalid
    # python string escapes (DeprecationWarning on py3.6+)
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except ValueError:
        # FIX: narrowed from a bare except; float() only raises ValueError here
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))

    unit = m.group(2)
    if unit is None:
        unit = default_unit

    if unit is None:
        # No unit given, returning raw number
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except KeyError:
        # FIX: narrowed from a bare except; dict lookup only raises KeyError
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))

    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        if unit_class_name in unit.lower():
            pass
        elif unit[1] != unit_class:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))

    return int(round(num * limit))
def is_executable(path):
    '''is the given path executable?

    :arg path: path of the file to check

    Limitations:

      * Does not account for FSACLs.
      * Most times we really want to know "Can the current user execute this
        file".  This function does not tell us that, only whether *any*
        execute bit (user, group or other) is set.
    '''
    # Build one mask of all three execute bits, then AND it with the
    # file's mode: a non-zero (truthy) result means some bit is set.
    execute_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return execute_bits & os.stat(path)[stat.ST_MODE]
def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module.  Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously.  It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    if _ANSIBLE_ARGS is not None:
        # Already read once; reuse the cached raw argument bytes
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline
        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                buffer = sys.argv[1]
                if PY3:
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if PY2:
                buffer = sys.stdin.read()
            else:
                # py3: read raw bytes from the underlying buffer; decoded below
                buffer = sys.stdin.buffer.read()
        _ANSIBLE_ARGS = buffer

    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)

    if PY2:
        params = json_dict_unicode_to_bytes(params)

    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
              '"failed": true}')
        sys.exit(1)
def env_fallback(*args, **kwargs):
    ''' Load value from environment.

    Returns the value of the first name in ``args`` that is present in the
    environment.

    :raises AnsibleFallbackNotFound: when none of the names are set
    '''
    for name in args:
        try:
            return os.environ[name]
        except KeyError:
            continue
    raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
    '''Translate attribute flag characters into their long names.

    Flags not present in FILE_ATTRIBUTES are silently dropped.
    '''
    return [FILE_ATTRIBUTES[attr] for attr in attributes if attr in FILE_ATTRIBUTES]
def get_flags_from_attributes(attributes):
    '''Translate long attribute names back into a flag-character string.'''
    return ''.join(key for key, attr in FILE_ATTRIBUTES.items() if attr in attributes)
class AnsibleFallbackNotFound(Exception):
    """Raised by fallback callables (such as ``env_fallback``) when they
    cannot supply a value, so argument parsing can fall back to defaults."""
    pass
class _SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Set):
return list(obj)
return super(_SetEncoder, self).default(obj)
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
             check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
             required_one_of=None, add_file_common_args=False, supports_check_mode=False,
             required_if=None):
    '''
    common code for quickly building an ansible module in Python
    (although you can write modules in anything that can return JSON)

    see library/* for examples

    :arg argument_spec: dict describing the module's accepted parameters
    :kwarg bypass_checks: skip all argument validation (use with care)
    :kwarg no_log: suppress logging of the module invocation
    :kwarg check_invalid_arguments: fail on parameters not in the spec
    :kwarg mutually_exclusive: groups of parameter names that may not be
        used together
    :kwarg required_together: groups that must all be present together
    :kwarg required_one_of: groups where at least one must be present
    :kwarg add_file_common_args: merge the common file options (owner,
        group, mode, ...) into argument_spec
    :kwarg supports_check_mode: whether the module honors check mode
    :kwarg required_if: conditional parameter requirements
    '''
    self._name = os.path.basename(__file__)  # initialize name until we can parse from options
    self.argument_spec = argument_spec
    self.supports_check_mode = supports_check_mode
    self.check_mode = False
    self.bypass_checks = bypass_checks
    self.no_log = no_log
    self.check_invalid_arguments = check_invalid_arguments
    self.mutually_exclusive = mutually_exclusive
    self.required_together = required_together
    self.required_one_of = required_one_of
    self.required_if = required_if
    self.cleanup_files = []
    self._debug = False
    self._diff = False
    self._socket_path = None
    self._verbosity = 0
    # May be used to set modifications to the environment for any
    # run_command invocation
    self.run_command_environ_update = {}
    self._warnings = []
    self._deprecations = []
    self.aliases = {}
    # Internal '_ansible_*' parameters injected by the ansible core engine;
    # accepted but never part of the module's own argument_spec
    self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
                          '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
                          '_ansible_socket']
    self._options_context = list()

    if add_file_common_args:
        for k, v in FILE_COMMON_ARGUMENTS.items():
            if k not in self.argument_spec:
                self.argument_spec[k] = v

    self._load_params()
    self._set_fallbacks()

    # append to legal_inputs and then possibly check against them
    try:
        self.aliases = self._handle_aliases()
    except Exception:
        e = get_exception()
        # Use exceptions here because it isn't safe to call fail_json until no_log is processed
        print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
        sys.exit(1)

    # Save parameter values that should never be logged
    self.no_log_values = set()
    self._handle_no_log_values()

    # check the locale as set by the current environment, and reset to
    # a known valid (LANG=C) if it's an invalid/unavailable locale
    self._check_locale()

    self._check_arguments(check_invalid_arguments)

    # check exclusive early
    if not bypass_checks:
        self._check_mutually_exclusive(mutually_exclusive)

    # Maps the 'type' entry of an argument_spec to its validator/caster
    self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
        'str': self._check_type_str,
        'list': self._check_type_list,
        'dict': self._check_type_dict,
        'bool': self._check_type_bool,
        'int': self._check_type_int,
        'float': self._check_type_float,
        'path': self._check_type_path,
        'raw': self._check_type_raw,
        'jsonarg': self._check_type_jsonarg,
        'json': self._check_type_jsonarg,
        'bytes': self._check_type_bytes,
        'bits': self._check_type_bits,
    }
    if not bypass_checks:
        self._check_required_arguments()
        self._check_argument_types()
        self._check_argument_values()
        self._check_required_together(required_together)
        self._check_required_one_of(required_one_of)
        self._check_required_if(required_if)

    self._set_defaults(pre=False)

    # deal with options sub-spec
    self._handle_options()

    if not self.no_log:
        self._log_invocation()

    # finally, make sure we're in a sane working dir
    self._set_cwd()
def warn(self, warning):
    '''Record a warning to be returned to the user, and log it.

    :arg warning: warning text; anything but a string raises TypeError
    '''
    if not isinstance(warning, string_types):
        raise TypeError("warn requires a string not a %s" % type(warning))
    self._warnings.append(warning)
    self.log('[WARNING] %s' % warning)
def deprecate(self, msg, version=None):
    '''Record a deprecation warning to be returned to the user, and log it.

    :arg msg: deprecation message; anything but a string raises TypeError
    :kwarg version: version in which the deprecated feature will be removed
    '''
    if not isinstance(msg, string_types):
        raise TypeError("deprecate requires a string not a %s" % type(msg))
    self._deprecations.append({'msg': msg, 'version': version})
    self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
def load_file_common_arguments(self, params):
    '''
    many modules deal with files, this encapsulates common
    options that the file module accepts such that it is directly
    available to all modules and they can share code.

    :arg params: module parameter dict; 'path' (or legacy 'dest') names
        the file the returned settings apply to
    :returns: dict with path, mode, owner, group, SELinux context parts
        and attributes; empty dict when no path is given
    '''
    path = params.get('path', params.get('dest', None))
    if path is None:
        return {}
    else:
        path = os.path.expanduser(os.path.expandvars(path))

    b_path = to_bytes(path, errors='surrogate_or_strict')
    # if the path is a symlink, and we're following links, get
    # the target of the link instead for testing
    if params.get('follow', False) and os.path.islink(b_path):
        b_path = os.path.realpath(b_path)
        path = to_native(b_path)

    mode = params.get('mode', None)
    owner = params.get('owner', None)
    group = params.get('group', None)

    # selinux related options
    seuser = params.get('seuser', None)
    serole = params.get('serole', None)
    setype = params.get('setype', None)
    selevel = params.get('selevel', None)
    secontext = [seuser, serole, setype]

    if self.selinux_mls_enabled():
        secontext.append(selevel)

    default_secontext = self.selinux_default_context(path)
    for i in range(len(default_secontext)):
        # FIX: dropped the tautological "i is not None" test -- i is always
        # an int coming from range().  '_default' entries take the policy
        # default for that component.
        if secontext[i] == '_default':
            secontext[i] = default_secontext[i]

    attributes = params.get('attributes', None)
    return dict(
        path=path, mode=mode, owner=owner, group=group,
        seuser=seuser, serole=serole, setype=setype,
        selevel=selevel, secontext=secontext, attributes=attributes,
    )
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
    '''Return True when the loaded SELinux policy is MLS-aware.'''
    if not HAVE_SELINUX:
        return False
    # is_selinux_mls_enabled() returns 1 when enabled, 0 otherwise
    return selinux.is_selinux_mls_enabled() == 1
def selinux_enabled(self):
    '''Return True when SELinux is enabled on the target system.

    Fails the module outright when the system appears to use SELinux
    (the selinuxenabled binary reports success) but the python bindings
    are not installed.
    '''
    if not HAVE_SELINUX:
        seenabled_bin = self.get_bin_path('selinuxenabled')
        if seenabled_bin is not None:
            (rc, out, err) = self.run_command(seenabled_bin)
            if rc == 0:
                self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
        return False
    return selinux.is_selinux_enabled() == 1
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
    '''Return the default SELinux context for ``path`` per the system policy.

    :returns: list of context components; all-None placeholders when the
        context cannot be determined
    '''
    context = self.selinux_initial_context()
    if not HAVE_SELINUX or not self.selinux_enabled():
        return context
    try:
        ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
    except OSError:
        return context
    if ret[0] == -1:
        return context
    # Limit split to 4 because the selevel, the last in the list,
    # may contain ':' characters
    return ret[1].split(':', 3)
def selinux_context(self, path):
    '''Return the current SELinux context of ``path``.

    Fails the module when the path does not exist or its context cannot
    be read.
    '''
    context = self.selinux_initial_context()
    if not HAVE_SELINUX or not self.selinux_enabled():
        return context
    try:
        ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
    except OSError:
        e = get_exception()
        if e.errno == errno.ENOENT:
            self.fail_json(path=path, msg='path %s does not exist' % path)
        else:
            self.fail_json(path=path, msg='failed to retrieve selinux context')
    if ret[0] == -1:
        return context
    # Limit split to 4 because the selevel, the last in the list,
    # may contain ':' characters
    return ret[1].split(':', 3)
def user_and_group(self, path, expand=True):
    '''Return the (uid, gid) owning ``path`` (the link itself for symlinks).

    :kwarg expand: expand '~' and environment variables in path first
    '''
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    st = os.lstat(b_path)
    return (st.st_uid, st.st_gid)
def find_mount_point(self, path):
    '''Walk up from ``path`` until a filesystem mount point is found.

    Returns bytes when given bytes, text otherwise.
    '''
    path_is_bytes = isinstance(path, binary_type)
    b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
    while not os.path.ismount(b_path):
        b_path = os.path.dirname(b_path)
    if path_is_bytes:
        return b_path
    return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
    """
    Returns a tuple containing (True, selinux_context) if the given path is on a
    NFS or other 'special' fs mount point, otherwise the return will be (False, None).
    """
    try:
        # FIX: context manager guarantees the handle is closed even when
        # readlines() fails (original left the file open on error)
        with open('/proc/mounts', 'r') as f:
            mount_data = f.readlines()
    except Exception:
        # FIX: narrowed from a bare except (which also swallowed
        # SystemExit/KeyboardInterrupt); still deliberately best-effort --
        # an unreadable mount table just means "not special"
        return (False, None)
    path_mount_point = self.find_mount_point(path)
    for line in mount_data:
        (device, mount_point, fstype, options, rest) = line.split(' ', 4)
        if path_mount_point == mount_point:
            for fs in self._selinux_special_fs:
                if fs in fstype:
                    special_context = self.selinux_context(path_mount_point)
                    return (True, special_context)
    return (False, None)
def set_default_selinux_context(self, path, changed):
    '''Reset ``path`` to the policy's default SELinux context.

    :returns: the (possibly updated) changed flag
    '''
    if not HAVE_SELINUX or not self.selinux_enabled():
        return changed
    default_context = self.selinux_default_context(path)
    return self.set_context_if_different(path, default_context, False)
def set_context_if_different(self, path, context, changed, diff=None):
    '''Set the SELinux context of ``path`` when it differs from ``context``.

    :arg context: desired context components; None entries mean "keep the
        current value"
    :kwarg diff: optional dict updated with before/after secontext info
    :returns: updated changed flag (True in check mode when a change would
        be made)
    '''
    if not HAVE_SELINUX or not self.selinux_enabled():
        return changed
    cur_context = self.selinux_context(path)
    new_context = list(cur_context)
    # Iterate over the current context instead of the
    # argument context, which may have selevel.
    (is_special_se, sp_context) = self.is_special_selinux_path(path)
    if is_special_se:
        # Special filesystems (e.g. NFS) dictate their own context
        new_context = sp_context
    else:
        for i in range(len(cur_context)):
            if len(context) > i:
                if context[i] is not None and context[i] != cur_context[i]:
                    new_context[i] = context[i]
                elif context[i] is None:
                    new_context[i] = cur_context[i]

    if cur_context != new_context:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['secontext'] = cur_context
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['secontext'] = new_context

        try:
            if self.check_mode:
                return True
            rc = selinux.lsetfilecon(to_native(path),
                                     str(':'.join(new_context)))
        except OSError:
            e = get_exception()
            self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
        if rc != 0:
            self.fail_json(path=path, msg='set selinux context failed')
        changed = True
    return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
    '''Change the owner of ``path`` when it differs from ``owner``.

    :arg owner: user name or numeric uid; None means "leave unchanged"
    :kwarg diff: optional dict updated with before/after owner info
    :kwarg expand: expand '~' and environment variables in path first
    :returns: updated changed flag (True in check mode when a change would
        be made)
    '''
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    if owner is None:
        return changed
    orig_uid, orig_gid = self.user_and_group(b_path, expand)
    try:
        uid = int(owner)
    except ValueError:
        # Not numeric: resolve the user name to a uid
        try:
            uid = pwd.getpwnam(owner).pw_uid
        except KeyError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)

    if orig_uid != uid:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['owner'] = orig_uid
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['owner'] = uid

        if self.check_mode:
            return True
        try:
            # lchown so symlinks themselves are changed, not their targets
            os.lchown(b_path, uid, -1)
        except OSError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chown failed')
        changed = True
    return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
    '''Change the group of ``path`` when it differs from ``group``.

    :arg group: group name or numeric gid; None means "leave unchanged"
    :kwarg diff: optional dict updated with before/after group info
    :kwarg expand: expand '~' and environment variables in path first
    :returns: updated changed flag (True in check mode when a change would
        be made)
    '''
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    if group is None:
        return changed
    orig_uid, orig_gid = self.user_and_group(b_path, expand)
    try:
        gid = int(group)
    except ValueError:
        # Not numeric: resolve the group name to a gid
        try:
            gid = grp.getgrnam(group).gr_gid
        except KeyError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)

    if orig_gid != gid:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['group'] = orig_gid
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['group'] = gid

        if self.check_mode:
            return True
        try:
            # lchown so symlinks themselves are changed, not their targets
            os.lchown(b_path, -1, gid)
        except OSError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chgrp failed')
        changed = True
    return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
    '''Change the permission bits of ``path`` when they differ from ``mode``.

    :arg mode: int, octal string (e.g. '0644') or symbolic mode string
        (e.g. 'u+rwx'); None means "leave unchanged"
    :kwarg diff: optional dict updated with before/after mode info
    :kwarg expand: expand '~' and environment variables in path first
    :returns: updated changed flag (True in check mode when a change would
        be made)
    '''
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    path_stat = os.lstat(b_path)

    if mode is None:
        return changed

    if not isinstance(mode, int):
        try:
            mode = int(mode, 8)
        except Exception:
            # Not octal; try to parse as a symbolic mode (e.g. 'u+x,g-w')
            try:
                mode = self._symbolic_mode_to_octal(path_stat, mode)
            except Exception:
                e = get_exception()
                path = to_text(b_path)
                self.fail_json(path=path,
                               msg="mode must be in octal or symbolic form",
                               details=str(e))

            if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info or being invalid long number
                path = to_text(b_path)
                self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)

    prev_mode = stat.S_IMODE(path_stat.st_mode)

    if prev_mode != mode:

        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['mode'] = '0%03o' % prev_mode
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['mode'] = '0%03o' % mode

        if self.check_mode:
            return True
        # FIXME: comparison against string above will cause this to be executed
        # every time
        try:
            if hasattr(os, 'lchmod'):
                os.lchmod(b_path, mode)
            else:
                if not os.path.islink(b_path):
                    os.chmod(b_path, mode)
                else:
                    # Attempt to set the perms of the symlink but be
                    # careful not to change the perms of the underlying
                    # file while trying
                    underlying_stat = os.stat(b_path)
                    os.chmod(b_path, mode)
                    new_underlying_stat = os.stat(b_path)
                    if underlying_stat.st_mode != new_underlying_stat.st_mode:
                        os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
        except OSError:
            e = get_exception()
            if os.path.islink(b_path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                pass
            elif e.errno in (errno.ENOENT, errno.ELOOP):  # Can't set mode on broken symbolic links
                pass
            else:
                raise e
        except Exception:
            e = get_exception()
            path = to_text(b_path)
            self.fail_json(path=path, msg='chmod failed', details=str(e))

        path_stat = os.lstat(b_path)
        new_mode = stat.S_IMODE(path_stat.st_mode)

        if new_mode != prev_mode:
            changed = True
    return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
    '''Apply chattr(1) filesystem attribute flags to ``path`` when they differ.

    :arg attributes: flag string (e.g. 'ei'); None means "leave unchanged"
    :kwarg diff: optional dict updated with before/after attributes info
    :kwarg expand: expand '~' and environment variables in path first
    :returns: updated changed flag
    '''
    if attributes is None:
        return changed

    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))

    existing = self.get_file_attributes(b_path)

    if existing.get('attr_flags', '') != attributes:
        attrcmd = self.get_bin_path('chattr')
        if attrcmd:
            attrcmd = [attrcmd, '=%s' % attributes, b_path]
            changed = True

            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['attributes'] = existing.get('attr_flags')
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['attributes'] = attributes

            if not self.check_mode:
                try:
                    rc, out, err = self.run_command(attrcmd)
                    if rc != 0 or err:
                        raise Exception("Error while setting attributes: %s" % (out + err))
                except Exception:
                    # FIX: narrowed from a bare except so SystemExit and
                    # KeyboardInterrupt are no longer swallowed
                    e = get_exception()
                    path = to_text(b_path)
                    self.fail_json(path=path, msg='chattr failed', details=str(e))
    return changed
def get_file_attributes(self, path):
    '''Query chattr-style attributes of ``path`` via lsattr(1).

    Best effort: returns an empty dict when lsattr is unavailable, fails,
    or its output cannot be parsed.

    :returns: dict with 'attr_flags' (e.g. 'ei'), 'version' and
        'attributes' (long flag names) when available
    '''
    output = {}
    attrcmd = self.get_bin_path('lsattr', False)
    if attrcmd:
        attrcmd = [attrcmd, '-vd', path]
        try:
            rc, out, err = self.run_command(attrcmd)
            if rc == 0:
                # lsattr -vd prints: "<version> <flags> <path>"
                res = out.split(' ')[0:2]
                output['attr_flags'] = res[1].replace('-', '').strip()
                output['version'] = res[0].strip()
                output['attributes'] = format_attributes(output['attr_flags'])
        except Exception:
            # FIX: narrowed from a bare except; still deliberately
            # best-effort, but SystemExit/KeyboardInterrupt now propagate
            pass
    return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
    """
    This enables symbolic chmod string parsing as stated in the chmod man-page

    This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"

    :arg path_stat: os.stat() result for the target; supplies the current
        mode that relative ('+'/'-') operations build upon
    :arg symbolic_mode: comma-separated symbolic mode clauses
    :returns: the resulting mode as an int
    :raises ValueError: on malformed user or permission characters
    """
    new_mode = stat.S_IMODE(path_stat.st_mode)

    # Now parse all symbolic modes
    for mode in symbolic_mode.split(','):
        # Per single mode. This always contains a '+', '-' or '='
        # Split it on that
        permlist = MODE_OPERATOR_RE.split(mode)
        # And find all the operators
        opers = MODE_OPERATOR_RE.findall(mode)

        # The user(s) where it's all about is the first element in the
        # 'permlist' list. Take that and remove it from the list.
        # An empty user or 'a' means 'all'.
        users = permlist.pop(0)
        use_umask = (users == '')
        if users == 'a' or users == '':
            users = 'ugo'

        # Check if there are illegal characters in the user list
        # They can end up in 'users' because they are not split
        if USERS_RE.match(users):
            raise ValueError("bad symbolic permission for mode: %s" % mode)

        # Now we have two list of equal length, one contains the requested
        # permissions and one with the corresponding operators.
        for idx, perms in enumerate(permlist):
            # Check if there are illegal characters in the permissions
            if PERMS_RE.match(perms):
                raise ValueError("bad symbolic permission for mode: %s" % mode)

            # Apply this clause once per affected user class
            for user in users:
                mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
                new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)

    return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
    '''Translate one user class's permission characters into octal bits.

    :arg path_stat: os.stat() result; the current mode is consulted for
        the 'X', 'u', 'g' and 'o' permission characters
    :arg user: 'u', 'g' or 'o'
    :arg perms: permission characters, e.g. 'rwX'
    :arg use_umask: when True (empty user spec), bits set in the umask
        are excluded from 'r', 'w' and 'x'
    :returns: the OR-combined permission bits for this user class
    '''
    prev_mode = stat.S_IMODE(path_stat.st_mode)

    is_directory = stat.S_ISDIR(path_stat.st_mode)
    has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
    apply_X_permission = is_directory or has_x_permissions

    # Get the umask, if the 'user' part is empty, the effect is as if (a) were
    # given, but bits that are set in the umask are not affected.
    # We also need the "reversed umask" for masking
    # NOTE: os.umask(0) momentarily clears the process umask; it is
    # restored immediately on the next line.
    umask = os.umask(0)
    os.umask(umask)
    rev_umask = umask ^ PERM_BITS

    # Permission bits constants documented at:
    # http://docs.python.org/2/library/stat.html#stat.S_ISUID
    if apply_X_permission:
        X_perms = {
            'u': {'X': stat.S_IXUSR},
            'g': {'X': stat.S_IXGRP},
            'o': {'X': stat.S_IXOTH},
        }
    else:
        # 'X' grants execute only for directories or files that already
        # have some execute bit set
        X_perms = {
            'u': {'X': 0},
            'g': {'X': 0},
            'o': {'X': 0},
        }

    # For each user class, map every permission character to its bits;
    # 'u'/'g'/'o' characters copy the corresponding class's current bits
    # (shifted into this class's position).
    user_perms_to_modes = {
        'u': {
            'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
            'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
            'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
            's': stat.S_ISUID,
            't': 0,
            'u': prev_mode & stat.S_IRWXU,
            'g': (prev_mode & stat.S_IRWXG) << 3,
            'o': (prev_mode & stat.S_IRWXO) << 6},
        'g': {
            'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
            'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
            'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
            's': stat.S_ISGID,
            't': 0,
            'u': (prev_mode & stat.S_IRWXU) >> 3,
            'g': prev_mode & stat.S_IRWXG,
            'o': (prev_mode & stat.S_IRWXO) << 3},
        'o': {
            'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
            'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
            'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
            's': 0,
            't': stat.S_ISVTX,
            'u': (prev_mode & stat.S_IRWXU) >> 6,
            'g': (prev_mode & stat.S_IRWXG) >> 3,
            'o': prev_mode & stat.S_IRWXO},
    }

    # Insert X_perms into user_perms_to_modes
    for key, value in X_perms.items():
        user_perms_to_modes[key].update(value)

    def or_reduce(mode, perm):
        return mode | user_perms_to_modes[user][perm]
    return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
    """Apply selinux context, owner, group, mode and attributes from
    *file_args* to file_args['path'] as needed.

    Returns the (possibly updated) *changed* flag.
    """
    path = file_args['path']

    # context setter has no 'expand' parameter, so handle it separately
    changed = self.set_context_if_different(path, file_args['secontext'], changed, diff)

    # the remaining setters share one signature; run them in a fixed order
    for setter, key in ((self.set_owner_if_different, 'owner'),
                        (self.set_group_if_different, 'group'),
                        (self.set_mode_if_different, 'mode'),
                        (self.set_attributes_if_different, 'attributes')):
        changed = setter(path, file_args[key], changed, diff, expand)

    return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
    # directories share the generic filesystem-attribute handling
    return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
    # regular files share the generic filesystem-attribute handling
    return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
    '''
    for results that are files, supplement the info about the file
    in the return path with stats about the file path.
    '''
    # prefer 'path', fall back to 'dest' (the two common module arg names)
    path = kwargs.get('path', kwargs.get('dest', None))
    if path is None:
        return kwargs
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if os.path.exists(b_path):
        (uid, gid) = self.user_and_group(path)
        kwargs['uid'] = uid
        kwargs['gid'] = gid
        # resolve numeric ids to names; fall back to the stringified id
        # when there is no matching passwd/group entry
        try:
            user = pwd.getpwuid(uid)[0]
        except KeyError:
            user = str(uid)
        try:
            group = grp.getgrgid(gid)[0]
        except KeyError:
            group = str(gid)
        kwargs['owner'] = user
        kwargs['group'] = group
        # lstat so a symlink reports its own mode, not its target's
        st = os.lstat(b_path)
        kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
        # secontext not yet supported
        if os.path.islink(b_path):
            kwargs['state'] = 'link'
        elif os.path.isdir(b_path):
            kwargs['state'] = 'directory'
        elif os.stat(b_path).st_nlink > 1:
            kwargs['state'] = 'hard'
        else:
            kwargs['state'] = 'file'
        if HAVE_SELINUX and self.selinux_enabled():
            kwargs['secontext'] = ':'.join(self.selinux_context(path))
        kwargs['size'] = st[stat.ST_SIZE]
    else:
        kwargs['state'] = 'absent'
    return kwargs
def _check_locale(self):
    '''
    Uses the locale module to test the currently set locale
    (per the LANG and LC_CTYPE environment settings)
    '''
    try:
        # setting the locale to '' uses the default locale
        # as it would be returned by locale.getdefaultlocale()
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        # fallback to the 'C' locale, which may cause unicode
        # issues but is preferable to simply failing because
        # of an unknown locale
        locale.setlocale(locale.LC_ALL, 'C')
        os.environ['LANG'] = 'C'
        os.environ['LC_ALL'] = 'C'
        os.environ['LC_MESSAGES'] = 'C'
    except Exception:
        # anything else is fatal: report via fail_json so the module
        # still returns well-formed JSON instead of a raw traceback
        e = get_exception()
        self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self, spec=None, param=None):
    """Register every spec key and its aliases as legal inputs and copy
    alias values onto their canonical parameter key.

    :returns: dict mapping alias -> canonical name
    :raises Exception: on an invalid spec (required+default, bad aliases
        type) -- exceptions are used because this runs before fail_json
        is safe to call
    """
    # this uses exceptions as it happens before we can safely call fail_json
    aliases_results = {}  # alias:canon
    if param is None:
        param = self.params
    if spec is None:
        spec = self.argument_spec
    for (k, v) in spec.items():
        self._legal_inputs.append(k)
        aliases = v.get('aliases', None)
        default = v.get('default', None)
        required = v.get('required', False)
        if default is not None and required:
            # not alias specific but this is a good place to check this
            raise Exception("internal error: required and default are mutually exclusive for %s" % k)
        if aliases is None:
            continue
        if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
            raise Exception('internal error: aliases must be a list or tuple')
        for alias in aliases:
            self._legal_inputs.append(alias)
            aliases_results[alias] = k
            if alias in param:
                # copy the supplied alias value onto the canonical key
                param[k] = param[alias]

    return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
    """Record the values of no_log parameters (for later output scrubbing)
    and queue deprecation warnings for params marked removed_in_version."""
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params

    # Use the argspec to determine which args are no_log
    for arg_name, arg_opts in spec.items():
        if arg_opts.get('no_log', False):
            # Find the value for the no_log'd param
            no_log_object = param.get(arg_name, None)
            if no_log_object:
                self.no_log_values.update(return_values(no_log_object))

        if arg_opts.get('removed_in_version') is not None and arg_name in param:
            self._deprecations.append({
                'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
                'version': arg_opts.get('removed_in_version')
            })
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
    """Consume internal '_ansible_*' control parameters and optionally fail
    on parameters that are not legal inputs.

    Internal params are copied onto instance attributes and then deleted
    from self.params so modules never see them.
    """
    self._syslog_facility = 'LOG_USER'
    unsupported_parameters = set()
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    if legal_inputs is None:
        legal_inputs = self._legal_inputs

    # iterate over a list() copy since keys are deleted while iterating
    for (k, v) in list(param.items()):

        if k == '_ansible_check_mode' and v:
            self.check_mode = True

        elif k == '_ansible_no_log':
            self.no_log = self.boolean(v)

        elif k == '_ansible_debug':
            self._debug = self.boolean(v)

        elif k == '_ansible_diff':
            self._diff = self.boolean(v)

        elif k == '_ansible_verbosity':
            self._verbosity = v

        elif k == '_ansible_selinux_special_fs':
            self._selinux_special_fs = v

        elif k == '_ansible_syslog_facility':
            self._syslog_facility = v

        elif k == '_ansible_version':
            self.ansible_version = v

        elif k == '_ansible_module_name':
            self._name = v

        elif k == '_ansible_socket':
            self._socket_path = v

        elif check_invalid_arguments and k not in legal_inputs:
            unsupported_parameters.add(k)

        # clean up internal params:
        if k.startswith('_ansible_'):
            del self.params[k]

    if unsupported_parameters:
        msg = "Unsupported parameters for (%s) module: %s" % (self._name, ','.join(sorted(list(unsupported_parameters))))
        if self._options_context:
            msg += " found in %s." % " -> ".join(self._options_context)
        msg += " Supported parameters include: %s" % (','.join(sorted(spec.keys())))
        self.fail_json(msg=msg)
    if self.check_mode and not self.supports_check_mode:
        self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
    """Fail if more than one parameter of any mutually-exclusive group in
    *spec* was supplied."""
    if spec is None:
        return
    for group in spec:
        if self._count_terms(group, param) > 1:
            msg = "parameters are mutually exclusive: %s" % (group,)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
    """Fail unless at least one parameter of every group in *spec* is
    present."""
    if spec is None:
        return
    for group in spec:
        if self._count_terms(group, param) == 0:
            msg = "one of the following is required: %s" % ','.join(group)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
    """Fail if only a subset of a required-together group was supplied:
    for each group in *spec*, either all of its parameters are present
    or none of them."""
    if spec is None:
        return
    for check in spec:
        counts = [self._count_terms([field], param) for field in check]
        non_zero = [c for c in counts if c > 0]
        if len(non_zero) > 0:
            # at least one member of the group was given; if any other
            # member is missing (count of 0), that is an error
            if 0 in counts:
                msg = "parameters are required together: %s" % (check,)
                if self._options_context:
                    msg += " found in %s" % " -> ".join(self._options_context)
                self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
    ''' ensure all required arguments are present '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params

    # gather every spec entry flagged required that was not supplied
    missing = [k for k, v in spec.items()
               if v.get('required', False) and k not in param]

    if missing:
        msg = "missing required arguments: %s" % ",".join(missing)
        if self._options_context:
            msg += " found in %s" % " -> ".join(self._options_context)
        self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
    ''' ensure that parameters which conditionally required are present '''
    if spec is None:
        return
    if param is None:
        param = self.params
    for sp in spec:
        missing = []
        max_missing_count = 0
        is_one_of = False
        # each entry is (key, value, requirements) or
        # (key, value, requirements, is_one_of)
        if len(sp) == 4:
            key, val, requirements, is_one_of = sp
        else:
            key, val, requirements = sp

        # is_one_of is True at least one requirement should be
        # present, else all requirements should be present.
        if is_one_of:
            max_missing_count = len(requirements)

        if key in param and param[key] == val:
            for check in requirements:
                count = self._count_terms((check,), param)
                if count == 0:
                    missing.append(check)
        # for is_one_of, only fail when ALL requirements are missing
        # (len(missing) == max_missing_count == len(requirements));
        # otherwise any missing requirement is an error
        if len(missing) and len(missing) >= max_missing_count:
            msg = "%s is %s but the following are missing: %s" % (key, val, ','.join(missing))
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
    ''' ensure all arguments have the requested values, and there are no stray arguments '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        choices = v.get('choices', None)
        if choices is None:
            continue
        if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
            if k in param:
                if param[k] not in choices:
                    # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
                    # the value. If we can't figure this out, module author is responsible.
                    lowered_choices = None
                    if param[k] == 'False':
                        lowered_choices = _lenient_lowercase(choices)
                        overlap = BOOLEANS_FALSE.intersection(choices)
                        if len(overlap) == 1:
                            # Extract from a set
                            (param[k],) = overlap

                    if param[k] == 'True':
                        if lowered_choices is None:
                            lowered_choices = _lenient_lowercase(choices)
                        overlap = BOOLEANS_TRUE.intersection(choices)
                        if len(overlap) == 1:
                            (param[k],) = overlap

                    # re-check after the possible bool-string normalization
                    if param[k] not in choices:
                        choices_str = ",".join([to_native(c) for c in choices])
                        msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
                        if self._options_context:
                            msg += " found in %s" % " -> ".join(self._options_context)
                        self.fail_json(msg=msg)
        else:
            # a choices entry that is not a proper sequence is a module bug
            msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
    """Evaluate *value* as a Python literal, refusing method calls and
    imports.  Non-strings and strings that look unsafe or cannot be
    evaluated are returned unchanged.

    When include_exceptions is true, returns (result, exception) pairs
    instead of the bare result.
    NOTE(review): the 'locals' parameter is accepted but never used --
    kept for API compatibility.
    """
    # do not allow method calls to modules
    if not isinstance(value, string_types):
        # already templated to a datavaluestructure, perhaps?
        if include_exceptions:
            return (value, None)
        return value
    if re.search(r'\w\.\w+\(', value):
        if include_exceptions:
            return (value, None)
        return value
    # do not allow imports
    if re.search(r'import \w+', value):
        if include_exceptions:
            return (value, None)
        return value
    try:
        # literal_eval only accepts Python literals, so no code can run
        result = literal_eval(value)
        if include_exceptions:
            return (result, None)
        else:
            return result
    except Exception:
        e = get_exception()
        if include_exceptions:
            return (value, e)
        return value
def _check_type_str(self, value):
    """Coerce *value* to a string; strings pass through unchanged."""
    # Note: This could throw a unicode error if value's __str__() method
    # returns non-ascii. Have to port utils.to_bytes() if that happens
    return value if isinstance(value, string_types) else str(value)
def _check_type_list(self, value):
    """Coerce *value* to a list: strings are split on commas, numbers are
    wrapped as a single stringified element.

    :raises TypeError: for any other type
    """
    if isinstance(value, list):
        return value
    if isinstance(value, string_types):
        return value.split(",")
    if isinstance(value, (int, float)):
        return [str(value)]
    raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
    """Coerce *value* to a dict.  Strings may be JSON ('{...}'), a Python
    dict literal (via safe_eval), or 'k1=v1 k2=v2' / comma-separated
    key=value pairs with quoting and backslash escapes.

    :raises TypeError: when the value cannot be interpreted as a dict
    """
    if isinstance(value, dict):
        return value
    if isinstance(value, string_types):
        if value.startswith("{"):
            try:
                return json.loads(value)
            except:
                # not valid JSON -- maybe a Python dict literal
                (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                if exc is not None:
                    raise TypeError('unable to evaluate string as dictionary')
                return result
        elif '=' in value:
            # hand-rolled k=v tokenizer: splits on unquoted ',' or ' ',
            # honours single/double quotes and backslash escapes
            fields = []
            field_buffer = []
            in_quote = False
            in_escape = False
            for c in value.strip():
                if in_escape:
                    # previous char was a backslash: take this one literally
                    field_buffer.append(c)
                    in_escape = False
                elif c == '\\':
                    in_escape = True
                elif not in_quote and c in ('\'', '"'):
                    # remember WHICH quote char opened, to match the close
                    in_quote = c
                elif in_quote and in_quote == c:
                    in_quote = False
                elif not in_quote and c in (',', ' '):
                    # unquoted separator: terminate the current field
                    field = ''.join(field_buffer)
                    if field:
                        fields.append(field)
                    field_buffer = []
                else:
                    field_buffer.append(c)

            # flush the trailing field
            field = ''.join(field_buffer)
            if field:
                fields.append(field)
            return dict(x.split("=", 1) for x in fields)
        else:
            raise TypeError("dictionary requested, could not parse JSON or key=value")

    raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
    """Coerce *value* to a bool via the module's boolean() converter.

    :raises TypeError: when the value is not a bool, string or int
    """
    if isinstance(value, bool):
        return value
    if not (isinstance(value, string_types) or isinstance(value, int)):
        raise TypeError('%s cannot be converted to a bool' % type(value))
    return self.boolean(value)
def _check_type_int(self, value):
    """Coerce *value* to an int; only ints and strings are accepted.

    :raises TypeError: for any other type
    """
    if isinstance(value, int):
        return value
    if not isinstance(value, string_types):
        raise TypeError('%s cannot be converted to an int' % type(value))
    return int(value)
def _check_type_float(self, value):
    """Coerce *value* to a float; floats, strings (text or bytes) and
    ints are accepted.

    :raises TypeError: for any other type
    """
    if isinstance(value, float):
        return value
    if not isinstance(value, (binary_type, text_type, int)):
        raise TypeError('%s cannot be converted to a float' % type(value))
    return float(value)
def _check_type_path(self, value):
    """Coerce *value* to a string and expand '~' and environment variables."""
    value = self._check_type_str(value)
    return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
    """Return a jsonified string.  Sometimes the controller turns a json
    string into a dict/list, so transform it back into json here.

    :raises TypeError: when the value is neither a string nor a
        list/tuple/dict
    """
    if isinstance(value, (text_type, binary_type)):
        # already a json string: just trim surrounding whitespace
        return value.strip()
    if isinstance(value, (list, tuple, dict)):
        return self.jsonify(value)
    raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
    """Pass *value* through unchanged ('raw' type performs no coercion)."""
    return value
def _check_type_bytes(self, value):
    """Convert a human-readable size (e.g. '1K', '2.5MB') to an integer
    byte count via human_to_bytes().

    :raises TypeError: when the value cannot be parsed as a size
    """
    try:
        # Bug fix: the converted value must be returned -- previously it
        # was computed and discarded, so the param was silently set to None
        return self.human_to_bytes(value)
    except ValueError:
        raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
    """Convert a human-readable size in bits (e.g. '1Mb') to an integer
    bit count via human_to_bytes(isbits=True).

    :raises TypeError: when the value cannot be parsed as a size
    """
    try:
        # Bug fix: the converted value must be returned -- previously it
        # was computed and discarded, so the param was silently set to None
        return self.human_to_bytes(value, isbits=True)
    except ValueError:
        raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
    ''' deal with options to create sub spec '''
    if argument_spec is None:
        argument_spec = self.argument_spec
    if params is None:
        params = self.params

    for (k, v) in argument_spec.items():
        wanted = v.get('type', None)
        # only dict-typed args (or lists whose elements are dicts) can
        # carry a sub-spec under 'options'
        if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
            spec = v.get('options', None)
            if spec is None or not params[k]:
                continue

            # track the nesting path so error messages can report it
            self._options_context.append(k)

            # normalize to a list of dicts so one code path handles both
            if isinstance(params[k], dict):
                elements = [params[k]]
            else:
                elements = params[k]

            for param in elements:
                if not isinstance(param, dict):
                    self.fail_json(msg="value of %s must be of type dict or list of dict" % k)

                # run the same validation pipeline used for the top-level
                # spec against this sub-spec, in the same order
                self._set_fallbacks(spec, param)
                options_aliases = self._handle_aliases(spec, param)
                self._handle_no_log_values(spec, param)
                options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())

                self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)

                # check exclusive early
                if not self.bypass_checks:
                    self._check_mutually_exclusive(self.mutually_exclusive, param)

                self._set_defaults(pre=True, spec=spec, param=param)

                if not self.bypass_checks:
                    self._check_required_arguments(spec, param)
                    self._check_argument_types(spec, param)
                    self._check_argument_values(spec, param)

                    self._check_required_together(self.required_together, param)
                    self._check_required_one_of(self.required_one_of, param)
                    self._check_required_if(self.required_if, param)

                self._set_defaults(pre=False, spec=spec, param=param)

                # handle multi level options (sub argspec)
                self._handle_options(spec, param)
            self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
    ''' ensure all arguments have the requested type '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        wanted = v.get('type', None)
        if k not in param:
            continue

        value = param[k]
        if value is None:
            continue

        if not callable(wanted):
            if wanted is None:
                # Mostly we want to default to str.
                # For values set to None explicitly, return None instead as
                # that allows a user to unset a parameter
                if param[k] is None:
                    continue
                wanted = 'str'
            try:
                # look up the _check_type_* method by type name
                type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
            except KeyError:
                self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
        else:
            # set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
            type_checker = wanted
            wanted = getattr(wanted, '__name__', to_native(type(wanted)))

        try:
            # coerce in place so later validation sees the final value
            param[k] = type_checker(value)
        except (TypeError, ValueError):
            e = get_exception()
            self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" % (k, type(value), wanted, e))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
    ''' read the input and set the params attribute.

    This method is for backwards compatibility. The guts of the function
    were moved out in 2.1 so that custom modules could read the parameters.
    '''
    # debug overrides to read args from file or cmdline
    # delegates to the module-level _load_params() function
    self.params = _load_params()
def _log_to_syslog(self, msg):
    """Send *msg* to syslog at LOG_INFO when the syslog module is
    available (HAS_SYSLOG); otherwise do nothing."""
    if HAS_SYSLOG:
        module = 'ansible-%s' % self._name
        # fall back to LOG_USER when the configured facility name is unknown
        facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
        syslog.openlog(str(module), 0, facility)
        syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
    """Log *msg* with a '[debug]' prefix, only when debug mode is enabled."""
    if self._debug:
        self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
    """Log *msg* to journald when available, otherwise syslog, unless
    no_log is set.  Registered no_log values are scrubbed from the
    message first; *log_args* become extra journald fields."""
    if not self.no_log:

        if log_args is None:
            log_args = dict()

        module = 'ansible-%s' % self._name
        if isinstance(module, binary_type):
            module = module.decode('utf-8', 'replace')

        # 6655 - allow for accented characters
        if not isinstance(msg, (binary_type, text_type)):
            raise TypeError("msg should be a string (got %s)" % type(msg))

        # We want journal to always take text type
        # syslog takes bytes on py2, text type on py3
        if isinstance(msg, binary_type):
            journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
        else:
            # TODO: surrogateescape is a danger here on Py3
            journal_msg = remove_values(msg, self.no_log_values)

        if PY3:
            syslog_msg = journal_msg
        else:
            syslog_msg = journal_msg.encode('utf-8', 'replace')

        if has_journal:
            journal_args = [("MODULE", os.path.basename(__file__))]
            for arg in log_args:
                journal_args.append((arg.upper(), str(log_args[arg])))
            try:
                journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
            except IOError:
                # fall back to syslog since logging to journal failed
                self._log_to_syslog(syslog_msg)
        else:
            self._log_to_syslog(syslog_msg)
def _log_invocation(self):
    ''' log that ansible ran the module '''
    # TODO: generalize a separate log function and make log_invocation use it
    # Sanitize possible password argument when logging.
    log_args = dict()

    for param in self.params:
        # resolve aliases so the no_log flag of the canonical arg applies
        canon = self.aliases.get(param, param)
        arg_opts = self.argument_spec.get(canon, {})
        no_log = arg_opts.get('no_log', False)

        if self.boolean(no_log):
            log_args[param] = 'NOT_LOGGING_PARAMETER'
        # try to capture all passwords/passphrase named fields missed by no_log
        elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
            # skip boolean and enums as they are about 'password' state
            log_args[param] = 'NOT_LOGGING_PASSWORD'
            self.warn('Module did not set no_log for %s' % param)
        else:
            param_val = self.params[param]
            if not isinstance(param_val, (text_type, binary_type)):
                param_val = str(param_val)
            elif isinstance(param_val, text_type):
                param_val = param_val.encode('utf-8')
            # scrub anything that still looks like a credential
            log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)

    msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
    if msg:
        msg = 'Invoked with %s' % ' '.join(msg)
    else:
        msg = 'Invoked'

    self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
    '''
    find system executable in PATH.

    :arg arg: name of the executable to look for
    :kw required: if executable is not found and required is true, fail_json
    :kw opt_dirs: optional list of directories to search in addition to PATH
    :returns: full path if found; otherwise None
    '''
    # avoid the mutable-default-argument pitfall: the old signature used
    # opt_dirs=[], a single list shared across all calls
    if opt_dirs is None:
        opt_dirs = []
    sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    paths = []
    for d in opt_dirs:
        if d is not None and os.path.exists(d):
            paths.append(d)
    paths += os.environ.get('PATH', '').split(os.pathsep)
    bin_path = None
    # mangle PATH to include /sbin dirs
    for p in sbin_paths:
        if p not in paths and os.path.exists(p):
            paths.append(p)
    for d in paths:
        if not d:
            continue
        path = os.path.join(d, arg)
        # must exist, be a file (not a dir) and be executable
        if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
            bin_path = path
            break
    if required and bin_path is None:
        self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
    return bin_path
def boolean(self, arg):
    ''' return a bool for the arg '''
    if arg is None:
        return arg
    try:
        # delegates to the module-level boolean() converter (the method
        # name shadows it inside this scope only for attribute access)
        return boolean(arg)
    except TypeError as e:
        self.fail_json(msg=to_native(e))
def jsonify(self, data):
    """Serialize *data* to a JSON string, trying utf-8 and then latin-1
    for byte strings that are not valid unicode; fail_json on failure."""
    for encoding in ("utf-8", "latin-1"):
        try:
            return json.dumps(data, encoding=encoding, cls=_SetEncoder)
        # Old systems using old simplejson module does not support encoding keyword.
        except TypeError:
            try:
                # pre-decode all byte strings ourselves, then dump
                new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
            except UnicodeDecodeError:
                continue
            return json.dumps(new_data, cls=_SetEncoder)
        except UnicodeDecodeError:
            continue
    self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
    """Deserialize a JSON string into Python data."""
    return json.loads(data)
def add_cleanup_file(self, path):
    """Register *path* for removal when the module exits (deduplicated)."""
    if path not in self.cleanup_files:
        self.cleanup_files.append(path)
def do_cleanup_files(self):
    """Remove every file previously registered via add_cleanup_file()."""
    for path in self.cleanup_files:
        self.cleanup(path)
def _return_formatted(self, kwargs):
    """Print the module result as JSON on stdout, after adding path info,
    aggregating warnings/deprecations and scrubbing no_log values."""
    self.add_path_info(kwargs)

    if 'invocation' not in kwargs:
        kwargs['invocation'] = {'module_args': self.params}

    if 'warnings' in kwargs:
        if isinstance(kwargs['warnings'], list):
            for w in kwargs['warnings']:
                self.warn(w)
        else:
            self.warn(kwargs['warnings'])

    # replace caller-supplied warnings with the accumulated list
    if self._warnings:
        kwargs['warnings'] = self._warnings

    if 'deprecations' in kwargs:
        if isinstance(kwargs['deprecations'], list):
            for d in kwargs['deprecations']:
                # a (msg, version) pair or a bare message
                if isinstance(d, SEQUENCETYPE) and len(d) == 2:
                    self.deprecate(d[0], version=d[1])
                else:
                    self.deprecate(d)
        else:
            self.deprecate(kwargs['deprecations'])

    if self._deprecations:
        kwargs['deprecations'] = self._deprecations

    # strip any values registered as no_log from the whole result tree
    kwargs = remove_values(kwargs, self.no_log_values)
    print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
    ''' return from the module, without error '''
    # remove any registered temp files before emitting the result
    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(0)
def fail_json(self, **kwargs):
    ''' return from the module, with an error message '''
    # Use an explicit check instead of 'assert': asserts are stripped when
    # python runs with -O, which would silently skip this sanity check.
    # AssertionError is raised deliberately so existing callers that catch
    # it keep working.
    if 'msg' not in kwargs:
        raise AssertionError("implementation error -- msg to explain the error is required")
    kwargs['failed'] = True

    # add traceback if debug or high verbosity and it is missing
    # Note: badly named as exception, it is really always been 'traceback'
    if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
        kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))

    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(1)
def fail_on_missing_params(self, required_params=None):
    ''' This is for checking for required params when we can not check via argspec because we
    need more information than is simply given in the argspec.
    '''
    if not required_params:
        return
    # a param counts as missing when absent OR falsy (matches .get() check)
    missing_params = [p for p in required_params if not self.params.get(p)]
    if missing_params:
        self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
    ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
    if not os.path.exists(filename):
        return None
    if os.path.isdir(filename):
        self.fail_json(msg="attempted to take checksum of directory: %s" % filename)

    # preserve old behaviour where the third parameter was a hash algorithm object
    if hasattr(algorithm, 'hexdigest'):
        digest_method = algorithm
    else:
        try:
            digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
        except KeyError:
            self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
                           (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))

    blocksize = 64 * 1024
    infile = open(os.path.realpath(filename), 'rb')
    # try/finally so the handle is closed even if a read raises
    # (previously an exception mid-read leaked the file descriptor)
    try:
        block = infile.read(blocksize)
        while block:
            digest_method.update(block)
            block = infile.read(blocksize)
    finally:
        infile.close()
    return digest_method.hexdigest()
def md5(self, filename):
    ''' Return MD5 hex digest of local file using digest_from_file().

    Do not use this function unless you have no other choice for:
        1) Optional backwards compatibility
        2) Compatibility with a third party protocol

    This function will not work on systems complying with FIPS-140-2.

    Most uses of this function can use the module.sha1 function instead.
    '''
    # hashlib omits md5 entirely when the host is in FIPS mode
    if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
    ''' Return SHA1 hex digest of local file using digest_from_file(). '''
    return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
    ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
    return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
    '''make a date-marked backup of the specified file, return True or False on success or failure'''
    # NOTE(review): despite the docstring, this returns the backup path
    # ('' when fn does not exist), not True/False -- callers use the path
    backupdest = ''
    if os.path.exists(fn):
        # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
        ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
        backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)

        try:
            # keep ownership/permissions/selinux context on the copy
            self.preserved_copy(fn, backupdest)
        except (shutil.Error, IOError):
            e = get_exception()
            self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))

    return backupdest
def cleanup(self, tmpfile):
    """Best-effort removal of *tmpfile*; a missing file is a no-op and
    an OSError is reported to stderr rather than raised."""
    if not os.path.exists(tmpfile):
        return
    try:
        os.unlink(tmpfile)
    except OSError:
        e = get_exception()
        sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def preserved_copy(self, src, dest):
    """Copy a file with preserved ownership, permissions and context"""

    # shutil.copy2(src, dst)
    #   Similar to shutil.copy(), but metadata is copied as well - in fact,
    #   this is just shutil.copy() followed by copystat(). This is similar
    #   to the Unix command cp -p.
    #
    # shutil.copystat(src, dst)
    #   Copy the permission bits, last access time, last modification time,
    #   and flags from src to dst. The file contents, owner, and group are
    #   unaffected. src and dst are path names given as strings.
    shutil.copy2(src, dest)

    # Set the context
    if self.selinux_enabled():
        context = self.selinux_context(src)
        self.set_context_if_different(dest, context, False)

    # chown it
    try:
        dest_stat = os.stat(src)
        tmp_stat = os.stat(dest)
        if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
            os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
    except OSError as e:
        # EPERM just means we are not root; anything else is a real error
        if e.errno != errno.EPERM:
            raise

    # Set the attributes
    current_attribs = self.get_file_attributes(src)
    current_attribs = current_attribs.get('attr_flags', [])
    current_attribs = ''.join(current_attribs)
    self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
    '''atomically move src to dest, copying attributes from dest, returns true on success
    it uses os.rename to ensure this as it is an atomic operation, rest of the function is
    to work around limitations, corner cases and ensure selinux context is saved if possible'''
    context = None
    dest_stat = None
    b_src = to_bytes(src, errors='surrogate_or_strict')
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if os.path.exists(b_dest):
        try:
            dest_stat = os.stat(b_dest)

            # copy mode and ownership
            os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
            os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)

            # try to copy flags if possible
            if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
                try:
                    os.chflags(b_src, dest_stat.st_flags)
                except OSError:
                    e = get_exception()
                    # ignore "not supported" from filesystems without
                    # chflags support; re-raise anything else
                    for err in 'EOPNOTSUPP', 'ENOTSUP':
                        if hasattr(errno, err) and e.errno == getattr(errno, err):
                            break
                    else:
                        raise
        except OSError:
            e = get_exception()
            # EPERM just means we cannot copy ownership; carry on
            if e.errno != errno.EPERM:
                raise
        if self.selinux_enabled():
            context = self.selinux_context(dest)
    else:
        if self.selinux_enabled():
            context = self.selinux_default_context(dest)

    creating = not os.path.exists(b_dest)

    try:
        # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
        os.rename(b_src, b_dest)
    except (IOError, OSError):
        e = get_exception()
        if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
            # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
            # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
            self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
        else:
            b_dest_dir = os.path.dirname(b_dest)
            # Use bytes here. In the shippable CI, this fails with
            # a UnicodeError with surrogateescape'd strings for an unknown
            # reason (doesn't happen in a local Ubuntu16.04 VM)
            native_dest_dir = b_dest_dir
            native_suffix = os.path.basename(b_dest)
            native_prefix = b('.ansible_tmp')
            error_msg = None
            tmp_dest_name = None
            try:
                # create the temp file in the destination directory so the
                # final rename cannot cross a filesystem boundary
                tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
            except (OSError, IOError):
                e = get_exception()
                error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e)
            except TypeError:
                # We expect that this is happening because python3.4.x and
                # below can't handle byte strings in mkstemp(). Traceback
                # would end in something like:
                #     file = _os.path.join(dir, pre + name + suf)
                #     TypeError: can't concat bytes to str
                error_msg = ('Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
                             'Please use Python2.x or Python3.5 or greater.')
            finally:
                if error_msg:
                    if unsafe_writes:
                        self._unsafe_writes(b_src, b_dest)
                    else:
                        self.fail_json(msg=error_msg, exception=traceback.format_exc())

            if tmp_dest_name:
                b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')

                try:
                    try:
                        # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
                        os.close(tmp_dest_fd)
                        # leaves tmp file behind when sudo and not root
                        try:
                            shutil.move(b_src, b_tmp_dest_name)
                        except OSError:
                            # cleanup will happen by 'rm' of tempdir
                            # copy2 will preserve some metadata
                            shutil.copy2(b_src, b_tmp_dest_name)

                        if self.selinux_enabled():
                            self.set_context_if_different(
                                b_tmp_dest_name, context, False)
                        try:
                            tmp_stat = os.stat(b_tmp_dest_name)
                            if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                                os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
                        except OSError:
                            e = get_exception()
                            if e.errno != errno.EPERM:
                                raise
                        try:
                            # final atomic step: rename temp file over dest
                            os.rename(b_tmp_dest_name, b_dest)
                        except (shutil.Error, OSError, IOError):
                            e = get_exception()
                            if unsafe_writes and e.errno == errno.EBUSY:
                                self._unsafe_writes(b_tmp_dest_name, b_dest)
                            else:
                                self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
                    except (shutil.Error, OSError, IOError):
                        e = get_exception()
                        self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
                finally:
                    # always remove the temp file, even after fail_json
                    self.cleanup(b_tmp_dest_name)

    if creating:
        # make sure the file has the correct permissions
        # based on the current value of umask
        umask = os.umask(0)
        os.umask(umask)
        os.chmod(b_dest, DEFAULT_PERM & ~umask)
        try:
            os.chown(b_dest, os.geteuid(), os.getegid())
        except OSError:
            # We're okay with trying our best here. If the user is not
            # root (or old Unices) they won't be able to chown.
            pass

    if self.selinux_enabled():
        # rename might not preserve context
        self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e), exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
    def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
                    use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
        '''
        Execute a command, returns rc, stdout, and stderr.
        :arg args: is the command to run
            * If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
            * If args is a string and use_unsafe_shell=True it runs with shell=True.
        :kw check_rc: Whether to call fail_json in case of non zero RC.
            Default False
        :kw close_fds: See documentation for subprocess.Popen(). Default True
        :kw executable: See documentation for subprocess.Popen(). Default None
        :kw data: If given, information to write to the stdin of the command
        :kw binary_data: If False, append a newline to the data. Default False
        :kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
            the same directory can also be found
        :kw cwd: If given, working directory to run the command inside
        :kw use_unsafe_shell: See `args` parameter. Default False
        :kw prompt_regex: Regex string (not a compiled regex) which can be
            used to detect prompts in the stdout which would otherwise cause
            the execution to hang (especially if no input data is specified)
        :kw environ_update: dictionary to *update* os.environ with
        :kw umask: Umask to be used when running the command. Default None
        :kw encoding: Since we return native strings, on python3 we need to
            know the encoding to use to transform from bytes to text. If you
            want to always get bytes back, use encoding=None. The default is
            "utf-8". This does not affect transformation of strings given as
            args.
        :kw errors: Since we return native strings, on python3 we need to
            transform stdout and stderr from bytes to text. If the bytes are
            undecodable in the ``encoding`` specified, then use this error
            handler to deal with them. The default is ``surrogate_or_strict``
            which means that the bytes will be decoded using the
            surrogateescape error handler if available (available on all
            python3 versions we support) otherwise a UnicodeError traceback
            will be raised. This does not affect transformations of strings
            given as args.
        :returns: A 3-tuple of return code (integer), stdout (native string),
            and stderr (native string). On python2, stdout and stderr are both
            byte strings. On python3, stdout and stderr are text strings converted
            according to the encoding and errors parameters. If you want byte
            strings on python3, use encoding=None to turn decoding to text off.
        '''
        # --- Normalize `args` into either a list (shell=False) or a string (shell=True).
        if isinstance(args, list):
            if use_unsafe_shell:
                args = " ".join([shlex_quote(x) for x in args])
                shell = True
        elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell:
            shell = True
        elif isinstance(args, (binary_type, text_type)):
            if not use_unsafe_shell:
                # On python2.6 and below, shlex has problems with text type
                # On python3, shlex needs a text type.
                if PY2:
                    args = to_bytes(args, errors='surrogate_or_strict')
                elif PY3:
                    args = to_text(args, errors='surrogateescape')
                args = shlex.split(args)
        else:
            msg = "Argument 'args' to run_command must be list or string"
            self.fail_json(rc=257, cmd=args, msg=msg)
        # NOTE(review): this unconditionally clobbers any shell=True assigned in
        # the branches above; shell is re-enabled below only when use_unsafe_shell
        # is set and no $SHELL executable could be found. Historical behaviour —
        # confirm before changing.
        shell = False
        if use_unsafe_shell:
            if executable is None:
                executable = os.environ.get('SHELL')
            if executable:
                # Run the command string through the user's shell explicitly.
                args = [executable, '-c', args]
            else:
                shell = True
        # --- Prepare the prompt-detection regex (compiled against bytes output).
        prompt_re = None
        if prompt_regex:
            if isinstance(prompt_regex, text_type):
                if PY3:
                    prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
                elif PY2:
                    prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
            try:
                prompt_re = re.compile(prompt_regex, re.MULTILINE)
            except re.error:
                self.fail_json(msg="invalid prompt regular expression given to run_command")
        # expand things like $HOME and ~
        if not shell:
            args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
        rc = 0
        msg = None
        st_in = None
        # Manipulate the environ we'll send to the new process
        # (old values are remembered so they can be restored afterwards).
        old_env_vals = {}
        # We can set this from both an attribute and per call
        for key, val in self.run_command_environ_update.items():
            old_env_vals[key] = os.environ.get(key, None)
            os.environ[key] = val
        if environ_update:
            for key, val in environ_update.items():
                old_env_vals[key] = os.environ.get(key, None)
                os.environ[key] = val
        if path_prefix:
            old_env_vals['PATH'] = os.environ['PATH']
            os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
        # If using test-module and explode, the remote lib path will resemble ...
        # /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
        # If using ansible or ansible-playbook with a remote system ...
        # /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
        # Clean out python paths set by ansiballz
        if 'PYTHONPATH' in os.environ:
            pypaths = os.environ['PYTHONPATH'].split(':')
            pypaths = [x for x in pypaths
                       if not x.endswith('/ansible_modlib.zip') and
                       not x.endswith('/debug_dir')]
            os.environ['PYTHONPATH'] = ':'.join(pypaths)
            if not os.environ['PYTHONPATH']:
                del os.environ['PYTHONPATH']
        # create a printable version of the command for use
        # in reporting later, which strips out things like
        # passwords from the args list
        to_clean_args = args
        if PY2:
            if isinstance(args, text_type):
                to_clean_args = to_bytes(args)
        else:
            if isinstance(args, binary_type):
                to_clean_args = to_text(args)
        if isinstance(args, (text_type, binary_type)):
            to_clean_args = shlex.split(to_clean_args)
        clean_args = []
        is_passwd = False
        for arg in (to_native(a) for a in to_clean_args):
            if is_passwd:
                # Previous arg was a password flag: mask this value.
                is_passwd = False
                clean_args.append('********')
                continue
            if PASSWD_ARG_RE.match(arg):
                sep_idx = arg.find('=')
                if sep_idx > -1:
                    # --password=value form: keep the flag, mask the value.
                    clean_args.append('%s=********' % arg[:sep_idx])
                    continue
                else:
                    # --password value form: mask the *next* arg.
                    is_passwd = True
            arg = heuristic_log_sanitize(arg, self.no_log_values)
            clean_args.append(arg)
        clean_args = ' '.join(shlex_quote(arg) for arg in clean_args)
        if data:
            st_in = subprocess.PIPE
        kwargs = dict(
            executable=executable,
            shell=shell,
            close_fds=close_fds,
            stdin=st_in,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # store the pwd
        prev_dir = os.getcwd()
        # make sure we're in the right working directory
        if cwd and os.path.isdir(cwd):
            cwd = os.path.abspath(os.path.expanduser(cwd))
            kwargs['cwd'] = cwd
            try:
                os.chdir(cwd)
            except (OSError, IOError):
                e = get_exception()
                self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
        old_umask = None
        if umask:
            old_umask = os.umask(umask)
        try:
            if self._debug:
                self.log('Executing: ' + clean_args)
            cmd = subprocess.Popen(args, **kwargs)
            # the communication logic here is essentially taken from that
            # of the _communicate() function in ssh.py
            stdout = b('')
            stderr = b('')
            rpipes = [cmd.stdout, cmd.stderr]
            if data:
                if not binary_data:
                    # NOTE(review): assumes `data` and '\n' are the same type here;
                    # bytes data with binary_data=False would fail on py3 — confirm callers.
                    data += '\n'
                if isinstance(data, text_type):
                    data = to_bytes(data)
                cmd.stdin.write(data)
                cmd.stdin.close()
            # Pump stdout/stderr until both pipes hit EOF and the process exits.
            while True:
                rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
                stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
                stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
                # if we're checking for prompts, do it now
                if prompt_re:
                    if prompt_re.search(stdout) and not data:
                        if encoding:
                            stdout = to_native(stdout, encoding=encoding, errors=errors)
                        else:
                            stdout = stdout
                        return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
                # only break out if no pipes are left to read or
                # the pipes are completely read and
                # the process is terminated
                if (not rpipes or not rfds) and cmd.poll() is not None:
                    break
                # No pipes are left to read but process is not yet terminated
                # Only then it is safe to wait for the process to be finished
                # NOTE: Actually cmd.poll() is always None here if rpipes is empty
                elif not rpipes and cmd.poll() is None:
                    cmd.wait()
                    # The process is terminated. Since no pipes to read from are
                    # left, there is no need to call select() again.
                    break
            cmd.stdout.close()
            cmd.stderr.close()
            rc = cmd.returncode
        except (OSError, IOError):
            e = get_exception()
            self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
            self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
        except Exception:
            e = get_exception()
            self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
            self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
        # Restore env settings
        for key, val in old_env_vals.items():
            if val is None:
                del os.environ[key]
            else:
                os.environ[key] = val
        if old_umask:
            os.umask(old_umask)
        if rc != 0 and check_rc:
            msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
            self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
        # reset the pwd
        os.chdir(prev_dir)
        if encoding is not None:
            return (rc, to_native(stdout, encoding=encoding, errors=errors),
                    to_native(stderr, encoding=encoding, errors=errors))
        return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
    def bytes_to_human(self, size):
        # Thin method wrapper: name lookup inside the body resolves to the
        # module-level bytes_to_human() helper, not to this method.
        return bytes_to_human(size)
    # for backwards compatibility: old callers used module.pretty_bytes(...)
    pretty_bytes = bytes_to_human
    def human_to_bytes(self, number, isbits=False):
        # Thin method wrapper delegating to the module-level human_to_bytes() helper.
        return human_to_bytes(number, isbits)
    #
    # Backwards compat
    #
    # In 2.0, moved from inside the module to the toplevel
    # (the class attribute re-exports the module-level function).
    is_executable = is_executable
def get_module_path():
    """Return the directory containing this module, with symlinks resolved."""
    real_file = os.path.realpath(__file__)
    return os.path.dirname(real_file)
| fernandezcuesta/ansible | lib/ansible/module_utils/basic.py | Python | gpl-3.0 | 112,433 | [
"VisIt"
] | 407856092d8526798d89e8aa0ec29d7b812b290351f3234c1c61e9334fda4031 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects.
"""
from __future__ import unicode_literals, division, print_function
import abc
import collections
import json
import logging
import os
import sys
import numpy as np
import six
from collections import OrderedDict, defaultdict, namedtuple
from monty.collections import AttrDict, Namespace
from tabulate import tabulate
#from monty.dev import deprecated
from monty.functools import lazy_property
from monty.itertools import iterator_from_slice
from monty.json import MSONable, MontyDecoder
from monty.os.path import find_exts
from monty.string import list_strings, is_string
from pymatgen.core.periodic_table import Element
from pymatgen.core.xcfunc import XcFunc
from pymatgen.util.serialization import pmg_serialize
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Public API of this module.
__all__ = [
    "Pseudo",
    "PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# Tools and helper functions.
def straceback():
    """Return a string with the current traceback plus the active exception type."""
    import traceback
    exc_type = sys.exc_info()[0]
    return "\n".join((traceback.format_exc(), str(exc_type)))
def _read_nlines(filename, nlines):
    """
    Return at most ``nlines`` lines read from ``filename``.
    A negative ``nlines`` means: read the whole file.
    """
    with open(filename, 'r') as fh:
        if nlines < 0:
            return fh.readlines()
        collected = []
        for lineno, line in enumerate(fh):
            if lineno == nlines:
                break
            collected.append(line)
        return collected
# Angular momentum l -> spectroscopic letter.
_l2str = {
    0: "s",
    1: "p",
    2: "d",
    3: "f",
    4: "g",
    5: "h",
    6: "i",
}
# Inverse mapping: spectroscopic letter -> l.
_str2l = {v: k for k, v in _l2str.items()}
def l2str(l):
    """Convert the angular momentum l (int) to its spectroscopic letter.

    For an unknown l this does not raise: it returns a descriptive string
    (historical behaviour, kept for compatibility).
    """
    if l in _l2str:
        return _l2str[l]
    return "Unknown angular momentum, received l = %s" % l
def str2l(s):
    """Convert a spectroscopic letter to the angular momentum l (int)."""
    return _str2l[s]
class Pseudo(six.with_metaclass(abc.ABCMeta, MSONable, object)):
    """
    Abstract base class defining the methods that must be
    implemented by the concrete pseudopotential sub-classes.
    """
    @classmethod
    def as_pseudo(cls, obj):
        """
        Convert obj into a pseudo. Accepts:

            * Pseudo object.
            * string defining a valid path.
        """
        return obj if isinstance(obj, cls) else cls.from_file(obj)
    @staticmethod
    def from_file(filename):
        """
        Build an instance of a concrete Pseudo subclass from filename.
        Note: the parser knows the concrete class that should be instantiated
        Client code should rely on the abstract interface provided by Pseudo.
        """
        return PseudoParser().parse(filename)
    def __eq__(self, other):
        # Two pseudos are equal when file content (md5), concrete class and
        # the basic electronic parameters all match.
        if other is None: return False
        return (self.md5 == other.md5 and
                self.__class__ == other.__class__ and
                self.Z == other.Z and
                self.Z_val == other.Z_val and
                self.l_max == other.l_max )
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        try:
            return "<%s at %s>" % (self.__class__.__name__, os.path.relpath(self.filepath))
        except:
            # relpath can fail if the code is executed in daemon mode
            # (bare except kept for backward compatibility).
            return "<%s at %s>" % (self.__class__.__name__, self.filepath)
    def __str__(self):
        return self.to_string()
    def to_string(self, verbose=0):
        """String representation."""
        lines = []
        app = lines.append
        app("<%s: %s>" % (self.__class__.__name__, self.basename))
        app(" summary: " + self.summary.strip())
        app(" number of valence electrons: %s" % self.Z_val)
        app(" maximum angular momentum: %s" % l2str(self.l_max))
        app(" angular momentum for local part: %s" % l2str(self.l_local))
        app(" XC correlation: %s" % self.xc)
        app(" supports spin-orbit: %s" % self.supports_soc)
        if self.isnc:
            app(" radius for non-linear core correction: %s" % self.nlcc_radius)
        if self.has_hints:
            for accuracy in ("low", "normal", "high"):
                hint = self.hint_for_accuracy(accuracy=accuracy)
                app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
        return "\n".join(lines)
    @property
    @abc.abstractmethod
    def summary(self):
        """String summarizing the most important properties."""
    @property
    def filepath(self):
        # Absolute path of the pseudopotential file.
        return os.path.abspath(self.path)
    @property
    def basename(self):
        """File basename."""
        return os.path.basename(self.filepath)
    @property
    @abc.abstractmethod
    def Z(self):
        """The atomic number of the atom."""
    @property
    @abc.abstractmethod
    def Z_val(self):
        """Valence charge."""
    @property
    def type(self):
        # Concrete class name used as a simple type tag (see as_dict).
        return self.__class__.__name__
    @property
    def element(self):
        """Pymatgen :class:`Element`."""
        try:
            return Element.from_Z(self.Z)
        except (KeyError, IndexError):
            # Z may be non-integer (cf. _int_from_str's fractional-charge
            # handling); retry with the truncated integer value.
            return Element.from_Z(int(self.Z))
    @property
    def symbol(self):
        """Element symbol."""
        return self.element.symbol
    @property
    @abc.abstractmethod
    def l_max(self):
        """Maximum angular momentum."""
    @property
    @abc.abstractmethod
    def l_local(self):
        """Angular momentum used for the local part."""
    @property
    def isnc(self):
        """True if norm-conserving pseudopotential."""
        return isinstance(self, NcPseudo)
    @property
    def ispaw(self):
        """True if PAW pseudopotential."""
        return isinstance(self, PawPseudo)
    @lazy_property
    def md5(self):
        """MD5 hash value (computed once, then cached by lazy_property)."""
        #if self.has_dojo_report and "md5" in self.dojo_report: return self.dojo_report["md5"]
        return self.compute_md5()
    def compute_md5(self):
        """Compute and return MD5 hash value."""
        import hashlib
        with open(self.path, "rt") as fh:
            text = fh.read()
            m = hashlib.md5(text.encode("utf-8"))
            return m.hexdigest()
    @property
    @abc.abstractmethod
    def supports_soc(self):
        """
        True if the pseudo can be used in a calculation with spin-orbit coupling.
        Base classes should provide a concrete implementation that computes this value.
        """
    @pmg_serialize
    def as_dict(self, **kwargs):
        # MSONable dict representation. Note that only metadata is stored;
        # from_dict() re-parses the file found at "filepath".
        return dict(
            basename=self.basename,
            type=self.type,
            symbol=self.symbol,
            Z=self.Z,
            Z_val=self.Z_val,
            l_max=self.l_max,
            md5=self.md5,
            filepath=self.filepath,
            #xc=self.xc.as_dict(),
        )
    @classmethod
    def from_dict(cls, d):
        # Rebuild the object by re-parsing the file referenced in the dict.
        new = cls.from_file(d['filepath'])
        # Consistency test based on md5
        if "md5" in d and d["md5"] != new.md5:
            raise ValueError("The md5 found in file does not agree with the one in dict\n"
                             "Received %s\nComputed %s" % (d["md5"], new.md5))
        return new
    def as_tmpfile(self, tmpdir=None):
        """
        Copy the pseudopotential to a temporary file and return a new pseudopotential object.
        Useful for unit tests in which we have to change the content of the file.

        Args:
            tmpdir: If None, a new temporary directory is created and files are copied here
                else tmpdir is used.
        """
        import tempfile, shutil
        tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
        new_path = os.path.join(tmpdir, self.basename)
        shutil.copy(self.filepath, new_path)
        # Copy dojoreport file if present.
        root, ext = os.path.splitext(self.filepath)
        djrepo = root + ".djrepo"
        if os.path.exists(djrepo):
            shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
        # Build new object and copy dojo_report if present.
        new = self.__class__.from_file(new_path)
        if self.has_dojo_report: new.dojo_report = self.dojo_report.deepcopy()
        return new
    @property
    def has_dojo_report(self):
        """True if the pseudo has an associated `DOJO_REPORT` section."""
        return hasattr(self, "dojo_report") and bool(self.dojo_report)
    @property
    def djrepo_path(self):
        """The path of the djrepo file (existence of the file is NOT checked)."""
        root, ext = os.path.splitext(self.filepath)
        path = root + ".djrepo"
        return path
        #if os.path.exists(path): return path
        #return None
    def hint_for_accuracy(self, accuracy="normal"):
        """
        Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
        pawecutdg [Ha] for the given accuracy.
        ecut and pawecutdg are set to zero if no hint is available.

        Args:
            accuracy: ["low", "normal", "high"]
        """
        if not self.has_dojo_report:
            return Hint(ecut=0., pawecutdg=0.)
        # Get hints from dojoreport. Try first in hints then in ppgen_hints.
        if "hints" in self.dojo_report:
            return Hint.from_dict(self.dojo_report["hints"][accuracy])
        elif "ppgen_hints" in self.dojo_report:
            return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
        return Hint(ecut=0., pawecutdg=0.)
    @property
    def has_hints(self):
        """
        True if self provides hints on the cutoff energy.
        """
        # A missing accuracy entry (KeyError) or a None hint means "no hints".
        for acc in ["low", "normal", "high"]:
            try:
                if self.hint_for_accuracy(acc) is None:
                    return False
            except KeyError:
                return False
        return True
    def open_pspsfile(self, ecut=20, pawecutdg=None):
        """
        Calls Abinit to compute the internal tables for the application of the
        pseudopotential part. Returns :class:`PspsFile` object providing methods
        to plot and analyze the data or None if file is not found or it's not readable.

        Args:
            ecut: Cutoff energy in Hartree.
            pawecutdg: Cutoff energy for the PAW double grid.
        """
        from pymatgen.io.abinit.tasks import AbinitTask
        from abipy.core.structure import Structure
        from abipy.abio.factories import gs_input
        from abipy.electrons.psps import PspsFile
        # Build fake structure: one atom of this element in a 10 Bohr cubic box.
        lattice = 10 * np.eye(3)
        structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])
        if self.ispaw and pawecutdg is None: pawecutdg = ecut * 4
        inp = gs_input(structure, pseudos=[self], ecut=ecut, pawecutdg=pawecutdg,
                       spin_mode="unpolarized", kppa=1)
        # Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop.
        inp["prtpsps"] = -1
        # Build temporary task and run it (ignore retcode because we don't exit cleanly)
        task = AbinitTask.temp_shell_task(inp)
        task.start_and_wait()
        filepath = task.outdir.has_abiext("_PSPS.nc")
        if not filepath:
            logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
            return None
        # Open the PSPS.nc file.
        try:
            return PspsFile(filepath)
        except Exception as exc:
            logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
            return None
class NcPseudo(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract interface for norm-conserving pseudopotentials.
    Concrete subclasses must implement :attr:`nlcc_radius`.
    """
    @property
    @abc.abstractmethod
    def nlcc_radius(self):
        """
        Radius at which the core charge vanish (i.e. cut-off in a.u.).
        Returns 0.0 if nlcc is not used.
        """
    @property
    def has_nlcc(self):
        """True if the pseudo is generated with non-linear core correction."""
        return self.nlcc_radius > 0.0
    @property
    def rcore(self):
        """Radius of the pseudization sphere in a.u. (None when not available)."""
        return getattr(self, "_core", None)
class PawPseudo(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract interface for PAW pseudopotentials.
    Concrete subclasses must implement :attr:`paw_radius`.
    """
    @property
    @abc.abstractmethod
    def paw_radius(self):
        """Radius of the PAW sphere in a.u."""
    @property
    def rcore(self):
        """Alias of paw_radius."""
        return self.paw_radius
class AbinitPseudo(Pseudo):
    """
    A pseudopotential whose file carries the standard Abinit header.
    """
    def __init__(self, path, header):
        """
        Args:
            path: Filename.
            header: :class:`AbinitHeader` instance.
        """
        self.path = path
        self.header = header
        self._summary = header.summary
        # Build the XC functional descriptor from the ixc value in the header.
        self.xc = XcFunc.from_abinit_ixc(header["pspxc"])
        # Store every header entry as a private attribute: the public
        # interface goes through the properties defined below.
        for attr_name in header:
            setattr(self, "_" + attr_name, header.get(attr_name, None))
    @property
    def summary(self):
        """Summary line reported in the ABINIT header."""
        return self._summary.strip()
    @property
    def Z(self):
        return self._zatom
    @property
    def Z_val(self):
        return self._zion
    @property
    def l_max(self):
        return self._lmax
    @property
    def l_local(self):
        return self._lloc
    @property
    def supports_soc(self):
        # ONCVPSP pseudos (pspcod 8) declare SOC support via extension_switch.
        if self._pspcod == 8:
            switch = self.header["extension_switch"]
            if switch in (0, 1):
                return False
            if switch in (2, 3):
                return True
            raise ValueError("Don't know how to handle extension_switch: %s" % switch)
        # TODO Treat HGH HGHK pseudos
        # As far as I know, other Abinit pseudos do not support SOC.
        return False
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
    """Norm-conserving pseudopotential in the Abinit format."""
    @property
    def summary(self):
        """Summary line of the header, stripped."""
        return self._summary.strip()
    @property
    def Z(self):
        """Atomic number."""
        return self._zatom
    @property
    def Z_val(self):
        """Number of valence electrons."""
        return self._zion
    @property
    def l_max(self):
        """Maximum angular momentum."""
        return self._lmax
    @property
    def l_local(self):
        """Angular momentum used for the local part."""
        return self._lloc
    @property
    def nlcc_radius(self):
        """Core-charge cutoff radius parsed from the rchrg header entry."""
        return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
    """PAW pseudopotential stored in the Abinit format."""
    @property
    def paw_radius(self):
        """PAW sphere radius parsed from the r_cut header entry."""
        return self._r_cut
    @property
    def supports_soc(self):
        # PAW pseudos in Abinit can always be used with spin-orbit coupling.
        return True
class Hint(object):
    """
    Suggested cutoff energy ``ecut`` [Hartree] together with the cutoff
    energy for the dense grid ``pawecutdg`` (meaningful only for PAW pseudos;
    defaults to ``ecut`` when not given).
    """
    def __init__(self, ecut, pawecutdg=None):
        self.ecut = ecut
        self.pawecutdg = ecut if pawecutdg is None else pawecutdg
    def __str__(self):
        if self.pawecutdg is not None:
            return "ecut: %s, pawecutdg: %s" % (self.ecut, self.pawecutdg)
        else:
            return "ecut: %s" % (self.ecut)
    @pmg_serialize
    def as_dict(self):
        """MSONable dict representation."""
        return dict(ecut=self.ecut, pawecutdg=self.pawecutdg)
    @classmethod
    def from_dict(cls, d):
        """Rebuild from dict, ignoring the @module/@class serialization keys."""
        kwargs = {k: v for k, v in d.items() if not k.startswith("@")}
        return cls(**kwargs)
def _dict_from_lines(lines, key_nums, sep=None):
    """
    Helper function to parse formatted text structured like:

        value1 value2 ... sep key1, key2 ...

    Args:
        lines: string or list of strings.
        key_nums: list giving the number of keys for each line. 0 if line should be skipped.
            A single integer is promoted to a one-element list.
        sep: string denoting the character that separates the keys from the value
            (None if no separator is present).

    Returns:
        dict{key1 : value1, key2 : value2, ...}

    Raises:
        ValueError if parsing fails.
    """
    # Local import: collections.Iterable was removed in Python 3.10.
    from collections.abc import Iterable
    if is_string(lines):
        lines = [lines]
    if not isinstance(key_nums, Iterable):
        # Promote a bare integer: list(key_nums) would raise TypeError here.
        key_nums = [key_nums]
    if len(lines) != len(key_nums):
        err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
        raise ValueError(err_msg)
    kwargs = Namespace()
    for (i, nk) in enumerate(key_nums):
        if nk == 0: continue
        line = lines[i]
        tokens = [t.strip() for t in line.split()]
        values, keys = tokens[:nk], "".join(tokens[nk:])
        # Sanitize keys: in some cases we might get strings in the form: foo[,bar]
        # NOTE: str.replace returns a new string; the original code discarded it,
        # making the sanitization a no-op.
        keys = keys.replace("[", "").replace("]", "")
        keys = keys.split(",")
        if sep is not None:
            check = keys[0][0]
            if check != sep:
                raise ValueError("Expecting separator %s, got %s" % (sep, check))
            keys[0] = keys[0][1:]
        if len(values) != len(keys):
            msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (line, keys, values)
            raise ValueError(msg)
        kwargs.update(zip(keys, values))
    return kwargs
class AbinitHeader(dict):
    """Dictionary whose entries can also be read as attributes."""
    def __getattr__(self, name):
        # __getattr__ only runs after normal attribute lookup has failed;
        # retry the default lookup first, then fall back to the dict entries.
        try:
            return super(AbinitHeader, self).__getattribute__(name)
        except AttributeError:
            try:
                return self[name]
            except KeyError as exc:
                raise AttributeError(str(exc))
def _int_from_str(string):
    """
    Convert a string into an integer.

    Fractional values (e.g. pseudos with fractional charge) are rounded
    to the nearest integer and a warning is logged.

    Raise:
        ValueError if string is not a valid number.
    """
    float_num = float(string)
    int_num = int(float_num)
    if float_num == int_num:
        return int_num
    else:
        # Needed to handle pseudos with fractional charge.
        # int() ensures a Python int is returned: np.rint alone yields a
        # numpy float, so the original returned a float from this branch.
        int_num = int(np.rint(float_num))
        logger.warning("Converting float %s to int %s" % (float_num, int_num))
        return int_num
class NcAbinitHeader(AbinitHeader):
    """The abinit header found in the NC pseudopotential files."""
    # Descriptor: default value + callable used to coerce the parsed token.
    _attr_desc = namedtuple("att", "default astype")
    _VARS = {
        # Mandatory
        "zatom": _attr_desc(None, _int_from_str),
        "zion": _attr_desc(None, float),
        "pspdat": _attr_desc(None, float),
        "pspcod": _attr_desc(None, int),
        "pspxc": _attr_desc(None, int),
        "lmax": _attr_desc(None, int),
        "lloc": _attr_desc(None, int),
        "r2well": _attr_desc(None, float),
        "mmax": _attr_desc(None, float),
        # Optional variables for non linear-core correction. HGH does not have it.
        "rchrg": _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.)
        "fchrg": _attr_desc(0.0, float),
        "qchrg": _attr_desc(0.0, float),
    }
    del _attr_desc
    def __init__(self, summary, **kwargs):
        """Build the header from the summary line plus the parsed key/value pairs.

        Raises:
            RuntimeError if a mandatory variable is missing or a value
            cannot be converted with its declared converter.
        """
        super(NcAbinitHeader, self).__init__()
        # pseudos generated by APE use llocal instead of lloc.
        if "llocal" in kwargs:
            kwargs["lloc"] = kwargs.pop("llocal")
        self.summary = summary.strip()
        for key, desc in NcAbinitHeader._VARS.items():
            default, astype = desc.default, desc.astype
            value = kwargs.pop(key, None)
            if value is None:
                value = default
                if default is None:
                    raise RuntimeError("Attribute %s must be specified" % key)
            else:
                try:
                    value = astype(value)
                except:
                    raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
            self[key] = value
        # Add remaining arguments, e.g. extension_switch
        if kwargs:
            self.update(kwargs)
    @staticmethod
    def fhi_header(filename, ppdesc):
        """
        Parse the FHI abinit header. Example:

        Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
        21.00000 3.00000 940714 zatom, zion, pspdat
        1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
        1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
        """
        lines = _read_nlines(filename, 4)
        try:
            header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
        except ValueError:
            # The last record with rchrg ... seems to be optional.
            header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def hgh_header(filename, ppdesc):
        """
        Parse the HGH abinit header. Example:

        Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
        10 8 010605 zatom,zion,pspdat
        3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
        """
        lines = _read_nlines(filename, 3)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def gth_header(filename, ppdesc):
        """
        Parse the GTH abinit header. Example:

        Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996
        1 1 960508 zatom,zion,pspdat
        2 1 0 0 2001 0. pspcod,pspxc,lmax,lloc,mmax,r2well
        0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4
        0 0 0 rs, h1s, h2s
        0 0 rp, h1p
        1.36 .2 0.6 rcutoff, rloc
        """
        # Only the first three records are needed for the header variables.
        lines = _read_nlines(filename, 7)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def oncvpsp_header(filename, ppdesc):
        """
        Parse the ONCVPSP abinit header. Example:

        Li ONCVPSP r_core= 2.01 3.02
        3.0000 3.0000 140504 zatom,zion,pspd
        8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
        5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
        2 2 0 0 0 nproj
        0 extension_switch
        0 -2.5000025868368D+00 -1.2006906995331D+00
        1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
        2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
        """
        lines = _read_nlines(filename, 6)
        header = _dict_from_lines(lines[:3], [0, 3, 6])
        summary = lines[0]
        # Replace pspd with pspdata
        header.update({'pspdat': header['pspd']})
        header.pop('pspd')
        # Read extension switch
        header["extension_switch"] = int(lines[5].split()[0])
        return NcAbinitHeader(summary, **header)
    @staticmethod
    def tm_header(filename, ppdesc):
        """
        Parse the TM abinit header. Example:

        Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
        100.00000 14.00000 940714 zatom, zion, pspdat
        1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
        0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
        .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
        """
        lines = _read_nlines(filename, -1)
        header = []
        for lineno, line in enumerate(lines):
            header.append(line)
            if lineno == 2:
                # Read lmax.
                tokens = line.split()
                pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
                mmax, r2well = map(float, tokens[4:6])
                #if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
                # raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
                lines = lines[3:]
                break
        # TODO
        # Parse the section with the projectors.
        #0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
        #.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
        # Each l channel occupies two consecutive lines; there are lmax+1 channels.
        projectors = OrderedDict()
        for idx in range(2*(lmax+1)):
            line = lines[idx]
            if idx % 2 == 0: proj_info = [line,]
            if idx % 2 == 1:
                proj_info.append(line)
                d = _dict_from_lines(proj_info, [5,4])
                projectors[int(d["l"])] = d
        # Add the last line with info on nlcc.
        # NOTE(review): relies on `idx` leaking out of the for-loop above —
        # lines[idx+1] is the first line after the projector section.
        header.append(lines[idx+1])
        summary = header[0]
        header = _dict_from_lines(header, [0,3,6,3])
        return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
    """The abinit header found in the PAW pseudopotential files."""
    # Descriptor for a header variable: its default value and the callable
    # used to coerce the raw string into the final type.
    _attr_desc = namedtuple("att", "default astype")
    _VARS = {
        "zatom": _attr_desc(None, _int_from_str),
        "zion": _attr_desc(None, float),
        "pspdat": _attr_desc(None, float),
        "pspcod": _attr_desc(None, int),
        "pspxc": _attr_desc(None, int),
        "lmax": _attr_desc(None, int),
        "lloc": _attr_desc(None, int),
        "mmax": _attr_desc(None, int),
        "r2well": _attr_desc(None, float),
        "pspfmt": _attr_desc(None, str),
        "creatorID": _attr_desc(None, int),
        "basis_size": _attr_desc(None, int),
        "lmn_size": _attr_desc(None, int),
        "orbitals": _attr_desc(None, list),
        "number_of_meshes": _attr_desc(None, int),
        "r_cut": _attr_desc(None, float),  # r_cut(PAW) in the header
        "shape_type": _attr_desc(None, int),
        "rshape": _attr_desc(None, float),
    }
    del _attr_desc

    def __init__(self, summary, **kwargs):
        """
        Args:
            summary: First line of the pseudopotential file (free-form description).
            kwargs: Header variables. Every key listed in ``_VARS`` whose default
                is None must be provided.

        Raises:
            RuntimeError: if a mandatory variable is missing, a value cannot be
                converted to its declared type, or unknown kwargs are left over.
        """
        super(PawAbinitHeader, self).__init__()
        self.summary = summary.strip()
        for key, desc in self._VARS.items():
            default, astype = desc.default, desc.astype
            value = kwargs.pop(key, None)
            if value is None:
                value = default
                if default is None:
                    raise RuntimeError("Attribute %s must be specified" % key)
            else:
                try:
                    value = astype(value)
                # BUGFIX: was a bare `except:` that would also swallow
                # KeyboardInterrupt/SystemExit.
                except Exception:
                    raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
            self[key] = value
        # Any leftover kwargs are typos or unsupported variables: fail loudly.
        if kwargs:
            raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))

    @staticmethod
    def paw_header(filename, ppdesc):
        """
        Parse the PAW abinit header. Examples:
        Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
        28.000 18.000 20061204 : zatom,zion,pspdat
        7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw3 1305 : pspfmt,creatorID
        5 13 : basis_size,lmn_size
        0 0 1 1 2 : orbitals
        3 : number_of_meshes
        1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
        2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
        3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
        2.3000000000 : r_cut(SPH)
        2 0.
        Another format:
        C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
        6.000 4.000 20090106 : zatom,zion,pspdat
        7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw4 2230 : pspfmt,creatorID
        4 8 : basis_size,lmn_size
        0 0 1 1 : orbitals
        5 : number_of_meshes
        1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
        2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
        3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
        4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
        5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
        1.5550009124 : r_cut(PAW)
        3 0. : shape_type,rshape
        Yet another one:
        Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
        14.000 4.000 20120814 : zatom,zion,pspdat
        7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw5 1331 : pspfmt,creatorID
        4 8 : basis_size,lmn_size
        0 0 1 1 : orbitals
        5 : number_of_meshes
        1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
        2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
        3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
        4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
        5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
        1.5669671236 : r_cut(PAW)
        2 0. : shape_type,rshape
        """
        supported_formats = ["paw3", "paw4", "paw5"]
        if ppdesc.format not in supported_formats:
            raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
        lines = _read_nlines(filename, -1)
        summary = lines[0]
        # First 5 lines: fixed layout with 0, 3, 6, 2 and 2 variables per line.
        header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
        lines = lines[5:]
        # TODO
        # Parse orbitals and number of meshes.
        header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
        header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
        # Skip the mesh definition lines; only their count is recorded.
        lines = lines[2 + num_meshes:]
        header["r_cut"] = float(lines[0].split(":")[0])
        header.update(_dict_from_lines(lines[1], [2], sep=":"))
        return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
    """Root of the exception hierarchy raised by :class:`PseudoParser`."""
class PseudoParser(object):
    """
    Responsible for parsing pseudopotential files and returning pseudopotential objects.

    Usage::

        pseudo = PseudoParser().parse("filename")
    """
    Error = PseudoParserError

    # Supported values of pspcod
    ppdesc = namedtuple("ppdesc", "pspcod name psp_type format")

    # TODO Recheck
    _PSPCODES = OrderedDict({
        1: ppdesc(1, "TM", "NC", None),
        2: ppdesc(2, "GTH", "NC", None),
        3: ppdesc(3, "HGH", "NC", None),
        4: ppdesc(4, "Teter", "NC", None),
        #5: ppdesc(5, "NC", , None),
        6: ppdesc(6, "FHI", "NC", None),
        # BUGFIX: entry 7 was registered with pspcod field 6.
        7: ppdesc(7, "PAW_abinit_text", "PAW", None),
        8: ppdesc(8, "ONCVPSP", "NC", None),
        10: ppdesc(10, "HGHK", "NC", None),
    })
    del ppdesc
    # renumber functionals from oncvpsp todo confrim that 3 is 2
    #_FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
    #                2: {'n': 5, 'name': 'HL'},
    #                3: {'n': 2, 'name': 'PWCA'},
    #                4: {'n': 11, 'name': 'PBE'}}

    def __init__(self):
        # List of files that have been parsed succesfully.
        self._parsed_paths = []
        # List of files that could not been parsed.
        self._wrong_paths = []

    def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
        """
        Analyze the files contained in directory dirname.

        Args:
            dirname: directory path
            exclude_exts: list of file extensions that should be skipped.
            exclude_fnames: list of file names that should be skipped.

        Returns:
            List of pseudopotential objects.
        """
        # Normalize the extensions into a new list so that we never mutate the
        # caller's sequence (the old code assigned into exclude_exts, which
        # crashed when a tuple -- including the default -- was passed).
        norm_exts = []
        for ext in exclude_exts:
            ext = ext.strip()
            norm_exts.append(ext if ext.startswith(".") else "." + ext)
        # Exclude files depending on the extension.
        paths = []
        for fname in os.listdir(dirname):
            root, ext = os.path.splitext(fname)
            path = os.path.join(dirname, fname)
            if (ext in norm_exts or fname in exclude_fnames or
                    fname.startswith(".") or not os.path.isfile(path)):
                continue
            paths.append(path)
        pseudos = []
        for path in paths:
            # Parse the file and generate the pseudo (best-effort: a file that
            # cannot be parsed is recorded and skipped, not fatal).
            try:
                pseudo = self.parse(path)
            except Exception:
                pseudo = None
            if pseudo is not None:
                pseudos.append(pseudo)
                # BUGFIX: was list.extend(path), which appended the path one
                # character at a time.
                self._parsed_paths.append(path)
            else:
                self._wrong_paths.append(path)
        return pseudos

    def read_ppdesc(self, filename):
        """
        Read the pseudopotential descriptor from file filename.

        Returns:
            Pseudopotential descriptor. None if filename is not a valid pseudopotential file.

        Raises:
            `PseudoParserError` if fileformat is not supported.
        """
        if filename.endswith(".xml"):
            raise self.Error("XML pseudo not supported yet")
        else:
            # Assume file with the abinit header: pspcod/pspxc live on line 3.
            lines = _read_nlines(filename, 80)
            for lineno, line in enumerate(lines):
                if lineno == 2:
                    try:
                        tokens = line.split()
                        pspcod, pspxc = map(int, tokens[:2])
                    except Exception:
                        msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (filename, line)
                        logger.critical(msg)
                        return None
                    #if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well",
                    #                                              "pspcod,pspxc,lmax,llocal,mmax,r2well"]:
                    #    raise self.Error("%s: Invalid line\n %s" % (filename, line))
                    #    return None
                    if pspcod not in self._PSPCODES:
                        raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))
                    ppdesc = self._PSPCODES[pspcod]
                    if pspcod == 7:
                        # PAW -> need to know the format pspfmt (next line).
                        tokens = lines[lineno + 1].split()
                        pspfmt, creatorID = tokens[:2]
                        #if tokens[-1].strip() != "pspfmt,creatorID":
                        #    raise self.Error("%s: Invalid line\n %s" % (filename, line))
                        #    return None
                        ppdesc = ppdesc._replace(format=pspfmt)
                    return ppdesc
            return None

    def parse(self, filename):
        """
        Read and parse a pseudopotential file. Main entry point for client code.

        Returns:
            pseudopotential object or None if filename is not a valid pseudopotential file.
        """
        path = os.path.abspath(filename)
        # Only PAW supports XML at present.
        if filename.endswith(".xml"):
            return PawXmlSetup(path)
        ppdesc = self.read_ppdesc(path)
        if ppdesc is None:
            logger.critical("Cannot find ppdesc in %s" % path)
            return None
        psp_type = ppdesc.psp_type
        # Dispatch table: header parser selected by the descriptor name.
        parsers = {
            "FHI": NcAbinitHeader.fhi_header,
            "GTH": NcAbinitHeader.gth_header,
            "TM": NcAbinitHeader.tm_header,
            "Teter": NcAbinitHeader.tm_header,
            "HGH": NcAbinitHeader.hgh_header,
            "HGHK": NcAbinitHeader.hgh_header,
            "ONCVPSP": NcAbinitHeader.oncvpsp_header,
            "PAW_abinit_text": PawAbinitHeader.paw_header,
        }
        try:
            header = parsers[ppdesc.name](path, ppdesc)
        except Exception:
            raise self.Error(path + ":\n" + straceback())
        if psp_type == "NC":
            pseudo = NcAbinitPseudo(path, header)
        elif psp_type == "PAW":
            pseudo = PawAbinitPseudo(path, header)
        else:
            raise NotImplementedError("psp_type not in [NC, PAW]")
        return pseudo
#TODO use RadialFunction from pseudo_dojo.
class RadialFunction(namedtuple("RadialFunction", "mesh values")):
    """A radial function sampled on a mesh: pair of (mesh, values) arrays."""
class PawXmlSetup(Pseudo, PawPseudo):
    """PAW setup parsed from a file in the PAW-XML format."""
    def __init__(self, filepath):
        """
        Args:
            filepath: path to the PAW-XML file (absolutized and stored in self.path).
        """
        self.path = os.path.abspath(filepath)
        # Get the XML root (this trick is used so that the object is pickleable).
        root = self.root
        # Get the version of the XML format
        self.paw_setup_version = root.get("version")
        # Info on the atom.
        atom_attrib = root.find("atom").attrib
        #self._symbol = atom_attrib["symbol"]
        self._zatom = int(float(atom_attrib["Z"]))
        self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])
        # Build xc from header.
        xc_info = root.find("xc_functional").attrib
        self.xc = XcFunc.from_type_name(xc_info["type"], xc_info["name"])
        # Old XML files do not define this field!
        # In this case we set the PAW radius to None.
        #self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"])
        #self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}
        pawr_element = root.find("PAW_radius")
        self._paw_radius = None
        if pawr_element is not None:
            self._paw_radius = float(pawr_element.attrib["rpaw"])
        #<valence_states>
        #  <state n="2" l="0" f="2"  rc="1.10" e="-0.6766" id="N-2s"/>
        #  <state n="2" l="1" f="3"  rc="1.10" e="-0.2660" id="N-2p"/>
        #  <state       l="0"        rc="1.10" e=" 0.3234" id="N-s1"/>
        #  <state       l="1"        rc="1.10" e=" 0.7340" id="N-p1"/>
        #  <state       l="2"        rc="1.10" e=" 0.0000" id="N-d1"/>
        #</valence_states>
        #
        # The valence_states element contains several state elements.
        # For this setup, the first two lines describe bound eigenstates
        # with occupation numbers and principal quantum numbers.
        # Notice, that the three additional unbound states should have no f and n attributes.
        # In this way, we know that only the first two bound states (with f and n attributes)
        # should be used for constructing an initial guess for the wave functions.
        # Map state id -> AttrDict with the state attributes; ids must be unique.
        self.valence_states = {}
        for node in root.find("valence_states"):
            attrib = AttrDict(node.attrib)
            assert attrib.id not in self.valence_states
            self.valence_states[attrib.id] = attrib
        #print(self.valence_states)
        # Parse the radial grids
        self.rad_grids = {}
        for node in root.findall("radial_grid"):
            grid_params = node.attrib
            gid = grid_params["id"]
            assert gid not in self.rad_grids
            self.rad_grids[gid] = self._eval_grid(grid_params)
    def __getstate__(self):
        """
        Return state is pickled as the contents for the instance.
        In this case we just remove the XML root element process since Element object cannot be pickled.
        """
        return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}
    @property
    def root(self):
        """Lazily-parsed XML root element (cached in self._root, excluded from pickling)."""
        try:
            return self._root
        except AttributeError:
            from xml.etree import cElementTree as Et
            # NOTE(review): parses self.filepath although __init__ only sets
            # self.path -- presumably the Pseudo base class exposes filepath; confirm.
            tree = Et.parse(self.filepath)
            self._root = tree.getroot()
            return self._root
    @property
    def Z(self):
        """Atomic number (from the <atom> element)."""
        return self._zatom
    @property
    def Z_val(self):
        """Number of valence electrons."""
        return self.valence
    # FIXME
    @property
    def l_max(self):
        """Maximum angular momentum."""
        return None
    @property
    def l_local(self):
        """Angular momentum used for the local part."""
        return None
    @property
    def summary(self):
        """String summarizing the most important properties."""
        return ""
    @property
    def paw_radius(self):
        """PAW radius in Bohr, or None for old XML files that do not define it."""
        return self._paw_radius
    @property
    def supports_soc(self):
        """
        Here I assume that the ab-initio code can treat the SOC within the on-site approximation
        """
        return True
    @staticmethod
    def _eval_grid(grid_params):
        """
        This function receives a dictionary with the parameters defining the
        radial mesh and returns a `ndarray` with the mesh
        """
        # The "eq" attribute selects one of the mesh generation formulas below.
        eq = grid_params.get("eq").replace(" ", "")
        istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
        indices = list(range(istart, iend+1))
        if eq == 'r=a*exp(d*i)':
            a, d = float(grid_params['a']), float(grid_params['d'])
            mesh = [a * np.exp(d * i) for i in indices]
        elif eq == 'r=a*i/(n-i)':
            a, n = float(grid_params['a']), float(grid_params['n'])
            mesh = [a * i / (n - i) for i in indices]
        elif eq == 'r=a*(exp(d*i)-1)':
            a, d = float(grid_params['a']), float(grid_params['d'])
            mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
        elif eq == 'r=d*i':
            d = float(grid_params['d'])
            mesh = [d * i for i in indices]
        elif eq == 'r=(i/n+a)^5/a-a^4':
            a, n = float(grid_params['a']), float(grid_params['n'])
            mesh = [(i / n + a)**5 / a - a**4 for i in indices]
        else:
            raise ValueError('Unknown grid type: %s' % eq)
        return np.array(mesh)
    def _parse_radfunc(self, func_name):
        """Parse the first occurence of func_name in the XML file.

        Returns:
            (mesh ndarray, values ndarray, node attributes dict).
        """
        node = self.root.find(func_name)
        grid = node.attrib["grid"]
        values = np.array([float(s) for s in node.text.split()])
        return self.rad_grids[grid], values, node.attrib
    def _parse_all_radfuncs(self, func_name):
        """Parse all the nodes with tag func_name in the XML file.

        Yields:
            (mesh ndarray, values ndarray, node attributes dict) per node.
        """
        for node in self.root.findall(func_name):
            grid = node.attrib["grid"]
            values = np.array([float(s) for s in node.text.split()])
            yield self.rad_grids[grid], values, node.attrib
    @property
    def ae_core_density(self):
        """The all-electron radial density."""
        # Lazily parsed and cached on first access.
        try:
            return self._ae_core_density
        except AttributeError:
            mesh, values, attrib = self._parse_radfunc("ae_core_density")
            self._ae_core_density = RadialFunction(mesh, values)
            return self._ae_core_density
    @property
    def pseudo_core_density(self):
        """The pseudized radial density."""
        try:
            return self._pseudo_core_density
        except AttributeError:
            mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
            self._pseudo_core_density = RadialFunction(mesh, values)
            return self._pseudo_core_density
    @property
    def ae_partial_waves(self):
        """Dictionary with the AE partial waves indexed by state."""
        try:
            return self._ae_partial_waves
        except AttributeError:
            self._ae_partial_waves = {}
            for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
                state = attrib["state"]
                #val_state = self.valence_states[state]
                self._ae_partial_waves[state] = RadialFunction(mesh, values)
            return self._ae_partial_waves
    @property
    def pseudo_partial_waves(self):
        """Dictionary with the pseudo partial waves indexed by state."""
        try:
            return self._pseudo_partial_waves
        except AttributeError:
            self._pseudo_partial_waves = {}
            for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
                state = attrib["state"]
                #val_state = self.valence_states[state]
                self._pseudo_partial_waves[state] = RadialFunction(mesh, values)
            return self._pseudo_partial_waves
    @property
    def projector_functions(self):
        """Dictionary with the PAW projectors indexed by state."""
        try:
            return self._projector_functions
        except AttributeError:
            self._projector_functions = {}
            for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
                state = attrib["state"]
                #val_state = self.valence_states[state]
                self._projector_functions[state] = RadialFunction(mesh, values)
            return self._projector_functions
    @add_fig_kwargs
    def plot_densities(self, ax=None, **kwargs):
        """
        Plot the PAW densities.

        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns:
            `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax)
        ax.grid(True)
        ax.set_xlabel('r [Bohr]')
        #ax.set_ylabel('density')
        for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
            rden = getattr(self, den_name)
            # NOTE(review): labels look swapped here: i == 1 (pseudo density)
            # gets "$n_c$" while the AE density gets the tilde label -- confirm.
            label = "$n_c$" if i == 1 else "$\\tilde{n}_c$"
            ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)
        ax.legend(loc="best")
        return fig
    @add_fig_kwargs
    def plot_waves(self, ax=None, **kwargs):
        """
        Plot the AE and the pseudo partial waves.

        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns:
            `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax)
        ax.grid(True)
        ax.set_xlabel("r [Bohr]")
        ax.set_ylabel("$r\\phi,\\, r\\tilde\\phi\\, [Bohr]^{-\\frac{1}{2}}$")
        # Mark the PAW augmentation radius.
        ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
        #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
        for state, rfunc in self.pseudo_partial_waves.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)
        for state, rfunc in self.ae_partial_waves.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)
        ax.legend(loc="best")
        return fig
    @add_fig_kwargs
    def plot_projectors(self, ax=None, **kwargs):
        """
        Plot the PAW projectors.

        Args:
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns:
            `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax)
        title = kwargs.pop("title", "Projectors")
        ax.grid(True)
        ax.set_xlabel('r [Bohr]')
        ax.set_ylabel("$r\\tilde p\\, [Bohr]^{-\\frac{1}{2}}$")
        ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
        #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
        for state, rfunc in self.projector_functions.items():
            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
        ax.legend(loc="best")
        return fig
    #@add_fig_kwargs
    #def plot_potentials(self, **kwargs):
    #    """
    #        ================  ==============================================================
    #        kwargs            Meaning
    #        ================  ==============================================================
    #        title             Title of the plot (Default: None).
    #        show              True to show the figure (Default).
    #        savefig           'abc.png' or 'abc.eps' to save the figure to a file.
    #        ================  ==============================================================
    #    Returns:
    #        `matplotlib` figure
    #    """
    #    title = kwargs.pop("title", "Potentials")
    #    show = kwargs.pop("show", True)
    #    savefig = kwargs.pop("savefig", None)
    #    import matplotlib.pyplot as plt
    #    fig = plt.figure()
    #    ax = fig.add_subplot(1,1,1)
    #    ax.grid(True)
    #    ax.set_xlabel('r [Bohr]')
    #    ax.set_ylabel('density')
    #    ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
    #    ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
    #    for state, rfunc in self.potentials.items():
    #        ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state)
    #    ax.legend(loc="best")
    #    if title is not None: fig.suptitle(title)
    #    if show: plt.show()
    #    if savefig: fig.savefig(savefig)
    #    return fig
class PseudoTable(six.with_metaclass(abc.ABCMeta, collections.Sequence, MSONable, object)):
    """
    Define the pseudopotentials from the element table.
    Individual elements are accessed by name, symbol or atomic number.
    For example, the following all retrieve iron:
    print elements[26]
    Fe
    print elements.Fe
    Fe
    print elements.symbol('Fe')
    Fe
    print elements.name('iron')
    Fe
    print elements.isotope('Fe')
    Fe
    """
    @classmethod
    def as_table(cls, items):
        """
        Return an instance of :class:`PseudoTable` from the iterable items.
        """
        if isinstance(items, cls): return items
        return cls(items)

    @classmethod
    def from_dir(cls, top, exts=None, exclude_dirs="_*"):
        """
        Find all pseudos in the directory tree starting from top.

        Args:
            top: Top of the directory tree
            exts: List of files extensions. if exts == "all_files"
                we try to open all files in top
            exclude_dirs: Wildcard used to exclude directories.

        return: :class:`PseudoTable` sorted by atomic number Z.
        """
        pseudos = []
        if exts == "all_files":
            # Best-effort mode: try to parse every regular file in `top`.
            for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
                if os.path.isfile(f):
                    try:
                        p = Pseudo.from_file(f)
                        if p:
                            pseudos.append(p)
                        else:
                            logger.info('Skipping file %s' % f)
                    except Exception:
                        logger.info('Skipping file %s' % f)
            if not pseudos:
                logger.warning('No pseudopotentials parsed from folder %s' % top)
                return None
            logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos))
        else:
            if exts is None: exts = ("psp8",)
            for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
                try:
                    pseudos.append(Pseudo.from_file(p))
                except Exception as exc:
                    logger.critical("Error in %s:\n%s" % (p, exc))
        return cls(pseudos).sort_by_z()

    def __init__(self, pseudos):
        """
        Args:
            pseudos: List of pseudopotentials or filepaths
        """
        # Store pseudos in a default dictionary with z as key.
        # Note that we can have more than one pseudo for given z.
        # hence the values are lists of pseudos.
        if not isinstance(pseudos, collections.Iterable):
            pseudos = [pseudos]
        if len(pseudos) and is_string(pseudos[0]):
            pseudos = list_strings(pseudos)
        self._pseudos_with_z = defaultdict(list)
        for pseudo in pseudos:
            if not isinstance(pseudo, Pseudo):
                pseudo = Pseudo.from_file(pseudo)
            if pseudo is not None:
                self._pseudos_with_z[pseudo.Z].append(pseudo)
        # Expose each element's pseudos as an attribute named after its symbol.
        for z in self.zlist:
            pseudo_list = self._pseudos_with_z[z]
            symbols = [p.symbol for p in pseudo_list]
            symbol = symbols[0]
            if any(symb != symbol for symb in symbols):
                raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
            setattr(self, symbol, pseudo_list)

    def __getitem__(self, Z):
        """
        Retrieve pseudos for the atomic number z. Accepts both int and slice objects.
        """
        if isinstance(Z, slice):
            assert Z.stop is not None
            pseudos = []
            for znum in iterator_from_slice(Z):
                pseudos.extend(self._pseudos_with_z[znum])
            return self.__class__(pseudos)
        else:
            return self.__class__(self._pseudos_with_z[Z])

    def __len__(self):
        return len(list(self.__iter__()))

    def __iter__(self):
        """Process the elements in Z order."""
        for z in self.zlist:
            for pseudo in self._pseudos_with_z[z]:
                yield pseudo

    def __repr__(self):
        return "<%s at %s>" % (self.__class__.__name__, id(self))

    def __str__(self):
        return self.to_table()

    @property
    def allnc(self):
        """True if all pseudos are norm-conserving."""
        return all(p.isnc for p in self)

    @property
    def allpaw(self):
        """True if all pseudos are PAW."""
        return all(p.ispaw for p in self)

    @property
    def zlist(self):
        """Ordered list with the atomic numbers available in the table."""
        return sorted(list(self._pseudos_with_z.keys()))

    #def max_ecut_pawecutdg(self, accuracy):
    #    """Return the maximum value of ecut and pawecutdg based on the hints available in the pseudos."""
    #    ecut = max(p.hint_for_accuracy(accuracy=accuracy).ecut for p in self)
    #    pawecutdg = max(p.hint_for_accuracy(accuracy=accuracy).pawecutdg for p in self)
    #    return ecut, pawecutdg

    def as_dict(self, **kwargs):
        """MSONable dict representation; duplicate element names get a "#<n>" suffix."""
        d = {}
        for p in self:
            k, count = p.element.name, 1
            # Handle multiple-pseudos with the same name!
            while k in d:
                # BUGFIX: the old code used `k += ...`, which concatenated onto
                # the previous candidate and produced ever-growing keys.
                k = k.split("#")[0] + "#" + str(count)
                count += 1
            d.update({k: p.as_dict()})
        d['@module'] = self.__class__.__module__
        d['@class'] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        """Rebuild the table from its as_dict() representation."""
        pseudos = []
        dec = MontyDecoder()
        for k, v in d.items():
            if not k.startswith('@'):
                pseudos.append(dec.process_decoded(v))
        return cls(pseudos)

    def is_complete(self, zmax=118):
        """
        True if table is complete i.e. all elements with Z < zmax have at least on pseudopotential
        """
        # Note: tests Z in [1, zmax) -- zmax itself is excluded, per the docstring.
        for z in range(1, zmax):
            if not self[z]: return False
        return True

    def all_combinations_for_elements(self, element_symbols):
        """
        Return a list with all the the possible combination of pseudos
        for the given list of element_symbols.
        Each item is a list of pseudopotential objects.

        Example::

            table.all_combinations_for_elements(["Li", "F"])
        """
        d = OrderedDict()
        for symbol in element_symbols:
            d[symbol] = self.select_symbols(symbol, ret_list=True)
        from itertools import product
        return list(product(*d.values()))

    def pseudo_with_symbol(self, symbol, allow_multi=False):
        """
        Return the pseudo with the given chemical symbol.

        Args:
            symbols: String with the chemical symbol of the element
            allow_multi: By default, the method raises ValueError
                if multiple occurrences are found. Use allow_multi to prevent this.

        Raises:
            ValueError if symbol is not found or multiple occurences are present and not allow_multi
        """
        pseudos = self.select_symbols(symbol, ret_list=True)
        if not pseudos or (len(pseudos) > 1 and not allow_multi):
            raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol))
        if not allow_multi:
            return pseudos[0]
        else:
            return pseudos

    def pseudos_with_symbols(self, symbols):
        """
        Return the pseudos with the given chemical symbols.

        Raises:
            ValueError if one of the symbols is not found or multiple occurences are present.
        """
        pseudos = self.select_symbols(symbols, ret_list=True)
        found_symbols = [p.symbol for p in pseudos]
        duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1]
        if duplicated_elements:
            raise ValueError("Found multiple occurrences of symbol(s) %s" % ', '.join(duplicated_elements))
        missing_symbols = [s for s in symbols if s not in found_symbols]
        if missing_symbols:
            raise ValueError("Missing data for symbol(s) %s" % ', '.join(missing_symbols))
        return pseudos

    def select_symbols(self, symbols, ret_list=False):
        """
        Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.

        Args:
            symbols: str or list of symbols
                Prepend the symbol string with "-", to exclude pseudos.
            ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
        """
        symbols = list_strings(symbols)
        # "-" prefix flips the selection from include to exclude mode.
        exclude = symbols[0].startswith("-")
        if exclude:
            if not all(s.startswith("-") for s in symbols):
                raise ValueError("When excluding symbols, all strings must start with `-`")
            symbols = [s[1:] for s in symbols]
        symbols = set(symbols)
        pseudos = []
        for p in self:
            if exclude:
                if p.symbol in symbols: continue
            else:
                if p.symbol not in symbols: continue
            pseudos.append(p)
        if ret_list:
            return pseudos
        else:
            return self.__class__(pseudos)

    def get_pseudos_for_structure(self, structure):
        """
        Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`.

        Args:
            structure: pymatgen :class:`Structure`.

        Raises:
            `ValueError` if one of the chemical symbols is not found or
            multiple occurences are present in the table.
        """
        return self.pseudos_with_symbols(structure.symbol_set)

    def print_table(self, stream=sys.stdout, filter_function=None):
        """
        A pretty ASCII printer for the periodic table, based on some filter_function.

        Args:
            stream: file-like object
            filter_function:
                A filtering function that take a Pseudo as input and returns a boolean.
                For example, setting filter_function = lambda p: p.Z_val > 2 will print
                a periodic table containing only pseudos with Z_val > 2.
        """
        print(self.to_table(filter_function=filter_function), file=stream)

    def to_table(self, filter_function=None):
        """Return string with data in tabular form."""
        table = []
        for p in self:
            # NOTE(review): a True predicate EXCLUDES the pseudo here, while the
            # print_table docstring implies inclusion -- confirm intended semantics.
            if filter_function is not None and filter_function(p): continue
            table.append([p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type])
        return tabulate(table, headers=["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"],
                        tablefmt="grid")

    def sorted(self, attrname, reverse=False):
        """
        Sort the table according to the value of attribute attrname.

        Return:
            New class:`PseudoTable` object
        """
        # BUGFIX: the old code iterated `for i, pseudo in self` (unpacking error)
        # and then indexed `self[i]`, which selects by Z, not by position.
        pseudos = list(self)
        attrs = []
        for i, pseudo in enumerate(pseudos):
            try:
                a = getattr(pseudo, attrname)
            except AttributeError:
                # Pseudos without the attribute sort last (ascending order).
                a = np.inf
            attrs.append((i, a))
        # Sort attrs, and build new table with sorted pseudos.
        return self.__class__([pseudos[a[0]] for a in sorted(attrs, key=lambda t: t[1], reverse=reverse)])

    def sort_by_z(self):
        """Return a new :class:`PseudoTable` with pseudos sorted by Z"""
        return self.__class__(sorted(self, key=lambda p: p.Z))

    def select(self, condition):
        """
        Select only those pseudopotentials for which condition is True.
        Return new class:`PseudoTable` object.

        Args:
            condition:
                Function that accepts a :class:`Pseudo` object and returns True or False.
        """
        return self.__class__([p for p in self if condition(p)])

    def with_dojo_report(self):
        """Select pseudos containing the DOJO_REPORT section. Return new class:`PseudoTable` object."""
        return self.select(condition=lambda p: p.has_dojo_report)

    def select_rows(self, rows):
        """
        Return new class:`PseudoTable` object with pseudos in the given rows of the periodic table.
        rows can be either a int or a list of integers.
        """
        if not isinstance(rows, (list, tuple)): rows = [rows]
        return self.__class__([p for p in self if p.element.row in rows])

    def select_family(self, family):
        """Return a new table with the pseudos whose element belongs to the given family (e.g "alkaline")."""
        # e.g element.is_alkaline
        return self.__class__([p for p in self if getattr(p.element, "is_" + family)])
| czhengsci/pymatgen | pymatgen/io/abinit/pseudos.py | Python | mit | 63,668 | [
"ABINIT",
"pymatgen"
] | 92536dcea71c7410bfe3917a77806a4dadffeae3fa57753e46af338f7b3b4526 |
#!/usr/bin/env python
# Copyright (C) 2007 University of Texas at Austin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, string, os, signal, types
def handler(signum, frame):
    'signal handler for abortion [Ctrl-C]'
    sys.stderr.write('\n[Ctrl-C] Aborting...\n')
    if child:
        # BUGFIX: os.kill takes (pid, sig); the arguments were reversed,
        # which would have raised OSError instead of killing the child.
        os.kill(child, signal.SIGINT)
    sys.exit(-1)
signal.signal(signal.SIGINT,handler) # handle interrupt
# pid of the child process spawned by syswait(); None/0 when no child runs.
child = None
def syswait(comm):
    'Interruptable system command'
    # Fork so that the SIGINT handler can forward the interrupt to the
    # running command (its pid is published via the global `child`).
    global child
    child = os.fork()
    if child:
        # Parent: wait for the child to finish, then clear the pid.
        (pid,exit) = os.waitpid(child,0)
        child = 0
        return exit
    else:
        # Child: run the command through the shell, then exit immediately
        # (os._exit skips Python cleanup so the parent's state is untouched).
        os.system(comm)
        os._exit(0)
def tour(dirs=[],comm='',verbose=1):
    'Visit every directory in dirs running a command comm'
    # comm may be a single command string (run in every directory) or a list
    # of commands consumed one per directory. Python 2 code (string.join,
    # types.ListType); keep it that way for consistency with the rest of file.
    if not verbose: # no output to stdout
        sys.stdout = open("/dev/null","w")
    sys.stderr.write('Executing "%s"...\n' % comm)
    sys.stderr.write(string.join(dirs,'::') + '\n')
    cwd = os.getcwd()
    for subdir in dirs:
        if type(comm) is types.ListType:
            mycomm = comm.pop(0)
        else:
            mycomm = comm
        # Always restart from the original cwd so that subdir paths resolve.
        os.chdir (cwd)
        try:
            os.chdir (subdir)
        except:
            sys.stderr.write('\n%s: wrong directory %s...\n' % (mycomm,subdir))
            sys.exit(1)
        os.environ['PWD'] = os.path.join(cwd,subdir)
        sys.stderr.write(string.join(['+' * 44,subdir,'\n'],' '))
        if mycomm:
            # The first '%' in the command is replaced by the directory name.
            mycomm = mycomm.replace('%',subdir,1)
            syswait(mycomm)
        sys.stderr.write(string.join(['-' * 44,subdir,'\n'],' '))
    sys.stderr.write('Done.\n')
    os.chdir (cwd)
if __name__ == "__main__":
    import glob
    # own user interface instead of that provided by RSF's Python API
    # because this script has users that do not have RSF
    if len(sys.argv) < 2:
        print '''
Usage: %s [-q] command
visits lower-case subdirectories and executes command
-q quiet (suppress stdout)
The '%%' character is replaced with the current directory
''' % sys.argv[0]
        sys.exit(0)
    ########
    # NOTE(review): this pop returns argv[0] (the script name), so the "-q"
    # comparison below can never match when invoked normally -- confirm
    # against RSF upstream whether an extra pop is missing here.
    comm = sys.argv.pop(0)
    if comm == "-q":
        verbose = 0
        comm = sys.argv.pop(0)
    else:
        verbose = 1
    # Join the remaining arguments into a single shell command line.
    comm = string.join(sys.argv,' ')
    # Visit lower-case directories only, skipping generated *_html trees.
    dirs = filter(lambda x: x[-5:] != '_html',
                  filter(os.path.isdir,glob.glob('[a-z]*')))
    tour(dirs,comm,verbose)
    sys.exit(0)
| zxtstarry/src | framework/rsf/sftour.py | Python | gpl-2.0 | 3,094 | [
"VisIt"
] | 866c42b4b20e3106cdc7a497d77a1261ac56cf0e5585435794feb9dbc76463a0 |
#!/usr/bin/env python
"""Planck-CLASH cluster mass calibration with NumCosmo.

Builds an XCDM cosmology, a Tinker (critical, Delta=500) halo mass
function and the Planck-CLASH cluster pseudo-counts likelihood, then
samples the posterior with NumCosmo's ensemble-sampler MCMC (ESMCMC),
writing the chains to 'test.fits'.
"""
#from math import *
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
from gi.repository import GObject
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
# The NumCosmo library must be initialized before any other call.
Ncm.cfg_init ()
NT  = 3          # Number of threads
NClusters = 21   # Number of clusters
NWalkers  = 100  # Number of walkers / chains
# Cosmological model: XCDM, DE eqos - w = constant
cosmo = Nc.HICosmo.new_from_name (Nc.HICosmo, "NcHICosmoDEXcdm")
dist = Nc.Distance.new (4.0)
# Primordial power spectrum - power law
prim = Nc.HIPrimPowerLaw.new ()
reion = Nc.HIReionCamb.new ()
# Transfer function
tf = Nc.TransferFunc.new_from_name ("NcTransferFuncEH")
# Linear matter power spectrum
ps_ml = Nc.PowspecMLTransfer.new (tf)
psf = Ncm.PowspecFilter.new (ps_ml, Ncm.PowspecFilterType.TOPHAT)
# Tinker multiplicity function, critical overdensity Delta = 500.
mulf = Nc.MultiplicityFunc.new_from_name ("NcMultiplicityFuncTinkerCrit{'Delta':<500.0>}")
mf = Nc.HaloMassFunction.new (dist, psf, mulf)
cad = Nc.ClusterAbundance.new (mf, None)
clusterz = Nc.ClusterRedshift.new_from_name ("NcClusterRedshiftNodist{'z-min':<0.0>, 'z-max':<2.0>}")
# Planck - CLASH cluster mass distribution, pivot mass M0
clusterm = Nc.ClusterMass.new_from_name ("NcClusterMassPlCL{'M0':<5.7e14>}")
cpc = Nc.ClusterPseudoCounts.new (NClusters)
cosmo.add_submodel (reion)
cosmo.add_submodel (prim)
# Cosmological parameters
cosmo.props.H0      = 70.0
cosmo.props.Omegab  = 0.049
cosmo.props.Omegac  = 0.251
cosmo.props.Omegax  = 0.7
cosmo.props.Tgamma0 = 2.72
prim.props.n_SA = 0.967
prim.props.ln10e10ASA = 3.064 #cosmo.props.sigma8 = 0.816
cosmo.props.w = -1.0
# Switch to curvature parametrization and fix a flat universe.
cosmo.omega_x2omega_k ()
cosmo.param_set_by_name ('Omegak', 0.0)
cad.prepare (cosmo, clusterz, clusterm)
# Model set holding all models whose parameters can be fitted.
mset = Ncm.MSet.empty_new ()
mset.set (cosmo)
mset.set (clusterm)
mset.set (clusterz)
mset.set (cpc)
# Free all seven mass-observable parameters of the Planck-CLASH model.
clusterm.param_set_ftype (0, Ncm.ParamType.FREE)
clusterm.param_set_ftype (1, Ncm.ParamType.FREE)
clusterm.param_set_ftype (2, Ncm.ParamType.FREE)
clusterm.param_set_ftype (3, Ncm.ParamType.FREE)
clusterm.param_set_ftype (4, Ncm.ParamType.FREE)
clusterm.param_set_ftype (5, Ncm.ParamType.FREE)
clusterm.param_set_ftype (6, Ncm.ParamType.FREE)
# Fiducial starting values for the SZ / lensing scaling relations.
clusterm.param_set_by_name ('Asz', 1.00)
clusterm.param_set_by_name ('Bsz', 0.25)
clusterm.param_set_by_name ('sigma_sz', 0.12)
clusterm.param_set_by_name ('Al', 1.00)
clusterm.param_set_by_name ('Bl', 0.0)
clusterm.param_set_by_name ('sigma_l', 0.27)
clusterm.param_set_by_name ('cor', 0.0)
# Free the four pseudo-counts selection-function parameters.
cpc.param_set_ftype (0, Ncm.ParamType.FREE)
cpc.param_set_ftype (1, Ncm.ParamType.FREE)
cpc.param_set_ftype (2, Ncm.ParamType.FREE)
cpc.param_set_ftype (3, Ncm.ParamType.FREE)
cpc.param_set_by_name ('lnMCut', 33.0)
cpc.param_set_by_name ('sigma_Mcut', 0.10)
cpc.param_set_by_name ('zmin', 0.188)
cpc.param_set_by_name ('Deltaz', 0.70214)
# Load the serialized Planck-CLASH data object (must exist in the cwd).
plclash = Nc.DataClusterPseudoCounts.new_from_file ('nc_data_cluster_planck_clash.obj')
plclash.set_cad (cad)
dset = Ncm.Dataset.new ()
dset.append_data (plclash)
lh = Ncm.Likelihood (dataset = dset)
# Gaussian prior on the lensing bias, b_l = 0 \pm 0.08
lh.priors_add_gauss_param (clusterm.id(), 4, 0.0, 0.08)
algorithm = 'ln-neldermead'
fit = Ncm.Fit.new (Ncm.FitType.NLOPT, algorithm, lh, mset, Ncm.FitGradType.NUMDIFF_CENTRAL)
Ncm.func_eval_set_max_threads (NT)
Ncm.func_eval_log_pool_stats ()
# Gaussian transition kernel used only to draw the initial walker points.
init_sampler = Ncm.MSetTransKernGauss.new (0)
stretch = Ncm.FitESMCMCWalkerStretch.new (NWalkers, mset.fparams_len ())
esmcmc = Ncm.FitESMCMC.new (fit, NWalkers, init_sampler, stretch, Ncm.FitRunMsgs.FULL)
init_sampler.set_mset (mset)
init_sampler.set_prior_from_mset ()
esmcmc.set_nthreads (NT)
init_sampler.set_cov_from_scale ()
# Chains are written incrementally to this FITS file.
esmcmc.set_data_file ('test.fits')
esmcmc.start_run ()
esmcmc.run (1000) # Number of points to be computed in each chain
esmcmc.end_run ()
esmcmc.mean_covar ()
fit.log_covar ()
| NumCosmo/NumCosmo | scripts/mass_calibration_planck_clash.py | Python | gpl-3.0 | 3,891 | [
"Gaussian"
] | b5a9fa940ced6158c8a1d3a7f1f6150b8736c0efd329319e2f520c0b05e40149 |
"""
Contains tests to check a particular script's results or speed have
not changed.
Check README and buildbot to see how all these tests are run.
"""
import pickle, copy, __main__, timeit, os, os.path, socket, cPickle, inspect, traceback, tempfile, shutil
from numpy.testing import assert_array_equal, assert_array_almost_equal
import param
from param import resolve_path, normalize_path
import topo
from nose.tools import nottest
import cProfile
# While training data is usually checked into topo/tests and is the
# same for all machines, speed data is generated by the machine
# running this makefile. Therefore, speed data is stored in a
# machine-specific directory.
# Root of the user's Topographica output area.
TOPOGRAPHICAHOME = param.normalize_path.prefix
# Machine-independent location for generated test data.
TESTSDATADIR = os.path.join(TOPOGRAPHICAHOME,"tests")
# Speed data is machine specific, so it lives in a per-hostname directory.
MACHINETESTSDATADIR = os.path.join(TESTSDATADIR,socket.gethostname())
# Checked-in (version-controlled) reference data directories.
FIXEDDATADIR = resolve_path("topo/tests/data_traintests",path_to_file=False)
GPUDATADIR = resolve_path("topo/tests/data_gputests", path_to_file=False)
######################################################################################
### Support fns
def ensure_path_exists(path):
    """Create directory *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def _support_old_args(args):
# support old data files which contain 'default_density', etc
if 'default_density' in args:
args['cortex_density']=args['default_density']
#del args['default_density']
if 'default_retina_density' in args:
args['retina_density']=args['default_retina_density']
#del args['default_retina_density']
if 'default_lgn_density' in args:
args['lgn_density']=args['default_lgn_density']
#del args['default_lgn_density']
# (left the dels commented out for now in case scripts still use old names)
def _setargs(args):
    """Inject every (name, value) pair of *args* as a global in __main__.

    Scripts executed later with execfile(script, __main__.__dict__) pick
    these up as their global parameters (e.g. cortex_density).
    """
    for arg,val in args.items():
        print "Setting %s=%s"%(arg,val)
        __main__.__dict__[arg]=val
# For generating data in a separate process, leaving the parent with
# the original settings (i.e. avoids having to do a reset of
# simulation time, etc).
# CEBALERT: is this somehow causing func to run more slowly than
# without forking?
from multiprocessing import Process
def _run_in_forked_process(func, *args, **kwds):
p = Process(target=func(*args, **kwds))
p.start()
p.join()
def _instantiate_everything(
    # NOTE(review): the same class appears twice in this default tuple;
    # presumably a second class was intended -- confirm against history.
    classes_to_exclude=("topo.base.simulation.Simulation","topo.base.simulation.Simulation"),
    modules_to_exclude=('plotting','tests','tkgui','command','util')):
    """Instantiate every topo/param class found by walking the packages.

    Recursively collects the full dotted names of all concrete,
    non-excluded classes reachable from the topo and param packages,
    attempts to instantiate each with no arguments, logs successes and
    failures, and returns the list of instances created.
    """
    # default excludes currently set up for pickle tests
    # CEBALERT: this is basically get_PO_class_attributes from param.parameterized
    def get_classes(module,classes,processed_modules,module_excludes=()):
        # Recursively accumulate qualified class names from ``module``.
        exec "from %s import *"%module.__name__ in locals()
        dict_ = module.__dict__
        for (k,v) in dict_.items():
            # Descend into declared submodules (listed in __all__) that
            # have not already been processed.
            if '__all__' in dict_ and inspect.ismodule(v) and k not in module_excludes:
                if k in dict_['__all__'] and v not in processed_modules:
                    get_classes(v,classes,processed_modules,module_excludes)
                    processed_modules.append(v)
            else:
                # class & not parameterizedfunction & not __abstract & not excluded & starts with topo. or param.
                if isinstance(v,type) and not isinstance(v,param.ParameterizedFunction) and not (hasattr(v,"_%s__abstract"%v.__name__) and getattr(v,"_%s__abstract"%v.__name__) is True):
                    full_class_path = v.__module__+'.'+v.__name__
                    if (not full_class_path in classes) and (not full_class_path in classes_to_exclude) and (full_class_path.startswith("topo") or full_class_path.startswith("param")):
                        classes.append(full_class_path)
    classes = []
    processed_modules = []
    import topo
    get_classes(topo,classes,processed_modules,module_excludes=modules_to_exclude)
    get_classes(param,classes,processed_modules,module_excludes=modules_to_exclude)
    instances = []
    instantiated_names = []
    uninstantiated_names = []
    for class_name in classes:
        # Try a no-argument construction; classes requiring arguments
        # simply end up in the "could not instantiate" list.
        try:
            instances.append(eval(class_name+"()"))
            instantiated_names.append(class_name)
        except:
            #print "Could not instantiate %s"%class_name
            uninstantiated_names.append(class_name)
    print "\n ** Instantiated %s classes:"%len(instantiated_names)
    print "\n".join(instantiated_names)
    print "\n ** Could not instantiate %s classes:"%len(uninstantiated_names)
    print "\n".join(uninstantiated_names)
    return instances
######################################################################################
# CEBALERT: document somewhere about when to delete data files
# (i.e. when to generate new data) for train-tests and speed-tests and
# startup-speed-tests.
######################################################################################
### train-tests
# Iteration counts between successive activity recordings in train-tests.
RUN_FOR = [1,99,150]
# Sheet whose activity is recorded and compared.
LOOK_AT = "V1"
# Default densities used when generating train-test data.
TRAINTESTS_CORTEXDENSITY = 8
RETINA_DENSITY = 24
LGN_DENSITY = 24
def _generate_data(script,data_filename,look_at='V1',run_for=[1,99,150],**args):
    """Run *script* and pickle reference activity data to *data_filename*.

    The script is executed in __main__ with **args injected as globals,
    then run for each iteration count in run_for, recording a deep copy of
    the ``look_at`` sheet's activity at each stage.  The recorded
    parameters and topo version info are stored alongside the data so
    test_script() can reproduce the run.
    (run_for is a mutable default but is only read, never mutated.)
    """
    print "Generating data for %s's %s after topo.sim.run(%s)"%(script,look_at,run_for)
    _setargs(args)
    execfile(script,__main__.__dict__)
    data = {}
    for time in run_for:
        print "Running for %s iterations"%time
        topo.sim.run(time)
        print "Recording data for %s at %s"%(look_at,topo.sim.timestr())
        # Deep copy: the activity array is overwritten on later runs.
        data[topo.sim.timestr()] = copy.deepcopy(topo.sim[look_at].activity)
    data['args']=args
    data['run_for']=run_for
    data['look_at']=look_at
    data['versions'] = topo.version,topo.release
    print "Saving data to %s"%data_filename
    pickle.dump(data,open(data_filename,'wb'),2)
@nottest
def test_script(script,decimal=None):
    """
    Run script with the parameters specified when its DATA file was
    generated, and check for changes.
    Looks for the DATA file at FIXEDDATADIR/script_name.ty_DATA (for
    data checked into SVN). If not found there, looks at
    TESTSDATADIR/script_name.ty_DATA. If also not found there, first
    generates a new DATA file at TESTSDATADIR/script_name.ty_DATA
    (i.e. to generate new data, delete the existing data before running).
    The decimal parameter defines how many decimal points to use when
    testing for array equality. The default of None causes exact
    matching.
    """
    print "Comparing results for %s"%script
    script_name = os.path.basename(script)
    # CEBALERT: clean up
    ensure_path_exists(TESTSDATADIR)
    data_filename_only = script_name+"_DATA"
    data_filename = os.path.join(TESTSDATADIR,data_filename_only)
    try:
        locn = resolve_path(data_filename_only,search_paths=[GPUDATADIR, FIXEDDATADIR,TESTSDATADIR])
    except IOError:
        # No reference data anywhere: generate it now (makes this run a
        # self-comparison rather than a regression check).
        print "No existing data"
        #_run_in_forked_process(_generate_data,script,data_filename,run_for=RUN_FOR,cortex_density=TRAINTESTS_CORTEXDENSITY,lgn_density=LGN_DENSITY, retina_density=RETINA_DENSITY)
        _generate_data(script,data_filename,run_for=RUN_FOR,cortex_density=TRAINTESTS_CORTEXDENSITY,lgn_density=LGN_DENSITY,retina_density=RETINA_DENSITY)
        locn = resolve_path(data_filename)
    print "Reading data from %s"%locn
    data_file = open(locn,'rb')
    data = pickle.load(data_file)
    print "Data from release=%s, version=%s"%(data['versions'] if 'versions' in data else ("unknown","unknown"))
    # retrieve parameters used when script was run
    run_for=data['run_for']
    look_at = data['look_at']
    ####################################################
    # support very old data files that contain 'density' instead of args['cortex_density']
    if 'args' not in data:
        data['args']={'cortex_density' : data['density']}
    args = data['args']
    _support_old_args(args)
    ####################################################
    # Re-run the script with exactly the recorded parameters.
    _setargs(args)
    print "Starting '%s'"%script
    execfile(script,__main__.__dict__)
    #########################################################
    time_fmt = topo.sim.timestr
    # support old pickled data (could replace time_fmt(topo.sim.time()) with
    # just topo.sim.timestr() if we didn't need to support old data
    if topo.sim.timestr(run_for[0]) not in data:
        time_fmt = float
    #########################################################
    for time in run_for:
        print "Running for %s iterations"%time
        topo.sim.run(time)
        # Exact comparison by default; approximate when decimal is given.
        if decimal is None:
            assert_array_equal(data[time_fmt(topo.sim.time())],topo.sim[look_at].activity,
                               err_msg="\nAt topo.sim.time()=%d, with decimal=%s"%(topo.sim.time(),decimal))
        else:
            assert_array_almost_equal(data[time_fmt(topo.sim.time())],topo.sim[look_at].activity,
                                      decimal,err_msg="\nAt topo.sim.time()=%d, with decimal=%s"%(topo.sim.time(),decimal))
    result = "Results from " + script + " have not changed."
    if decimal is not None: result+= " (%d dp)" % (decimal)
    print result+"\n"
# CEBALERT: old name
#TestScript = test_script
###########################################################################
### speed-tests
# Density and iteration count used when generating speed-test data.
SPEEDTESTS_CORTEXDENSITY=48
SPEEDTESTS_ITERATIONS = 250
# CEBALERT: see ALERT about variation by time_sim_startup()
def _time_sim_run(script,iterations=10):
"""
Execute the script in __main__, then time topo.sim.run(iterations).
Uses the timeit module.
"""
print "Running '%s' for %s iterations"%(script,iterations)
execfile(script,__main__.__dict__)
topo.sim.run(1) # ensure compilations etc happen outside timing
# CB: we enable garbage collection
# (http://docs.python.org/lib/module-timeit.html)
return timeit.Timer('topo.sim.run('+`iterations`+')','gc.enable(); import topo').timeit(number=1)
def generate_speed_profile(script, outfile, iterations=100):
    """
    Executes the script in __main__, runs the simulation for 1 iteration,
    before running it for the specified number of iterations to avoid taking
    into account the startup overhead.

    Writes cProfile statistics for the timed run to *outfile*.
    """
    print "Initialising..."
    execfile(script,__main__.__dict__)
    topo.sim.run(1) # ensure compilations etc happen outside timing
    print "Running '%s' for %s iterations..." % (script, iterations)
    cProfile.run('topo.sim.run('+`iterations`+')', filename=outfile)
def _generate_speed_data(script,data_filename,iterations=100,**args):
    """Time *script* and pickle the timing plus its parameters.

    The stored dict ('args', 'iterations', 'how_long', 'versions') is
    later read back by compare_speed_data() to detect slowdowns.
    """
    print "Generating speed data for %s"%script
    _setargs(args)
    how_long = _time_sim_run(script,iterations)
    speed_data = {'args':args,
                  'iterations':iterations,
                  'how_long':how_long}
    speed_data['versions'] = topo.version,topo.release
    print "Saving data to %s"%data_filename
    pickle.dump(speed_data,open(data_filename,'wb'),2)
def compare_speed_data(script):
    """
    Run and time script with the parameters specified when its SPEEDDATA file was
    generated, and check for changes.
    Looks for the SPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_DATA. If not found there, first
    generates a new SPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_DATA (i.e. to generate new
    data, delete the existing data before running).
    """
    print "Comparing speed data for %s"%script
    script_name = os.path.basename(script)
    ensure_path_exists(MACHINETESTSDATADIR)
    data_filename = os.path.join(MACHINETESTSDATADIR,script_name+"_SPEEDDATA")
    try:
        locn = resolve_path(data_filename)
    except IOError:
        # No reference timing for this machine: create one now.
        print "No existing data"
        #_run_in_forked_process(_generate_speed_data,script,data_filename,iterations=SPEEDTESTS_ITERATIONS,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        _generate_speed_data(script,data_filename,iterations=SPEEDTESTS_ITERATIONS,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        locn = resolve_path(data_filename)
    print "Reading data from %s"%locn
    speed_data_file = open(locn,'r')
    try:
        speed_data = pickle.load(speed_data_file)
        print "Data from release=%s, version=%s"%(speed_data['versions'] if 'versions' in speed_data else ("unknown","unknown"))
    except:
        ###############################################################
        ## Support old data files (used to be string in the file rather
        ## than pickle)
        speed_data_file.seek(0)
        speed_data = speed_data_file.readline()
        iterations,old_time = speed_data.split('=')
        iterations = float(iterations); old_time=float(old_time)
        speed_data = {'iterations':iterations,
                      'how_long':old_time,
                      'args':{}}
        ###############################################################
    speed_data_file.close()
    old_time = speed_data['how_long']
    iterations = speed_data['iterations']
    args = speed_data['args']
    _support_old_args(args)
    _setargs(args)
    # Re-time the same script with the same iteration count.
    new_time = _time_sim_run(script,iterations)
    percent_change = 100.0*(new_time-old_time)/old_time
    print "["+script+"]"+ ' Before: %2.1f s Now: %2.1f s (change=%2.1f s, %2.1f percent)'\
          %(old_time,new_time,new_time-old_time,percent_change)
    # CEBALERT: whatever compensations the python timing functions are supposed to make for CPU
    # activity, do they work well enough? If the processor is being used, these times jump all
    # over the place (i.e. vary by more than 10%).
    #assert percent_change<=5, "\nTime increase was greater than 5%"
###########################################################################
###########################################################################
### startup timing
# CEBALERT: figure out what this meant: "expect variation in these
# results! see python's timeit module documentation"
def _time_sim_startup(script):
    """Return the wall-clock time (seconds) to execfile *script* in __main__."""
    print "Starting %s"%script
    return timeit.Timer("execfile('%s',__main__.__dict__)"%script,'import __main__;gc.enable()').timeit(number=1)
def _generate_startup_speed_data(script,data_filename,**args):
    """Time *script*'s startup and pickle the timing plus its parameters."""
    print "Generating startup speed data for %s"%script
    _setargs(args)
    how_long = _time_sim_startup(script)
    speed_data = {'args':args,
                  'how_long':how_long}
    speed_data['versions'] = topo.version,topo.release
    print "Saving data to %s"%data_filename
    pickle.dump(speed_data,open(data_filename,'wb'),2)
def compare_startup_speed_data(script):
    """
    Run and time script with the parameters specified when its
    STARTUPSPEEDDATA file was generated, and check for changes.
    Looks for the STARTUPSPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_STARTUPSPEEDDATA. If not found
    there, first generates a new STARTUPSPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_STARTUPSPEEDDATA (i.e. to
    generate new data, delete the existing data before running).
    """
    # Escape backslashes so the path survives embedding in the timeit
    # statement string on Windows.
    script = script.replace("\\", "\\\\")
    print "Comparing startup speed data for %s"%script
    script_name = os.path.basename(script)
    ensure_path_exists(MACHINETESTSDATADIR)
    data_filename = os.path.join(MACHINETESTSDATADIR,script_name+"_STARTUPSPEEDDATA")
    try:
        locn = resolve_path(data_filename)
    except IOError:
        print "No existing data"
        #_run_in_forked_process(_generate_startup_speed_data,script,data_filename,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        _generate_startup_speed_data(script,data_filename,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        locn = resolve_path(data_filename)
    print "Reading data from %s"%locn
    speed_data_file = open(locn,'r')
    try:
        speed_data = pickle.load(speed_data_file)
        print "Data from release=%s, version=%s"%(speed_data['versions'] if 'versions' in speed_data else ("unknown","unknown"))
    except:
        ###############################################################
        ## Support old data files (used to be string in the file rather
        ## than pickle)
        speed_data_file.seek(0)
        speed_data = speed_data_file.readline()
        density,old_time = speed_data.split('=')
        speed_data = {'cortex_density':float(density),
                      'how_long':float(old_time),
                      'args':{}}
        # NOTE(review): unlike compare_speed_data(), the legacy-arg
        # translation here runs only on this old-format path -- confirm
        # whether that asymmetry is intended.
        _support_old_args(speed_data['args'])
        ###############################################################
    _setargs(speed_data['args'])
    speed_data_file.close()
    old_time = speed_data['how_long']
    new_time = _time_sim_startup(script)
    percent_change = 100.0*(new_time-old_time)/old_time
    print "["+script+ ' startup] Before: %2.1f s Now: %2.1f s (change=%2.1f s, %2.1f percent)'\
          %(old_time,new_time,new_time-old_time,percent_change)
### end startup timing
###########################################################################
###########################################################################
### Snapshot tests
# This is clumsy. We could control topographica subprocesses, but I
# can't remember how to do it
def compare_with_and_without_snapshot_NoSnapshot(script="models/lissom.ty",look_at='V1',cortex_density=8,lgn_density=4,retina_density=4,dims=['or','od','dr','cr','dy','sf'],dataset="Gaussian",run_for=10,break_at=5):
    """Stage A of the snapshot test: run without snapshotting.

    Runs *script* straight through, recording the ``look_at`` activity
    at time break_at and at time run_for, and pickles the data plus all
    parameters for stages B and C to replay.
    """
    data_filename=os.path.split(script)[1]+"_PICKLETEST"
    # we must execute in main because e.g. scheduled events are run in __main__
    # CEBALERT: should set global params
    __main__.__dict__['cortex_density']=cortex_density
    __main__.__dict__['lgn_density']=lgn_density
    __main__.__dict__['retina_density']=retina_density
    __main__.__dict__['dims']=dims
    __main__.__dict__['dataset']=dataset
    execfile(script,__main__.__dict__)
    data = {}
    # Record activity at the point where stage B will later snapshot...
    topo.sim.run(break_at)
    data[topo.sim.time()]= copy.deepcopy(topo.sim[look_at].activity)
    # ...and at the final time stage C will reach after loading it.
    topo.sim.run(run_for-break_at)
    data[topo.sim.time()]= copy.deepcopy(topo.sim[look_at].activity)
    data['run_for']=run_for
    data['break_at']=break_at
    data['look_at']=look_at
    data['cortex_density']=cortex_density
    data['lgn_density']=lgn_density
    data['retina_density']=retina_density
    data['dims']=dims
    data['dataset']=dataset
    locn = normalize_path(os.path.join("tests",data_filename))
    print "Writing pickle to %s"%locn
    pickle.dump(data,open(locn,'wb'),2)
def compare_with_and_without_snapshot_CreateSnapshot(script="models/lissom.ty"):
    """Stage B of the snapshot test: re-run to break_at and save a snapshot.

    Replays *script* with the parameters recorded by stage A, checks the
    activity still matches at break_at, then saves a .typ_ snapshot for
    stage C to load.
    """
    data_filename=os.path.split(script)[1]+"_PICKLETEST"
    locn = resolve_path(os.path.join('tests',data_filename))
    print "Loading pickle at %s"%locn
    try:
        data = pickle.load(open(locn,"rb"))
    except IOError:
        print "\nData file '"+data_filename+"' could not be opened; run _A() first."
        raise
    # retrieve parameters used when script was run
    run_for=data['run_for']
    break_at=data['break_at']
    look_at=data['look_at']
    # CEBALERT: shouldn't need to re-list - should be able to read from data!
    cortex_density=data['cortex_density']
    lgn_density=data['lgn_density']
    retina_density=data['retina_density']
    dims=data['dims']
    dataset=data['dataset']
    __main__.__dict__['cortex_density']=cortex_density
    __main__.__dict__['lgn_density']=lgn_density
    __main__.__dict__['retina_density']=retina_density
    __main__.__dict__['dims']=dims
    __main__.__dict__['dataset']=dataset
    execfile(script,__main__.__dict__)
    # check we have the same before any pickling
    topo.sim.run(break_at)
    assert_array_equal(data[topo.sim.time()],topo.sim[look_at].activity,
                       err_msg="\nAt topo.sim.time()=%d"%topo.sim.time())
    from topo.command import save_snapshot
    locn = normalize_path(os.path.join('tests',data_filename+'.typ_'))
    print "Saving snapshot to %s"%locn
    save_snapshot(locn)
def compare_with_and_without_snapshot_LoadSnapshot(script="models/lissom.ty"):
    """Stage C of the snapshot test: load stage B's snapshot and compare.

    Loads the snapshot, checks activity matches stage A's recording at
    break_at, then runs on to run_for and checks again -- proving that a
    save/load round-trip does not change simulation results.
    """
    data_filename=os.path.split(script)[1]+"_PICKLETEST"
    snapshot_filename=os.path.split(script)[1]+"_PICKLETEST.typ_"
    locn = resolve_path(os.path.join('tests',data_filename))
    print "Loading pickle from %s"%locn
    try:
        data = pickle.load(open(locn,"rb"))
    except IOError:
        print "\nData file '"+data_filename+"' could not be opened; run _A() first"
        raise
    # retrieve parameters used when script was run
    run_for=data['run_for']
    break_at=data['break_at']
    look_at=data['look_at']
    # # CEBALERT: shouldn't need to re-list - should be able to read from data!
    # cortex_density=data['cortex_density']
    # lgn_density=data['lgn_density']
    # retina_density=data['retina_density']
    # dims=data['dims']
    # dataset=data['dataset']
    from topo.command import load_snapshot
    locn = resolve_path(os.path.join('tests',snapshot_filename))
    print "Loading snapshot at %s"%locn
    try:
        load_snapshot(locn)
    except IOError:
        print "\nPickle file '"+snapshot_filename+"' could not be opened; run _B() first."
        raise
    # The snapshot was taken at break_at, so time must resume there.
    assert topo.sim.time()==break_at
    assert_array_equal(data[topo.sim.time()],topo.sim[look_at].activity,
                       err_msg="\nAt topo.sim.time()=%d"%topo.sim.time())
    print "Match at %s after loading snapshot"%topo.sim.time()
    topo.sim.run(run_for-break_at)
    assert_array_equal(data[topo.sim.time()],topo.sim[look_at].activity,
                       err_msg="\nAt topo.sim.time()=%d"%topo.sim.time())
    print "Match at %s after running loaded snapshot"%topo.sim.time()
### end Snapshot tests
###########################################################################
###########################################################################
### pickle tests
def pickle_unpickle_everything(existing_pickles=None):
    """Pickle and unpickle an instance of every instantiable topo/param class.

    existing_pickles -- path to a previously pickled dict of
        {instance_name: pickled_bytes}; if None, fresh instances are
        created with _instantiate_everything() and pickled here.

    Returns the total number of pickling plus unpickling failures
    (0 means everything round-tripped).
    """
    pickle_errors = 0
    if existing_pickles is None:
        instances = _instantiate_everything()
        pickles = {}
        for instance in instances:
            try:
                pickles[str(instance)]=pickle.dumps(instance)
            except:
                print "Error pickling %s:"%instance
                pickle_errors+=1
                traceback.print_exc()
    else:
        pickles = pickle.load(open(existing_pickles))
    unpickle_errors = 0
    for instance_name,pickled_instance in pickles.items():
        try:
            pickle.loads(pickled_instance)
        except:
            print "Error unpickling %s"%instance_name
            unpickle_errors+=1
            traceback.print_exc()
    print
    if existing_pickles is None:
        print "Instances that failed to pickle: %s"%pickle_errors
    print "Pickled instances that failed to unpickle: %s"%unpickle_errors
    return pickle_errors+unpickle_errors
###########################################################################
###########################################################################
# basic test of run batch
@nottest
def test_runbatch():
    """Smoke-test run_batch(): run tiny.ty and check its output files exist.

    Redirects output to a temporary directory, runs one iteration with a
    snapshot, asserts the expected output files were produced, then
    cleans up and restores the original output path.
    """
    from topo.misc.genexamples import find_examples
    from topo.command import run_batch
    original_output_path = param.normalize_path.prefix
    start_output_path = tempfile.mkdtemp()
    param.normalize_path.prefix = start_output_path
    tiny = os.path.join(find_examples(),"tiny.ty")
    run_batch(tiny,cortex_density=1,retina_density=1,times=[1],snapshot=True,output_directory="testing123")
    # run_batch changes the output prefix to its own timestamped subdir.
    new_output_path = param.normalize_path.prefix
    assert new_output_path.startswith(start_output_path)
    assert "testing123" in new_output_path # not perfect test, but better than nothing.
    base = os.path.basename(new_output_path).split(",")[0]
    def exists(endpart):
        # True if the expected output file <base><endpart> was written.
        whole = os.path.join(new_output_path,base+endpart)
        print "Checking for %s"%whole
        return os.path.isfile(whole)
    assert exists(".global_params.pickle")
    assert exists(".out")
    assert exists("_000001.00_V1_Activity.png")
    assert exists("_000001.00_script_repr.ty")
    assert exists("_000001.00.typ")
    print "Deleting %s"%param.normalize_path.prefix
    shutil.rmtree(param.normalize_path.prefix)
    param.normalize_path.prefix=original_output_path
###########################################################################
###########################################################################
## CEBALERT: for C++ reference simulations - should be moved elsewhere
def run_multiple_density_comparisons(ref_script):
    """Run a C++ reference comparison script at several density pairs.

    For each (BaseRN, BaseN) pair, launches ./topographica in a
    subprocess on topo/tests/reference/<ref_script> with comparisons
    enabled, records PASS/FAIL from the exit code, captures any
    AssertionError traceback tail from stderr, prints a summary, and
    returns the number of failures.
    """
    from topo.misc.util import cross_product
    import subprocess
    import traceback
    import os
    #k = [8,10,12,13,14,34]
    #x = cross_product([k,k])
    # (BaseRN, BaseN) density pairs to test.
    x = [[ 8, 8],[ 8, 9],[ 8,10],[ 8,11],[ 8,12],[ 8,13],[ 8,14],[ 8,15],
         [24,14],[24,17],[24,20],
         [24,24],[24,48]]
    cmds = []
    for spec in x:
        c="""./topographica -c "verbose=False;BaseRN=%s;BaseN=%s;comparisons=True;stop_at_1000=False" topo/tests/reference/%s"""%(spec[0],spec[1],ref_script)
        cmds.append(c)
    results = []
    errs=[]
    for cmd in cmds:
        print
        print "************************************************************"
        print "Executing '%s'"%cmd
        # errout = os.tmpfile()#StringIO.StringIO()
        p = subprocess.Popen(cmd, shell=True,stderr=subprocess.PIPE)
        p.wait()
        r = p.returncode
        errout = p.stderr
        #r = subprocess.call(cmd,shell=True)#,stderr=subprocess.PIPE)#errout)
        #print "TB",traceback.print_exc()
        if r==0:
            result = "PASS"
        else:
            result = "FAIL"
        results.append(result)
        # Keep stderr from the first AssertionError line onwards (or the
        # whole output if none is found, since L stays 0).
        l = errout.readlines()
        i = 0
        L=0
        for line in l:
            if line.startswith("AssertionError"):
                L=i
                break
            i+=1
        errs.append(l[L::])
        errout.close()
    print "================================================================================"
    print
    print "SUMMARY"
    print
    nerr = 0
    for xi,result,err in zip(x,results,errs):
        print
        print "* %s ... BaseRN=%s,BaseN=%s"%(result,xi[0],xi[1])
        if result=="FAIL":
            e = ""
            print e.join(err)
            nerr+=1
    print "================================================================================"
    return nerr
###########################################################################
| Tasignotas/topographica_mirror | topo/tests/test_script.py | Python | bsd-3-clause | 26,287 | [
"Gaussian"
] | 9569f21be49cec7077c6fef92b34f39fdc7b85eb55a4bf6f8ffe702469505306 |
import getopt
import logging
import sys
import matplotlib.pyplot as plt
from matplotlib import cm
from Comm.EnsembleReceiver import EnsembleReceiver
logger = logging.getLogger("EnsembleReceiver")
logger.setLevel(logging.DEBUG)
FORMAT = '[%(asctime)-15s][%(levelname)s][%(funcName)s] %(message)s'
logging.basicConfig(format=FORMAT)
class LivePlot(EnsembleReceiver):
    """
    Live plot will display live data from the UDP port.
    This inherits from Ensemble Receiver to receive
    and decode the JSON data from the UDP port.

    Exactly one of the Is* flags selects the display mode; by default
    amplitude data is rendered as an interpolated heatmap.
    """
    def __init__(self, udp_port):
        """
        Call the super class to pass the UDP port.
        :param udp_port: UDP Port to read the JSON data.
        """
        #super(LivePlot, self).__init__(udp_port)
        super().__init__()
        # X position of the next sample on the scatter plots.
        self.plot_index = 0
        # Display-mode flags: beam-velocity scatter, amplitude scatter,
        # or amplitude heatmap (the default).
        self.IsBeam = False
        self.IsAmp = False
        self.IsAmpSpline = True
        # Interpolation choices for imshow; methods[1] ('none') is used.
        self.methods = [None, 'none', 'nearest', 'bilinear', 'bicubic', 'spline16',
                        'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
                        'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']
        plt.axis([0, 10, 0, 1])
        # Interactive mode so the figure updates as data arrives.
        plt.ion()
        # Blocks reading from the UDP port until the connection closes.
        self.connect(udp_port)
    def process(self, jsonData):
        """
        Process the JSON data that contains the ADCP data.
        :param jsonData: JSON ADCP data.
        :return:
        """
        logger.info(jsonData["Name"])
        if self.IsBeam:
            # Ensembles named "E000001" carry the "Velocities" array.
            if "E000001" in jsonData["Name"]:
                #logger.info(self.plot_index)
                #logger.info(jsonData["Velocities"][0][0])
                plt.scatter([self.plot_index, self.plot_index, self.plot_index, self.plot_index], jsonData["Velocities"][0])
                plt.plot(self.plot_index, jsonData["Velocities"][0][1], '--', linewidth=2)
                plt.pause(0.05)
                self.plot_index = self.plot_index + 1
        if self.IsAmp:
            # Ensembles named "E000004" carry the "Amplitude" array.
            if "E000004" in jsonData["Name"]:
                #plt.scatter([self.plot_index, self.plot_index, self.plot_index, self.plot_index], jsonData["Amplitude"][0])
                plt.scatter(self.plot_index, jsonData["Amplitude"][0][1])
                plt.pause(0.05)
                self.plot_index = self.plot_index + 1
        if self.IsAmpSpline:
            if "E000004" in jsonData["Name"]:
                # NOTE(review): a new figure is created per ensemble here;
                # confirm that is intended rather than reusing one axes.
                fig, ax = plt.subplots()
                cax = ax.imshow(jsonData["Amplitude"], interpolation=self.methods[1], cmap=cm.coolwarm, vmin=0, vmax=12)
                ax.set_title('Amplitude Data')
                # Move left and bottom spines outward by 10 points
                ax.spines['left'].set_position(('outward', 10))
                ax.spines['bottom'].set_position(('outward', 10))
                # Hide the right and top spines
                ax.spines['right'].set_visible(False)
                ax.spines['top'].set_visible(False)
                # Only show ticks on the left and bottom spines
                ax.yaxis.set_ticks_position('left')
                ax.xaxis.set_ticks_position('bottom')
                plt.colorbar(cax)
                plt.xticks(range(0, int(plt.xticks()[0][-1]) + 1, 1))
                plt.pause(0.05)
if __name__ == '__main__':
    argv = sys.argv[1:]
    port = 55057  # default UDP port for the JSON ensemble stream
    try:
        # 'h' must be in the short-option string; with the previous "p:"
        # getopt raised GetoptError for -h (exiting 2) before the loop
        # below could ever handle it.
        opts, args = getopt.getopt(argv, "hp:", ["port="])
    except getopt.GetoptError:
        print('LivePlot.py -p <port>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('LivePlot.py -p <port>')
            sys.exit()
        elif opt in ("-p", "--port"):
            port = int(arg)
    # Read from UDP port (blocks until the connection closes).
    reader = LivePlot(port)
    reader.close()
    logger.info("Socket Closed")
"Gaussian"
] | 3f92a2b4242337939068209afcdb91aeffba40e73a434077fd586fb4cea405e3 |
"""
Tests for CountModel class, which includes various linear models designed for
count data
Test data is the Columbus dataset after it has been rounded to integers to act
as count data. Results are verified using corresponding functions in R.
"""
__author__ = 'Taylor Oshan tayoshan@gmail.com'
import unittest
import numpy as np
import pysal
from pysal.contrib.spint.count_model import CountModel
from pysal.contrib.glm.family import Poisson
class TestCountModel(unittest.TestCase):
"""Tests CountModel class"""
    def setUp(self):
        """Load the Columbus dataset and build the (49,1) count response
        and (49,2) design matrix used by the tests."""
        db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
        y = np.array(db.by_col("HOVAL"))
        y = np.reshape(y, (49,1))
        # Round house values to integers so they act as count data.
        self.y = np.round(y).astype(int)
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        # Transpose so observations are rows: shape (49, 2).
        self.X = np.array(X).T
def test_PoissonGLM(self):
model = CountModel(self.y, self.X, family=Poisson())
results = model.fit('GLM')
np.testing.assert_allclose(results.params, [3.92159085, 0.01183491,
-0.01371397], atol=1.0e-8)
self.assertIsInstance(results.family, Poisson)
self.assertEqual(results.n, 49)
self.assertEqual(results.k, 3)
self.assertEqual(results.df_model, 2)
self.assertEqual(results.df_resid, 46)
np.testing.assert_allclose(results.yhat,
[ 51.26831574, 50.15022766, 40.06142973, 34.13799739,
28.76119226, 42.6836241 , 55.64593703, 34.08277997,
40.90389582, 37.19727958, 23.47459217, 26.12384057,
29.78303507, 25.96888223, 29.14073823, 26.04369592,
34.18996367, 32.28924005, 27.42284396, 72.69207879,
33.05316347, 36.52276972, 49.2551479 , 35.33439632,
24.07252457, 31.67153709, 27.81699478, 25.38021219,
24.31759259, 23.13586161, 48.40724678, 48.57969818,
31.92596006, 43.3679231 , 34.32925819, 51.78908089,
34.49778584, 27.56236198, 48.34273194, 57.50829097,
50.66038226, 54.68701352, 35.77103116, 43.21886784,
40.07615759, 49.98658004, 43.13352883, 40.28520774,
46.28910294])
np.testing.assert_allclose(results.cov_params,
[[ 1.70280610e-02, -6.18628383e-04, -2.21386966e-04],
[ -6.18628383e-04, 2.61733917e-05, 6.77496445e-06],
[ -2.21386966e-04, 6.77496445e-06, 3.75463502e-06]])
np.testing.assert_allclose(results.std_err, [ 0.13049161, 0.00511599,
0.00193769], atol=1.0e-8)
np.testing.assert_allclose(results.pvalues, [ 2.02901657e-198,
2.07052532e-002, 1.46788805e-012])
np.testing.assert_allclose(results.tvalues, [ 30.0524361 , 2.31331634,
-7.07748998])
np.testing.assert_allclose(results.resid,
[ 28.73168426, -5.15022766, -14.06142973, -1.13799739,
-5.76119226, -13.6836241 , 19.35406297, 2.91722003,
12.09610418, 58.80272042, -3.47459217, -6.12384057,
12.21696493, 17.03111777, -11.14073823, -7.04369592,
7.81003633, 27.71075995, 3.57715604, 8.30792121,
-13.05316347, -6.52276972, -1.2551479 , 17.66560368,
-6.07252457, -11.67153709, 6.18300522, -2.38021219,
7.68240741, -1.13586161, -16.40724678, -8.57969818,
-7.92596006, -15.3679231 , -7.32925819, -15.78908089,
8.50221416, -4.56236198, -8.34273194, 4.49170903,
-8.66038226, -10.68701352, -9.77103116, -9.21886784,
-12.07615759, 26.01341996, -1.13352883, -13.28520774,
-10.28910294])
self.assertAlmostEqual(results.deviance, 230.46013824817649)
self.assertAlmostEqual(results.llf, -247.42592089969378)
self.assertAlmostEqual(results.AIC, 500.85184179938756)
self.assertAlmostEqual(results.D2, 0.388656011675)
self.assertAlmostEqual(results.adj_D2, 0.36207583826952761)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| TaylorOshan/pysal | pysal/contrib/spint/tests/test_count_model.py | Python | bsd-3-clause | 4,181 | [
"COLUMBUS"
] | 5b8ce662208271b3a7b91bfc381a8e49ec8a2a421590017dbe087f4061a309ab |
#!/usr/bin/python
'''
File defining a python class for snowpit data
November 2016, Simon Filhol
'''
import numpy as np
import pandas as pd
import os
import snowpyt.CAAMLv6_xml as cxv6
from snowpyt.snowflake.sf_dict import snowflake_symbol_dict
import snowpyt.snowflake.sf_dict as sfd
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from matplotlib.ticker import MaxNLocator
# Directory containing the snowflake symbol images: the CAAMLv6_xml module's
# path with its filename stripped (the slice drops the trailing 14
# characters, i.e. 'CAAMLv6_xml.py' — assumes a source, not .pyc, path).
path2snowflake = cxv6.__file__[:-14] + '/'
class layer(object):
    """One stratigraphic layer of the snowpack.

    Every attribute starts out as ``None`` and is filled in later by the
    CAAML parser (see ``Snowpit.import_caamlv6``).
    """

    def __init__(self):
        # Layer geometry: depth of top/bottom and thickness, with units.
        self.dtop = None
        self.dtop_unit = None
        self.dbot = None
        self.thickness = None
        self.thickness_unit = None
        # Grain classification: up to three grain types plus size statistics.
        self.grain_type1 = None
        self.grain_type2 = None
        self.grain_type3 = None
        self.grainSize_unit = None
        self.grainSize_mean = None
        self.grainSize_max = None
        # Hardness observations (ram resistance, hand-hardness index/code).
        self.hardness_ram = None
        self.hardness_index = None
        self.hardness = None
        # Liquid water content and layer identifier.
        self.lwc = None
        self.id = None
    # NOTE(review): the original source kept commented-out drafts here for
    # auto-deriving dbot/thickness from the other fields, plus a disabled
    # __str__; none of it was active code, so it is not reproduced.
class temperature_profile(object):
    """Snow temperature measurements stored as parallel depth/temp lists."""

    def __init__(self):
        self.depth = []         # measurement depths
        self.depth_unit = None  # unit of the depth values
        self.temp = []          # temperature at each depth
        self.temp_unit = None   # unit of the temperature values

    def __str__(self):
        template = "-----temperature profile-----\ndepth={} {}\ntemp={} {}"
        return template.format(self.depth, self.depth_unit,
                               self.temp, self.temp_unit)
class density_profile(object):
    """Snow density measurements stored as parallel lists.

    ``depth``, ``thickness`` and ``density`` are filled by the CAAML
    parser; a unit string is kept alongside each quantity.
    """

    def __init__(self):
        self.depth = []
        self.depth_unit = None
        self.thickness = []
        self.thickness_unit = None
        self.density = []
        self.density_unit = None

    def __str__(self):
        # BUG FIX: the original format string had six placeholders but was
        # given only three arguments (depth, depth_unit, density_unit),
        # so printing a density profile raised IndexError. All six values
        # are now supplied in placeholder order.
        return "-----density profile-----\ndepth={} {}\nthickness={} {}\ndensity={} {}".format(
            self.depth, self.depth_unit,
            self.thickness, self.thickness_unit,
            self.density, self.density_unit)
class sample_profile(object):
    """Positions and measured values of samples taken from the pit wall."""

    def __init__(self):
        # Top/bottom depth of the layer each sample came from.
        self.layer_top, self.layer_bot = [], []
        self.depth_unit = None
        # Sample identifiers and their measured quantities.
        self.names = []
        self.values = []
        self.values_units = None
class metadata(object):
    """Header information recorded for a snowpit observation.

    Covers when/where the pit was dug (date, time, coordinates, elevation),
    who dug it, and the weather at the time. All fields default to ``None``
    and are populated by the CAAML parser.
    """

    def __init__(self):
        # When and by whom.
        self.date = None
        self.time = None
        self.operation = None
        self.observer = None
        # Pit geometry.
        self.profile_depth = None
        self.profile_depth_unit = None
        # Location.
        self.location_description = None
        self.srsName = None
        self.east = None
        self.east_unit = None
        self.north = None
        self.north_unit = None
        self.elevation = None
        self.elevation_unit = None
        # Weather conditions at observation time.
        self.sky_condition = None
        self.precipitation = None
        self.air_temperature = None
        self.air_temperature_unit = None
        self.windspeed = None
        self.windspeed_unit = None
        self.winddir = None
        # Free-form notes.
        self.comments = None

    def __str__(self):
        # Values in the exact order of the placeholders below (note that
        # ``time``, ``winddir`` and the east/north units are not printed).
        fields = (self.date, self.operation, self.observer,
                  self.profile_depth, self.profile_depth_unit,
                  self.location_description, self.srsName,
                  self.east, self.north,
                  self.elevation, self.elevation_unit,
                  self.sky_condition, self.precipitation,
                  self.air_temperature, self.air_temperature_unit,
                  self.windspeed, self.windspeed_unit, self.comments)
        return "-----metadata-----\ndate={}\noperation={}\nobserver={}\nprofile depth={} {}\nlocation description={}\nsrs name={}\nE={}\nN={}\nelevation={} {}\nsky condition={}\nprecipitation={}\nair temperature={} {}\nwindspeed={} {}\ncomments={}".format(*fields)
class Snowpit(object):
    """A snowpit observation: metadata plus temperature, density and sample
    profiles and stratigraphic layers, with CAAML-v6/CSV import helpers and
    a multi-panel matplotlib summary plot.
    """

    def __init__(self):
        self.snowflakeDICT = snowflake_symbol_dict
        self.caaml_file = None   # path to the CAAML v6 XML pit description
        self.sample_file = None  # path to the CSV file with sample values
        self.metadata = metadata()
        self.temperature_profile = temperature_profile()
        self.density_profile = density_profile()
        self.sample_profile = sample_profile()
        self.table = pd.DataFrame()
        self.layers = None       # list of layer objects (set by import_caamlv6)
        self.units = None
        self.layers_top = None   # per-layer arrays (set by _extract_layers)
        self.layers_bot = None

    def _extract_layers(self):
        """Flatten the list of ``layer`` objects into per-field numpy arrays.

        Populates ``layers_bot/top``, hardness, grain-size, id and the three
        grain-type arrays, all indexed by layer number.
        """
        nlayers = len(self.layers)
        self.layers_bot = np.zeros(nlayers) * np.nan
        self.layers_top = self.layers_bot * np.nan
        self.layers_hardness_ram = self.layers_bot * np.nan
        self.layers_hardness_index = self.layers_bot * np.nan
        self.layers_grainSize_mean = self.layers_top * np.nan
        self.layers_grainSize_max = self.layers_top * np.nan
        self.layers_id = self.layers_top * np.nan
        self.layers_grainType1 = np.empty(nlayers, dtype=object)
        self.layers_grainType2 = np.empty(nlayers, dtype=object)
        self.layers_grainType3 = np.empty(nlayers, dtype=object)
        # NOTE: loop variable renamed from 'layer' so it no longer shadows
        # the module-level layer class.
        for i, lay in enumerate(self.layers):
            print('layer # ' + str(i))
            print(lay.__dict__)
            self.layers_bot[i] = lay.dbot
            self.layers_top[i] = lay.dtop
            self.layers_hardness_index[i] = sfd.hardness_dict.get(lay.hardness)
            try:
                # Empirical conversion from hand-hardness index to ram
                # resistance; fails (e.g. TypeError) when hardness is missing.
                self.layers_hardness_ram[i] = 19.3 * self.layers_hardness_index[i] ** 2.4
            except Exception:
                print('WARNING: no hardness data')
            self.layers_grainSize_mean[i] = lay.grainSize_mean
            self.layers_grainSize_max[i] = lay.grainSize_max
            self.layers_id[i] = lay.id
            self.layers_grainType1[i] = lay.grain_type1
            self.layers_grainType2[i] = lay.grain_type2
            self.layers_grainType3[i] = lay.grain_type3

    def import_caamlv6(self):
        """Load metadata, profiles and layers from ``self.caaml_file``."""
        # Load metadata
        self.metadata = cxv6.get_metadata(self.caaml_file)
        # load temperature profile
        self.temperature_profile = cxv6.get_temperature(self.caaml_file)
        # load density profile
        self.density_profile = cxv6.get_density(self.caaml_file)
        # load layers
        self.layers = cxv6.get_layers(self.caaml_file)
        self._extract_layers()

    def import_sample_csv(self):
        """Load the sample table from ``self.sample_file`` into the profile."""
        # BUG FIX: read from self.sample_file (the attribute defined in
        # __init__); the original referenced self.samples_file, which is
        # never set and raised AttributeError.
        self.sample_profile.df = pd.read_csv(self.sample_file)
        self.sample_profile.layer_top = self.sample_profile.df.height_top
        self.sample_profile.layer_bot = self.sample_profile.df.height_bot
        self.sample_profile.names = self.sample_profile.df.columns[2:]

    def plot(self, save=False, fig_fname=None, metadata=False,
             invert_depth=False, figsize=(8, 4), dpi=150,
             plot_order=None):
        """Render a multi-panel summary figure of the snowpit.

        @param save: save the figure to fig_fname when True
        @param fig_fname: output filename; defaults to the CAAML file name
            with a .png extension
        @param metadata: reserve space for and print a metadata caption
        @param invert_depth: flip the depth axis
        @param figsize, dpi: forwarded to plt.figure()
        @param plot_order: list of panel names, left to right
        """
        # BUG FIX: the original signature computed
        # ``fig_fname=self.caaml_file.split('/')[-1][0:-4] + '.png'`` as a
        # default argument, where ``self`` does not exist — defining the
        # class raised NameError. The default is now resolved at call time.
        if fig_fname is None:
            fig_fname = self.caaml_file.split('/')[-1][0:-4] + '.png'
        # Avoid a mutable default argument for the panel ordering.
        if plot_order is None:
            plot_order = ['temperature', 'density', 'crystal size',
                          'stratigraphy', 'hardness',
                          'sample names', 'dD', 'd18O', 'd-ex']
        fig = plt.figure(figsize=figsize, dpi=dpi)
        if metadata:
            my_rowspan = 3
        else:
            my_rowspan = 4
        # ===========================================================
        # Automatically adjust summary plot based on data available
        ncol = len(plot_order)
        if ncol == 1:
            ax1 = plt.subplot2grid((4, ncol), (0, ncol - 1), rowspan=my_rowspan)
            self.axs_list = [ax1]
        if ncol >= 2:
            ax1 = plt.subplot2grid((4, ncol), (0, 0), rowspan=my_rowspan)
            self.axs_list = []
            self.axs_list.append(ax1)
            for n in range(1, ncol):
                ax = plt.subplot2grid((4, ncol), (0, n), rowspan=my_rowspan, sharey=ax1)
                self.axs_list.append(ax)
        print(self.axs_list)

        def to_plot(plot_order=plot_order):
            # Dispatch each panel name to its plotting function, in order.
            # BUG FIX: the key for the sample-name panel is 'sample names'
            # to match the default plot_order (was 'sample_name'), and the
            # loop iterates self.axs_list (the original referenced the
            # nonexistent self.axis_list).
            plots_dict = {'temperature': plot_temperature,
                          'density': plot_density,
                          'stratigraphy': plot_stratigraphy,
                          'hardness': plot_hardness,
                          'crystal size': plot_crystalSize,
                          'sample names': plot_sample_names,
                          'dD': plot_dD,
                          'd18O': plot_d18O,
                          'd-ex': plot_d_ex}
            for i, axs in enumerate(self.axs_list):
                plots_dict.get(plot_order[i])(axs)

        def plot_dD(ax):
            # Deuterium profile as a step plot following the sample layers.
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            im = ax.step(np.append(self.sample_profile.df.dD.values[0], self.sample_profile.df.dD.values),
                         np.append(self.sample_profile.df.height_top.values, 0), where='post')
            ax.set_title("dD ($^{o}/_{oo}$)")
            xlim = ax.get_xlim()
            # Add grid following the layering
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.set_xlim(xlim)
            #ax.grid(axis='x', linewidth=0.5, linestyle=':')
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            return im

        def plot_d18O(ax):
            # Oxygen-18 profile, shaded by ice type of each sample.
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            im = ax.step(np.append(self.sample_profile.df.d18O.values[0], self.sample_profile.df.d18O.values),
                         np.append(self.sample_profile.df.height_top.values, 0), where='post', color='#d62728')
            ax.set_title("d18O ($^{o}/_{oo}$)")
            xlim = ax.get_xlim()
            # Add shading for the ice type of sample sample
            ax.barh(
                self.sample_profile.layer_bot - (self.sample_profile.layer_bot - self.sample_profile.layer_top) / 2,
                np.repeat(xlim[1] - xlim[0], len(self.sample_profile.layer_top)), - (self.sample_profile.layer_bot - self.sample_profile.layer_top),
                np.repeat(xlim[0], len(self.sample_profile.layer_top)),
                color=cm.bone(pd.Categorical(self.sample_profile.df.ice_type).codes), alpha=0.2)
            # Add grid following the layering
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.set_xlim(xlim)
            ax.grid(axis='x', linewidth=0.5, linestyle=':')
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            return im

        def plot_d_ex(ax):
            # Deuterium-excess profile.
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            # BUG FIX: read the dxs column from self.sample_profile.df (set
            # by import_sample_csv) — the original referenced
            # self.isotope_profile, an attribute that is never defined.
            im = ax.step(np.append(self.sample_profile.df.dxs.values[0], self.sample_profile.df.dxs.values),
                         np.append(self.sample_profile.df.height_top.values, 0), where='post', color='#2ca02c')
            ax.set_title("d-excess ($^{o}/_{oo}$)")
            xlim = ax.get_xlim()
            # Add grid following the layering
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.set_xlim(xlim)
            ax.grid(axis='x', linewidth=0.5, linestyle=':')
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            return im

        def plot_density(ax):
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            im = ax.plot(self.density_profile.density, self.density_profile.depth)
            xlim = ax.get_xlim()
            # Add grid following the layering
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.set_xlim(xlim)
            ax.grid(axis='x', linewidth=0.5, linestyle=':')
            ax.set_title("Density")
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            return im

        def plot_temperature(ax):
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            im = ax.plot(self.temperature_profile.temp, self.temperature_profile.depth)
            xlim = ax.get_xlim()
            # # Add grid following the layering
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.set_xlim(xlim)
            ax.set_title("Temperature ($^\circ$C)")
            ax.grid(axis='x', linestyle=':', linewidth=0.5)
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            return im

        def plot_stratigraphy(ax):
            # One bar per layer colored by hardness, annotated with up to
            # three snowflake symbols per layer (primary/secondary/tertiary
            # grain type).
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            plt.setp(ax.get_xticklabels(), visible=False)
            im2 = ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                          np.repeat(1, len(self.layers_top)), - (self.layers_bot - self.layers_top),
                          color=cm.Blues(self.layers_hardness_index / 6), edgecolor='k', linewidth=0.5)
            ax.set_xlim(0, 1)
            # include snowflake symbols; horizontal placement depends on how
            # many grain types the layer has (centered, thirds or quarters).
            for i, flake in enumerate(self.layers_grainType1.astype(str)):
                if flake == 'nan':
                    flake = None
                if flake is not None:
                    if snowflake_symbol_dict.get(flake) is not None:
                        im = plt.imread(path2snowflake + snowflake_symbol_dict.get(flake))
                        im[im == 0] = np.nan
                        imagebox = OffsetImage(im, zoom=.01)
                        if (self.layers_grainType2[i] is None) and (self.layers_grainType3[i] is None):
                            hloc = 0.5
                        elif (self.layers_grainType2[i] is not None) and (self.layers_grainType3[i] is None):
                            hloc = 0.33
                        else:
                            hloc = 0.25
                        xy = [hloc,
                              ((self.layers_top[i] - self.layers_bot[i]) / 2 + self.layers_bot[i])]  # coordinates to position this image
                        ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
                        ax.add_artist(ab)
                    else:
                        print('WARNING: [' + flake + '] is not a compatible snowflake type. Check spelling!')
            for i, flake in enumerate(self.layers_grainType2.astype(str)):
                if flake == 'nan':
                    flake = None
                if flake is not None:
                    if snowflake_symbol_dict.get(flake) is not None:
                        im = plt.imread(path2snowflake + snowflake_symbol_dict.get(flake))
                        im[im == 0] = np.nan
                        imagebox = OffsetImage(im, zoom=.01)
                        if (self.layers_grainType2[i] is not None) and (self.layers_grainType3[i] is None):
                            hloc2 = 0.66
                        else:
                            hloc2 = 0.5
                        xy = [hloc2,
                              ((self.layers_top[i] - self.layers_bot[i]) / 2 + self.layers_bot[i])]  # coordinates to position this image
                        ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
                        ax.add_artist(ab)
                    else:
                        print('WARNING: [' + flake + '] is not a compatible snowflake type. Check spelling!')
            for i, flake in enumerate(self.layers_grainType3.astype(str)):
                if flake == 'nan':
                    flake = None
                if flake is not None:
                    if snowflake_symbol_dict.get(flake) is not None:
                        im = plt.imread(path2snowflake + snowflake_symbol_dict.get(flake))
                        im[im == 0] = np.nan
                        imagebox = OffsetImage(im, zoom=.01)
                        xy = [0.75,
                              ((self.layers_top[i] - self.layers_bot[i]) / 2 + self.layers_bot[i])]  # coordinates to position this image
                        ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
                        ax.add_artist(ab)
                    else:
                        print('WARNING: [' + flake + '] is not a compatible snowflake type. Check spelling!')
            ax.set_title("Stratigraphy")
            return im2

        def plot_hardness(ax):
            plt.setp(ax.get_yticklabels(), visible=False)
            # One bar per layer, length and color both encode hardness index.
            im = ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2, self.layers_hardness_index,
                         self.layers_bot - self.layers_top, color=cm.Blues(self.layers_hardness_index / 6), edgecolor='k',
                         linewidth=0.5)
            ax.set_xlim(0, 7)
            ax.set_title("Hardness")
            labels_ax = ['', 'Fist', '4F', '1F', 'P', 'K', 'I']
            ax.set_xticklabels(labels_ax, rotation=45)
            ax.xaxis.set_major_locator(MaxNLocator(integer=True, prune='upper'))
            return im

        def plot_crystalSize(ax):
            if ax is ax1:
                ax.set_ylabel("Depth (cm)")
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.yaxis.tick_right()
            # Bar from mean to max crystal size for each layer.
            im = ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                         self.layers_grainSize_max - self.layers_grainSize_mean, 1, self.layers_grainSize_mean)
            xlim = ax.get_xlim()
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.xaxis.set_ticks([0, 0.1, 0.2, 0.5, 1, 1.5, 2, 3, 4, 5, 10, 15, 20, 25, 30, 35, 40])
            ax.set_xlim(xlim)
            ax.set_title("Crystal size (mm)")
            ax.grid(axis='x', linewidth=0.5, linestyle=':')
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            return im

        def plot_sample_names(ax):
            # add here code for plotting column of sample names
            # NOTE(review): this reads self.sample_profile.sample_name and
            # .depth, which sample_profile does not define — presumably set
            # elsewhere before plotting; verify against callers.
            ax.set_xlim([0, 1])
            for i, name in enumerate(self.sample_profile.sample_name.astype(str)):
                if name != 'nan':
                    ax.text(0.5, self.sample_profile.depth[i], name,
                            bbox={'facecolor': 'red', 'edgecolor': 'none', 'alpha': 0.5, 'pad': 1}, fontsize=5)
            xlim = ax.get_xlim()
            ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
                    np.repeat(xlim[1] - xlim[0], len(self.layers_top)), - (self.layers_bot - self.layers_top),
                    np.repeat(xlim[0], len(self.layers_top)),
                    color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
            ax.set_xlim(xlim)
            ax.set_title("Sample Name")
            plt.setp(ax.get_xticklabels(), visible=False)

        if metadata:
            # BUG FIX: caption text built from self.metadata; the original
            # referenced an undefined name 'p'.
            metadata_text = "Date: " + self.metadata.date + '; Time [24hr]: ' + '\n' + \
                            "Observer: " + self.metadata.observer + '\n' + \
                            "Location description: " + self.metadata.location_description + '\n' + \
                            "East : " + str(self.metadata.east) + ' ' + \
                            "North: " + str(self.metadata.north) + ' ' + \
                            "Elevation: " + str(self.metadata.elevation) + ' ' + self.metadata.elevation_unit + '\n' + \
                            "Air temperature: " + str(self.metadata.air_temperature) + '$^{\circ}C$' '\n'
            plt.figtext(0.08, 0.12, metadata_text,
                        horizontalalignment='left',
                        verticalalignment='center', wrap=True, fontsize=4)

        # BUG FIX: call with the actual parameter name ('plots_order' was a
        # NameError in the original).
        to_plot(plot_order)
        if invert_depth:
            fig.gca().invert_yaxis()
        plt.tight_layout()
        plt.subplots_adjust(wspace=0)
        if save:
            fig.savefig(fig_fname)
            print('Figure saved as ' + fig_fname)

    def print_metadata(self):
        print('Not implemented [print_metadata()]')

    def print_layers(self):
        print('Not implemented [print_layers()]')
#
#
| ArcticSnow/snowpyt | build/lib/snowpyt/pit_class.py | Python | mit | 23,226 | [
"CRYSTAL"
] | b047a142ca89652d2c20973046d19a814f1012280eef87522bcad8fec9068d43 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bibauthorid HTML templates"""
# pylint: disable=W0105
# pylint: disable=C0301
# from cgi import escape
# from urllib import quote
#
import invenio.bibauthorid_config as bconfig
from invenio.config import CFG_SITE_LANG
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_BASE_URL
from invenio.config import CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL, CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL
from invenio.bibformat import format_record
from invenio.session import get_session
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import PERSONID_EXTERNAL_IDENTIFIER_MAP, CREATE_NEW_PERSON
from invenio.bibauthorid_webapi import get_person_redirect_link, get_canonical_id_from_person_id, \
get_person_names_from_id, get_person_info_by_pid
from invenio.bibauthorid_frontinterface import get_uid_of_author
from invenio.bibauthorid_frontinterface import get_bibrefrec_name_string
from invenio.bibauthorid_frontinterface import get_canonical_name_of_author
from invenio.messages import gettext_set_language, wash_language
from invenio.webuser import get_email
from invenio.htmlutils import escape_html
from jinja2 import DictLoader, Environment
# from invenio.textutils import encode_for_xml
class WebProfileMenu():
    """Builds the navigation menu shown on author (profile) pages.

    The menu is a list of dicts (page, text, static, active, ...); admins
    get an extra "Open Tickets" entry, and profile-specific entries are
    disabled when no canonical name is available.
    """

    def get_menu_items(self):
        """Return the list of menu-item dicts built for this menu."""
        return self.menu

    def _set_is_owner(self, is_owner):
        # Robustness: fall back to False for non-boolean input; the original
        # left the attribute unset, causing AttributeError later.
        self.owner = is_owner if isinstance(is_owner, bool) else False

    def _set_is_admin(self, is_admin):
        # Same robustness fallback as _set_is_owner.
        self.is_admin = is_admin if isinstance(is_admin, bool) else False

    def _set_canonical_name(self, canonical_name):
        # Same robustness fallback: empty string disables profile entries.
        self.canonical_name = canonical_name if isinstance(canonical_name, str) else ""

    def _configure_localisation(self, ln):
        # Bind the translation function for the requested language.
        self.localise = gettext_set_language(ln)

    def _set_active_menu_item(self, current_page):
        # Mark the entry matching the page currently being rendered.
        for item in self.menu:
            if item['page'] == current_page:
                item['active'] = True

    def _get_standard_menu_items(self):
        """Build the menu entries every visitor sees.

        Entries pointing at a specific profile are disabled when the
        canonical name is unknown.
        """
        personalise = "Your " if self.owner else ""
        # BUG FIX: the original used ``self.canonical_name is ""`` —
        # identity comparison against a string literal, which is
        # implementation-dependent. Use equality instead.
        no_canonical_name = self.canonical_name == ""
        menu = [
            {
                'page': "profile",
                'text': "%s" % self.localise("View %sProfile" % personalise),
                'static': False,
                'active': False,
                'canonical_name': self.canonical_name,
                'disabled': no_canonical_name
            },
            {
                'page': "manage_profile",
                'text': "%s" % self.localise("Manage %sProfile" % personalise),
                'static': False,
                'active': False,
                'canonical_name': self.canonical_name,
                'disabled': no_canonical_name
            },
            {
                'page': "claim",
                'text': "%s" % self.localise("Manage %sPublications" % personalise),
                'static': False,
                'active': False,
                'canonical_name': self.canonical_name,
                'disabled': no_canonical_name
            },
            {
                'page': "search",
                'text': "%s" % self.localise("Search Profiles"),
                'static': True,
                'active': False
            },
            {
                'page': "help",
                'text': "%s" % self.localise("Help"),
                'static': True,
                'active': False,
            }
        ]
        return menu

    def _get_admin_menu_items(self):
        # Admins see the standard menu plus the open-tickets view.
        admin_menu_items = self._get_standard_menu_items()
        open_tickets_item = {
            'page': "claim/tickets_admin",
            'text': "%s" % self.localise("Open Tickets"),
            'static': True,
            'active': False
        }
        admin_menu_items.append(open_tickets_item)
        return list(admin_menu_items)

    def _create_menu(self, current_page):
        if self.is_admin:
            self.menu = self._get_admin_menu_items()
        else:
            self.menu = self._get_standard_menu_items()
        self._set_active_menu_item(current_page)

    def __init__(self, canonical_name, current_page, ln, is_owner=False, is_admin=False):
        self._configure_localisation(ln)
        self._set_canonical_name(canonical_name)
        self._set_is_owner(is_owner)
        self._set_is_admin(is_admin)
        self._create_menu(current_page)
class WebProfilePage():
    """Renders the <head> contents and the body of an author profile page.

    The Jinja2 templates are kept in-memory in ``template_files`` and
    loaded through a ``DictLoader``; callers first attach a menu /
    debug info / bootstrapped data, then call ``get_head`` and
    ``get_body`` (or ``get_wrapped_body`` to wrap pre-rendered HTML).
    """

    def __init__(self, page, heading, no_cache=False):
        # page: current page key (used for menu highlighting and to decide
        #   whether the webauthorprofile script is needed);
        # heading: the H1 title shown on the page;
        # no_cache: emit no-cache META tags in the head when True.
        self.css_dir = CFG_BASE_URL + "/img"
        self.img_dir = CFG_BASE_URL + "/img"
        self.scripts_dir = CFG_BASE_URL + "/js"
        self.url = CFG_BASE_URL + "/author"
        # JavaScript files injected into the head, in load order.
        self.scripts = [
            "jquery-ui.min.js",
            "jquery.form.js",
            "jquery.dataTables.min.js",
            "jquery-lightbox/js/jquery.lightbox-0.5.js",
            "jquery.omniwindow.js",
            # "jquery.blockUI.js",
            "spin.min.js",
            "sly.min.js",
            "parsley.js",
            "bootstrap.min.js",
            "underscore-min.js",
            "backbone.js",
            "handlebars.js",
            "bibauthorid.js",
            "webauthorprofile.js",
        ]
        # Stylesheets injected into the head.
        self.stylesheets = [
            "jquery-ui/themes/smoothness/jquery-ui.css",
            "datatables_jquery-ui.css",
            "bibauthorid.css",
            "bootstrap.min.css"
        ]
        # In-memory Jinja2 templates, keyed by pseudo-filename. NOTE: the
        # template text below is behavior — do not reformat it.
        self.template_files = {
            'head.html': """\
{% if no_cache %}
<!-- Meta -->
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Cache-Control" CONTENT="no-cache">
<META HTTP-EQUIV="Pragma-directive" CONTENT="no-cache">
<META HTTP-EQUIV="Cache-Directive" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="0">
{% endif %}
{% if scripts %}
<!-- Scripts -->
{% for item in scripts %}<script type="text/javascript" src="{{ scripts_dir }}/{{ item }}"></script>
{% endfor %}
{% endif %}
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
tex2jax: {inlineMath: [['$','$']],
processEscapes: true},
showProcessingMessages: false,
messageStyle: "none"
});
</script>
<script src="/MathJax/MathJax.js?config=TeX-AMS_HTML" type="text/javascript">
</script>
{% if stylesheets %}
<!-- Stylesheets -->
{% for item in stylesheets %}<link rel="stylesheet" type="text/css" href="{{ css_dir }}/{{ item }}" />
{% endfor %}
{% endif %}
""",
            'profile_menu.html': """
<span class="bsw"><ul id="authorid_menu" class="nav nav-pills">
{% for item in menu %}\
<li{{ ' class="active"' if item.active }}{{ ' class="disabled"' if item.disabled }}>
<a href="{{ url }}/{{ item.page }}{% if not item.static %}/{{ item.canonical_name }}{% endif %}">{{ item.text }}</a>
</li>
{% endfor %}
</ul></span>
""",
            'index.html': """\
{% if bootstrap %}<div class="hidden" id="jsbootstrap">{{ bootstrap|e }}</div>{% endif %}
<div class="ow-overlay ow-closed"></div>
<span class="bsw">
{% if debug %}{% include 'debug_block.html' %}{% endif %}
<div id="person_name"><h1 class="authornametitle">{{ title }}</h1></div>
<div id="person_menu">\
{% if menu %}{% include 'profile_menu.html' %}{% endif %}
</div>
<div id="bai_content">
{% block content %}{% endblock %}
</div>
</span>
""",
            'generic_wrapper.html': """
{% extends "index.html" %}
{% block content %}
{{ html|safe }}
{% endblock%}
""",
            'debug_block.html': """
<div id="debug_info"><span class="bsw">
<table class="table table-striped">
<caption><h2>Debug Information</h2></caption>
<thead>
<tr>
<th>Key</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for key, value in debug.iteritems() %}
<tr>
<td>{{ key|e}}</td>
<td>{{ value|e }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</span></div>
"""
        }
        self._initialise_class_variables()
        self.no_cache = no_cache
        self.heading = heading
        self.page = page
        self.bootstrap_data = None
        self.loader = DictLoader(self.template_files)
        self.environment = Environment(loader=self.loader)

    def _initialise_class_variables(self):
        # Menu and debug payloads are attached later via the add_* methods.
        self.menu = None
        self.debug = None

    def create_profile_menu(self, canonical_name, ln, is_owner=False, is_admin=False):
        """Build a WebProfileMenu for this page and attach its items."""
        menu = WebProfileMenu(canonical_name, self.page, ln, is_owner, is_admin)
        self.menu = menu.get_menu_items()

    def add_profile_menu(self, menu):
        """Attach the items of an externally constructed menu object."""
        self.menu = menu.get_menu_items()

    def add_debug_info(self, debug):
        # debug: mapping rendered as a key/value table when present.
        self.debug = debug

    def add_bootstrapped_data(self, data):
        # data: serialized payload exposed to client-side JS via #jsbootstrap.
        self.bootstrap_data = data

    def get_head(self):
        """Render the <head> fragment (scripts, stylesheets, meta tags).

        The webauthorprofile script is only needed on the profile page,
        so it is dropped from the script list for any other page.
        """
        if self.page.lower() != 'profile' and "webauthorprofile.js" in self.scripts:
            self.scripts.remove("webauthorprofile.js")
        return self.environment.get_template("head.html").render({
            'no_cache': self.no_cache,
            'scripts': self.scripts,
            'stylesheets': self.stylesheets,
            'css_dir': self.css_dir,
            'scripts_dir': self.scripts_dir
        })

    def get_body(self):
        """Render the page body (title, menu, debug table, bootstrap data)."""
        return self.environment.get_template("index.html").render({
            'title': self.heading,
            'menu': self.menu,
            'url': self.url,
            'debug': self.debug,
            'bootstrap': self.bootstrap_data
        })

    def get_wrapped_body(self, content):
        """Render the page body with *content* (pre-rendered HTML) inlined."""
        return self.environment.get_template("generic_wrapper.html").render({
            'title': self.heading,
            'menu': self.menu,
            'url': self.url,
            'html': content,
            'debug': self.debug,
            'bootstrap': self.bootstrap_data
        })
import xml.sax.saxutils
class Template:
"""Templating functions used by aid"""
# Class level variable for profile menu bar
DEFAULT_PROFILE_MENU_ITEMS = [
("/author/profile/","View Profile",False),
("/author/manage_profile/","Manage Profile",False),
("/author/claim/","Manage Publications",False),
("/author/profile/","Help",True)
]
def __init__(self, language=CFG_SITE_LANG):
"""Set defaults for all aid template output"""
self.language = language
self._ = gettext_set_language(wash_language(language))
def tmpl_person_detail_layout(self, content):
'''
writes HTML content into the person css container
@param content: HTML content
@type content: string
@return: HTML code
@rtype: string
'''
html = []
h = html.append
h('<div id="aid_person">')
h(content)
h('</div>')
return "\n".join(html)
def tmpl_transaction_box(self, teaser_key, messages, show_close_btn=True):
'''
Creates a notification box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param messages: list of keys to a dict which return the message to display in the box
@type messages: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
transaction_teaser_dict = { 'success': 'Success!',
'failure': 'Failure!' }
transaction_message_dict = { 'confirm_success': '%s transaction%s successfully executed.',
'confirm_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.',
'confirm_operation': '%s transaction%s successfully ticketized.',
'reject_success': '%s transaction%s successfully executed.',
'reject_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.',
'reset_success': '%s transaction%s successfully executed.',
'reset_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.' }
teaser = self._(transaction_teaser_dict[teaser_key])
html = []
h = html.append
for key in transaction_message_dict.keys():
same_kind = [mes for mes in messages if mes == key]
trans_no = len(same_kind)
if trans_no == 0:
continue
elif trans_no == 1:
args = [trans_no, '']
else:
args = [trans_no, 's']
color = ''
if teaser_key == 'failure':
color = 'background: #FFC2C2;'
args.append(CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL)
message = self._(transaction_message_dict[key] % tuple(args))
h('<div id="aid_notification_' + key + '" class="ui-widget ui-alert">')
h(' <div style="%s margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">' % (color))
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s' % (teaser, message))
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify" style="border-style: none;">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
def tmpl_merge_transaction_box(self, teaser_key, messages, show_close_btn=True):
    '''
    Render a jQuery-UI styled notification box summarising the outcome
    of profile-merge transactions.

    @param teaser_key: key into the teaser dict ('success' or 'failure')
    @type teaser_key: string
    @param messages: message keys; each occurrence of a key counts as one
        transaction of that kind
    @type messages: list of strings
    @param show_close_btn: display close button [x]
    @type show_close_btn: boolean
    @return: HTML code
    @rtype: string
    '''
    teasers = { 'success': 'Success!',
                'failure': 'Failure!' }
    templates = { 'confirm_success': '%s merge transaction%s successfully executed.',
                  'confirm_failure': '%s merge transaction%s failed. This happened because there is at least one profile in the merging list that is either connected to a user or it has claimed papers.'
                                     ' Please edit the list accordingly.',
                  'confirm_operation': '%s merge transaction%s successfully ticketized.'}
    teaser = self._(teasers[teaser_key])
    # Failed transactions get a red background.
    box_color = 'background: #FFC2C2;' if teaser_key == 'failure' else ''
    out = []
    for key in templates.keys():
        count = sum(1 for mes in messages if mes == key)
        if not count:
            continue
        plural = '' if count == 1 else 's'
        message = self._(templates[key] % (count, plural))
        out.append('<div id="aid_notification_' + key + '" class="ui-widget ui-alert">')
        out.append(' <div style="%s margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">' % (box_color))
        out.append(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
        out.append(' <strong>%s</strong> %s' % (teaser, message))
        if show_close_btn:
            out.append(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify" style="border-style: none;">X</a></span></p>')
        out.append(' </div>')
        out.append('</div>')
    return "\n".join(out)
def tmpl_login_transaction_box(self, teaser_key, messages, show_close_btn=True):
    '''
    Creates a notification box (jQuery UI style) shown after a login
    transaction.

    Note: unlike the sibling ``tmpl_*_transaction_box`` methods, the box
    is emitted for every known message key regardless of the content of
    ``messages``; the parameter is kept for interface symmetry.

    @param teaser_key: key to a dict which returns the teaser
    @type teaser_key: string
    @param messages: list of message keys (not consulted, see note)
    @type messages: list of strings
    @param show_close_btn: display close button [x]
    @type show_close_btn: boolean
    @return: HTML code
    @rtype: string
    '''
    transaction_teaser_dict = { 'success': 'Success!',
                                'failure': 'Failure!' }
    transaction_message_dict = { 'confirm_success': 'You are now connected to Inspire through arXiv.'}
    teaser = self._(transaction_teaser_dict[teaser_key])
    html = []
    h = html.append
    for key in transaction_message_dict.keys():
        # Removed dead code: the original counted occurrences of `key` in
        # `messages` but never used the result.
        # Failures are highlighted with a red background.
        color = ''
        if teaser_key == 'failure':
            color = 'background: #FFC2C2;'
        message = self._(transaction_message_dict[key])
        h('<div id="aid_notification_' + key + '" class="ui-widget ui-alert">')
        h(' <div style="%s margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">' % (color))
        h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
        h(' <strong>%s</strong> %s' % (teaser, message))
        if show_close_btn:
            h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify" style="border-style: none;">X</a></span></p>')
        h(' </div>')
        h('</div>')
    return "\n".join(html)
def tmpl_notification_box(self, teaser_key, message_key, bibrefs, show_close_btn=True):
    '''
    Render a jQuery-UI styled info box listing the papers that are
    about to be attributed.

    @param teaser_key: key into the teaser dictionary
    @type teaser_key: string
    @param message_key: key into the message dictionary
    @type message_key: string
    @param bibrefs: bibrefs which are about to be assigned
    @type bibrefs: list of strings
    @param show_close_btn: display close button [x]
    @type show_close_btn: boolean
    @return: HTML code
    @rtype: string
    '''
    teasers = {'info': 'Info!' }
    bodies = {'attribute_papers': 'You are about to assign the following paper%s:' }
    plural = 's' if len(bibrefs) > 1 else ''
    teaser = self._(teasers[teaser_key])
    message = self._(bodies[message_key] % (plural))
    parts = ['<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">',
             ' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">',
             ' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>',
             ' <strong>%s</strong> %s' % (teaser, message),
             "<ul>"]
    for paper in bibrefs:
        # A bibref may look like "table:ref,recid" -- the record id is the
        # part after the comma; otherwise the whole value is the record id.
        pbibrec = paper.split(',')[1] if ',' in paper else paper
        parts.append("<li>%s</li>" % (format_record(int(pbibrec), "ha")))
    parts.append("</ul>")
    if show_close_btn:
        parts.append(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
    parts.append(' </div>')
    parts.append('</div>')
    return "\n".join(parts)
def tmpl_error_box(self, teaser_key, message_key, show_close_btn=True):
    '''
    Render a jQuery-UI styled error box.

    @param teaser_key: key selecting the teaser text
    @type teaser_key: string
    @param message_key: key selecting the error message
    @type message_key: string
    @param show_close_btn: display close button [x]
    @type show_close_btn: boolean
    @return: HTML code
    @rtype: string
    '''
    teasers = {'sorry': 'Sorry.',
               'error': 'Error:' }
    bodies = {'check_entries': 'Please check your entries.',
              'provide_transaction': 'Please provide at least one transaction.' }
    heading = self._(teasers[teaser_key])
    body = self._(bodies[message_key])
    out = ['<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">',
           ' <div style="background: #FFC2C2; margin-top: 20px; padding: 0pt 0.7em; color:#000000;" class="ui-state-error ui-corner-all">',
           ' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-alert"></span>',
           ' <strong>%s</strong> %s' % (heading, body)]
    if show_close_btn:
        out.append(' <span style="float:right; margin-right: 0.3em;"> ')
        out.append('<a rel="nofollow" href="#" style="color: #000000; border: 1px #000000 solid;" class="aid_close-notify">X</a></span>')
    out.append('</p> </div>')
    out.append('</div>')
    return "\n".join(out)
def tmpl_ticket_box(self, teaser_key, message_key, trans_no, show_close_btn=True):
    '''
    Render the semi-permanent box reporting how many claim transactions
    are currently in progress, with a link to review them.

    @param teaser_key: key selecting the teaser text
    @type teaser_key: string
    @param message_key: key selecting the message template
    @type message_key: string
    @param trans_no: number of transactions in progress
    @type trans_no: integer
    @param show_close_btn: display close button [x]
    @type show_close_btn: boolean
    @return: HTML code
    @rtype: string
    '''
    teasers = {'in_process': 'Claim in process!' }
    bodies = {'transaction': 'There %s %s transaction%s in progress.' }
    teaser = self._(teasers[teaser_key])
    # Subject/verb agreement: "is ... transaction" vs "are ... transactions".
    fillers = ('is', trans_no, '') if trans_no == 1 else ('are', trans_no, 's')
    message = self._(bodies[message_key] % fillers)
    out = ['<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">',
           ' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">',
           ' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>',
           ' <strong>%s</strong> %s ' % (teaser, message),
           '<a rel="nofollow" id="checkout" href="%s/author/claim/action?checkout=True">' % (CFG_SITE_URL,) + self._('Click here to review the transactions.') + '</a>',
           '<br>']
    if show_close_btn:
        out.append(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
    out.append(' </div>')
    out.append('</div>')
    return "\n".join(out)
def tmpl_search_ticket_box(self, teaser_key, message_key, bibrefs, show_close_btn=False):
    '''
    Render the box shown while a person search for paper assignment is
    in progress, listing the papers awaiting assignment and offering a
    way to quit the search.

    @param teaser_key: key selecting the teaser text
    @type teaser_key: string
    @param message_key: key selecting the message template
    @type message_key: string
    @param bibrefs: bibrefs which are about to be assigned
    @type bibrefs: list of strings
    @param show_close_btn: display close button [x]
    @type show_close_btn: boolean
    @return: HTML code
    @rtype: string
    '''
    teasers = {'person_search': 'Person search for assignment in progress!' }
    bodies = {'assign_papers': 'You are searching for a person to assign the following paper%s:' }
    plural = 's' if len(bibrefs) > 1 else ''
    teaser = self._(teasers[teaser_key])
    message = self._(bodies[message_key] % (plural))
    out = ['<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">',
           ' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">',
           ' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>',
           ' <strong>%s</strong> %s ' % (teaser, message),
           "<ul>"]
    for bibref in bibrefs:
        # "table:ref,recid" -- the record id is the part after the comma.
        recid = bibref.split(',')[1] if ',' in bibref else bibref
        out.append("<li>%s</li>" % (format_record(int(recid), "ha")))
    out.append("</ul>")
    out.append('<a rel="nofollow" id="checkout" href="%s/author/claim/action?cancel_search_ticket=True">' % (CFG_SITE_URL,) + self._('Quit searching.') + '</a>')
    if show_close_btn:
        out.append(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
    out.append(' </div>')
    out.append('</div>')
    out.append('<p> </p>')
    return "\n".join(out)
def tmpl_merge_ticket_box(self, teaser_key, message_key, primary_cname):
    '''
    Render the box shown while a profile-merge ticket is open: an
    explanation of what merging does, the merge list table seeded with
    the primary profile, and Cancel/Merge submit buttons.

    @param teaser_key: key into the teaser dict (e.g. 'person_search')
    @type teaser_key: string
    @param message_key: key into the message dict (e.g. 'merge_profiles')
    @type message_key: string
    @param primary_cname: canonical name of the primary profile
    @type primary_cname: string
    @return: HTML code
    @rtype: string
    '''
    message = self._('When you merge a set of profiles, all the information stored will be assigned to the primary profile. This includes papers, ids or citations.'
                     ' After merging, only the primary profile will remain in the system, all other profiles will be automatically deleted.</br>')
    # The teaser dict maps to the explanatory message built above.
    error_teaser_dict = {'person_search': message }
    error_message_dict = {'merge_profiles': 'You are about to merge the following profiles:' }
    teaser = self._(error_teaser_dict[teaser_key])
    message = self._(error_message_dict[message_key])
    html = []
    h = html.append
    h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
    h(' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
    h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
    h(' <strong>%s</strong> </br>%s ' % (teaser, message))
    # Table skeleton for the merge list; presumably the remaining rows are
    # populated client-side -- TODO confirm against bibauthorid.js.
    h("<table id=\"mergeList\" >\
            <tr></tr>\
            <th></th>\
            <th></th>\
            <th></th>\
            <th></th>\
            <tr></tr>")
    # Seed the table with the primary-profile row.
    h("<tr><td></td><td><a id=\"primaryProfile\" href='%s/author/profile/%s'target='_blank'>%s</a></td><td id=\"primaryProfileTd\">primary profile</td><td></td></tr>"
      % (CFG_SITE_URL, primary_cname, primary_cname))
    # for profile in profiles:
    # h("<li><a href='%s'target='_blank' class=\"profile\" >%s</a><a class=\"setPrimaryProfile\">Set as primary</a> <a class=\"removeProfile\">Remove</a></li>"
    # % (profile, profile))
    h("</table>")
    h('<div id="mergeListButtonWrapper">')
    # Cancel posts cancel_merging + the primary profile; Merge posts merge=True.
    h('<form action="%s/author/claim/action" method="get"><input type="hidden" name="cancel_merging" value="True" /> <input type="hidden" name="primary_profile" value="%s" /> <input type="submit" id="cancelMergeButton" class="aid_btn_red" value="%s" /></form>' %
      (CFG_SITE_URL, primary_cname, self._('Cancel merging')))
    h('<form action="%s/author/claim/action" method="get"><input type="hidden" name="merge" value="True" /><input type="submit" id="mergeButton" class="aid_btn_green" value="%s" /></form>' %
      (CFG_SITE_URL, self._('Merge profiles')))
    h(' </div>')
    h(' </div>')
    h('</div>')
    h('<p> </p>')
    return "\n".join(html)
def tmpl_meta_includes(self, kill_browser_cache=False):
    '''
    Generate HTML for the document head: optional cache-killing META
    tags, stylesheet links and javascript includes.

    @param kill_browser_cache: Do we want to kill the browser cache?
    @type kill_browser_cache: boolean
    @return: HTML code
    @rtype: string
    '''
    js_path = "%s/js" % CFG_SITE_URL
    imgcss_path = "%s/img" % CFG_SITE_URL
    result = []
    if kill_browser_cache:
        # Defeat browser caching so notifications do not survive the session.
        result = [
            '<META HTTP-EQUIV="Pragma" CONTENT="no-cache">',
            '<META HTTP-EQUIV="Cache-Control" CONTENT="no-cache">',
            '<META HTTP-EQUIV="Pragma-directive" CONTENT="no-cache">',
            '<META HTTP-EQUIV="Cache-Directive" CONTENT="no-cache">',
            '<META HTTP-EQUIV="Expires" CONTENT="0">']
    stylesheets = ["jquery-ui/themes/smoothness/jquery-ui.css",
                   "datatables_jquery-ui.css",
                   "bibauthorid.css",
                   "bootstrap.min.css"]
    for sheet in stylesheets:
        result.append('<link rel="stylesheet" type="text/css" href='
                      '"%s/%s" />' % (imgcss_path, sheet))
    scripts = ["jquery-ui.min.js",
               "jquery.form.js",
               "jquery.dataTables.min.js",
               "bibauthorid.js",
               "bootstrap.min.js"]
    for script in scripts:
        result.append('<script type="text/javascript" src="%s/%s">'
                      '</script>' % (js_path, script))
    return "\n".join(result)
def tmpl_author_confirmed(self, bibref, pid, verbiage_dict={'alt_confirm':'Confirmed.',
                          'confirm_text':'This record assignment has been confirmed.',
                          'alt_forget':'Forget decision!',
                          'forget_text':'Forget assignment decision',
                          'alt_repeal':'Repeal!',
                          'repeal_text':'Repeal record assignment',
                          'to_other_text':'Assign to another person',
                          'alt_to_other':'To other person!'
                          },
                          show_reset_button=True):
    '''
    Generate the per-paper action links shown for a paper whose
    assignment status is "confirmed".
    @param bibref: construct of unique ID for this author on this paper
    @type bibref: string
    @param pid: the Person ID
    @type pid: int
    @param verbiage_dict: language for the link descriptions
    @type verbiage_dict: dict
    '''
    parts = ['<!--2!--><span id="aid_status_details"> '
             '<img src="%(url)s/img/aid_check.png" alt="%(alt_confirm)s" />'
             '%(confirm_text)s <br>']
    if show_reset_button:
        # The "forget decision" link is optional.
        parts.append(
            '<a rel="nofollow" id="aid_reset_gr" class="aid_grey op_action" href="%(url)s/author/claim/action?reset=True&selection=%(ref)s&pid=%(pid)s">'
            '<img src="%(url)s/img/aid_reset_gray.png" alt="%(alt_forget)s" style="margin-left:22px;" />'
            '%(forget_text)s</a><br>')
    parts.append(
        '<a rel="nofollow" id="aid_repeal" class="aid_grey op_action" href="%(url)s/author/claim/action?repeal=True&selection=%(ref)s&pid=%(pid)s">'
        '<img src="%(url)s/img/aid_reject_gray.png" alt="%(alt_repeal)s" style="margin-left:22px;"/>'
        '%(repeal_text)s</a><br>'
        '<a rel="nofollow" id="aid_to_other" class="aid_grey op_action" href="%(url)s/author/claim/action?to_other_person=True&selection=%(ref)s">'
        '<img src="%(url)s/img/aid_to_other_gray.png" alt="%(alt_to_other)s" style="margin-left:22px;"/>'
        '%(to_other_text)s</a> </span>')
    values = {'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid}
    for key in ('alt_confirm', 'confirm_text', 'alt_forget', 'forget_text',
                'alt_repeal', 'repeal_text', 'to_other_text', 'alt_to_other'):
        values[key] = verbiage_dict[key]
    return "".join(parts) % values
def tmpl_author_repealed(self, bibref, pid, verbiage_dict={'alt_confirm':'Confirm!',
                         'confirm_text':'Confirm record assignment.',
                         'alt_forget':'Forget decision!',
                         'forget_text':'Forget assignment decision',
                         'alt_repeal':'Rejected!',
                         'repeal_text':'Repeal this record assignment.',
                         'to_other_text':'Assign to another person',
                         'alt_to_other':'To other person!'
                         }):
    '''
    Generate the per-paper action links shown for a paper whose
    assignment status is "repealed".
    @param bibref: construct of unique ID for this author on this paper
    @type bibref: string
    @param pid: the Person ID
    @type pid: int
    @param verbiage_dict: language for the link descriptions
    @type verbiage_dict: dict
    '''
    template = ('<!---2!--><span id="aid_status_details"> '
                '<img src="%(url)s/img/aid_reject.png" alt="%(alt_repeal)s" />'
                '%(repeal_text)s <br>'
                '<a rel="nofollow" id="aid_confirm" class="aid_grey op_action" href="%(url)s/author/claim/action?confirm=True&selection=%(ref)s&pid=%(pid)s">'
                '<img src="%(url)s/img/aid_check_gray.png" alt="%(alt_confirm)s" style="margin-left: 22px;" />'
                '%(confirm_text)s</a><br>'
                '<a rel="nofollow" id="aid_to_other" class="aid_grey op_action" href="%(url)s/author/claim/action?to_other_person=True&selection=%(ref)s">'
                '<img src="%(url)s/img/aid_to_other_gray.png" alt="%(alt_to_other)s" style="margin-left:22px;"/>'
                '%(to_other_text)s</a> </span>')
    values = {'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid}
    # All verbiage keys are looked up eagerly, matching the original's
    # KeyError behaviour when one is missing.
    for key in ('alt_confirm', 'confirm_text', 'alt_forget', 'forget_text',
                'alt_repeal', 'repeal_text', 'to_other_text', 'alt_to_other'):
        values[key] = verbiage_dict[key]
    return template % values
def tmpl_author_undecided(self, bibref, pid, verbiage_dict={'alt_confirm':'Confirm!',
                          'confirm_text':'Confirm record assignment.',
                          'alt_repeal':'Rejected!',
                          'repeal_text':'This record has been repealed.',
                          'to_other_text':'Assign to another person',
                          'alt_to_other':'To other person!'
                          }):
    '''
    Generate the per-paper action links shown for a paper with no
    assignment decision taken yet.
    @param bibref: construct of unique ID for this author on this paper
    @type bibref: string
    @param pid: the Person ID
    @type pid: int
    @param verbiage_dict: language for the link descriptions
    @type verbiage_dict: dict
    '''
    # batchprocess?mconfirm=True&bibrefs=['100:17,16']&pid=1
    template = ('<!--0!--><span id="aid_status_details"> '
                '<a rel="nofollow" id="aid_confirm" class="op_action" href="%(url)s/author/claim/action?confirm=True&selection=%(ref)s&pid=%(pid)s">'
                '<img src="%(url)s/img/aid_check.png" alt="%(alt_confirm)s" />'
                '%(confirm_text)s</a><br />'
                '<a rel="nofollow" id="aid_repeal" class="op_action" href="%(url)s/author/claim/action?repeal=True&selection=%(ref)s&pid=%(pid)s">'
                '<img src="%(url)s/img/aid_reject.png" alt="%(alt_repeal)s" />'
                '%(repeal_text)s</a> <br />'
                '<a rel="nofollow" id="aid_to_other" class="op_action" href="%(url)s/author/claim/action?to_other_person=True&selection=%(ref)s">'
                '<img src="%(url)s/img/aid_to_other.png" alt="%(alt_to_other)s" />'
                '%(to_other_text)s</a> </span>')
    values = {'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid}
    for key in ('alt_confirm', 'confirm_text', 'alt_repeal', 'repeal_text',
                'to_other_text', 'alt_to_other'):
        values[key] = verbiage_dict[key]
    return template % values
def tmpl_open_claim(self, bibrefs, pid, last_viewed_pid,
                    search_enabled=True):
    '''
    Generate the entry page for "claim or attribute this paper".

    @param bibrefs: unique IDs for this author on the papers to claim
    @type bibrefs: list of strings
    @param pid: the Person ID
    @type pid: int
    @param last_viewed_pid: last ID that had been subject to an action
    @type last_viewed_pid: int
    '''
    out = [self.tmpl_notification_box('info', 'attribute_papers', bibrefs, show_close_btn=False),
           '<p> ' + self._('Your options') + ': </p>']
    # Serialise the bibrefs as repeated "selection=" query arguments.
    bibs = '&'.join('selection=' + str(paper) for paper in bibrefs)
    if pid > -1:
        out.append('<div><a rel="nofollow" id="clam_for_myself" href="%s/author/claim/action?confirm=True&%s&pid=%s" '
                   'class="confirmlink"><button type="button">%s</div></br>' % (CFG_SITE_URL, bibs, str(pid), self._("Claim for yourself")))
    if last_viewed_pid:
        # NOTE(review): last_viewed_pid is indexed here, so it looks like a
        # (pid, name) pair rather than a plain int -- confirm with callers.
        out.append('<div><a rel="nofollow" id="clam_for_last_viewed" href="%s/author/claim/action?confirm=True&%s&pid=%s" '
                   'class="confirmlink"><button type="button">%s</div></br>' % (CFG_SITE_URL, bibs, str(last_viewed_pid[0]), self._('Assign to') + str(last_viewed_pid[1])))
    if search_enabled:
        out.append('<div><a rel="nofollow" id="claim_search" href="%s/author/claim/action?to_other_person=True&%s" '
                   'class="confirmlink"><button type="button">%s</div></br>' % (CFG_SITE_URL, bibs, self._('Search for a person to assign the paper to')))
    return "\n".join(out)
def __tmpl_admin_records_table(self, form_id, person_id, bibrecids, verbiage_dict={'no_doc_string':'Sorry, there are currently no documents to be found in this category.',
                               'b_confirm':'Confirm',
                               'b_repeal':'Repeal',
                               'b_to_others':'Assign to other person',
                               'b_forget':'Forget decision'},
                               buttons_verbiage_dict={'mass_buttons':{'no_doc_string':'Sorry, there are currently no documents to be found in this category.',
                               'b_confirm':'Confirm',
                               'b_repeal':'Repeal',
                               'b_to_others':'Assign to other person',
                               'b_forget':'Forget decision'},
                               'record_undecided':{'alt_confirm':'Confirm!',
                               'confirm_text':'Confirm record assignment.',
                               'alt_repeal':'Rejected!',
                               'repeal_text':'This record has been repealed.'},
                               'record_confirmed':{'alt_confirm':'Confirmed.',
                               'confirm_text':'This record assignment has been confirmed.',
                               'alt_forget':'Forget decision!',
                               'forget_text':'Forget assignment decision',
                               'alt_repeal':'Repeal!',
                               'repeal_text':'Repeal record assignment'},
                               'record_repealed':{'alt_confirm':'Confirm!',
                               'confirm_text':'Confirm record assignment.',
                               'alt_forget':'Forget decision!',
                               'forget_text':'Forget assignment decision',
                               'alt_repeal':'Rejected!',
                               'repeal_text':'Repeal this record assignment.'}},
                               show_reset_button=True):
    '''
    Generate the big tables for the person overview page
    @param form_id: name of the form
    @type form_id: string
    @param person_id: Person ID
    @type person_id: int
    @param bibrecids: List of records to display; each entry is a dict with
        keys 'bibref', 'recid', 'authorname', 'authoraffiliation',
        'paperdate', 'paperexperiment', 'flag' and optionally 'rt_status'
    @type bibrecids: list
    @param verbiage_dict: language for the elements
    @type verbiage_dict: dict
    @param buttons_verbiage_dict: language for the buttons
    @type buttons_verbiage_dict: dict
    '''
    # Placeholder markup shown when there is nothing to list.
    no_papers_html = ['<div style="text-align:left;margin-top:1em;"><strong>']
    no_papers_html.append('%s' % self._(verbiage_dict['no_doc_string']))
    no_papers_html.append('</strong></div>')
    if not bibrecids or not person_id:
        return "\n".join(no_papers_html)
    pp_html = []
    h = pp_html.append
    h('<form id="%s" action="/author/claim/action" method="post">'
      % (form_id))
    # Selection helper links shown above the table.
    # +self._(' On all pages: '))
    h('<div class="aid_reclist_selector">')
    h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
    h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
    h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a> | ')
    h('<a rel="nofollow" id="toggle_claimed_rows" href="javascript:toggle_claimed_rows();" '
      'alt="hide">' + self._('Hide successful claims') + '</a>')
    h('</div>')
    # Mass-action buttons (top copy); they submit the checked rows.
    h('<div class="aid_reclist_buttons">')
    h(('<img src="%s/img/aid_90low_right.png" alt="∟" />')
      % (CFG_SITE_URL))
    h('<input type="hidden" name="pid" value="%s" />' % (person_id))
    h('<input type="submit" name="assign" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_confirm']))
    h('<input type="submit" name="reject" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_repeal']))
    h('<input type="submit" name="to_other_person" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_to_others']))
    # if show_reset_button:
    # h('<input type="submit" name="reset" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_forget'])
    h(" </div>")
    h('<table class="paperstable" cellpadding="3" width="100%">')
    h("<thead>")
    h(" <tr>")
    h(' <th> </th>')
    h(' <th>' + self._('Paper Short Info') + '</th>')
    h(' <th>' + self._('Author Name') + '</th>')
    h(' <th>' + self._('Affiliation') + '</th>')
    h(' <th>' + self._('Date') + '</th>')
    h(' <th>' + self._('Experiment') + '</th>')
    h(' <th>' + self._('Actions') + '</th>')
    h(' </tr>')
    h('</thead>')
    h('<tbody>')
    # One table row per paper.
    for idx, paper in enumerate(bibrecids):
        h(' <tr style="padding-top: 6px; padding-bottom: 6px;">')
        h(' <td><input type="checkbox" name="selection" '
          'value="%s" /> </td>' % (paper['bibref']))
        rec_info = format_record(int(paper['recid']), "ha")
        rec_info = str(idx + 1) + '. ' + rec_info
        h(" <td>%s</td>" % (rec_info))
        h(" <td>%s</td>" % (paper['authorname']))
        aff = ""
        if paper['authoraffiliation']:
            aff = paper['authoraffiliation']
        else:
            aff = self._("Not assigned")
        h(" <td>%s</td>" % (aff))
        if paper['paperdate']:
            pdate = paper['paperdate']
        else:
            pdate = 'N.A.'
        h(" <td>%s</td>" % pdate)
        # NOTE(review): 'pdate' is reused here to hold the experiment value.
        if paper['paperexperiment']:
            pdate = paper['paperexperiment']
        else:
            pdate = 'N.A.'
        h(" <td>%s</td>" % pdate)
        paper_status = self._("No status information found.")
        # flag == 2 -> confirmed, flag == -2 -> repealed, else undecided.
        if paper['flag'] == 2:
            paper_status = self.tmpl_author_confirmed(paper['bibref'], person_id,
                verbiage_dict=buttons_verbiage_dict['record_confirmed'],
                show_reset_button=show_reset_button)
        elif paper['flag'] == -2:
            paper_status = self.tmpl_author_repealed(paper['bibref'], person_id,
                verbiage_dict=buttons_verbiage_dict['record_repealed'])
        else:
            paper_status = self.tmpl_author_undecided(paper['bibref'], person_id,
                verbiage_dict=buttons_verbiage_dict['record_undecided'])
        h(' <td><div id="bibref%s" style="float:left"><!--%s!-->%s </div>'
          % (paper['bibref'], paper['flag'], paper_status))
        if 'rt_status' in paper and paper['rt_status']:
            # Operator icon flags papers with a pending request ticket.
            h('<img src="%s/img/aid_operator.png" title="%s" '
              'alt="actions pending" style="float:right" '
              'height="24" width="24" />'
              % (CFG_SITE_URL, self._("Operator review of user actions pending")))
        h(' </td>')
        h(" </tr>")
    h(" </tbody>")
    h("</table>")
    # Selection helpers and action buttons repeated below the table.
    # +self._(' On all pages: '))
    h('<div class="aid_reclist_selector">')
    h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
    h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
    h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a> | ')
    h('<a rel="nofollow" id="toggle_claimed_rows" href="javascript:toggle_claimed_rows();" '
      'alt="hide">' + self._('Hide successful claims') + '</a>')
    h('</div>')
    h('<div class="aid_reclist_buttons">')
    h(('<img src="%s/img/aid_90low_right.png" alt="∟" />')
      % (CFG_SITE_URL))
    h('<input type="hidden" name="pid" value="%s" />' % (person_id))
    # NOTE(review): unlike the buttons above the table, these labels skip
    # self._() translation -- confirm whether that is intended.
    h('<input type="submit" name="assign" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_confirm'])
    h('<input type="submit" name="reject" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_repeal'])
    h('<input type="submit" name="to_other_person" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_to_others'])
    # if show_reset_button:
    # h('<input type="submit" name="reset" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_forget'])
    h(" </div>")
    h("</form>")
    return "\n".join(pp_html)
def __tmpl_reviews_table(self, person_id, bibrecids, admin=False):
    '''
    Generate the table for potential reviews.
    @param person_id: Person ID
    @type person_id: int
    @param bibrecids: List of records to display; each entry is indexable
        (entry[0] is used as the record id)
    @type bibrecids: list
    @param admin: Show admin functions
    @type admin: boolean
    @return: HTML code
    @rtype: string
    '''
    # Placeholder markup shown when there is nothing to review.
    no_papers_html = ['<div style="text-align:left;margin-top:1em;"><strong>']
    no_papers_html.append(self._('Sorry, there are currently no records to be found in this category.'))
    no_papers_html.append('</strong></div>')
    if not bibrecids or not person_id:
        return "\n".join(no_papers_html)
    pp_html = []
    h = pp_html.append
    h('<form id="review" action="/author/claim/batchprocess" method="post">')
    h('<table class="reviewstable" cellpadding="3" width="100%">')
    h(' <thead>')
    h(' <tr>')
    h(' <th> </th>')
    h(' <th>' + self._('Paper Short Info') + '</th>')
    h(' <th>' + self._('Actions') + '</th>')
    h(' </tr>')
    h(' </thead>')
    h(' <tbody>')
    # One row per record awaiting review.
    for paper in bibrecids:
        h(' <tr>')
        h(' <td><input type="checkbox" name="selected_bibrecs" '
          'value="%s" /> </td>' % (paper))
        rec_info = format_record(int(paper[0]), "ha")
        if not admin:
            # Non-admins get author links instead of person-search links.
            rec_info = rec_info.replace("person/search?q=", "author/")
        h(" <td>%s</td>" % (rec_info))
        h(' <td><a rel="nofollow" href="%s/author/claim/batchprocess?selected_bibrecs=%s&mfind_bibref=claim">'% (CFG_SITE_URL, paper) +
          self._('Review Transaction') + '</a></td>')
        h(" </tr>")
    h(" </tbody>")
    h("</table>")
    # Selection helper links and the batch submit controls.
    h('<div style="text-align:left;"> ' + self._('On all pages') + ': ')
    h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
    h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
    h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a>')
    h('</div>')
    h('<div style="vertical-align:middle;">')
    h('∟ ' + self._('With selected do') + ': ')
    h('<input type="hidden" name="pid" value="%s" />' % (person_id))
    h('<input type="hidden" name="mfind_bibref" value="claim" />')
    h('<input type="submit" name="submit" value="Review selected transactions" />')
    h(" </div>")
    h('</form>')
    return "\n".join(pp_html)
def tmpl_admin_person_info_box(self, ln, person_id= -1, names=None):
    '''
    Generate the collapsible accordion box listing a person's name
    variants.

    @param ln: the language to use (currently not used by the markup)
    @type ln: string
    @param person_id: Person ID (currently not used by the markup)
    @type person_id: int
    @param names: (name, count) pairs to display
    @type names: list
    @return: HTML code
    @rtype: string
    '''
    # Fixed the shared-mutable-default pitfall (was names=[]); None means
    # "no names". Removed the dead `if not ln: pass` branch.
    if names is None:
        names = []
    html = []
    h = html.append
    h('<div class="accordion" id="accordion1">')
    h('<div class="accordion-group">')
    # Accordion heading: toggles the collapsible list of variants.
    h('<span class=\"bsw\"><div class="accordion-heading">')
    h('<a class="accordion-toggle" data-toggle="collapse" data-parent="#accordion1" href="#collapseVariants">')
    h('%s</a>' % self._('View name variants'))
    h('</div>')
    h('<div id="collapseVariants" class="accordion-body collapse">') # Start variants accordion body
    # Populate accordion with name variants.
    h('<div class="accordion-inner">')
    for name in names:
        h("%s (%s)<br>" % (name[0], name[1]))
    h('</div>')
    h('</div>') # Close variants accordion body
    h('</div>') # Close accordion group
    h('</div></span>') # Close accordion
    return "\n".join(html)
def tmpl_admin_tabs(self, ln=CFG_SITE_LANG, person_id= -1,
rejected_papers=[],
rest_of_papers=[],
review_needed=[],
rt_tickets=[],
open_rt_tickets=[],
show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
show_reset_button=True,
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'],
verbiage_dict={'confirmed':'Records', 'repealed':'Not this person\'s records',
'review':'Records in need of review',
'tickets':'Open Tickets', 'data':'Data',
'confirmed_ns':'Papers of this Person',
'repealed_ns':'Papers _not_ of this Person',
'review_ns':'Papers in need of review',
'tickets_ns':'Tickets for this Person',
'data_ns':'Additional Data for this Person'},
buttons_verbiage_dict={'mass_buttons':{'no_doc_string':'Sorry, there are currently no documents to be found in this category.',
'b_confirm':'Confirm',
'b_repeal':'Repeal',
'b_to_others':'Assign to other person',
'b_forget':'Forget decision'},
'record_undecided':{'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_repeal':'Rejected!',
'repeal_text':'This record has been repealed.'},
'record_confirmed':{'alt_confirm':'Confirmed.',
'confirm_text':'This record assignment has been confirmed.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Repeal!',
'repeal_text':'Repeal record assignment'},
'record_repealed':{'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Rejected!',
'repeal_text':'Repeal this record assignment.'}}):
'''
Generate the tabs for the person overview page
@param ln: the language to use
@type ln: string
@param person_id: Person ID
@type person_id: int
@param rejected_papers: list of repealed papers
@type rejected_papers: list
@param rest_of_papers: list of attributed of undecided papers
@type rest_of_papers: list
@param review_needed: list of papers that need a review (choose name)
@type review_needed:list
@param rt_tickets: list of tickets for this Person
@type rt_tickets: list
@param open_rt_tickets: list of open request tickets
@type open_rt_tickets: list
@param show_tabs: list of tabs to display
@type show_tabs: list of strings
@param ticket_links: list of links to display
@type ticket_links: list of strings
@param verbiage_dict: language for the elements
@type verbiage_dict: dict
@param buttons_verbiage_dict: language for the buttons
@type buttons_verbiage_dict: dict
'''
html = []
h = html.append
h('<div id="aid_tabbing">')
h(' <ul>')
if 'records' in show_tabs:
r = verbiage_dict['confirmed']
h(' <li><a rel="nofollow" href="#tabRecords"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(rest_of_papers)}))
if 'repealed' in show_tabs:
r = verbiage_dict['repealed']
h(' <li><a rel="nofollow" href="#tabNotRecords"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(rejected_papers)}))
if 'review' in show_tabs:
r = verbiage_dict['review']
h(' <li><a rel="nofollow" href="#tabReviewNeeded"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(review_needed)}))
if 'tickets' in show_tabs:
r = verbiage_dict['tickets']
h(' <li><a rel="nofollow" href="#tabTickets"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(open_rt_tickets)}))
if 'data' in show_tabs:
r = verbiage_dict['data']
h(' <li><a rel="nofollow" href="#tabData"><span>%s</span></a></li>' % r)
userid = get_uid_of_author(person_id)
if userid:
h('<img src="%s/img/webbasket_user.png" alt="%s" width="30" height="30" />' %
(CFG_SITE_URL, self._("The author has an internal ID!")))
h(' </ul>')
if 'records' in show_tabs:
h(' <div id="tabRecords">')
r = verbiage_dict['confirmed_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self.__tmpl_admin_records_table("massfunctions",
person_id, rest_of_papers,
verbiage_dict=buttons_verbiage_dict['mass_buttons'],
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button))
h(" </div>")
if 'repealed' in show_tabs:
h(' <div id="tabNotRecords">')
r = verbiage_dict['repealed_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self._('These records have been marked as not being from this person.'))
h('<br />' + self._('They will be regarded in the next run of the author ')
+ self._('disambiguation algorithm and might disappear from this listing.'))
h(self.__tmpl_admin_records_table("rmassfunctions",
person_id, rejected_papers,
verbiage_dict=buttons_verbiage_dict['mass_buttons'],
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button))
h(" </div>")
if 'review' in show_tabs:
h(' <div id="tabReviewNeeded">')
r = verbiage_dict['review_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self.__tmpl_reviews_table(person_id, review_needed, True))
h(' </div>')
if 'tickets' in show_tabs:
h(' <div id="tabTickets">')
r = verbiage_dict['tickets']
h('<noscript><h5>%s</h5></noscript>' % r)
r = verbiage_dict['tickets_ns']
h('<p>%s:</p>' % r)
if rt_tickets:
pass
# open_rt_tickets = [a for a in open_rt_tickets if a[1] == rt_tickets]
for t in open_rt_tickets:
name = self._('Not provided')
surname = self._('Not provided')
uidip = self._('Not available')
comments = self._('No comments')
email = self._('Not provided')
date = self._('Not Available')
actions = []
for info in t[0]:
if info[0] == 'firstname':
name = info[1]
elif info[0] == 'lastname':
surname = info[1]
elif info[0] == 'uid-ip':
uidip = info[1]
elif info[0] == 'comments':
comments = info[1]
elif info[0] == 'email':
email = info[1]
elif info[0] == 'date':
date = info[1]
elif info[0] in ['assign', 'reject']:
actions.append(info)
if 'delete' in ticket_links:
h(('<strong>Ticket number: %(tnum)s </strong> <a rel="nofollow" id="cancel" href=%(url)s/author/claim/action?cancel_rt_ticket=True&selection=%(tnum)s&pid=%(pid)s>' + self._(' Delete this ticket') + ' </a>')
% ({'tnum':t[1], 'url':CFG_SITE_URL, 'pid':str(person_id)}))
if 'commit' in ticket_links:
h((' or <a rel="nofollow" id="commit" href=%(url)s/author/claim/action?commit_rt_ticket=True&selection=%(tnum)s&pid=%(pid)s>' + self._(' Commit this entire ticket') + ' </a> <br>')
% ({'tnum':t[1], 'url':CFG_SITE_URL, 'pid':str(person_id)}))
h('<dd>')
h('Open from: %s, %s <br>' % (surname, name))
h('Date: %s <br>' % date)
h('identified by: %s <br>' % uidip)
h('email: %s <br>' % email)
h('comments: %s <br>' % comments)
h('Suggested actions: <br>')
h('<dd>')
for a in actions:
bibref, bibrec = a[1].split(',')
pname = get_bibrefrec_name_string(bibref)
title = ""
try:
title = get_fieldvalues(int(bibrec), "245__a")[0]
except IndexError:
title = self._("No title available")
title = escape_html(title)
if 'commit_entry' in ticket_links:
h('<a rel="nofollow" id="action" href="%(url)s/author/claim/action?%(action)s=True&pid=%(pid)s&selection=%(bib)s&rt_id=%(rt)s">%(action)s - %(name)s on %(title)s </a>'
% ({'action': a[0], 'url': CFG_SITE_URL,
'pid': str(person_id), 'bib':a[1],
'name': pname, 'title': title, 'rt': t[1]}))
else:
h('%(action)s - %(name)s on %(title)s'
% ({'action': a[0], 'name': pname, 'title': title}))
if 'del_entry' in ticket_links:
h(' - <a rel="nofollow" id="action" href="%(url)s/author/claim/action?cancel_rt_ticket=True&pid=%(pid)s&selection=%(bib)s&rt_id=%(rt)s&rt_action=%(action)s"> Delete this entry </a>'
% ({'action': a[0], 'url': CFG_SITE_URL,
'pid': str(person_id), 'bib': a[1], 'rt': t[1]}))
h(' - <a rel="nofollow" id="show_paper" target="_blank" href="%(url)s/record/%(record)s"> View record <br>' % ({'url':CFG_SITE_URL, 'record':str(bibrec)}))
h('</dd>')
h('</dd><br>')
# h(str(open_rt_tickets))
h(" </div>")
if 'data' in show_tabs:
h(' <div id="tabData">')
r = verbiage_dict['data_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
full_canonical_name = str(get_canonical_id_from_person_id(person_id))
if '.' in str(full_canonical_name) and not isinstance(full_canonical_name, int):
canonical_name = full_canonical_name[0:full_canonical_name.rindex('.')]
else:
canonical_name = str(person_id)
h('<div> <strong> Person id </strong> <br> %s <br>' % person_id)
h('<strong> <br> Canonical name setup </strong>')
h('<div style="margin-top: 15px;"> Current canonical name: %s' % full_canonical_name)
h('<form method="GET" action="%s/author/claim/action" rel="nofollow">' % CFG_SITE_URL)
h('<input type="hidden" name="set_canonical_name" value="True" />')
h('<input name="canonical_name" id="canonical_name" type="text" style="border:1px solid #333; width:500px;" value="%s" /> ' % canonical_name)
h('<input type="hidden" name="pid" value="%s" />' % person_id)
h('<input type="submit" value="set canonical name" class="aid_btn_blue" />')
h('<br>NOTE: If the canonical ID is without any number (e.g. J.Ellis), it will take the first available number. ')
h('If the canonical ID is complete (e.g. J.Ellis.1) that ID will be assigned to the current person ')
h('and if another person had that ID, he will lose it and get a new one. </form>')
h('</div>')
userid = get_uid_of_author(person_id)
h('<div> <br>')
h('<strong> Internal IDs </strong> <br>')
if userid:
email = get_email(int(userid))
h('UserID: INSPIRE user %s is associated with this profile with email: %s' % (str(userid), str(email)))
else:
h('UserID: There is no INSPIRE user associated to this profile!')
h('<br></div>')
h('</div> </div>')
h('</div>')
return "\n".join(html)
def tmpl_bibref_check(self, bibrefs_auto_assigned, bibrefs_to_confirm):
'''
Generate overview to let user chose the name on the paper that
resembles the person in question.
@param bibrefs_auto_assigned: list of auto-assigned papers
@type bibrefs_auto_assigned: list
@param bibrefs_to_confirm: list of unclear papers and names
@type bibrefs_to_confirm: list
'''
html = []
h = html.append
h('<form id="review" action="/author/claim/action" method="post">')
h('<p><strong>' + self._("Make sure we match the right names!")
+ '</strong></p>')
h('<p>' + self._('Please select an author on each of the records that will be assigned.') + '<br/>')
h(self._('Papers without a name selected will be ignored in the process.'))
h('</p>')
for person in bibrefs_to_confirm:
if not "bibrecs" in bibrefs_to_confirm[person]:
continue
person_name = bibrefs_to_confirm[person]["person_name"]
if person_name.isspace():
h((self._('Claim for person with id') + ': %s. ') % person)
h(self._('This seems to be an empty profile without names associated to it yet'))
h(self._('(the names will be automatically gathered when the first paper is claimed to this profile).'))
else:
h((self._("Select name for") + " %s") % (person_name))
pid = person
for recid in bibrefs_to_confirm[person]["bibrecs"]:
h('<div id="aid_moreinfo">')
try:
fv = get_fieldvalues(int(recid), "245__a")[0]
except (ValueError, IndexError, TypeError):
fv = self._('Error retrieving record title')
fv = escape_html(fv)
h(self._("Paper title: ") + fv)
h('<select name="bibrecgroup%s">' % (recid))
h('<option value="" >-- ' + self._('Ignore') + ' --</option>')
h('<option value="" selected>-- Choose author name --</option>')
for bibref in bibrefs_to_confirm[person]["bibrecs"][recid]:
h('<option value="%s||%s">%s</option>'
% (pid, bibref[0], bibref[1]))
h('</select>')
h("</div>")
if bibrefs_auto_assigned:
h(self._('The following names have been automatically chosen:'))
for person in bibrefs_auto_assigned:
if not "bibrecs" in bibrefs_auto_assigned[person]:
continue
h((self._("For") + " %s:") % bibrefs_auto_assigned[person]["person_name"])
pid = person
for recid in bibrefs_auto_assigned[person]["bibrecs"]:
try:
fv = get_fieldvalues(int(recid), "245__a")[0]
except (ValueError, IndexError, TypeError):
fv = self._('Error retrieving record title')
fv = escape_html(fv)
h('<div id="aid_moreinfo">')
h(('%s' + self._(' -- With name: ')) % (fv))
# , bibrefs_auto_assigned[person]["bibrecs"][recid][0][1]))
# asbibref = "%s||%s" % (person, bibrefs_auto_assigned[person]["bibrecs"][recid][0][0])
pbibref = bibrefs_auto_assigned[person]["bibrecs"][recid][0][0]
h('<select name="bibrecgroup%s">' % (recid))
h('<option value="" selected>-- ' + self._('Ignore') + ' --</option>')
for bibref in bibrefs_auto_assigned[person]["bibrecs"][recid]:
selector = ""
if bibref[0] == pbibref:
selector = ' selected="selected"'
h('<option value="%s||%s"%s>%s</option>'
% (pid, bibref[0], selector, bibref[1]))
h('</select>')
# h('<input type="hidden" name="bibrecgroup%s" value="%s" />'
# % (recid, asbibref))
h('</div>')
h('<div style="text-align:center;">')
h(' <input type="submit" class="aid_btn_green" name="bibref_check_submit" value="Accept" />')
h(' <input type="submit" class="aid_btn_blue" name="cancel_stage" value="Delete all transactions" />')
h("</div>")
h('</form>')
return "\n".join(html)
def tmpl_invenio_search_box(self):
'''
Generate little search box for missing papers. Links to main invenio
search on start papge.
'''
html = []
h = html.append
h('<div style="margin-top: 15px;"> <strong>Search for missing papers:</strong> <form method="GET" action="%s/search">' % CFG_SITE_URL)
h('<input name="p" id="p" type="text" style="border:1px solid #333; width:500px;" /> ')
h('<input type="submit" name="action_search" value="search" '
'class="aid_btn_blue" />')
h('</form> </div>')
return "\n".join(html)
@staticmethod
def tmpl_profile_navigation_bar(person_info, ln, active, menu_items=None):
"""
Generates a profile specific navigation bar.
The menu_items parameter is a list of tuples with three components.
The third component is a boolean that represents whether the content is static, i.e. It is not specific to
a particular profile. Set it to False if the canonical name of the profile should be appended after the route.
True indicates that the content is static and remains the same regardless of the profile and thus will not append
the canonical name of the profile to the route.
@param person_info: A dict describing a person, must contain key 'canonical_name'
@param ln: Localisation
@param active: Sets a menu item to active if it contains the passed substring.
@param menu_items: List of 3-tuples e.g. ("/path/of/route/","Menu Item Name",False)
@return: HTML markup wrapped in 'ul' tags
@rtype: string
"""
# Default navigation bar content
if menu_items is None:
menu_items = Template.DEFAULT_PROFILE_MENU_ITEMS
_ = gettext_set_language(ln)
navigation_bar = "<span class=\"bsw\"><ul id=\"authorid_menu\" class=\"nav nav-pills\">"
for item in menu_items:
(rel_url, link_text, static) = item
if not static:
if person_info['canonical_name']:
rel_url += str(person_info['canonical_name'])
link_text = _(link_text)
if active.lower() in link_text.lower():
navigation_bar += "<li class=\"bsw active\"><a href=\"%s%s\">%s</a></li>" % (CFG_SITE_URL, rel_url, link_text)
else:
navigation_bar += "<li><a href=\"%s%s\">%s</a></li>" % (CFG_SITE_URL, rel_url, link_text)
return navigation_bar + "</ul></span>"
def tmpl_person_menu(self, pid, ln):
'''
Generate the menu bar
'''
person_info = get_person_info_by_pid(pid)
profile_menu = Template.tmpl_profile_navigation_bar(person_info, ln, "Manage Publications")
return "\n" + profile_menu
def tmpl_person_menu_admin(self, pid, ln):
'''
Generate the menu bar
'''
person_info = get_person_info_by_pid(pid)
menu_items = list(Template.DEFAULT_PROFILE_MENU_ITEMS)
menu_items.append(("/author/claim/tickets_admin", "Open Tickets", True))
profile_menu = Template.tmpl_profile_navigation_bar(person_info, ln, "Manage Publications", menu_items)
return "\n" + profile_menu
def tmpl_ticket_final_review(self, req, mark_yours=[], mark_not_yours=[],
mark_theirs=[], mark_not_theirs=[], autoclaim=False):
'''
Generate final review page. Displaying transactions if they
need confirmation.
@param req: Apache request object
@type req: Apache request object
@param mark_yours: papers marked as 'yours'
@type mark_yours: list
@param mark_not_yours: papers marked as 'not yours'
@type mark_not_yours: list
@param mark_theirs: papers marked as being someone else's
@type mark_theirs: list
@param mark_not_theirs: papers marked as NOT being someone else's
@type mark_not_theirs: list
'''
def html_icon_legend():
html = []
h = html.append
h('<div id="legend">')
h("<p>")
h(self._("Symbols legend: "))
h("</p>")
h('<span style="margin-left:25px; vertical-align:middle;">')
h('<img src="%s/img/aid_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Everything is shiny, captain!")))
h(self._('The result of this request will be visible immediately'))
h('</span><br />')
h('<span style="margin-left:25px; vertical-align:middle;">')
h('<img src="%s/img/aid_warning_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Confirmation needed to continue")))
h(self._('The result of this request will be visible immediately but we need your confirmation to do so for this paper has been manually claimed before'))
h('</span><br />')
h('<span style="margin-left:25px; vertical-align:middle;">')
h('<img src="%s/img/aid_denied.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("This will create a change request for the operators")))
h(self._("The result of this request will be visible upon confirmation through an operator"))
h("</span>")
h("</div>")
return "\n".join(html)
def mk_ticket_row(ticket, autoclaim = False):
recid = -1
rectitle = ""
recauthor = "No Name Found."
personname = "No Name Found."
try:
recid = ticket['bibref'].split(",")[1]
except (ValueError, KeyError, IndexError):
return ""
try:
rectitle = get_fieldvalues(int(recid), "245__a")[0]
except (ValueError, IndexError, TypeError):
rectitle = self._('Error retrieving record title')
rectitle = escape_html(rectitle)
if "authorname_rec" in ticket:
recauthor = ticket['authorname_rec']
if "person_name" in ticket:
personname = ticket['person_name']
html = []
h = html.append
# h("Debug: " + str(ticket) + "<br />")
h('<td width="25"> </td>')
h('<td>')
h(rectitle)
h('</td>')
h('<td>')
h((personname + " (" + self._("Selected name on paper") + ": %s)") % recauthor)
h('</td>')
h('<td>')
if ticket['status'] == "granted":
h('<img src="%s/img/aid_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Everything is shiny, captain!")))
elif ticket['status'] == "warning_granted":
h('<img src="%s/img/aid_warning_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Verification needed to continue")))
else:
h('<img src="%s/img/aid_denied.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("This will create a request for the operators")))
h('</td>')
h('<td>')
h('<a rel="nofollow" href="%s/author/claim/action?checkout_remove_transaction=%s ">'
'Cancel'
'</a>' % (CFG_SITE_URL, ticket['bibref']))
h('</td>')
return "\n".join(html)
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
html = []
h = html.append
# h(html_icon_legend())
if "checkout_faulty_fields" in pinfo and pinfo["checkout_faulty_fields"]:
h(self.tmpl_error_box('sorry', 'check_entries'))
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "tickets" in pinfo["checkout_faulty_fields"]):
h(self.tmpl_error_box('error', 'provide_transaction'))
# h('<div id="aid_checkout_teaser">' +
# self._('Almost done! Please use the button "Confirm these changes" '
# 'at the end of the page to send this request to an operator '
# 'for review!') + '</div>')
h('<div id="aid_person_names" '
'class="ui-tabs ui-widget ui-widget-content ui-corner-all"'
'style="padding:10px;">')
h("<h4>" + self._('Please provide your information') + "</h4>")
h('<form id="final_review" action="%s/author/claim/action" method="post">'
% (CFG_SITE_URL))
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_first_name" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" + self._('Please provide your first name') + "</p>")
h("<p>")
if "user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]:
h((self._("Your first name:") + " %s") % pinfo["user_first_name"])
else:
h(self._('Your first name:') + ' <input type="text" name="user_first_name" value="%s" />'
% pinfo["user_first_name"])
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_last_name" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" + self._('Please provide your last name') + "</p>")
h("</p><p>")
if "user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]:
h((self._("Your last name:") + " %s") % pinfo["user_last_name"])
else:
h(self._('Your last name:') + ' <input type="text" name="user_last_name" value="%s" />'
% pinfo["user_last_name"])
h("</p>")
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_email" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" + self._('Please provide your eMail address') + "</p>")
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_email_taken" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" +
self._('This eMail address is reserved by a user. Please log in or provide an alternative eMail address')
+ "</p>")
h("<p>")
if "user_email_sys" in pinfo and pinfo["user_email_sys"]:
h((self._("Your eMail:") + " %s") % pinfo["user_email"])
else:
h((self._('Your eMail:') + ' <input type="text" name="user_email" value="%s" />')
% pinfo["user_email"])
h("</p><p>")
h(self._("You may leave a comment (optional)") + ":<br>")
h('<textarea name="user_comments">')
if "user_ticket_comments" in pinfo:
h(pinfo["user_ticket_comments"])
h("</textarea>")
h("</p>")
h("<p> </p>")
h('<div style="text-align: center;">')
if not autoclaim:
h((' <input type="submit" name="checkout_continue_claiming" class="aid_btn_green" value="%s" />')
% self._("Continue claiming*"))
h((' <input type="submit" name="checkout_submit" class="aid_btn_green" value="%s" />')
% self._("Confirm these changes**"))
h('<span style="margin-left:150px;">')
if not autoclaim:
h((' <input type="submit" name="cancel" class="aid_btn_red" value="%s" />')
% self._("!Delete the entire request!"))
h('</span>')
h('</div>')
h("</form>")
h('</div>')
h('<div id="aid_person_names" '
'class="ui-tabs ui-widget ui-widget-content ui-corner-all"'
'style="padding:10px;">')
h('<table width="100%" border="0" cellspacing="0" cellpadding="4">')
if not ulevel == "guest":
h('<tr>')
h("<td colspan='5'><h4>" + self._('Mark as your documents') + "</h4></td>")
h('</tr>')
if mark_yours:
for idx, ticket in enumerate(mark_yours):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket, autoclaim))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">Nothing staged as yours</td>')
h("</tr>")
h('<tr>')
h("<td colspan='5'><h4>" + self._("Mark as _not_ your documents") + "</h4></td>")
h('</tr>')
if mark_not_yours:
for idx, ticket in enumerate(mark_not_yours):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket, autoclaim))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">' + self._('Nothing staged as not yours') + '</td>')
h("</tr>")
h('<tr>')
h("<td colspan='5'><h4>" + self._('Mark as their documents') + "</h4></td>")
h('</tr>')
if mark_theirs:
for idx, ticket in enumerate(mark_theirs):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket, autoclaim))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">' + self._('Nothing staged in this category') + '</td>')
h("</tr>")
h('<tr>')
h("<td colspan='5'><h4>" + self._('Mark as _not_ their documents') + "</h4></td>")
h('</tr>')
if mark_not_theirs:
for idx, ticket in enumerate(mark_not_theirs):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket, autoclaim))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">' + self._('Nothing staged in this category') + '</td>')
h("</tr>")
h('</table>')
h("</div>")
h("<p>")
h(self._(" * You can come back to this page later. Nothing will be lost. <br />"))
h(self._(" ** Performs all requested changes. Changes subject to permission restrictions "
"will be submitted to an operator for manual review."))
h("</p>")
h(html_icon_legend())
return "\n".join(html)
def tmpl_choose_profile_search_new_person_generator(self):
def stub():
text = self._("Create new profile")
link = "%s/author/claim/action?associate_profile=True&pid=%s" % (CFG_SITE_URL, str(-1))
return text, link
return stub
def tmpl_assigning_search_new_person_generator(self, bibrefs):
def stub():
text = self._("Create a new Person")
link = "%s/author/claim/action?confirm=True&pid=%s" % (CFG_SITE_URL, str(CREATE_NEW_PERSON))
for r in bibrefs:
link = link + '&selection=%s' % str(r)
return text, link
return stub
def tmpl_choose_profile_search_button_generator(self):
def stub(pid, search_param):
text = self._("This is my profile")
parameters = [('associate_profile', True), ('pid', str(pid)), ('search_param', search_param)]
link = "%s/author/claim/action" % (CFG_SITE_URL)
css_class = ""
to_disable = True
return text, link, parameters, css_class , to_disable
return stub
def tmpl_assigning_search_button_generator(self, bibrefs):
def stub(pid, search_param):
text = self._("Assign paper")
parameters = [('confirm', True), ('pid', str(pid)), ('search_param', search_param)]
for r in bibrefs:
parameters.append(('selection', str(r)))
link = "%s/author/claim/action" % (CFG_SITE_URL)
css_class = ""
to_disable = False
return text, link, parameters, css_class, to_disable
return stub
def merge_profiles_button_generator(self):
def stub(pid, search_param):
text = self._("Add to merge list")
parameters = []
link = ""
css_class = "addToMergeButton"
to_disable = False
return text, link, parameters, css_class, to_disable
return stub
def tmpl_choose_profile_search_bar(self):
def stub(search_param):
activated = True
parameters = [('search_param', search_param)]
link = "%s/author/choose_profile" % ( CFG_SITE_URL, )
return activated, parameters, link
return stub
def tmpl_general_search_bar(self):
def stub(search_param,):
activated = True
parameters = [('q', search_param)]
link = "%s/author/search" % ( CFG_SITE_URL, )
return activated, parameters, link
return stub
def tmpl_merge_profiles_search_bar(self, primary_profile):
def stub(search_param):
activated = True
parameters = [('search_param', search_param), ('primary_profile', primary_profile)]
link = "%s/author/merge_profiles" % ( CFG_SITE_URL, )
return activated, parameters, link
return stub
def tmpl_author_search(self, query, results, shown_element_functions):
'''
Generates the search for Person entities.
@param query: the query a user issued to the search
@type query: string
@param results: list of results
@type results: list
@param search_ticket: search ticket object to inform about pending
claiming procedure
@type search_ticket: dict
'''
if not query:
query = ""
html = []
h = html.append
search_bar_activated = False
if 'show_search_bar' in shown_element_functions.keys():
search_bar_activated, parameters, link = shown_element_functions['show_search_bar'](query)
if search_bar_activated:
h('<div class="fg-toolbar ui-toolbar ui-widget-header ui-corner-tl ui-corner-tr ui-helper-clearfix" id="aid_search_bar">')
h('<form id="searchform" action="%s" method="GET">' % (link,))
h('Find author clusters by name. e.g: <i>Ellis, J</i>: <br>')
for param in parameters[1:]:
h('<input type="hidden" name=%s value=%s>' % (param[0], param[1]))
h('<input placeholder="Search for a name, e.g: Ellis, J" type="text" name=%s style="border:1px solid #333; width:500px;" '
'maxlength="250" value="%s" class="focus" />' % (parameters[0][0], parameters[0][1]))
h('<input type="submit" value="Search" />')
h('</form>')
if 'new_person_gen' in shown_element_functions.keys():
new_person_text, new_person_link = shown_element_functions['new_person_gen']()
h('<a rel="nofollow" href="%s" ><button type="button" id="new_person_link">%s' % (new_person_link, new_person_text))
h('</button></a>')
h('</div>')
if not results and not query:
h('</div>')
return "\n".join(html)
if query and not results:
authemail = CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
h(('<strong>' + self._("We do not have a publication list for '%s'." +
" Try using a less specific author name, or check" +
" back in a few days as attributions are updated " +
"frequently. Or you can send us feedback, at ") +
"<a rel='nofollow' href=\"mailto:%s\">%s</a>.</strong>") % (query, authemail, authemail))
h('</div>')
return "\n".join(html)
show_action_button = False
if 'button_gen' in shown_element_functions.keys():
show_action_button = True
show_status = False
if 'show_status' in shown_element_functions.keys():
show_status = True
pass_status = False
if 'pass_status' in shown_element_functions.keys():
pass_status = True
# base_color = 100
# row_color = 0
# html table
h('<table id="personsTable">')
h('<!-- Table header -->\
<thead>\
<tr>\
<th scope="col" id="Number" style="width:75px;">Number</th>\
<th scope="col" id="Identifier">Identifier</th>\
<th scope="col" id="Names">Names</th>\
<th scope="col" id="IDs">IDs</th>\
<th scope="col" id="Papers" style="width:350px">Papers</th>\
<th scope="col" id="Link">Link</th>')
if show_status:
h(' <th scope="col" id="Status" >Status</th>')
if show_action_button:
h(' <th scope="col" id="Action">Action</th>')
h(' </tr>\
</thead>\
<!-- Table body -->\
<tbody>')
for index, result in enumerate(results):
# if len(results) > base_color:
# row_color += 1
# else:
# row_color = base_color - (base_color - index *
# base_color / len(results)))
pid = result['pid']
canonical_id = result['canonical_id']
# person row
h('<tr id="pid'+ str(pid) + '">')
h('<td>%s</td>' % (index + 1))
# for nindex, name in enumerate(names):
# color = row_color + nindex * 35
# color = min(color, base_color)
# h('<span style="color:rgb(%d,%d,%d);">%s; </span>'
# % (color, color, color, name[0]))
#Identifier
if canonical_id:
h('<td>%s</td>' % (canonical_id,))
else:
canonical_id = ''
h('<td>%s</td>' % ('No canonical id',))
#Names
h('<td class="emptyName' + str(pid) + '">')
#html.extend(self.tmpl_gen_names(names))
h('</td>')
# IDs
h('<td class="emptyIDs' + str(pid) + '" >')#style="text-align:left;padding-left:35px;"
#html.extend(self.tmpl_gen_ext_ids(external_ids))
h('</td>')
# Recent papers
h('<td>')
h(('<a rel="nofollow" href="#" id="aid_moreinfolink" class="mpid%s">'
'<img src="../img/aid_plus_16.png" '
'alt = "toggle additional information." '
'width="11" height="11"/> '
+ self._('Recent Papers') +
'</a>')
% (pid))
h('<div class="more-mpid%s" id="aid_moreinfo">' % (pid))
h('</div>')
h('</td>')
#Link
h('<td>')
h(('<span>'
'<em><a rel="nofollow" href="%s/author/profile/%s" id="aid_moreinfolink" target="_blank">'
+ self._('Publication List ') + '(%s)</a></em></span>')
% (CFG_SITE_URL,get_person_redirect_link(pid),
get_person_redirect_link(pid)))
h('</td>')
hidden_status = ""
if pass_status:
if result["status"]:
status = "Available"
else:
status = "Not available"
hidden_status = '<input type="hidden" name="profile_availability" value="%s"/>' % status
if show_status:
h('<td>%s</td>' % (status))
if show_action_button:
action_button_text, action_button_link, action_button_parameters, action_button_class, action_button_to_disable = shown_element_functions['button_gen'](pid, query)#class
#Action link
h('<td class="uncheckedProfile' + str(pid) + '" style="text-align:center; vertical-align:middle;">')
parameters_sublink = ''
if action_button_link:
parameters_sublink = '<input type="hidden" name="%s" value="%s" />' % (action_button_parameters[0][0], str(action_button_parameters[0][1]))
for (param_type,param_value) in action_button_parameters[1:]:
parameters_sublink += '<input type="hidden" name="%s" value="%s" />' % (param_type, str(param_value))
disabled = ""
if show_status:
if not result["status"] and action_button_to_disable:
disabled = "disabled"
h('<form action="%s" method="get">%s%s<input type="submit" name="%s" class="%s aid_btn_blue" value="%s" %s/></form>' %
(action_button_link, parameters_sublink, hidden_status, canonical_id, action_button_class, action_button_text, disabled)) #confirmlink check if canonical id
h('</td>')
h('</tr>')
h('</tbody>')
h('</table>')
return "\n".join(html)
def tmpl_gen_papers(self, papers):
"""
Generates the recent papers html code.
Returns a list of strings
"""
html = []
h = html.append
if papers:
h((self._('Showing the') + ' %d ' + self._('most recent documents:')) % len(papers))
h("<ul>")
for paper in papers:
h("<li>%s</li>"
% (format_record(int(paper[0]), "ha")))
h("</ul>")
elif not papers:
h("<p>" + self._('Sorry, there are no documents known for this person') + "</p>")
return html
def tmpl_gen_names(self, names):
"""
Generates the names html code.
Returns a list of strings
"""
html = []
h = html.append
delimiter = ";"
if names:
for i,name in enumerate(names):
if i == 0:
h('<span>%s</span>'
% (name[0],))
else:
h('<span">%s  %s</span>'
% (delimiter, name[0]))
else:
h('%s' % ('No names found',))
return html
def tmpl_gen_ext_ids(self, external_ids):
"""
Generates the external ids html code.
Returns a list of strings
"""
html = []
h = html.append
if external_ids:
h('<table id="externalIDsTable">')
for key, value in external_ids.iteritems():
h('<tr>')
h('<td style="margin-top:5px; width:1px; padding-right:2px;">%s:</td>' % key)
h('<td style="padding-left:5px;width:1px;">')
for i, item in enumerate(value):
if i == 0:
h('%s' % item)
else:
h('; %s' % item)
h('</td>')
h('</tr>')
h('</table>')
else:
h('%s' % ('No external ids found',))
return html
def tmpl_choose_profile_start(self):
'''
Shadows the behaviour of tmpl_search_pagestart
'''
return '<div class="pagebody"><div class="pagebodystripemiddle">'
def tmpl_choose_profile_footer(self):
return ('<br>In case you don\'t find the correct match or your profile is already taken, please contact us here: <a rel="nofollow" href="mailto:%s">%s</a></p>'
% (CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL))
def tmpl_probable_profile_suggestion(self, probable_profile_suggestion_info, last_viewed_profile_suggestion_info, search_param):
    '''
    Suggest the most likely profile that the user can be based on his papers
    in external systems that is logged in through.

    Fixes applied: the <button> elements are now properly closed, and the
    suggestion message is %-formatted *after* translation so the gettext
    catalog lookup can match the msgid.
    '''
    html = []
    h = html.append
    last_viewed_profile_message = self._("The following profile is the one you were viewing before logging in: ")
    # if the user has searched then his choice should be remembered in case the chosen profile is not available
    param = ''
    if search_param:
        param = '&search_param=' + search_param
    h('<ul>')
    if probable_profile_suggestion_info:
        # Translate first, then interpolate the paper counts.
        probable_profile_message = self._("Out of %s paper(s) claimed to your arXiv account, %s match this profile: ") % \
            (probable_profile_suggestion_info['num_of_arXiv_papers'],
             probable_profile_suggestion_info['num_of_recids_intersection'])
        h('<li>')
        h('%s %s ' % (probable_profile_message, probable_profile_suggestion_info['name_string']))
        h('<a href="%s/author/profile/%s" target="_blank"> %s </a>' % (CFG_SITE_URL, probable_profile_suggestion_info['canonical_id'],
                                                                       probable_profile_suggestion_info['canonical_name_string']))
        # '</button>' was missing, leaving the button element unclosed.
        h('<a rel="nofollow" href="%s/author/claim/action?associate_profile=True&pid=%s%s" class="confirmlink"><button type="button">%s</button></a>'
          % (CFG_SITE_URL, str(probable_profile_suggestion_info['pid']), param, 'This is my profile'))
        h('</li>')
    if last_viewed_profile_suggestion_info:
        h('<li>')
        h('%s %s ' % (last_viewed_profile_message, last_viewed_profile_suggestion_info['name_string']))
        h('<a href="%s/author/profile/%s" target="_blank"> %s </a>' % (CFG_SITE_URL, last_viewed_profile_suggestion_info['canonical_id'],
                                                                       last_viewed_profile_suggestion_info['canonical_name_string']))
        h('<a rel="nofollow" href="%s/author/claim/action?associate_profile=True&pid=%s%s" class="confirmlink"><button type="button">%s</button></a>'
          % (CFG_SITE_URL, str(last_viewed_profile_suggestion_info['pid']), param, 'This is my profile'))
        h('</li>')
    h("</ul>")
    message = self._("If none of the options suggested above apply, you can look for other possible options from the list below:")
    h('<p>%s</p>' % (message,))
    h('</br>')
    return "\n".join(html)
def tmpl_claim_stub(self, person='-1'):
    '''
    Render the claim "stub" page offering arXiv login or guest access.

    @param person: person id used to build the guest-claim URL
    @return: HTML string
    '''
    lines = [
        ' <ul><li><a rel="nofollow" href=%s> Login through arXiv.org </a> <small>' % bconfig.BIBAUTHORID_CFG_INSPIRE_LOGIN,
        ' - Use this option if you have an arXiv account and have claimed your papers in arXiv.',
        '(If you login through arXiv.org, INSPIRE will immediately verify you as an author and process your claimed papers.) </small><br><br>',
        ' <li><a rel="nofollow" href=%s/author/claim/%s?open_claim=True> Continue as a guest </a> <small>' % (CFG_SITE_URL, person),
        ' - Use this option if you DON\'T have an arXiv account, or you have not claimed any paper in arXiv.',
        '(If you login as a guest, INSPIRE will need to confirm you as an author before processing your claimed papers.) </small><br><br>',
        'If you login through arXiv.org we can verify that you are the author of these papers and accept your claims rapidly, '
        'as well as adding additional claims from arXiv. <br>If you choose not to login via arXiv your changes will '
        'be publicly visible only after our editors check and confirm them, usually a few days.<br> '
        'Either way, claims made on behalf of another author will go through our staff and may take longer to display. '
        'This applies as well to papers which have been previously claimed, by yourself or someone else.',
    ]
    return "\n".join(lines)
def tmpl_welcome_end(self):
    '''
    Close the page-body markup (mirrors tmpl_search_pageend).
    '''
    closing_markup = '</div></div>'
    return closing_markup
def tmpl_choose_profile(self, failed):
    '''
    SSO landing/choose_profile page.

    @param failed: True when a previously chosen profile turned out to be
        unavailable; selects the apology variant of the page text.
    '''
    if failed:
        parts = [
            '<p><strong><font color="red">Unfortunately the profile you chose is no longer available.</font></strong></p>',
            '<p>We apologise for the inconvenience. Please select another one.</br>Keep in mind that you can create an empty profile and then claim all of your papers in it.',
        ]
    else:
        parts = [
            '<p><b>You have now successfully logged in via arXiv.org, please choose your profile among these suggestions: </b></p>',
        ]
    return "\n".join(parts)
def tmpl_tickets_admin(self, tickets=None):
    '''
    Open tickets short overview for operators.

    @param tickets: sequence of (longname, cname, pid, num) tuples;
        defaults to no tickets
    @return: HTML string
    '''
    # ``tickets=None`` avoids the mutable-default-argument pitfall of the
    # previous ``tickets=[]`` signature (backward compatible for callers).
    if tickets is None:
        tickets = []
    html = []
    h = html.append
    if tickets:
        h('List of open tickets: <br><br>')
        for t in tickets:
            h('<a rel="nofollow" href=%(cname)s#tabTickets> %(longname)s - (%(cname)s - PersonID: %(pid)s): %(num)s open tickets. </a><br>'
              % ({'cname': str(t[1]), 'longname': str(t[0]), 'pid': str(t[2]), 'num': str(t[3])}))
    else:
        h('There are currently no open tickets.')
    return "\n".join(html)
def tmpl_update_hep_name_headers(self):
    """
    Headers used for the hepnames update form.

    Returns the inline <style> rules and the <script> include required by
    the form built in tmpl_update_hep_name; the markup below is emitted
    into the page verbatim.
    """
    html = []
    # Raw string: the CSS percent signs must not be %-interpolated.
    html.append(r"""<style type="text/css">
.form1
{
margin-left: auto;
margin-right: auto;
}
#tblGrid {
margin-left: 5%;
}
#tblGrid td {
padding-left: 60px;
}
.form2
{
margin-left: 15%;
margin-right: 30%;
}
.span_float_right
{
float:right;
}
.span_float_left
{
float:left;
}
</style>
<script type="text/javascript" src="/js/hepname_update.js"></script>
""")
    return "\n".join(html)
def tmpl_update_hep_name(self, full_name, display_name, email,
                         status, research_field_list,
                         institution_list, phd_advisor_list,
                         experiment_list, web_page):
    """
    Create form to update a hep name.

    Builds the HepNames correction form, pre-filled with the author's
    current data, that is submitted to the SLAC form-mail endpoint.
    User-supplied values are escaped with xml.sax.saxutils.quoteattr
    before being embedded as HTML attribute values.

    @param full_name: author's full name
    @param display_name: preferred display name
    @param email: contact email (also used as the public email default)
    @param status: one of 'ACTIVE'/'RETIRED'/'DEPARTED'/'DECEASED'
    @param research_field_list: field codes whose checkboxes get pre-checked
    @param institution_list: affiliation entries; each is indexed below as
        (name, rank, start_year, end_year, current_flag) -- assumed from
        the indexing, confirm against callers
    @param phd_advisor_list: up to two advisor names (extras are ignored)
    @param experiment_list: experiment codes to pre-select
    @param web_page: author's web page URL
    @return: the complete form HTML as a single string
    """
    # Prepare parameters
    try:
        phd_advisor = phd_advisor_list[0]
    except IndexError:
        phd_advisor = ''
    try:
        phd_advisor2 = phd_advisor_list[1]
    except IndexError:
        phd_advisor2 = ''
    # At most one of these receives the 'selected' attribute below.
    is_active = is_retired = is_departed = is_deceased = ''
    if status == 'ACTIVE':
        is_active = 'selected'
    elif status == 'RETIRED':
        is_retired = 'selected'
    if status == 'DEPARTED':
        is_departed = 'selected'
    if status == 'DECEASED':
        is_deceased = 'selected'
    research_field_html = """
<TD><INPUT TYPE=CHECKBOX VALUE=ACC-PHYS name=field>acc-phys</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=ASTRO-PH name=field>astro-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=ATOM-PH name=field>atom-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=CHAO-DYN name=field>chao-dyn</TD></TR>
<tr><TD><INPUT TYPE=CHECKBOX VALUE=CLIMATE name=field>climate</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=COMP name=field>comp</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=COND-MAT name=field>cond-mat</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=GENL-TH name=field>genl-th</TD></TR>
<tr><TD><INPUT TYPE=CHECKBOX VALUE=GR-QC name=field>gr-qc</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-EX name=field>hep-ex</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-LAT name=field>hep-lat</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-PH name=field>hep-ph</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-TH name=field>hep-th</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=INSTR name=field>instr</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=LIBRARIAN name=field>librarian</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=MATH name=field>math</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=MATH-PH name=field>math-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=MED-PHYS name=field>med-phys</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=NLIN name=field>nlin</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=NUCL-EX name=field>nucl-ex</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=NUCL-TH name=field>nucl-th</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=PHYSICS name=field>physics</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=PLASMA-PHYS name=field>plasma-phys</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=Q-BIO name=field>q-bio</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=QUANT-PH name=field>quant-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=SSRL name=field>ssrl</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=OTHER name=field>other</TD>
"""
    # Pre-check the checkboxes of the research fields already on record
    # (plain string substitution on the VALUE= attribute).
    for research_field in research_field_list:
        research_field_html = research_field_html.replace('VALUE=' + research_field, 'checked ' + 'VALUE=' + research_field)
    institutions_html = ""
    institution_num = 1
    # One table row per known affiliation; the numeric suffix ties the
    # inputs of a row together (inst1/rank1/sy1/ey1/current1, ...).
    for institution_entry in institution_list:
        institution = """
<tr>
<td> </td>
<td class="cell_padding"><input name="aff.str" type="hidden">
<input type="text" name="inst%(institution_num)s" size="35" value =%(institution_name)s /></td>
<td class="cell_padding"><select name="rank%(institution_num)s">
<option selected value=''> </option>
<option value='SENIOR'>Senior(permanent)</option>
<option value='JUNIOR'>Junior(leads to Senior)</option>
<option value='STAFF'>Staff(non-research)</option>
<option value='VISITOR'>Visitor</option>
<option value='PD'>PostDoc</option>
<option value='PHD'>PhD</option>
<option value='MAS'>Masters</option>
<option value='UG'>Undergrad</option></select></td>
<TD class="cell_padding"><INPUT TYPE="TEXT" value=%(start_year)s name="sy%(institution_num)s" SIZE="4"/>
<INPUT TYPE="TEXT" value=%(end_year)s name="ey%(institution_num)s" SIZE="4"/></TD>
<TD class="cell_padding"> <INPUT TYPE=CHECKBOX VALUE='Y' name="current%(institution_num)s">
<input type="button" value="Delete row" class="formbutton" onclick="removeRow(this);" />
</td>
</tr>
""" % {'institution_name': xml.sax.saxutils.quoteattr(institution_entry[0]),
       'start_year': xml.sax.saxutils.quoteattr(institution_entry[2]),
       'end_year': xml.sax.saxutils.quoteattr(institution_entry[3]),
       'institution_num': institution_num
       }
        institution_num += 1
        # Pre-select the rank stored for this affiliation.
        institution = institution.replace('value=' + '\'' + institution_entry[1] + '\'', 'selected ' + 'VALUE=' + institution_entry[1])
        # Tick the "Current" checkbox when the affiliation is ongoing.
        if institution_entry[4] == 'Current':
            institution = institution.replace("VALUE='Y'", 'checked ' + "VALUE='Y'")
        institutions_html += institution
    # occcnt seeds the row counter used by hepname_update.js (addRow).
    institutions_html += "<script>occcnt = %s; </script>" % (institution_num-1)
    experiments_html = """
<select name=exp id=exp multiple=yes>
<option value=""> </option>
<option value=AMANDA>AMANDA</option>
<option value=AMS>AMS</option>
<option value=ANTARES>ANTARES</option>
<option value=AUGER>AUGER</option>
<option value=BAIKAL>BAIKAL</option>
<option value=BNL-E-0877>BNL-E-0877</option>
<option value=BNL-LEGS>BNL-LEGS</option>
<option value=BNL-RHIC-BRAHMS>BNL-RHIC-BRAHMS</option>
<option value=BNL-RHIC-PHENIX>BNL-RHIC-PHENIX</option>
<option value=BNL-RHIC-PHOBOS>BNL-RHIC-PHOBOS</option>
<option value=BNL-RHIC-STAR>BNL-RHIC-STAR</option>
<option value=CDMS>CDMS</option>
<option value=CERN-LEP-ALEPH>CERN-LEP-ALEPH</option>
<option value=CERN-LEP-DELPHI>CERN-LEP-DELPHI</option>
<option value=CERN-LEP-L3>CERN-LEP-L3</option>
<option value=CERN-LEP-OPAL>CERN-LEP-OPAL</option>
<option value=CERN-LHC-ALICE>CERN-LHC-ALICE</option>
<option value=CERN-LHC-ATLAS>CERN-LHC-ATLAS</option>
<option value=CERN-LHC-B>CERN-LHC-B</option>
<option value=CERN-LHC-CMS>CERN-LHC-CMS</option>
<option value=CERN-LHC-LHCB>CERN-LHC-LHCB</option>
<option value=CERN-NA-060>CERN-NA-060</option>
<option value=CERN-NA-061>CERN-NA-061</option>
<option value=CERN-NA-062>CERN-NA-062</option>
<option value=CERN-PS-214>CERN-PS-214 (HARP)</option>
<option value=CESR-CLEO>CESR-CLEO</option>
<option value=CESR-CLEO-C>CESR-CLEO-C</option>
<option value=CESR-CLEO-II>CESR-CLEO-II</option>
<option value=CHIMERA>CHIMERA</option>
<option value=COBRA>COBRA</option>
<option value=COSY-ANKE>COSY-ANKE</option>
<option value=CUORE>CUORE</option>
<option value=COUPP>COUPP</option>
<option value=DAYA-BAY>DAYA-BAY</option>
<option value=DESY-DORIS-ARGUS>DESY-DORIS-ARGUS</option>
<option value=DESY-HERA-B>DESY-HERA-B</option>
<option value=DESY-HERA-H1>DESY-HERA-H1</option>
<option value=DESY-HERA-HERMES>DESY-HERA-HERMES</option>
<option value=DESY-HERA-ZEUS>DESY-HERA-ZEUS</option>
<option value=DESY-PETRA-MARK-J>DESY-PETRA-MARK-J</option>
<option value=DESY-PETRA-PLUTO-2>DESY-PETRA-PLUTO-2</option>
<option value=DESY-PETRA-TASSO>DESY-PETRA-TASSO</option>
<option value=DOUBLE-CHOOZ>DOUBLE-CHOOZ</option>
<option value=DRIFT>DRIFT</option>
<option value=EXO>EXO</option>
<option value=FERMI-LAT>FERMI-LAT</option>
<option value=FNAL-E-0687>FNAL-E-0687</option>
<option value=FNAL-E-0690>FNAL-E-0690</option>
<option value=FNAL-E-0706>FNAL-E-0706</option>
<option value=FNAL-E-0740>FNAL-E-0740 (D0 Run I)</option>
<option value=FNAL-E-0741>FNAL-E-0741 (CDF Run I)</option>
<option value=FNAL-E-0799>FNAL-E-0799 (KTeV)</option>
<option value=FNAL-E-0815>FNAL-E-0815 (NuTeV)</option>
<option value=FNAL-E-0823>FNAL-E-0823 (D0 Run II)</option>
<option value=FNAL-E-0830>FNAL-E-0830 (CDF Run II)</option>
<option value=FNAL-E-0831>FNAL-E-0831 (FOCUS)</option>
<option value=FNAL-E-0832>FNAL-E-0832 (KTeV)</option>
<option value=FNAL-E-0872>FNAL-E-0872 (DONUT)</option>
<option value=FNAL-E-0875>FNAL-E-0875 (MINOS)</option>
<option value=FNAL-E-0886>FNAL-E-0886 (FNPL)</option>
<option value=FNAL-E-0892>FNAL-E-0892 (USCMS)</option>
<option value=FNAL-E-0898>FNAL-E-0898 (MiniBooNE)</option>
<option value=FNAL-E-0904>FNAL-E-0904 (MUCOOL)</option>
<option value=FNAL-E-0906>FNAL-E-0906 (NuSea)</option>
<option value=FNAL-E-0907>FNAL-E-0907 (MIPP)</option>
<option value=FNAL-E-0907>FNAL-E-0918 (BTeV)</option>
<option value=FNAL-E-0907>FNAL-E-0973 (Mu2e)</option>
<option value=FNAL-E-0937>FNAL-E-0937 (FINeSSE)</option>
<option value=FNAL-E-0938>FNAL-E-0938 (MINERvA)</option>
<option value=FNAL-E-0954>FNAL-E-0954 (SciBooNE)</option>
<option value=FNAL-E-0961>FNAL-E-0961 (COUPP)</option>
<option value=FNAL-E-0974>FNAL-E-0974</option>
<option value=FNAL-LC>FNAL-LC</option>
<option value=FNAL-P-0929>FNAL-P-0929 (NOvA)</option>
<option value=FNAL-T-0962>FNAL-T-0962 (ArgoNeuT)</option>
<option value=FRASCATI-DAFNE-KLOE>FRASCATI-DAFNE-KLOE</option>
<option value=FREJUS-NEMO-3>FREJUS-NEMO-3</option>
<option value=GERDA>GERDA</option>
<option value=GSI-HADES>GSI-HADES</option>
<option value=GSI-SIS-ALADIN>GSI-SIS-ALADIN</option>
<option value=HARP>HARP</option>
<option value=HESS>HESS</option>
<option value=ICECUBE>ICECUBE</option>
<option value=ILC>ILC</option>
<option value=JLAB-E-01-104>JLAB-E-01-104</option>
<option value=KAMLAND>KAMLAND</option>
<option value=KASCADE-GRANDE>KASCADE-GRANDE</option>
<option value=KATRIN>KATRIN</option>
<option value=KEK-BF-BELLE>KEK-BF-BELLE</option>
<option value=KEK-BF-BELLE-II>KEK-BF-BELLE-II</option>
<option value=KEK-T2K>KEK-T2K</option>
<option value=LBNE>LBNE</option>
<option value=LIGO>LIGO</option>
<option value=LISA>LISA</option>
<option value=LSST>LSST</option>
<option value=MAGIC>MAGIC</option>
<option value=MAJORANA>MAJORANA</option>
<option value=MICE>MICE</option>
<option value=PICASSO>PICASSO</option>
<option value=PLANCK>PLANCK</option>
<option value=SDSS>SDSS</option>
<option value=SIMPLE>SIMPLE</option>
<option value=SLAC-PEP2-BABAR>SLAC-PEP2-BABAR</option>
<option value=SNAP>SNAP</option>
<option value=SSCL-GEM>SSCL-GEM</option>
<option value=SUDBURY-SNO>SUDBURY-SNO</option>
<option value=SUDBURY-SNO+>SUDBURY-SNO+</option>
<option value=SUPER-KAMIOKANDE>SUPER-KAMIOKANDE</option>
<option value=VERITAS>VERITAS</option>
<option value=VIRGO>VIRGO</option>
<option value=WASA-COSY>WASA-COSY</option>
<option value=WMAP>WMAP</option>
<option value=XENON>XENON</option>
</select>
"""
    # Pre-select the experiments already on record.
    for experiment in experiment_list:
        experiments_html = experiments_html.replace('value=' + experiment, 'selected ' + 'value=' + experiment)
    html = []
    # The whole form is one %-formatted template; all substituted values
    # are either quoteattr-escaped or the pre-built fragments above.
    html.append("""<H4>Changes to Existing Records</H4>
<P>Send us your details (or someone else's). See our <a href="http://www.slac.stanford.edu/spires/hepnames/help/adding.shtml">help
for additions</A>.<BR>If something doesnt fit in the form, just put it in
the comments section.</P>
<FORM name="hepnames_addition"
onSubmit="return OnSubmitCheck();"
action=http://www.slac.stanford.edu/cgi-bin/form-mail.pl
method=post><INPUT type=hidden value=nowhere name=to id=tofield>
<INPUT type=hidden value="New HEPNames Posting" name=subject> <INPUT
type=hidden value=2bsupplied name=form_contact id=formcont> <INPUT
type=hidden value=/spires/hepnames/hepnames_msgupd.file name=email_msg_file>
<INPUT type=hidden value=/spires/hepnames/hepnames_resp_msg.file
name=response_msg_file><INPUT type=hidden value=0 name=debug>
<INPUT type=hidden value="1095498" name=key>
<INPUT type=hidden value="" name=field>
<INPUT type=hidden value="" name=current1>
<INPUT type=hidden value="" name=inst2><INPUT type=hidden value="" name=rank2>
<INPUT type=hidden value="" name=ey2><INPUT type=hidden value="" name=sy2>
<INPUT type=hidden value="" name=current2>
<INPUT type=hidden value="" name=inst3><INPUT type=hidden value="" name=rank3>
<INPUT type=hidden value="" name=ey3><INPUT type=hidden value="" name=sy3>
<INPUT type=hidden value="" name=current3>
<INPUT type=hidden value="" name=inst4><INPUT type=hidden value="" name=rank4>
<INPUT type=hidden value="" name=ey4><INPUT type=hidden value="" name=sy4>
<INPUT type=hidden value="" name=current4>
<INPUT type=hidden value="" name=inst5><INPUT type=hidden value="" name=rank5>
<INPUT type=hidden value="" name=ey5><INPUT type=hidden value="" name=sy5>
<INPUT type=hidden value="" name=current5>
<INPUT type=hidden value="" name=inst7><INPUT type=hidden value="" name=rank7>
<INPUT type=hidden value="" name=ey7><INPUT type=hidden value="" name=sy7>
<INPUT type=hidden value="" name=current7>
<INPUT type=hidden value="" name=inst6><INPUT type=hidden value="" name=rank6>
<INPUT type=hidden value="" name=ey6><INPUT type=hidden value="" name=sy6>
<INPUT type=hidden value="" name=current6>
<INPUT type=hidden value="" name=inst8><INPUT type=hidden value="" name=rank8>
<INPUT type=hidden value="" name=ey8><INPUT type=hidden value="" name=sy8>
<INPUT type=hidden value="" name=current8>
<INPUT type=hidden value="" name=inst9><INPUT type=hidden value="" name=rank9>
<INPUT type=hidden value="" name=ey9><INPUT type=hidden value="" name=sy9>
<INPUT type=hidden value="" name=current9>
<INPUT type=hidden value="" name=inst10><INPUT type=hidden value="" name=rank10>
<INPUT type=hidden value="" name=ey10><INPUT type=hidden value="" name=sy10>
<INPUT type=hidden value="" name=current10>
<INPUT type=hidden value="" name=inst11><INPUT type=hidden value="" name=rank11>
<INPUT type=hidden value="" name=ey11><INPUT type=hidden value="" name=sy11>
<INPUT type=hidden value="" name=current11>
<INPUT type=hidden value="" name=inst12><INPUT type=hidden value="" name=rank12>
<INPUT type=hidden value="" name=ey12><INPUT type=hidden value="" name=sy12>
<INPUT type=hidden value="" name=current12>
<INPUT type=hidden value="" name=inst13><INPUT type=hidden value="" name=rank13>
<INPUT type=hidden value="" name=ey13><INPUT type=hidden value="" name=sy13>
<INPUT type=hidden value="" name=current13>
<INPUT type=hidden value="" name=inst14><INPUT type=hidden value="" name=rank14>
<INPUT type=hidden value="" name=ey14><INPUT type=hidden value="" name=sy14>
<INPUT type=hidden value="" name=current14>
<INPUT type=hidden value="" name=inst15><INPUT type=hidden value="" name=rank15>
<INPUT type=hidden value="" name=ey15><INPUT type=hidden value="" name=sy15>
<INPUT type=hidden value="" name=current15>
<INPUT type=hidden value="" name=inst17><INPUT type=hidden value="" name=rank17>
<INPUT type=hidden value="" name=ey17><INPUT type=hidden value="" name=sy17>
<INPUT type=hidden value="" name=current17>
<INPUT type=hidden value="" name=inst16><INPUT type=hidden value="" name=rank16>
<INPUT type=hidden value="" name=ey16><INPUT type=hidden value="" name=sy16>
<INPUT type=hidden value="" name=current16>
<INPUT type=hidden value="" name=inst18><INPUT type=hidden value="" name=rank18>
<INPUT type=hidden value="" name=ey18><INPUT type=hidden value="" name=sy18>
<INPUT type=hidden value="" name=current18>
<INPUT type=hidden value="" name=inst19><INPUT type=hidden value="" name=rank19>
<INPUT type=hidden value="" name=ey19><INPUT type=hidden value="" name=sy19>
<INPUT type=hidden value="" name=current19>
<INPUT type=hidden value="" name=inst20><INPUT type=hidden value="" name=rank20>
<INPUT type=hidden value="" name=ey20><INPUT type=hidden value="" name=sy20>
<INPUT type=hidden value="" name=current20>
<INPUT type=hidden value="today" name=DV>
<TABLE class=form1>
<TBODY>
<TR>
<TD><STRONG>Full name</STRONG></TD>
<TD><INPUT SIZE=24 value=%(full_name)s name=authorname> <FONT SIZE=2>E.G.
Lampen, John Francis</FONT> </TD></TR>
<TR>
<TD><STRONG>Display Name</STRONG></TD>
<TD><INPUT SIZE=24 value=%(display_name)s name='dispname'> <FONT SIZE=2>E.G.
LampC)n, John </FONT><//TD></TR>
<TR>
<TD><STRONG> Your Email</STRONG></TD>
<TD><INPUT SIZE=24 value=%(email)s name='username' ID='username'><FONT SIZE=2>(<STRONG>REQ'D
</strong> but not displayed - contact only)</font> </TD></TR>
<TR>
<TD><STRONG>Email </STRONG>(Public)</TD>
<TD><INPUT SIZE=24 value=%(email_public)s name='email' id='email'>
<input type='button' value='Same as Above' class='formbutton' onclick='copyem();'/>
</TD></TR><tr><TD><STRONG>Status</STRONG></TD><TD>
<SELECT NAME=status>
<OPTION %(is_active)s value=ACTIVE>Active</OPTION>
<OPTION %(is_retired)s value=RETIRED>Retired</OPTION>
<OPTION %(is_departed)s value=DEPARTED>Departed</OPTION>
<OPTION %(is_deceased)s value=DECEASED>Deceased</OPTION>
</SELECT></TD></TR>
<tr><TD><STRONG>Field of research</STRONG></TD><td> <table><tbody><tr>
%(research_field_html)s
</TR></TBODY></TABLE></TD></TR>
<table id="tblGrid" >
<tr>
<td> </td>
<td class="cell_padding"><strong> Institution History</strong><br>
<FONT size=2>Please take this name from <A href="http://inspirehep.net/Institutions"
target=_TOP>Institutions</A><FONT color=red><SUP>*</SUP></FONT></TD>
<td class="cell_padding"><strong>Rank</td>
<td class="cell_padding"><strong>Start Year End Year</td>
<td class="cell_padding"><strong>Current</strong></td>
</tr>
%(institutions_html)s
</table>
<table><tr>
<a href="javascript:addRow();"> Click to add new Institution field row
<img src="/img/rightarrow.gif" ></a></tr></table>
<hr>
<table class="form2"><tbody><tr>
<TD><span class="span_float_right"><STRONG>Ph.D. Advisor</STRONG></span></TD>
<TD><span class="span_float_left"><INPUT SIZE=24 value=%(phd_advisor)s name=Advisor1> <FONT SIZE=2>E.G.
Beacom, John Francis</FONT> </span></TD></TR>
<tr><TD><span class="span_float_right"><STRONG>2nd Ph.D. Advisor</STRONG></span></TD>
<TD><span class="span_float_left"><INPUT SIZE=24 value=%(phd_advisor2)s name=Advisor2> <FONT SIZE=2>E.G.
Beacom, John Francis</FONT> </span></TD></TR>
<TD><span class="span_float_right"><STRONG>Experiments</STRONG></span>
<br /><span class="span_float_right"><FONT size=2>Hold the Control key to choose multiple current or past experiments <br> Experiments not listed can be added in the Comments field below </font></span></td>
<td><span class="span_float_left">
%(experiments_html)s
</span></td></tr>
<TR>
<TD><span class="span_float_right"><STRONG>Your web page</STRONG></span></TD>
<TD><span class="span_float_left"><INPUT SIZE=50 value=%(web)s name= URL></span></TD></TR>
<TR>
<TD><span class="span_float_right">Please send us your <STRONG>Comments</STRONG></span></td>
<TD><span class="span_float_left"><TEXTAREA NAME=Abstract ROWS=3 COLS=30></textarea><FONT SIZE=2>(not displayed)</FONT></span></TD></TR>
<tr><TD> <span class="span_float_right"><font size="1">SPAM Robots have been sending us submissions via this form, in order to prevent this we ask that you confirm that you are a real person by answering this question, which should be
easy for you, and hard for a SPAM robot. Cutting down on the extraneous submissions we get means that we can handle real requests faster.</font></span></td><td><span class="span_float_left">
<script type="text/javascript" src="http://www.slac.stanford.edu/spires/hepnames/spbeat.js">
</SCRIPT><br /><STRONG> How many people in image</STRONG> <SELECT NAME=beatspam ID=beatspam> <OPTION VALUE=""> </OPTION>
<option value="1"> one person</option>
<option value="2"> two people</option><option value="3"> three people</option>
<option value="4"> more than three</option></select></span></td></tr>
</TBODY></TABLE><INPUT type=submit class="formbutton" value="Send Request"><br /><FONT
color=red><SUP>*</SUP></FONT>Institution name should be in the form given
in the <A href="http://inspirehep.net/Institutions"
target=_TOP>INSTITUTIONS</A> database<BR>(e.g. Harvard U. * Paris U.,
VI-VII * Cambridge U., DAMTP * KEK, Tsukuba). </FORM>
""" % {'full_name': xml.sax.saxutils.quoteattr(full_name),
       'display_name': xml.sax.saxutils.quoteattr(display_name),
       'email': xml.sax.saxutils.quoteattr(email),
       'email_public': xml.sax.saxutils.quoteattr(email),
       'phd_advisor': xml.sax.saxutils.quoteattr(phd_advisor),
       'phd_advisor2': xml.sax.saxutils.quoteattr(phd_advisor2),
       'web': xml.sax.saxutils.quoteattr(web_page),
       'is_active': is_active,
       'is_retired': is_retired,
       'is_departed': is_departed,
       'is_deceased': is_deceased,
       'research_field_html': research_field_html,
       'institutions_html': institutions_html,
       'experiments_html': experiments_html
       })
    return "\n".join(html)
# pylint: enable=C0301
def loading_html(self):
    """Return the animated-spinner snippet shown while a box is loading."""
    return '<img src=/img/ui-anim_basic_16x16.gif> Loading...'
def tmpl_personnametitle(self, person_info, ln, loading=False):
    """
    Render the page-title <h1> for a person page.

    @param person_info: dict with 'name' and 'canonical_name' keys
    @param ln: language code; kept for interface compatibility (the
        strings below are not run through the translation machinery)
    @param loading: if True, emit the loading placeholder instead
    @return: HTML string
    """
    # NOTE(review): the original bound ``_ = gettext_set_language(ln)``
    # but never used it; the dead binding has been removed.
    if loading:
        return '<span id="personnametitle">' + self.loading_html() + '</span>'
    if not person_info['name']:
        display_name = " Name not available"
    else:
        display_name = str(person_info['name']) + ' (' + str(person_info['canonical_name']) + ')'
    return ('<h1><span id="personnametitle">%s</span></h1>'
            % (display_name,))
def tmpl_profile_management(self, ln, person_data, arxiv_data, orcid_data, claim_paper_data,
                            int_ids_data, ext_ids_data, autoclaim_data, support_data,
                            merge_data, hepnames_data):
    '''
    SSO landing/manage profile page.

    Renders each information box and lays them out in a two-column grid
    (arXiv / claim-paper / autoclaim on the left; ORCiD / external ids /
    hepnames / merge / support on the right).
    '''
    # Build every box up front, in the same order as before, so any
    # side effects of the box renderers are preserved.
    arxiv_html = self.tmpl_arxiv_box(arxiv_data, ln, loading=False)
    orcid_html = self.tmpl_orcid_box(orcid_data, ln, loading=False)
    claim_html = self.tmpl_claim_paper_box(claim_paper_data, ln, loading=False)
    ext_ids_html = None
    if ext_ids_data:
        ext_ids_html = self.tmpl_ext_ids_box(person_data['pid'], int_ids_data, ext_ids_data, ln, loading=False)
    autoclaim_html = self.tmpl_autoclaim_box(autoclaim_data, ln, loading=True)
    support_html = self.tmpl_support_box(support_data, ln, loading=False)
    merge_html = self.tmpl_merge_box(merge_data, ln, loading=False)
    hepnames_html = self.tmpl_hepnames_box(hepnames_data, ln, loading=False)

    grid = self._grid

    def padded(content):
        # One padded 1x1 cell per box.
        return grid(1, 1, cell_padding=5)(content)

    left_cells = [padded(arxiv_html), padded(claim_html)]
    if not autoclaim_data['hidden']:
        left_cells.append(padded(autoclaim_html))

    right_cells = [padded(orcid_html)]
    if ext_ids_data:
        right_cells.append(padded(ext_ids_html))
    right_cells.extend([padded(hepnames_html), padded(merge_html), padded(support_html)])

    left_column = grid(len(left_cells), 1)(*left_cells)
    right_column = grid(len(right_cells), 1)(*right_cells)
    return grid(1, 2)(left_column, right_column)
def tmpl_print_searchresultbox(self, bid, header, body):
    """
    Print a nicely formatted box for search results.

    @param bid: id attribute for the body cell
    @param header: HTML for the box header (inserted as-is)
    @param body: HTML for the box body (inserted as-is)
    @return: HTML string
    """
    # Only ``bid`` goes through %-formatting; header/body are concatenated
    # so literal '%' characters in them are safe.
    fragments = [
        '<table class="searchresultsbox" ><thead><tr><th class="searchresultsboxheader">',
        header,
        '</th></tr></thead><tbody><tr><td id ="%s" class="searchresultsboxbody">' % bid,
        body,
        '</td></tr></tbody></table>',
    ]
    return ''.join(fragments)
def tmpl_arxiv_box(self, arxiv_data, ln, add_box=True, loading=True):
    """
    Render the "login via arXiv" box for the manage-profile page.

    @param arxiv_data: dict with keys 'login', 'view_own_profile',
        'user_has_pid', 'user_pid' (as read below)
    @param ln: language code for gettext
    @param add_box: if True, wrap the content in a searchresult box
    @param loading: if True, show the loading placeholder instead
    """
    _ = gettext_set_language(ln)
    html_head = _("""<span title="Login through arXiv is needed to verify this is your profile. When you log in your publication list will automatically update with all your arXiv publications.
You may also continue as a guest. In this case your input will be processed by our staff and will take longer to display."><strong> Login with your arXiv.org account </strong></span>""")
    if loading:
        # Short-circuit: previously the full body was built and then
        # discarded whenever ``loading`` was requested.
        html_arxiv = self.loading_html()
    elif arxiv_data['login'] == True:
        if arxiv_data['view_own_profile'] == True:
            html_arxiv = _("You have succesfully logged in via arXiv. </br> You can now manage your profile.</br>")
        elif arxiv_data['user_has_pid']:
            # Logged in, but looking at somebody else's profile.
            html_arxiv = _("You have succesfully logged in via arXiv.</br><div> <font color='red'>However the profile you are viewing is not your profile.</br></br></font>")
            own_profile_link = "%s/author/manage_profile/%s" % (CFG_SITE_URL, arxiv_data['user_pid'])
            own_profile_text = _("Manage your profile")
            html_arxiv += '<span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (own_profile_link, own_profile_text)
        else:
            # Logged in but not yet associated with any person.
            html_arxiv = _("You have succesfully logged in, but </br><div><font color='red'> you are not associated to a person yet. Please use the button below to choose your profile </br></br></font>")
            login_link = '%s/author/choose_profile' % CFG_SITE_URL
            login_text = _("Choose your profile")
            html_arxiv += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (login_link, login_text)
    else:
        html_arxiv = _("Please log in through arXiv to manage your profile.</br>")
        login_link = "https://arxiv.org/inspire_login"
        login_text = _("Login into Inspire through arXiv.org")
        html_arxiv += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (login_link, login_text)
    if add_box:
        return self.tmpl_print_searchresultbox('arxiv', html_head, html_arxiv)
    return html_arxiv
def tmpl_orcid_box(self, orcid_data, ln, add_box=True, loading=True):
    """
    Render the "connect this profile to an ORCiD" box.

    @param orcid_data: dict with keys 'orcids', 'arxiv_login',
        'own_profile', 'add_power', 'pid' (as read below)
    @param ln: language code for gettext
    @param add_box: if True, wrap the content in a searchresult box
    @param loading: if True, show the loading placeholder instead
    """
    _ = gettext_set_language(ln)
    html_head = _(""" <span title="ORCiD (Open Researcher and Contributor ID) is a unique researcher identifier that distinguishes you from other researchers.
It holds a record of all your research activities. You can add your ORCiD to all your works to make sure they are associated with you. ">
<strong> Connect this profile to an ORCiD </strong> <span>""")
    html_orcid = ""
    if orcid_data['orcids']:
        # NOTE(review): the %-formatting happens *inside* _() here, so the
        # already-formatted string cannot match a translation catalog entry.
        html_orcid += _('This profile is already connected to the following ORCiD: <strong>%s</strong></br>' % (",".join(orcid_data['orcids']),))
        if orcid_data['arxiv_login'] and orcid_data['own_profile']:
            # Owner viewing their own connected profile: offer import/visit.
            html_orcid += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % ("%s/author/manage_profile/import_orcid_pubs" % CFG_SITE_SECURE_URL, _("Import your publications from ORCID") )
            html_orcid += '<br><br><span class=\"bsw\"><a rel="nofollow" href="http://orcid.org/%s" class="btn">%s</a></span>' % (orcid_data['orcids'][0], _("Visit your profile in ORCID") )
    else:
        html_orcid += "This profile has not been connected to an ORCiD account yet. "
        if orcid_data['arxiv_login'] and (orcid_data['own_profile'] or orcid_data['add_power']):
            # Owner (or privileged user): offer the OAuth connect flow.
            add_link = "%s/youraccount/oauth2?provider=%s" % (CFG_SITE_URL, 'orcid')
            add_text = _("Connect an ORCiD to this profile")
            html_orcid += '<br><br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (add_link, add_text)
        else:
            # Other visitors may only *suggest* an ORCiD; the submission is
            # handled client-side via the #orcid_suggestion button.
            suggest_text = _("Suggest an ORCiD for this profile:")
            html_orcid += '<br><br><span class=\"bsw\"> %s <br> <br>' % suggest_text
            html_orcid += '<form class="form-inline"><div class="input-append"><input class="input-xlarge" id="suggested_orcid" type="text">'
            html_orcid += ' <a id="orcid_suggestion" class="btn" href="#">'
            html_orcid += '<span class="pid hidden">%s</span>%s</a></div></form>' % (orcid_data['pid'], 'Submit Suggestion')
            # Earlier, form-based implementation of the suggestion flow,
            # kept for reference:
            #html_orcid += '<form method="GET" action="%s/author/manage_profile/suggest_orcid" rel="nofollow">' % CFG_SITE_URL
            #html_orcid += '<input name="orcid" id="orcid" type="text" style="border:1px solid #333; width:300px;"/>'
            #html_orcid += '<input type="hidden" name="pid" value="%s">' % orcid_data['pid']
            #html_orcid += '<input type="submit" class="btn" value="%s"> </form>' % ('Submit suggestion',)
            #html_orcid += '<a rel="nofollow" href="%s" class="btn">%s</a>' % (suggest_link, suggest_text)
            html_orcid += '</span>'
    if loading:
        # The content built above is discarded while the box is loading.
        html_orcid = self.loading_html()
    if add_box:
        orcid_box = self.tmpl_print_searchresultbox('orcid', html_head, html_orcid)
        return orcid_box
    else:
        return html_orcid
def tmpl_claim_paper_box(self, claim_paper_data, ln, add_box=True, loading=True):
    """Render the 'Manage publications' call-to-action box.

    :param claim_paper_data: dict; must contain 'canonical_id' of the profile.
    :param ln: language code for gettext.
    :param add_box: wrap the content in a search-result box when True.
    :param loading: show the loading placeholder instead of the real content.
    :return: HTML string (boxed or bare, depending on add_box).
    """
    _ = gettext_set_language(ln)
    box_title = _("""<span title="When you add more publications you make sure your publication list and citations appear correctly on your profile.
    You can also assign publications to other authors. This will help INSPIRE provide more accurate publication and citation statistics. "><strong> Manage publications </strong><span>""")
    # Build the claim link unconditionally, as the original did (a missing
    # 'canonical_id' key raises even while loading).
    manage_url = "%s/author/claim/%s?open_claim=True" % (CFG_SITE_URL, claim_paper_data['canonical_id'])
    manage_label = _("Manage publication list")
    if loading:
        body = self.loading_html()
    else:
        body = 'Assign publications to your INSPIRE profile to keep it up to date. </br></br> <span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (manage_url, manage_label)
    if not add_box:
        return body
    return self.tmpl_print_searchresultbox('claim_paper', box_title, body)
def tmpl_ext_ids_box(self, personid, int_ids_data, ext_ids_data, ln, add_box=True, loading=True):
    """Render the (admin-oriented) box listing a person's internal and
    external identifiers, with forms to add/remove/suggest ids.

    :param personid: numeric person id, shown verbatim in the box.
    :param int_ids_data: dict with 'uid' and 'old_uids' (internal user ids).
    :param ext_ids_data: dict with 'person_id', 'user_pid', 'ulevel' and
        optionally 'ext_ids' ({tag: [values]}).
    :param ln: language code for gettext.
    :param add_box: wrap the content in a search-result box when True.
    :param loading: append the loading placeholder when True.
    """
    _ = gettext_set_language(ln)
    html_head = _("<strong> Person identifiers, internal and external </strong>")
    html_ext_ids = 'This is personID: %s <br>' % personid
    html_ext_ids += '<span class=\"bsw\"><div> <strong> External ids: </strong><br>'
    # if the user has permission to add/remove ids, in other words if the
    # profile is his or he is admin, the forms act directly; otherwise they
    # only file suggestions (note the different action parameter names).
    if ext_ids_data['person_id'] == ext_ids_data['user_pid'] or ext_ids_data['ulevel'] == "admin":
        add_text = _('add external id')
        add_parameter = 'add_external_id'
        remove_text = _('delete selected ids')
        remove_parameter = 'delete_external_ids'
        add_missing_text = _('Harvest missing external ids from claimed papers')
        add_missing_parameter = 'add_missing_external_ids'
    else:
        add_text = _('suggest external id to add')
        add_parameter = 'suggest_external_id_to_add'
        remove_text = _('suggest selected ids to delete')
        remove_parameter = 'suggest_external_ids_to_delete'
        add_missing_text = _('suggest missing ids')
        add_missing_parameter = 'suggest_missing_external_ids'
    # Form 1: harvest/suggest ids missing from the profile.
    html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
    html_ext_ids += '<input type="hidden" name="%s" value="True">' % (add_missing_parameter,)
    html_ext_ids += '<input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
    html_ext_ids += '<br> <input type="submit" class="btn" value="%s"> </form>' % (add_missing_text,)
    if 'ext_ids' in ext_ids_data and ext_ids_data['ext_ids']:
        # Form 2: checkbox list of the existing external ids for removal.
        html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
        html_ext_ids += ' <input type="hidden" name="%s" value="True">' % (remove_parameter,)
        html_ext_ids += ' <input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
        for key in ext_ids_data['ext_ids']:
            try:
                # Reverse lookup: map the stored tag back to its human-readable
                # system name; empty string when the tag is unknown.
                sys = [system for system in PERSONID_EXTERNAL_IDENTIFIER_MAP if PERSONID_EXTERNAL_IDENTIFIER_MAP[system] == key][0]
            except (IndexError):
                sys = ''
            for id_value in ext_ids_data['ext_ids'][key]:
                # Checkbox value encodes tag and value as 'tag||value'.
                html_ext_ids += '<br> <input type="checkbox" name="existing_ext_ids" value="%s||%s"> <strong> %s: </strong> %s' % (key, id_value, sys, id_value)
        html_ext_ids += ' <br> <br> <input type="submit" class="btn" value="%s"> <br> </form>' % (remove_text,)
    else:
        html_ext_ids += 'UserID: There are no external users associated to this profile!'
    html_ext_ids += '<br> <br>'
    # Form 3: add/suggest one new external id, picked from the known systems.
    html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
    html_ext_ids += ' <input type="hidden" name="%s" value="True">' % (add_parameter,)
    html_ext_ids += ' <input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
    html_ext_ids += ' <select name="ext_system">'
    # NOTE(review): uses self._ here while the rest of the method uses the
    # local _ from gettext_set_language -- presumably equivalent; confirm.
    html_ext_ids += ' <option value="" selected>-- ' + self._('Choose system') + ' --</option>'
    for el in PERSONID_EXTERNAL_IDENTIFIER_MAP:
        html_ext_ids += ' <option value="%s"> %s </option>' % (PERSONID_EXTERNAL_IDENTIFIER_MAP[el], el)
    html_ext_ids += ' </select>'
    html_ext_ids += ' <input type="text" name="ext_id" id="ext_id" style="border:1px solid #333; width:350px;">'
    html_ext_ids += ' <input type="submit" class="btn" value="%s" >' % (add_text,)
    # html_ext_ids += '<br>NOTE: please note that if you add an external id it will replace the previous one (if any).')
    html_ext_ids += '<br> </form> </div></span>'
    # Internal (Inspire) user id section, including the admin "steal uid" form.
    html_ext_ids += '<br> <div> <strong> Inspire user ID: </strong> <br>'
    html_ext_ids += "Current user id: %s <br>" % repr(int_ids_data['uid'])
    html_ext_ids += "Previous user ids: %s <br> " % repr(int_ids_data['old_uids'])
    html_ext_ids += '<br>'
    html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
    html_ext_ids += ' <input type="text" name="uid" id="uid" style="border:1px solid #333; width:350px;">'
    html_ext_ids += ' <input type="hidden" name="%s" value="True">' % ('set_uid',)
    html_ext_ids += ' <input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
    html_ext_ids += ' <input type="submit" class="btn" value="%s"> </form>' % ('Set (steal!) user id',)
    html_ext_ids += '</div>'
    if loading:
        # Unlike the sibling boxes, the spinner is *appended* to the content
        # here rather than replacing it.
        html_ext_ids += self.loading_html()
    if add_box:
        ext_ids_box = self.tmpl_print_searchresultbox('external_ids', html_head, html_ext_ids)
        return ext_ids_box
    else:
        return html_ext_ids
# for ajax requests add_box and loading are false
def tmpl_autoclaim_box(self, autoclaim_data, ln, add_box=True, loading=True):
    """Render the 'Automatically assigned publications' box.

    :param autoclaim_data: dict with at least 'hidden', 'num_of_claims',
        and optionally 'successfull_recids' / 'recids_to_external_ids'.
    :param ln: language code for gettext.
    :param add_box: wrap the content in a search-result box when True.
    :param loading: show the progress message + spinner instead of results.
    :return: HTML string, or None when the box is hidden for this view.
    """
    _ = gettext_set_language(ln)
    if autoclaim_data['hidden']:
        # The box is suppressed altogether for this view.
        return None
    html_head = _("""<span title="You don’t need to add all your publications one by one.
    This list contains all your publications that were automatically assigned to your INSPIRE profile through arXiv and ORCiD. "><strong> Automatically assigned publications </strong> </span>""")
    if loading:
        if autoclaim_data['num_of_claims'] == 0:
            html_autoclaim = ''
        else:
            html_autoclaim = _("<span id=\"autoClaimMessage\">Please wait as we are assigning %s papers from external systems to your"
                               " Inspire profile</span></br>" % (str(autoclaim_data["num_of_claims"])))
            html_autoclaim += self.loading_html()
    else:
        html_autoclaim = ''
        # FIXME: unsuccesful claims temporarily hidden until autoclaim ticket implementation is finished
        # if "unsuccessfull_recids" in autoclaim_data.keys() and autoclaim_data["unsuccessfull_recids"]:
        #
        #     message = ''
        #     if autoclaim_data["num_of_unsuccessfull_recids"] > 1:
        #         message = _("The following %s publications need your review before they can be assigned to your profile:" % (str(autoclaim_data["num_of_unsuccessfull_recids"]),))
        #     else:
        #         message = _("The following publications need your review before they can be assigned to your profile:")
        #     html_autoclaim += "<br><span id=\"autoClaimUnSuccessMessage\">%s</span></br>" % (message,)
        #     html_autoclaim += '<div style="border:2px;height:100px;overflow:scroll;overflow-y:auto;overflow-x:auto;">'
        #     html_autoclaim += '<br><strong>Publication title</strong> <ol type="1"> <br>'
        #     for rec in autoclaim_data['unsuccessfull_recids']:
        #         html_autoclaim += '<li> <a href="%s/record/%s"> <b> ' % (CFG_SITE_URL, rec) + autoclaim_data['recids_to_external_ids'][rec] + '</b></a></li>\n'
        #     html_autoclaim += '</ol><br>\n</div>'
        #
        #     link = "%s/author/claim/action?confirm=True&pid=%s&autoclaim_show_review=True" % (CFG_SITE_URL, autoclaim_data['person_id'])
        #     text = _("Review assigning")
        #     html_autoclaim += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span><br><br>' % (link, text)
        # Idiomatic membership test (was: "... in autoclaim_data.keys()").
        if "successfull_recids" in autoclaim_data and autoclaim_data["successfull_recids"]:
            message = _('The following publications have been successfully assigned to your profile:')
            html_autoclaim += "<span id=\"autoClaimSuccessMessage\">%s</span><br>" % (message,)
            html_autoclaim += '<div style="border:2px;height:300px;overflow:scroll;overflow-y:auto;overflow-x:auto;">'
            html_autoclaim += '<br><strong>Publication title</strong> <ol type="1" style="padding-left:20px"> <br>'
            for rec in autoclaim_data['successfull_recids']:
                html_autoclaim += '<li> <a href="%s/record/%s"> <b> ' % (CFG_SITE_URL, rec) + autoclaim_data['recids_to_external_ids'][rec] + '</b></a></li>\n'
            html_autoclaim += '</ol><br>\n</div>'
        if not html_autoclaim:
            html_autoclaim = 'There are no publications to be automatically assigned'
    if add_box:
        autoclaim_box = self.tmpl_print_searchresultbox('autoclaim', html_head, html_autoclaim)
        return autoclaim_box
    else:
        return html_autoclaim
def tmpl_support_box(self, support_data, ln, add_box=True, loading=True):
    """Render the 'Contact' box pointing users at the help page.

    :param support_data: unused here, kept for signature symmetry with the
        other tmpl_*_box methods.
    :param ln: language code for gettext.
    :param add_box: wrap the content in a search-result box when True.
    :param loading: show the loading placeholder instead of the content.
    """
    _ = gettext_set_language(ln)
    box_title = _("<strong> Contact </strong>")
    if loading:
        body = self.loading_html()
    else:
        help_url = "%s/author/help" % (CFG_SITE_URL)
        body = _("Please contact our user support in case you need help or you just want to suggest some new ideas. We will get back to you. </br>")
        body += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (help_url, _("Get help!"))
    if not add_box:
        return body
    return self.tmpl_print_searchresultbox('support', box_title, body)
def tmpl_merge_box(self, merge_data, ln, add_box=True, loading=True):
    """Render the 'Merge profiles' box.

    :param merge_data: dict with 'search_param' and 'canonical_id'.
    :param ln: language code for gettext.
    :param add_box: wrap the content in a search-result box when True.
    :param loading: show the loading placeholder instead of the content.
    """
    _ = gettext_set_language(ln)
    box_title = _("""<span title="It sometimes happens that somebody's publications are scattered among two or more profiles for various reasons
    (different spelling, change of name, multiple people with the same name). You can merge a set of profiles together.
    This will assign all the information (including publications, IDs and citations) to the profile you choose as a primary profile.
    After the merging only the primary profile will exist in the system and all others will be automatically deleted. "><strong> Merge profiles </strong><span>""")
    # Built unconditionally, as the original did (missing keys raise even
    # while loading).
    merge_url = "%s/author/merge_profiles?search_param=%s&primary_profile=%s" % (CFG_SITE_URL, merge_data['search_param'], merge_data['canonical_id'])
    if loading:
        body = self.loading_html()
    else:
        body = _("If your or somebody else's publications in INSPIRE exist in multiple profiles, you can fix that here. </br>")
        body += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn">%s</a></span>' % (merge_url, _("Merge profiles"))
    if not add_box:
        return body
    return self.tmpl_print_searchresultbox('merge', box_title, body)
def tmpl_hepnames_box(self, hepnames_data, ln, add_box=True, loading=True):
    """Render the HepNames-record box for a profile.

    :param hepnames_data: dict with 'HaveHep', 'HaveChoices', 'heprecord',
        'HepChoices' (list of (label, value) pairs) and 'cid'.
    :param ln: language code for gettext.
    :param add_box: wrap the content in a search-result box when True.
    :param loading: show the loading placeholder instead of the content.
    """
    _ = gettext_set_language(ln)
    if not loading:
        try:
            heprec = str(hepnames_data['heprecord'][0])
        except (TypeError, KeyError, IndexError):
            heprec = ''
        # NOTE(review): heprec is only referenced by the commented-out
        # mailto variants below; the live code does not use it.
        if hepnames_data['HaveHep']:
            # Exactly one matching HepNames record: show it verbatim.
            contents = hepnames_data['heprecord']
        else:
            contents = ''
            if not hepnames_data['HaveChoices']:
                contents += ("There is no HepNames record associated with this profile. "
                             "<a href='http://slac.stanford.edu/spires/hepnames/additions.shtml'> Create a new one! </a> <br>"
                             "The new HepNames record will be visible and associated <br> to this author "
                             "after manual revision, usually within a few days.")
            else:
                #<a href="mailto:address@domain.com?subject=title&body=something">Mail Me</a>
                contents += ("There is no unique HepNames record associated "
                             "with this profile. <br> Please tell us if you think it is one of "
                             "the following, or <a href='http://slac.stanford.edu/spires/hepnames/additions.shtml'> Create a new one! </a> <br>"
                             "<br><br> Possible choices are: ")
                #mailbody = ("Hello! Please connect the author profile %s "
                #            "with the HepNames record %s. Best regards" % (hepnames_data['cid'], '%s'))
                #mailstr = '<form method="GET" action="%s/author/manage_profile/connect_author_with_hepname" rel="nofollow">' \
                #          '<input type="hidden" name="cname" value="%s">' \
                #          '<input type="hidden" name="hepname" value="%s">' \
                #          '<input type="submit" class="btn" value="%s"> </form>' % (CFG_SITE_URL, hepnames_data['cid'], '%s', 'This is the right one!',)
                #mailstr = ('''<class="choose_hepname" cname="%s" hepname_rec=%s> This is the right one! </class="choose_hepname">''' % (hepnames_data['cid'], '%s'))
                #mailstr = ('''<a href='mailto:%s?subject=HepNames record match: %s %s&body=%s'>'''
                #           '''This is the right one!</a>''' % ('%s', hepnames_data['cid'], heprec, '%s'))
                # mailstr keeps an embedded '%s' placeholder; it is filled per
                # choice below via "mailstr % x[1]".
                mailstr = ('''<a id="hepname_connection" class="btn" href="#"><span class="cname hidden">%s</span><span class="hepname hidden">%s</span>%s</a>''' % (hepnames_data['cid'], '%s', 'This is the right one!'))
                choices = ['<tr><td>' + x[0] + '</td><td>&nbsp;&nbsp;</td><td align="right">' + mailstr % x[1] + '</td></tr>'
                           for x in hepnames_data['HepChoices']]
                contents += '<table>' + ' '.join(choices) + '</table>'
    else:
        contents = self.loading_html()
    if not add_box:
        return contents
    else:
        return self.tmpl_print_searchresultbox('hepdata', '<strong> HepNames data </strong>', contents)
def tmpl_open_table(self, width_pcnt=False, cell_padding=False, height_pcnt=False):
    """Return an opening <table> tag.

    Attribute order is height (only if given), width (default 100%),
    cellpadding (default 0); falsy values select the defaults.
    """
    attrs = []
    if height_pcnt:
        attrs.append('height=%s' % height_pcnt)
    attrs.append('width=%s' % width_pcnt if width_pcnt else 'width=100%')
    attrs.append('cellpadding=%s' % cell_padding if cell_padding else 'cellpadding=0')
    return '<table border=0 %s >' % ' '.join(attrs)
def tmpl_close_table(self):
    # Closing tag matching tmpl_open_table().
    return "</table>"
def tmpl_open_row(self):
    # Opens a table row; closed by tmpl_close_row().
    return "<tr>"
def tmpl_close_row(self):
    return "</tr>"
def tmpl_open_col(self):
    # Opens a top-aligned table cell; closed by tmpl_close_col().
    return "<td valign='top'>"
def tmpl_close_col(self):
    return "</td>"
def _grid(self, rows, cols, table_width=False, cell_padding=False):
tmpl = self
def cont(*boxes):
out = []
h = out.append
idx = 0
h(tmpl.tmpl_open_table(width_pcnt=table_width, cell_padding=cell_padding))
for _ in range(rows):
h(tmpl.tmpl_open_row())
for _ in range(cols):
h(tmpl.tmpl_open_col())
h(boxes[idx])
idx += 1
h(tmpl.tmpl_close_col())
h(tmpl.tmpl_close_row())
h(tmpl.tmpl_close_table())
return '\n'.join(out)
return cont
def tmpl_message_form(self, last_page_visited, name_to_prefill, email_to_prefill, incomplete_params):
    """Render the 'Get help!' contact form.

    :param last_page_visited: URL carried through as a hidden field so the
        handler can send the user back.
    :param name_to_prefill: initial value of the Name field.
    :param email_to_prefill: initial value of the E-mail field.
    :param incomplete_params: when truthy, show the red validation warning.
    :return: the form HTML, fragments joined with single spaces.
    """
    parts = []
    #parts.append('<div style="display: block; width: 600px; text-align: left;">')
    parts.append('<div style="width:100%; minheight: 500px;">')
    parts.append('<div style="background-color: #F1F1FA; display: table; border-radius: 10px; padding: 20px; color: #3366CC; font: Helvetica 12pt;border: 1px solid black; margin: 0px auto;">')
    parts.append('<div align="center">')
    parts.append('<p style="font-size: 20px; font-weight: bold;"> Get help!</p>')
    parts.append('<p style="font-size: 14px; font-weight: bold;"> Write here on any issue, suggestions or technical request.</p>')
    if incomplete_params:
        parts.append('<p style="font-size: 14px; font-weight: bold;"> <font color="red">Please fill the forms correctly!</font></p>')
    parts.append('</div>')
    parts.append('<form action="%s/author/claim/action" method="post">' % (CFG_SITE_URL,))
    parts.append('<fieldset style="border: 0; display: inline-block;">')
    parts.append('<p><label for="Name"> Name: </label><input style="float: right; border-radius: 4px;" required="True" name="Name" value="%s" type="text" size="40"></p>' % (name_to_prefill))
    parts.append('<p><label for="E-mail"> E-mail: </label><input style="float: right; border-radius: 4px;" name="E-mail" value="%s" type="email" size="40"></p>' % (email_to_prefill))
    parts.append('<input type="hidden" name="last_page_visited" value="%s" />' % (str(last_page_visited),))
    parts.append('<p>Comment:</p>')
    parts.append('<p><textarea style="max-width:500px; min-width:500px; min-height:300px; border-radius: 4px;" name="Comment" cols="60" rows="5" required="True" id="Comment"></textarea></p>')
    parts.append('</fieldset>')
    parts.append('<button class="aid_btn_blue" style="display: block; margin: 0 auto;" type="submit" name="send_message">Submit</button>')
    parts.append('</form>')
    parts.append('</div>')
    parts.append('</div>')
    return ' '.join(parts)
# pylint: enable=C0301
def tmpl_help_page(self):
    """Return the HTML embedding the static help page in an iframe."""
    return """
    <iframe src="/img/bibauthorid-help.html" seamless style="width:100%;height:6000px"></iframe>
    """
# Role-dependent wording for the paper lists on a person page.  First level
# is the viewer role ('guest' / 'user' / 'admin'); for 'user' there is an
# extra 'owner' / 'not_owner' level.  The '*_ns' keys are the non-self
# ("namespaced") variants of the same labels.
verbiage_dict = {'guest': {'confirmed': 'Papers',
                           'repealed': 'Papers removed from this profile',
                           'review': 'Papers in need of review',
                           'tickets': 'Open Tickets', 'data': 'Data',
                           'confirmed_ns': 'Papers of this Person',
                           'repealed_ns': 'Papers _not_ of this Person',
                           'review_ns': 'Papers in need of review',
                           'tickets_ns': 'Tickets for this Person',
                           'data_ns': 'Additional Data for this Person'},
                 'user': {'owner': {'confirmed': 'Your papers',
                                    'repealed': 'Not your papers',
                                    'review': 'Papers in need of review',
                                    'tickets': 'Your tickets', 'data': 'Data',
                                    'confirmed_ns': 'Your papers',
                                    'repealed_ns': 'Not your papers',
                                    'review_ns': 'Papers in need of review',
                                    'tickets_ns': 'Your tickets',
                                    'data_ns': 'Additional Data for this Person'},
                          'not_owner': {'confirmed': 'Papers',
                                        'repealed': 'Papers removed from this profile',
                                        'review': 'Papers in need of review',
                                        'tickets': 'Your tickets', 'data': 'Data',
                                        'confirmed_ns': 'Papers of this Person',
                                        'repealed_ns': 'Papers _not_ of this Person',
                                        'review_ns': 'Papers in need of review',
                                        'tickets_ns': 'Tickets you created about this person',
                                        'data_ns': 'Additional Data for this Person'}},
                 'admin': {'confirmed': 'Papers',
                           'repealed': 'Papers removed from this profile',
                           'review': 'Papers in need of review',
                           'tickets': 'Tickets', 'data': 'Data',
                           'confirmed_ns': 'Papers of this Person',
                           'repealed_ns': 'Papers _not_ of this Person',
                           'review_ns': 'Papers in need of review',
                           'tickets_ns': 'Request Tickets',
                           'data_ns': 'Additional Data for this Person'}}
# Role- and state-dependent labels for the claim/repeal buttons.  Structure
# mirrors verbiage_dict: role -> (owner/not_owner for 'user') -> one entry
# for the bulk buttons ('mass_buttons') and one per record state
# ('record_undecided' / 'record_confirmed' / 'record_repealed').
# NOTE(review): user.owner.record_confirmed['alt_confirm'] reads 'Not Mine.'
# where the symmetric entries use confirming wording -- looks like a
# copy/paste slip; kept as-is, confirm against the UI before changing.
buttons_verbiage_dict = {'guest': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
                                                    'b_confirm': 'Yes, those papers are by this person.',
                                                    'b_repeal': 'No, those papers are not by this person',
                                                    'b_to_others': 'Assign to other person',
                                                    'b_forget': 'Forget decision'},
                                   'record_undecided': {'alt_confirm': 'Confirm!',
                                                        'confirm_text': 'Yes, this paper is by this person.',
                                                        'alt_repeal': 'Rejected!',
                                                        'repeal_text': 'No, this paper is <i>not</i> by this person',
                                                        'to_other_text': 'Assign to another person',
                                                        'alt_to_other': 'To other person!'},
                                   'record_confirmed': {'alt_confirm': 'Confirmed.',
                                                        'confirm_text': 'Marked as this person\'s paper',
                                                        'alt_forget': 'Forget decision!',
                                                        'forget_text': 'Forget decision.',
                                                        'alt_repeal': 'Repeal!',
                                                        'repeal_text': 'But it\'s <i>not</i> this person\'s paper.',
                                                        'to_other_text': 'Assign to another person',
                                                        'alt_to_other': 'To other person!'},
                                   'record_repealed': {'alt_confirm': 'Confirm!',
                                                       'confirm_text': 'But it <i>is</i> this person\'s paper.',
                                                       'alt_forget': 'Forget decision!',
                                                       'forget_text': 'Forget decision.',
                                                       'alt_repeal': 'Repealed',
                                                       'repeal_text': 'Marked as not this person\'s paper',
                                                       'to_other_text': 'Assign to another person',
                                                       'alt_to_other': 'To other person!'}},
                         'user': {'owner': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
                                                             'b_confirm': 'These are mine!',
                                                             'b_repeal': 'These are not mine!',
                                                             'b_to_others': 'It\'s not mine, but I know whose it is!',
                                                             'b_forget': 'Forget decision'},
                                            'record_undecided': {'alt_confirm': 'Mine!',
                                                                 'confirm_text': 'This is my paper!',
                                                                 'alt_repeal': 'Not mine!',
                                                                 'repeal_text': 'This is not my paper!',
                                                                 'to_other_text': 'Assign to another person',
                                                                 'alt_to_other': 'To other person!'},
                                            'record_confirmed': {'alt_confirm': 'Not Mine.',
                                                                 'confirm_text': 'Marked as my paper!',
                                                                 'alt_forget': 'Forget decision!',
                                                                 'forget_text': 'Forget assignment decision',
                                                                 'alt_repeal': 'Not Mine!',
                                                                 'repeal_text': 'But this is not mine!',
                                                                 'to_other_text': 'Assign to another person',
                                                                 'alt_to_other': 'To other person!'},
                                            'record_repealed': {'alt_confirm': 'Mine!',
                                                                'confirm_text': 'But this is my paper!',
                                                                'alt_forget': 'Forget decision!',
                                                                'forget_text': 'Forget decision!',
                                                                'alt_repeal': 'Not Mine!',
                                                                'repeal_text': 'Marked as not your paper.',
                                                                'to_other_text': 'Assign to another person',
                                                                'alt_to_other': 'To other person!'}},
                                  'not_owner': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
                                                                 'b_confirm': 'Yes, those papers are by this person.',
                                                                 'b_repeal': 'No, those papers are not by this person',
                                                                 'b_to_others': 'Assign to other person',
                                                                 'b_forget': 'Forget decision'},
                                                'record_undecided': {'alt_confirm': 'Confirm!',
                                                                     'confirm_text': 'Yes, this paper is by this person.',
                                                                     'alt_repeal': 'Rejected!',
                                                                     'repeal_text': 'No, this paper is <i>not</i> by this person',
                                                                     'to_other_text': 'Assign to another person',
                                                                     'alt_to_other': 'To other person!'},
                                                'record_confirmed': {'alt_confirm': 'Confirmed.',
                                                                     'confirm_text': 'Marked as this person\'s paper',
                                                                     'alt_forget': 'Forget decision!',
                                                                     'forget_text': 'Forget decision.',
                                                                     'alt_repeal': 'Repeal!',
                                                                     'repeal_text': 'But it\'s <i>not</i> this person\'s paper.',
                                                                     'to_other_text': 'Assign to another person',
                                                                     'alt_to_other': 'To other person!'},
                                                'record_repealed': {'alt_confirm': 'Confirm!',
                                                                    'confirm_text': 'But it <i>is</i> this person\'s paper.',
                                                                    'alt_forget': 'Forget decision!',
                                                                    'forget_text': 'Forget decision.',
                                                                    'alt_repeal': 'Repealed',
                                                                    'repeal_text': 'Marked as not this person\'s paper',
                                                                    'to_other_text': 'Assign to another person',
                                                                    'alt_to_other': 'To other person!'}}},
                         'admin': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
                                                    'b_confirm': 'Yes, those papers are by this person.',
                                                    'b_repeal': 'No, those papers are not by this person',
                                                    'b_to_others': 'Assign to other person',
                                                    'b_forget': 'Forget decision'},
                                   'record_undecided': {'alt_confirm': 'Confirm!',
                                                        'confirm_text': 'Yes, this paper is by this person.',
                                                        'alt_repeal': 'Rejected!',
                                                        'repeal_text': 'No, this paper is <i>not</i> by this person',
                                                        'to_other_text': 'Assign to another person',
                                                        'alt_to_other': 'To other person!'},
                                   'record_confirmed': {'alt_confirm': 'Confirmed.',
                                                        'confirm_text': 'Marked as this person\'s paper',
                                                        'alt_forget': 'Forget decision!',
                                                        'forget_text': 'Forget decision.',
                                                        'alt_repeal': 'Repeal!',
                                                        'repeal_text': 'But it\'s <i>not</i> this person\'s paper.',
                                                        'to_other_text': 'Assign to another person',
                                                        'alt_to_other': 'To other person!'},
                                   'record_repealed': {'alt_confirm': 'Confirm!',
                                                       'confirm_text': 'But it <i>is</i> this person\'s paper.',
                                                       'alt_forget': 'Forget decision!',
                                                       'forget_text': 'Forget decision.',
                                                       'alt_repeal': 'Repealed',
                                                       'repeal_text': 'Marked as not this person\'s paper',
                                                       'to_other_text': 'Assign to another person',
                                                       'alt_to_other': 'To other person!'}}}
| jmartinm/invenio | modules/bibauthorid/lib/bibauthorid_templates.py | Python | gpl-2.0 | 180,706 | [
"VisIt"
] | 2ac70309bacd3b9b771b0461848b219c5f74d0522b1ff9a4151dd2178f45bebc |
#
# Copyright 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
from collections import defaultdict
from datetime import datetime
from subprocess import Popen, PIPE
# Directory containing this script; used to locate the AUTHORS file.
root = os.path.dirname(sys.argv[0])
def read_authors(fn):
    """Parse an AUTHORS file into an {email: name} mapping.

    Each non-blank line must have the form ``Full Name <email@host>``;
    the angle brackets are stripped from the address.

    :param fn: path to the AUTHORS file.
    :return: dict mapping e-mail address to the author's full name.
    """
    # Use a context manager so the handle is closed deterministically
    # (the original relied on the garbage collector), and skip blank
    # lines, which would otherwise fail to unpack.
    with open(fn, 'r') as fh:
        pairs = [line.rsplit(maxsplit=1) for line in fh if line.strip()]
    return {email.strip('<>'): name for name, email in pairs}
def parse_git_log(log, authors):
    """Map committer names to the set of years in which they committed.

    :param log: raw ``git log`` output as bytes.
    :param authors: {email: canonical name} mapping (see read_authors);
        unknown addresses fall back to the raw e-mail.
    :return: defaultdict mapping author name -> set of commit years.

    Commits whose text contains 'copyright' or 'license' are dropped
    (presumably to ignore commits that merely touch license headers --
    TODO confirm the intent).
    """
    committers = defaultdict(set)
    author = None
    date = None
    for line in log.decode('latin1').split('\n'):
        if line.startswith('commit'):
            # A new commit header: book the previous commit, if complete.
            if date is not None and author is not None:
                committers[author].add(date.year)
        elif line.startswith('Author:'):
            # Keep only the address between '<...>'.
            email = line.rsplit('<', maxsplit=1)[1][:-1]
        elif line.startswith('Date:'):
            # Drop the trailing timezone offset before parsing.
            date = datetime.strptime(line[5:].rsplit(maxsplit=1)[0].strip(),
                                     '%a %b %d %H:%M:%S %Y')
            # Resolve the canonical author name; fall back to the raw email.
            try:
                author = authors[email]
            except KeyError:
                author = email
        elif 'copyright' in line.lower() or 'license' in line.lower():
            # Invalidate the pending commit so it is not counted.
            date = None
    # Book the final commit: no 'commit' header follows the last entry.
    if date is not None:
        committers[author].add(date.year)
    return committers
def pretty_years(years):
    """Format a collection of years as a compact, sorted string.

    Runs of consecutive years collapse into ranges, e.g.
    {2012, 2013, 2015} -> '2012-2013, 2015'.

    :param years: iterable of ints (duplicates and unsorted input are fine).
    :return: formatted string; empty string for empty input (the original
        raised IndexError in that case).
    """
    def fmt(first, last):
        # A run of one year prints once; longer runs print as 'a-b'.
        return str(first) if first == last else '%s-%s' % (first, last)

    years = sorted(years)
    if not years:
        return ''
    runs = []
    run_start = run_end = years[0]
    for year in years[1:]:
        if year - run_end > 1:
            # Gap: close the current run and start a new one.
            runs.append(fmt(run_start, run_end))
            run_start = year
        run_end = year
    runs.append(fmt(run_start, run_end))
    return ', '.join(runs)
# Build the {email: name} map from the project's AUTHORS file, then collect
# per-author commit years for the file given on the command line.
authors = read_authors('{}/../AUTHORS'.format(root))
# --follow tracks the file across renames.
process = Popen(['git', 'log', '--follow', sys.argv[1]], stdout=PIPE,
                stderr=PIPE)
stdout, stderr = process.communicate()
committers = parse_git_log(stdout, authors)
# Emit one 'Copyright <years> <name>' line per committer; continuation
# lines are blank-padded so the names align under the first line.
prefix = 'Copyright'
for name, years in committers.items():
    print('{} {} {}'.format(prefix, pretty_years(years), name))
    prefix = ' ' * len(prefix)
print()
| libAtoms/matscipy | maintenance/copyright.py | Python | lgpl-2.1 | 3,148 | [
"Matscipy"
] | 4f63ec5fc2014a9ec1de7e6705b1d69dd964849c845135b5456fcbf2803acff4 |
# -*- coding: utf-8 -*-
#
# CleanerVersion documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 22 13:37:08 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Make the project root importable *before* importing the package, so the
# docs also build from a plain source checkout where cleanerversion is not
# installed site-wide.  (The original imported first and extended sys.path
# afterwards, which only worked with an installed package.)  Appending at
# the end keeps an installed version preferred.
sys.path.insert(len(sys.path), os.path.abspath('..'))
import cleanerversion
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',      # pull documentation from docstrings
    'sphinx.ext.intersphinx',  # cross-link to other projects (see intersphinx_mapping)
    'sphinx.ext.todo',         # support .. todo:: directives
    'sphinx.ext.coverage',     # documentation coverage reports
    'sphinx.ext.viewcode',     # link documented objects to highlighted source
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CleanerVersion'
copyright = u'2014, Jean-Christophe Zulian, Brian King, Andrea Marcacci, ' \
            u'Manuel Jeckelmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.  Both values are taken from the package itself so the
# documentation cannot drift from the code.
#
# The short X.Y version.
version = cleanerversion.get_version(2)
# The full version, including alpha/beta/rc tags.
release = cleanerversion.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CleanerVersionDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CleanerVersion.tex', u'CleanerVersion Documentation',
u'Jean-Christophe Zulian, Brian King, Andrea Marcacci, Manuel Jeckelmann',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index',
'cleanerversion',
u'CleanerVersion Documentation',
[u'Jean-Christophe Zulian, Brian King, Andrea Marcacci, '
u'Manuel Jeckelmann'],
1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'CleanerVersion',
u'CleanerVersion Documentation',
u'Jean-Christophe Zulian, Brian King, Andrea Marcacci, Manuel Jeckelmann',
'CleanerVersion',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    # Object inventories used by sphinx.ext.intersphinx to resolve
    # cross-references into the Python and Django documentation.
    'python': ('http://docs.python.org/', None),
    'django': ('https://docs.djangoproject.com/en/dev/',
               'https://docs.djangoproject.com/en/dev/_objects/')
}
| swisscom/cleanerversion | docs/conf.py | Python | apache-2.0 | 9,124 | [
"Brian"
] | bcbfa5104d748c246b452ec6afe3aef2d19a584d30975f4f7790c7ac45bc6448 |
""" StateMachine
This module contains the basic blocks to build a state machine ( State and
StateMachine ). And the RSS implementation of it, using its own states map.
"""
from DIRAC import S_OK, S_ERROR
__RCSID__ = '$Id: $'
class State( object ):
  """
  State class that represents a single step on a StateMachine, with all the
  possible transitions, the default transition and an ordering level.
  """

  def __init__( self, level, stateMap = None, defState = None ):
    """
    Constructor.

    examples:
      >>> s0 = State( 100 )
      >>> s1 = State( 0, [ 'StateName1', 'StateName2' ], defState = 'StateName1' )
      >>> s2 = State( 0, [ 'StateName1', 'StateName2' ] )
      # s2 is tricky: a transition to e.g. 'StateNext' is not in the stateMap
      # and there is no default, so it ends up going to 'StateNext' anyway.

    :Parameters:
      **level** - `int`
        each state is mapped to an integer, which is used to sort the states
        according to that integer.
      **stateMap** - [ None, `list` ]
        list ( of strings ) with the states reachable from this particular
        state. If not defined, there are no restrictions on transitions.
      **defState** - [ None, `str` ]
        default state used in case the proposed next state is not in stateMap
        ( not defined or simply not there ).
    """
    self.level = level
    # A fresh list per instance: the previous "stateMap = list()" default was
    # a single list object shared by every State built without a stateMap.
    self.stateMap = stateMap if stateMap is not None else []
    self.default = defState

  def transitionRule( self, nextState ):
    """
    Selects the next state, given the transition map, the default and the
    proposed next state. If <nextState> is in stateMap, go there. If not, go
    to <self.default> if any; otherwise go to <nextState> anyway.

    examples:
      >>> s0.transitionRule( 'nextState' )
          'nextState'
      >>> s1.transitionRule( 'StateName2' )
          'StateName2'
      >>> s1.transitionRule( 'StateNameNotInMap' )
          'StateName1'
      >>> s2.transitionRule( 'StateNameNotInMap' )
          'StateNameNotInMap'

    :Parameters:
      **nextState** - `str`
        name of the proposed next state

    :return: `str` ( state name )
    """
    # If the proposed state is explicitly reachable, go ahead.
    if nextState in self.stateMap:
      return nextState
    # Otherwise fall back on the default; states with no default ( e.g. with
    # an empty stateMap ) have no movement restrictions.
    # ( Replaces the obfuscated "( 1 and self.default ) or nextState". )
    return self.default or nextState
class StateMachine( object ):
  """
  StateMachine class that represents the whole state machine with all
  transitions between its State objects.
  """

  def __init__( self, state = None ):
    """
    Constructor.

    examples:
      >>> sm0 = StateMachine()
      >>> sm1 = StateMachine( state = 'Active' )

    :Parameters:
      **state** - [ None, `str` ]
        current state of the StateMachine; can be None if the machine is not
        used to calculate transitions. Beware: it is not checked against the
        states map here.
    """
    self.state = state
    # Children are expected to override this map; otherwise everything is
    # stuck in Nirvana.
    self.states = { 'Nirvana' : State( 100 ) }

  def levelOfState( self, state ):
    """
    Given a state name, returns its level ( an integer defining the
    hierarchy ), or -1 when the name is unknown to the machine.

    >>> sm0.levelOfState( 'Nirvana' )
        100
    >>> sm0.levelOfState( 'AnotherState' )
        -1

    :Parameters:
      **state** - `str`
        name of the state, expected to be a key of <self.states>

    :return: `int` || -1 ( if not on <self.states> )
    """
    stateObject = self.states.get( state )
    if stateObject is None:
      return -1
    return stateObject.level

  def setState( self, state ):
    """
    Makes sure the state is either None or known to the machine before
    accepting it as the current state.

    examples:
      >>> sm0.setState( None )[ 'OK' ]
          True
      >>> sm0.setState( 'Nirvana' )[ 'OK' ]
          True
      >>> sm0.setState( 'AnotherState' )[ 'OK' ]
          False

    :Parameters:
      **state** - [ None, `str` ]
        state to be set as current state of the StateMachine

    :return: S_OK || S_ERROR
    """
    # FIXME: do we really have to accept None as state ??
    if state is not None and state not in self.states.keys():
      return S_ERROR( '%s is not a valid state' % state )
    self.state = state
    return S_OK()

  def getStates( self ):
    """
    Returns all possible states in the state map.

    examples:
      >>> sm0.getStates()
          [ 'Nirvana' ]

    :return: list( stateNames )
    """
    return self.states.keys()

  def getNextState( self, candidateState ):
    """
    Gets the next state, given a proposed transition to candidateState. A
    candidate not present in <self.states> is rejected. Otherwise, when
    <self.state> is None the candidate is accepted as-is; else the current
    state applies its own transition rule.

    examples:
      >>> sm0.getNextState( None )
          S_OK( None )
      >>> sm0.getNextState( 'NextState' )
          S_OK( 'NextState' )

    :Parameters:
      **candidateState** - `str`
        name of the proposed next state

    :return: S_OK( nextState ) || S_ERROR
    """
    if candidateState not in self.states:
      return S_ERROR( '%s is not a valid state' % candidateState )
    # FIXME: do we need this anymore ?
    if self.state is None:
      return S_OK( candidateState )
    currentState = self.states[ self.state ]
    return S_OK( currentState.transitionRule( candidateState ) )
#...............................................................................
class RSSMachine( StateMachine ):
  """
  RSS implementation of the State Machine. Six states are defined; ordered by
  level ( higher first ) they are: Unknown, Active, Degraded, Probing,
  Banned, Error.

  Any transition is allowed, except when the current state is Banned: from
  there, every transition to a state other than Error, Banned or Probing is
  forced to Probing.
  """

  def __init__( self, state ):
    """
    Constructor.

    examples:
      >>> rsm0 = RSSMachine( None )
      >>> rsm1 = RSSMachine( 'Unknown' )

    :Parameters:
      **state** - [ None, `str` ]
        name of the current state of the StateMachine
    """
    super( RSSMachine, self ).__init__( state )
    # State map, keyed by name; the level encodes the hierarchy ( the higher,
    # the "healthier" ). Only Banned restricts its outgoing transitions.
    self.states = { 'Unknown'  : State( 5 ),
                    'Active'   : State( 4 ),
                    'Degraded' : State( 3 ),
                    'Probing'  : State( 2 ),
                    'Banned'   : State( 1, [ 'Error', 'Banned', 'Probing' ], defState = 'Probing' ),
                    'Error'    : State( 0 ) }

  def orderPolicyResults( self, policyResults ):
    """
    Sorts ( in place ) the policy results obtained on the PDP module by the
    level of their statuses: the lower the level, the further left in the
    list. Beware: a status unknown to the StateMachine gets level -1 and is
    therefore ordered first.

    examples:
      >>> rsm0.orderPolicyResults( [ { 'Status' : 'Active', 'A' : 'A' },
                                     { 'Status' : 'Banned', 'B' : 'B' } ] )
          [ { 'Status' : 'Banned', 'B' : 'B' }, { 'Status' : 'Active', 'A' : 'A' } ]

    :Parameters:
      **policyResults** - `list`
        list of dictionaries to be ordered; any keys are allowed as long as
        `Status` is present.

    :result: list( dict ), ordered in place ( nothing is returned, the list
        is mutable )
    """
    policyResults.sort( key = self.levelOfPolicyState )

  def levelOfPolicyState( self, policyResult ):
    """
    Returns the level of the state associated with the policy result, or -1
    when the status is unknown. Mostly used as sort key by
    `orderPolicyResults`.

    examples:
      >>> rsm0.levelOfPolicyState( { 'Status' : 'Active', 'A' : 'A' } )
          5
      >>> rsm0.levelOfPolicyState( { 'Status' : 'Rubbish', 'R' : 'R' } )
          -1

    :Parameters:
      **policyResult** - `dict`
        dictionary that must have the `Status` key.

    :return: int || -1
    """
    return self.levelOfState( policyResult[ 'Status' ] )
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | vmendez/DIRAC | ResourceStatusSystem/PolicySystem/StateMachine.py | Python | gpl-3.0 | 9,395 | [
"DIRAC"
] | 08485e081c016b42a2aebf7a651c12d048ec301f1fd1c45da49712cf4d2bcab1 |
#!/usr/bin/env python
# coding: utf-8

# # Minimal Example: Gaussian Processes
#
# In this example script, we'll reproduce Figure 7 from the fitting release paper ([Conroy et al. 2020](http://phoebe-project.org/publications/2020Conroy+)).
#
# <img src="http://phoebe-project.org/images/figures/2020Conroy+_fig7.png" alt="Figure 7" width="800px"/>

# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).

# In[1]:

#!pip install -I "phoebe>=2.3,<2.4"

# In[2]:

import matplotlib.pyplot as plt
plt.rc('font', family='serif', size=14, serif='STIXGeneral')
plt.rc('mathtext', fontset='stix')

# In[3]:

import phoebe
import numpy as np

logger = phoebe.logger('warning')

# we'll set the random seed so that the noise model is reproducible
np.random.seed(123456789)

# # Create fake "observations"

# In[4]:

b = phoebe.default_binary()

# In[5]:

# 501 evenly spaced compute times covering phases 0-5.
b.add_dataset('lc', compute_times=phoebe.linspace(0,5,501))

# In[6]:

b.run_compute()

# In[7]:

# Fake observations = model fluxes + white noise (scale 0.07) + a slow
# sinusoidal trend that the Gaussian processes will be asked to absorb.
times = b.get_value(qualifier='times', context='model')
fluxes = b.get_value(qualifier='fluxes', context='model') + np.random.normal(size=times.shape) * 0.07 + 0.2*np.sin(times)
sigmas = np.ones_like(fluxes) * 0.05

# # Create a New System

# In[8]:

b = phoebe.default_binary()

# In[9]:

b.add_dataset('lc', times=times, fluxes=fluxes, sigmas=sigmas)

# In[10]:

afig, mplfig = b.plot(show=True)

# In[11]:

afig, mplfig = b.plot(x='phases', show=True)

# In[12]:

b.run_compute(model='withoutGPs')

# # Add GPs
#
# See the API docs for [b.add_gaussian_process](../api/phoebe.frontend.bundle.Bundle.add_gaussian_process.md) and [gaussian_process](../api/phoebe.parameters.feature.gaussian_process.md).

# In[13]:

b.add_gaussian_process(dataset='lc01', kernel='sho')

# In[14]:

b.add_gaussian_process(dataset='lc01', kernel='matern32')

# In[15]:

print(b.get_gaussian_process())

# # Run Forward Model
#
# Since the system itself is still time-independent, the model is computed for one cycle according to `compute_phases`, but is then interpolated at the phases of the times in the dataset to compute and expose the fluxes including gaussian processes at the dataset times.
#
# If the model were time-dependent, then using `compute_times` or `compute_phases` without covering a sufficient time-span will raise an error.

# In[16]:

print(b.run_checks_compute())

# In[17]:

# Solve the constraint the other way around so compute_phases can be set
# directly (one cycle, 101 points).
b.flip_constraint('compute_phases', solve_for='compute_times')
b.set_value('compute_phases', phoebe.linspace(0,1,101))

# In[18]:

print(b.run_checks_compute())

# In[19]:

b.run_compute(model='withGPs')

# In[20]:

afig, mplfig = b.plot(c={'withoutGPs': 'red', 'withGPs': 'green'},
                      ls={'withoutGPs': 'dashed', 'withGPs': 'solid'},
                      s={'model': 0.03},
                      save='figure_GPs_times.pdf',
                      show=True)

# In[21]:

afig, mplfig = b.plot(c={'withoutGPs': 'red', 'withGPs': 'green'},
                      ls={'withoutGPs': 'dashed', 'withGPs': 'solid'},
                      s={'model': 0.03},
                      x='phases',
                      save='figure_GPs_phases.pdf', show=True)
| phoebe-project/phoebe2-docs | development/examples/minimal_GPs.py | Python | gpl-3.0 | 3,237 | [
"Gaussian"
] | ad03eec1e4a8f31e34bf95897c247ed9976535cedf51db8bfa8d0a4dfd92a1ed |
__author__ = 'Brian'
# My first neural net!!!
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Consider implementing feature scaling/whitening (scipy.cluster.vq.whiten?)
# Consider implementing PCA whitening
# Consider implementing an autoencoder
# Consider implementing other optimization algorithms besides vanilla gradient descent, such as stochastic gradient descent,
# Adagrad, Adadelta, Adam, Nesterov's accelerated gradient descent, momentum, RMSprop
# Involve learning rate decay?
# Consider implementing dropout and maxout
# Consider implementing other activation functions (any others?)
# Consider implementing k-fold cross-validation and confusion matrix for classification to validate model performance
# Consider implementing a RNN
# Consider implementing a Reinforcement Learning agent
# Consider implementing a genetic algorithm or other evolutionary algorithms
# Consider implementing a Hidden Markov Model
# Consider implementing a SVM
# Consider implementing a SOM
# Consider implementing Attention Mechanisms
# Consider using deep learning frameworks like TensorFlow, Theano, Caffe, Torch, Neon, Keras, etc.
# Consider making a model with SyntaxNet
# Sigmoid function to get "activations" in [0, 1] for nodes in hidden layer:
# g(z) = 1/(1+e^(-z))
def sigmoid(z):
    """Logistic sigmoid activation, g(z) = 1 / (1 + e^(-z)); output in (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))
# Tanh function to get "activations" in [-1, 1] for nodes in the hidden layer:
# g(z) = 2/(1+e^(-2z)) - 1
def tanh(z):
    """Hyperbolic tangent activation; output in (-1, 1).

    Uses np.tanh instead of the explicit 2/(1 + e^(-2z)) - 1 formula: the two
    are mathematically identical, but the explicit form overflows (emitting
    RuntimeWarnings) for large negative z, while np.tanh saturates cleanly.
    """
    return np.tanh(z)
# Computes leaky ReLU ( max(0, z) ) (normal RelU uses alpha = 0)
def relu(z):
    """Leaky ReLU: z for z >= 0, alpha * z otherwise (alpha = 0 gives plain ReLU).

    Generalized to accept numpy arrays as well as scalars -- the original
    scalar comparison raised on array input. Scalar arguments still return
    plain Python numbers, so existing callers are unaffected.
    """
    alpha = 0.01  # leak slope for negative inputs; can be modified
    if np.isscalar(z):
        return alpha * z if z < 0 else z
    return np.where(z < 0, alpha * z, z)
# Softmax function to get "activations" in [, ] for nodes in the hidden layer:
# P(y=k|x;theta) = e^(thetak*x)/sumK(e^(theta*x)) where k in {1, 2,..., K}
# g(z) = e^z[k]/sum(e^z)
def softmax(z, k):
    """Softmax probability of class k (1-based) given score vector z.

    P(y=k|z) = e^z[k-1] / sum(e^z). The scores are shifted by max(z) before
    exponentiation -- mathematically a no-op, but it prevents overflow (and
    the resulting nan) for large scores.
    """
    z = np.asarray(z, dtype=float)
    exps = np.exp(z - np.max(z))  # shift for numerical stability
    return exps[k - 1] / np.sum(exps)
# Softplus function to get "activations" ( "softer" RelU, which is max(0, z) )
# g(z) = log(1+e^z)
# derivative of softplus is simply the sigmoid function
def softplus(z):
    """Softplus activation, log(1 + e^z) -- a "softer" ReLU ( max(0, z) ).

    Implemented as np.logaddexp(0, z): the same quantity computed without
    overflowing np.exp for large z. Its derivative is the sigmoid function.
    """
    return np.logaddexp(0, z)
# Derivative of sigmoid function to compute gradient terms in the hidden layer:
# g'(z) = sigmoid(z)*(1-sigmoid(z)) for sigmoid function
def dsigmoid(z):
    """Derivative of the logistic sigmoid: g'(z) = sigmoid(z) * (1 - sigmoid(z)).

    The sigmoid is evaluated once (inlined, so this helper is self-contained)
    rather than twice as in the textbook formula. np.multiply keeps the
    product element-wise even for np.matrix operands.
    """
    s = 1.0 / (1.0 + np.exp(-z))
    return np.multiply(s, 1.0 - s)
# Derivative of tanh function to compute gradient terms in the hidden layer:
# g'(z) = (1+tanh(z))*(1-tanh(z)) for tanh function
def dtanh(z):
    """Derivative of tanh: g'(z) = (1 + tanh(z)) * (1 - tanh(z)) = 1 - tanh(z)^2.

    tanh is evaluated once via np.tanh (stable for large |z|) instead of twice
    through the hand-rolled 2/(1 + e^(-2z)) - 1 formula. np.multiply keeps the
    product element-wise even for np.matrix operands.
    """
    t = np.tanh(z)
    return np.multiply(1.0 + t, 1.0 - t)
# Derivative of ReLU
def drelu(z):
    """Derivative of the leaky ReLU: the leak slope below zero, 1 otherwise."""
    alpha = 0.01
    return alpha if z < 0 else 1
# Calculate error term of hidden layer:
# # error2 = (theta2.T*error3) .* g'(z2)
def calcErrorTerm(theta, error, z):
    """Backpropagated error term for a hidden layer.

    Implements error_l = (theta_l[:, 1:].T * error_{l+1}) .* g'(z_l); the
    first column of theta (the bias weights) is dropped because the bias unit
    receives no error.

    NOTE(review): the `*` between `theta[:, 1:].T` and `error` is a matrix
    product only when the operands are np.matrix; with plain ndarrays it
    broadcasts element-wise -- confirm the intended operand type.
    """
    return np.multiply((theta[:, 1:].T * error), dtanh(z))
# Calculate the regularized cost function for logistic regression:
# J(theta) = (1/m)*sum(-y*log(h)-(1-y)*log(1-h)) + (lambda/2m)*(sum(theta1^2)+sum(theta2^2))
def calcCostLg(h, y, theta1, theta2, regLambda=1):
    """Regularized logistic-regression (cross-entropy) cost.

    J = sum(-y .* log10(h) - (1 - y) .* log10(1 - h))
        + (regLambda / (2m)) * (sum(theta1^2) + sum(theta2^2))

    :param h: predictions in (0, 1), same shape as y
    :param y: 0/1 target labels, m rows
    :param theta1: weight matrix, regularized by its squared sum
    :param theta2: weight matrix, regularized by its squared sum
    :param regLambda: L2 regularization strength. Previously read from an
        undefined global (a NameError at call time); now an explicit
        parameter defaulting to 1, so old call sites keep working.
    :return: scalar cost

    NOTE(review): unlike the classic formulation, the data term is not
    divided by m and log10 is used rather than the natural log -- confirm
    that this is intended.
    """
    m = y.shape[0]
    dataTerm = np.sum(-np.multiply(y, np.log10(h))
                      - np.multiply((1 - y), np.log10(1 - h)))
    # 2.0 forces float division even under Python 2 integer semantics.
    regTerm = (regLambda / (2.0 * m)) * (np.sum(np.square(theta1)) + np.sum(np.square(theta2)))
    return dataTerm + regTerm
# Calculate the regularized cost function for linear regression:
# J(theta) = (1/2)*(sum(h - y)^2 + lambda*(sum(theta1^2)+sum(theta2^2))
def calcCostLr(h, y, theta1, theta2, regLambda=1):
    """Regularized linear-regression cost.

    J = (1/2) * ( sum((h - y)^2)
                  + regLambda * (sum(theta1^2) + sum(theta2^2)) )

    :param h: predictions, same shape as y
    :param y: targets
    :param theta1: weight matrix, regularized by its squared sum
    :param theta2: weight matrix, regularized by its squared sum
    :param regLambda: L2 regularization strength (was an undefined global
        before; now an explicit parameter defaulting to 1)
    :return: scalar cost

    Fixes relative to the previous version:
      * `1/2` truncated to 0 under Python 2 integer division -> 0.5;
      * `np.sum(np.dot(theta.T, theta))` summed the whole Gram matrix, which
        is not sum(theta^2) for a multi-column theta -> np.square, matching
        the documented formula;
      * dropped the unused local `m`.
    """
    return 0.5 * (np.sum(np.square(h - y))
                  + regLambda * (np.sum(np.square(theta1)) + np.sum(np.square(theta2))))
"""
Multilayer perceptron
"""
# Train the neural net
def trainPerceptron():
    """Batch-gradient-descent training loop for a 3-layer perceptron
    (tanh activations, 6 hidden units, 4 output units).

    NOTE(review): this function cannot run as written; the blocking defects
    are flagged inline below (undefined names, invalid DataFrame slicing,
    inconsistent weight/gradient shapes, unbounded loop).
    """
    # Read in data
    filename = "file"
    data = pd.read_csv(filename)
    # NOTE(review): read_csv returns a DataFrame, which cannot be sliced as
    # data[:, :-4] -- presumably data.iloc[:, :-4].values was intended.
    # Also, `input` shadows the Python builtin of the same name.
    input = data[:, :-4]
    y = data[:, -4:]
    # Initialize key values
    m = input.shape[0]        # number of training examples
    j1 = input.shape[1] + 1   # input layer size, +1 for the bias unit
    j2 = 6                    # hidden layer size
    j3 = 4                    # output layer size
    epsilon = 0.13            # half-width of the uniform weight-init range
    numLayers = 3             # NOTE(review): unused
    targetCost = 0.0001
    cost = 99999999
    alpha = 0.01              # learning rate
    regLambda = 1             # L2 regularization strength
    # Initialize weights uniformly in [-epsilon, epsilon]
    # NOTE(review): theta1 is (j2-1, j1) but gradient1 below is (j2, j1+1);
    # the shapes are inconsistent, so the weight updates cannot broadcast.
    theta1 = np.random.rand(j2-1, j1) * (2*epsilon) - epsilon
    theta2 = np.random.rand(j3, j2) * (2*epsilon) - epsilon
    # NOTE(review): if the cost never drops below targetCost this loop never
    # terminates -- there is no maximum-iteration guard.
    while (cost >= targetCost):
    # for j in range(1000):
        # initialize a matrix to store the predictions
        h = np.zeros((m, j3))
        # initialize a count to accumulate adjustments to the weights
        gradient1 = np.zeros((j2, j1+1))
        gradient2 = np.zeros((j3, j2+1))
        # Determine delta matrix for each layer
        for i in range(m):
            # Forward propagation
            a1 = input[i].T
            a1 = np.vstack((np.ones((1, 1)), a1))
            z2 = np.dot(theta1, a1b)  # NOTE(review): a1b is undefined -- presumably a1
            a2 = tanh(z2)
            a2 = np.vstack((np.ones((1, 1)), a2))
            z3 = np.dot(theta2, a2b)  # NOTE(review): a2b is undefined -- presumably a2
            a3 = tanh(z3)
            h[i, :] = a3
            # Backpropagation
            actual = y[i].T
            delta3 = a3 - actual
            delta2 = calcErrorTerm(theta2, error3, z2)  # NOTE(review): error3 is undefined -- presumably delta3
            # Calculate adjustments for weights for this iteration
            adjustments1 = np.dot(delta2, a1.T) # careful, bias term doesn't get multiplied through
            adjustments2 = np.dot(delta3, a2.T) # careful, bias term doesn't get multiplied through
            # Accumulate adjustments
            gradient1 += adjustments1
            gradient2 += adjustments2
        # Adjust weights using regularization (the bias column is not regularized)
        adjustBias = alpha * (gradient1[:, 0] / m)
        adjustWeights = alpha * (gradient1[:, 1:] / m + ((regLambda/m) * theta1[:, 1:]))
        theta1[:, 0] -= adjustBias
        theta1[:, 1:] -= adjustWeights
        adjustBias = alpha * (gradient2[:, 0] / m)
        adjustWeights = alpha * (gradient2[:, 1:] / m + ((regLambda/m) * theta2[:, 1:]))
        theta2[:, 0] -= adjustBias
        theta2[:, 1:] -= adjustWeights
        cost = calcCostLg(h, y, theta1, theta2)
"""
Convolutional neural network (LeNet)
"""
# It may be a lot easier to learn something like Theano or TensorFlow and use it for functions like convolution and pooling
# Flatten image into a one-dimensional vector to reduce dimensions of tensors by one?
# Does deconvolution actually need to be implemented by dividing the fourier transforms of delta by W then taking the inverse fourier transform?
##-> means that the corresponding operation is run here, likely using a machine learning library
def trainCNN():
    """Skeleton of a LeNet-style convolutional network trained by hand.

    NOTE(review): this is pseudocode scaffolding, not runnable code. Each
    `##->` line marks an operation that was meant to be implemented (likely
    with a library such as Theano or TensorFlow, as the author's own comments
    suggest). Several `for` loops contain nothing but comments, which makes
    the body a SyntaxError as written, and `images` holds a placeholder
    string rather than image arrays.
    """
    images = []
    images.append("all images in np.matrix form")
    y = ["correct labels"]
    alpha = 0.01      # learning rate
    regLambda = 1     # L2 regularization strength
    epsilon = 0.13    # half-width of the uniform weight-init range
    channels = 3 # RGB or grayscale
    kernelSize = (5, 5) # size of convolution kernel (could be different for various layers depending on image size)
    maxPool = (2, 2) # stride of subsampling pool (could be different for various layers, and could be mean or L^p pooling)
    imageShape = images[0].shape # dimensions of input images (assume 32x32)
    c1 = 4 # number of convolved feature maps in layer 1
    s1 = c1 # number of pooled feature maps in layer 1
    c2 = 12 # number of convolved feature maps in layer 2
    s2 = c2 # number of pooled feature maps in layer 2
    n1 = 20 # number of nodes in fully connected layer 1 (there could be more hidden layers)
    n2 = 10 # number of nodes in fully connected layer 2 (output layer)
    W1 = np.random.rand(c1, 1, kernelSize[0], kernelSize[1], channels) * (2*epsilon) - epsilon # numpy array of convolution kernels connecting input image to c1
    b1 = np.random.rand(c1, 1) * (2*epsilon) - epsilon # biases for convolution kernels connecting input image to c1
    W2 = np.random.rand(c2, s1, kernelSize[0], kernelSize[1], channels) * (2*epsilon) - epsilon # numpy array of convolution kernels connecting s1 to c2
    b2 = np.random.rand(c2, s1) * (2*epsilon) - epsilon # biases for convolution kernels connecting s1 to c2
    W3 = np.random.rand(n1, s2, kernelSize[0], kernelSize[1], channels) * (2*epsilon) - epsilon # numpy array of convolution kernels connecting s2 to n1
    b3 = np.random.rand(n1, s2) * (2*epsilon) - epsilon # biases for convolution kernels connecting s2 to n1
    W4 = np.random.rand(n2, n1) * (2*epsilon) - epsilon # weights connecting n1 to n2
    b4 = np.random.rand(n2) * (2*epsilon) - epsilon # weights for n1 bias term
    for p in range(len(images)):
        # Is there a better way to vectorize all this?
        # Reshape dimensions of tensors to be consistent with TensorFlow?
        image = images[p] # should be (32, 32, 3)
        # NOTE(review): the pooled-shape computations below use / (true
        # division under Python 3), producing float dimensions np.zeros rejects.
        c1Convolved = np.zeros((c1, imageShape[0]-kernelSize[0]+1, imageShape[1]-kernelSize[1]+1, channels)) # should be (4, 28, 28, 3)
        c1Activated = np.zeros(c1Convolved.shape) # should be (4, 28, 28, 3)
        c1Pooled = np.zeros((c1Convolved.shape[0], c1Convolved.shape[1]/maxPool[0], c1Convolved.shape[2]/maxPool[1], channels)) # should be (4, 14, 14, 3)
        c2Convolved = np.zeros((c2, c1Pooled.shape[0]-kernelSize[0]+1, c1Pooled.shape[1]-kernelSize[1]+1, channels)) # should be (12, 10, 10, 3)
        c2Activated = np.zeros(c2Convolved.shape) # should be (12, 10, 10, 3)
        c2Pooled = np.zeros((c2Convolved.shape[0], c2Convolved.shape[1]/maxPool[0], c2Convolved.shape[2]/maxPool[1], channels)) # should be (12, 5, 5, 3)
        n1Convolved = np.zeros((n1))
        n1Activated = np.zeros((n1))
        n2Convolved = np.zeros((n2))
        n2Activated = np.zeros((n2))
        delta1Convolved = np.zeros(c1Convolved.shape) # should be (4, 28, 28, 3)
        delta1Pooled = np.zeros(c1Pooled.shape) # should be (4, 14, 14, 3)
        delta2Convolved = np.zeros(c2Convolved.shape) # should be (12, 10, 10, 3)
        delta2Pooled = np.zeros(c2Pooled.shape) # should be (12, 5, 5, 3)
        delta3 = np.zeros(n1)
        delta4 = np.zeros(n2)
        # initialize an array to store predictions
        h = np.zeros((n2))
        # Forward propagation layer 1
        for i in range(c1):
            ##-> convolve image with W1[i, 0, :, :, :], add b1[i, 0], and store it in c1Convolved[i, :, :, :]
        ##-> run activation function on c1Convolved[:, :, :, :] for each pixel and channel, and store it in c1Activated[:, :, :, :]
        ##-> run max pooling on c1Activated[:, :, :, :] and store it in c1Pooled[:, :, :, :]
        # Forward propagation layer 2
        for i in range(c2):
            for j in range(c1):
                ##-> convolve c1Pooled[j, :, :, :] with W2[i, j, :, :, :], add b2[i, j], and add it to c2Convolved[i, :, :, :]
        ## run activation function on c2Convolved[:, :, :, :] for each pixel and channel, and store it in c2Activated[:, :, :, :]
        ## run max pooling on c2Activated[:, :, :, :] and store it in c2Pooled[:, :, :, :]
        # Forward propagation layer 3
        for i in range(n1):
            for j in range(c2):
                ##-> convolve c2Pooled[j, :, :, :] with W3[i, j, :, :, :], add b3[i, j], average the channels (yes?), and add the resulting number to n1Convolved[i]
        ##-> run activation function on n1Convolved and store it in n1Activated
        # Forward propagation layer 4
        n2Convolved += np.dot(W4, n1Activated)
        n2Convolved += b4
        ##-> run softmax activation function on n2Convolved and store it n2Activated
        # Backpropagation layer 4
        delta4 = n2Activated - y
        # Backpropagation layer 3
        delta3 = calcErrorTerm(W4, delta4, n1Convolved) # don't need to factor in b4 to calculating delta3
        # Backpropagation layer 2
        for i in range(c2):
            for j in range(n1):
                ##-> deconvolve delta3[j] with W3[j, i, :, :, :] and add it to delta2Pooled[i, :, :, :]
                # expands shape to that of delta2Pooled, and means error is being distributed through all (3) channels
        ##-> upsample delta2Pooled[:, :, :, :] and store it in delta2Convolved[:, :, :, :]
        ##-> multiply element-wise delta2Convolved[:, :, :, :] with the result of running c2Convolved[:, :, :, :]
        # through the derivative of the activation function and store it in delta2Convolved[:, :, :, :]
        # Backpropagation layer 1
        for i in range(c1):
            for j in range(c2):
                ##-> deconvolve delta2Convolved[j, :, :, :] with W2[j, i, :, :, :] and add it to delta1Pooled[i, :, :, :]
                # expands shape to that of delta1Pooled, and means error is continuing to be distributed through all (3) channels
        ##-> upsample delta1Pooled[:, :, :, :] and store it in delta1Convolved[:, :, :, :]
        ##-> multiply element-wise delta1Convolved[:, :, :, :] with the result of running c1Convolved[:, :, :, :]
        # through the derivative of the activation function and store it in delta1Convolved[:, :, :, :]
        # Compute gradients for layer 1
        for i in range(c1):
            ##-> convolve image with delta1Convolved[i, :, :, :] and subtract that (times alpha) from W1[i, 0, :, :, :]
            ##-> average three channels of delta1Convolved[i, :, :, :] and subtract the width and height dimensions from b1[i, 0]
            # TODO: Regularization
        # Compute gradients for layer 2
        for i in range(c2):
            for j in range(c1):
                ##-> convolve c1Pooled[j, :, :, :] with delta2Convolved[i, :, :, :] and subtract that (times alpha) from W2[i, j, :, :, :]
                ##-> average three channels of delta2Convolved[i, :, :, :] and subtract the width and height dimensions from b2[i, j]
                # TODO: Regularization
        # Compute gradients for layer 3
        for i in range(n1):
            for j in range(c2):
                ##-> convolve c2Pooled[j, :, :, :] with delta3[i] and subtract that (times alpha) from W3[i, j, :, :, :]
                ##-> subtract delta3[i] from b3[i, j]
                # TODO: Regularization
        # Compute gradients for layer 4
        W4 -= alpha * (np.outer(delta4, n1Activated) + (regLambda * W4)) # is regLambda correct? What about m?
        b4 -= delta4
    # FIXME: Fix biases, right now their operations don't make sense at all
    # Biases should have one vector component for each output node of a given layer
"""
Implement a convolutional neural network in the machine learning Python API TensorFlow.
"""
# Load the MNIST dataset (downloaded/cached under MNIST_data on first use);
# one_hot encodes each digit label as a 10-element indicator vector.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # holds pointer to MNIST data
# Define some functions that make code more concise and modular so we don't have type out
# TensorFlow operations a bunch of times
# Initialize weights in a Variable tensor
def weight(shape):
    """Create a trainable weight tensor of the given shape, initialized from
    a truncated normal (mean 0, stddev 0.1)."""
    initial = tf.truncated_normal(shape=shape, mean=0.0, stddev=0.1)
    return tf.Variable(initial_value=initial)
# Initialize biases in a Variable tensor
def bias(shape):
    """Create a trainable bias tensor of the given shape, initialized to a
    small positive constant (0.1) to keep ReLU units initially active."""
    initial = tf.constant(value=0.1, shape=shape)
    return tf.Variable(initial_value=initial)
# Create an Operation for convolution
def convolve(x, W):
    """2-D convolution of x with kernel W, stride 1, zero ('SAME') padding,
    so the spatial output size matches the input size."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# Create an Operation for 2x2 max pooling
def maxpool(x):
    """2x2 max pooling with stride 2 ('SAME' padding): halves each spatial
    dimension of x."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Build the computational graph in a TensorFlow Session (the context manager)
# NOTE(review): tf.Session / tf.initialize_all_variables are TensorFlow 1.x
# APIs; this script will not run under TF 2.x without compat shims.
sess = tf.Session()

# Weights and biases for convolutional layer 1
# NOTE(review): b_conv1 is created with weight() while every other bias in
# this graph uses bias(); this is probably meant to be bias([32]) -- confirm.
W_conv1 = weight([5, 5, 1, 32]) # (800 weights)
b_conv1 = weight([32])

# Create a Placeholder tensor for the input data and true output labels
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
y_label = tf.placeholder(dtype=tf.float32, shape=[None, 10])

# Convolution and pooling Operation for convolutional layer 1
h_conv1 = tf.nn.relu(convolve(x_image, W_conv1) + b_conv1) # 28x28x1 -> 28x28x32
h_pool1 = maxpool(h_conv1) # 28x28x32 -> 14x14x32

# Weights and biases for convolutional layer 2
W_conv2 = weight([5, 5, 32, 64])
b_conv2 = bias([64])

# Convolution and pooling Operation for convolutional layer 2
h_conv2 = tf.nn.relu(convolve(h_pool1, W_conv2) + b_conv2) # 14x14x32 -> 14x14x64
h_pool2 = maxpool(h_conv2) # 14x14x64 -> 7x7x64

# Weights and biases for fully connected layer 1
W_fc1 = weight([7*7*64, 1024])
b_fc1 = bias([1024])

# Activation function for fully connected layer 1
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) # 7*7*64 = 3,136 neurons flattened
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # 3,136 -> 1,024 (3,211,264 weights)

# Implement dropout, TensorFlow takes care of the details in the computational graph
keep_probability = tf.placeholder(dtype=tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_probability)

# Weights and biases for fully connected layer 2
W_fc2 = weight([1024, 10])
b_fc2 = bias([10])

# Predicted output
y_prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) # 1024 -> 10 (10,240 weights)

# Build out the final steps of the computational graph so the model can be automatically
# trained via backpropagation
# NOTE(review): computing softmax and then the log cross-entropy by hand is
# numerically fragile; tf.nn.softmax_cross_entropy_with_logits is the stable
# form -- consider switching.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(y_prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_prediction, 1), tf.argmax(y_label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Prepare the Session to be run by initializing all Variables
sess.run(tf.initialize_all_variables())

# Train the model
for i in range(20000):
    batch = mnist.train.next_batch(50)
    # Print train accuracy every 100 iterations
    if i % 100 == 0:
        train_accuracy = accuracy.eval(session=sess, feed_dict={x: batch[0],
                                                                y_label: batch[1],
                                                                keep_probability: 1.0})
        print("Step %d, training accuracy %g"%(i, train_accuracy))
    # Run one step of training on this mini-batch with 50% dropout keep probability
    train_step.run(session=sess, feed_dict={x: batch[0],
                                            y_label: batch[1],
                                            keep_probability: 0.5})

# Print test accuracy (TensorFlow automatically partitions train and test data)
print("Test accuracy %g"%accuracy.eval(session=sess, feed_dict={x: mnist.test.images,
                                                                y_label: mnist.test.labels,
                                                                keep_probability: 1.0}))
| bhwester/neural-network | neuralnet.py | Python | mit | 18,251 | [
"Brian"
] | 661452e9a04d5c4066deca6c190570a4c994ecafa57ded409bc5f9cdc006007c |
#!/usr/bin/python
# Emacs settings: -*- tab-width: 4 -*-
#
# Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# parselog.py, written and contributed by Kevin Marks
#
# Requires OS X 10.3 Panther or later, for Python and Core Graphics Python APIs
# Invoke from the command line with "parselog.py fname" where fname is a log file made by mDNSNetMonitor
#
# Caveats:
# It expects plain ASCII, and doesn't handle spaces in record names very well right now
# There's a procedure you can follow to 'sanitize' an mDNSNetMonitor log file to make it more palatable to parselog.py:
# 1. Run mDNSNetMonitor in a terminal window.
# When you have enough traffic, type Ctrl-C and save the content of the terminal window to disk.
# Alternatively, you can use "mDNSNetMonitor > logfile" to write the text directly to a file.
# You now have a UTF-8 text file.
# 2. Open the UTF-8 text file using BBEdit or some other text editor.
# (These instructions are for BBEdit, which I highly recommend you use when doing this.)
# 3. Make sure BBEdit correctly interprets the file as UTF-8.
# Either set your "Text Files Opening" preference to "UTF-8 no BOM", and drop the file onto BBEdit,
# or manually open the File using "File -> Open" and make sure the "Read As" setting is set to "UTF-8 no BOM"
# Check in the document pulldown menu in the window toolbar to make sure that it says "Encoding: UTF-8 no BOM"
# 4. Use "Tools -> Convert to ASCII" to replace all special characters with their seven-bit ascii equivalents.
# (e.g. curly quotes are converted to straight quotes)
# 5. Do a grep search and replace. (Cmd-F; make sure Grep checkbox is turned on.)
# Enter this search text : ^(.................\(................\S*) (.* -> .*)$
# Enter this replacement text: \1-\2
# Click "Replace All"
# Press Cmd-Opt-= repeatedly until there are no more instances to be replaced.
# You now have text file with all spaces in names changed to hyphens
# 6. Save the new file. You can save it as "UTF-8 no BOM", or as "Mac Roman". It really doesn't matter which --
# the file now contains only seven-bit ascii, so it's all the same no matter how you save it.
# 7. Run "parselog.py fname"
# 8. Open the resulting fname.pdf file with a PDF viewer like Preview on OS X
#
# Key to what you see:
# Time is on the horizontal axis
# Individual machines are shown on the vertical axis
# Filled red circle: Normal query Hollow red circle: Query requesting unicast reply
# Filled orange circle: Probe (service starting) Hollow orange circle: First probe (requesting unicast reply)
# Filled green circle: Normal answer Hollow green circle: Goodbye message (record going away)
# Hollow blue circle: Legacy query (from old client)
# $Log: parselog.py,v $
# Revision 1.4 2006/09/05 20:00:14 cheshire
# Moved Emacs settings to second line of file
#
# Revision 1.3 2006/08/14 23:24:47 cheshire
# Re-licensed mDNSResponder daemon source code under Apache License, Version 2.0
#
# Revision 1.2 2003/12/01 21:47:44 cheshire
# APSL
#
# Revision 1.1 2003/10/10 02:14:17 cheshire
# First checkin of parselog.py, a tool to create graphical representations of mDNSNetMonitor logs
from CoreGraphics import *
import math # for pi
import string
import sys, os
import re
def parselog(inFile):
    """Parse an mDNSNetMonitor capture log and render it as a PDF timeline.

    Time runs along the horizontal axis; each distinct source IP address
    gets its own row.  Queries/probes/answers are drawn as coloured
    circles (see the key in the file header) and answers are linked back
    to the question that triggered them when it arrived < 1s earlier.
    Output is written next to the input as <basename>.pdf.
    """
    f = open(inFile)
    # 'hunt' is the parser state: getTime -> getIP -> getQA for each
    # record, with 'skip' used to discard the rest of a bad record.
    hunt = 'getTime'
    ipList = {}       # source IP -> [row index, hostname, HINFO/OS text]
    querySource = {}  # RR name -> index in plotPoints of the asking query
    plotPoints = []   # [seconds, row, op-code, optional back-refs...]
    maxTime=0
    minTime = 36*60*60
    spaceExp = re.compile(r'\s+')
    print "Reading " + inFile
    while 1:
        lines = f.readlines(100000)
        if not lines:
            break
        for line in lines:
            if (hunt == 'skip'):
                # Consume lines until a blank record separator.
                if (line == '\n' or line == '\r' or line ==''):
                    hunt = 'getTime'
#            else:
#                msg = ("skipped" , line)
#                print msg
            elif (hunt == 'getTime'):
                if (line == "^C\n" ):
                    break
                # Timestamp is the first field, formatted H:M:S.
                time = line.split(' ')[0].split(':')
                if (len(time)<3):
                    #print "bad time, skipping",time
                    hunt = 'skip'
                else:
                    hunt = 'getIP'
                    #print (("getTime:%s" % (line)), time)
            elif (hunt == 'getIP'):
                ip = line.split(' ',1)
                ip = ip[0]
                # Convert H:M:S to seconds and track the capture window.
                secs=0
                for t in time:
                    secs = secs*60 +float(t)
                if (secs>maxTime):
                    maxTime=secs
                if (secs<minTime):
                    minTime=secs
                if (not ip in ipList):
                    # First sighting of this host: assign the next row.
                    ipList[ip] = [len(ipList), "", ""]
                #print (("getIP:%s" % (line)), time, secs)
                hunt = 'getQA'
            elif (hunt == 'getQA'):
                qaList = spaceExp.split(line)
                # qaList[0] Source Address
                # qaList[1] Operation type (PU/PM/QU/QM/AN etc.)
                # qaList[2] Record type (PTR/SRV/TXT etc.)
                # For QU/QM/LQ:
                # qaList[3] RR name
                # For PU/PM/AN/AN+/AD/AD+/KA:
                # qaList[3] TTL
                # qaList[4] RR name
                # qaList[5...] "->" symbol and following rdata
                #print qaList
                if (qaList[0] == ip):
                    if (qaList[1] == '(QU)' or qaList[1] == '(LQ)' or qaList[1] == '(PU)'):
                        plotPoints.append([secs, ipList[ip][0], (qaList[1])[1:-1]])
                    elif (qaList[1] == '(QM)'):
                        # Multicast query: remember it so a later answer
                        # for the same RR name can point back to it.
                        plotPoints.append([secs, ipList[ip][0], (qaList[1])[1:-1]])
                        querySource[qaList[3]] = len(plotPoints)-1
                    elif (qaList[1] == '(PM)'):
                        plotPoints.append([secs, ipList[ip][0], (qaList[1])[1:-1]])
                        querySource[qaList[4]] = len(plotPoints)-1
                    elif (qaList[1] == '(AN)' or qaList[1] == '(AN+)' or qaList[1] == '(DE)'):
                        plotPoints.append([secs, ipList[ip][0], (qaList[1])[1:-1]])
                        # Link this answer to its question if that question
                        # was asked less than one second ago.
                        try:
                            theQuery = querySource[qaList[4]]
                            theDelta = secs - plotPoints[theQuery][0]
                            if (theDelta < 1.0):
                                plotPoints[-1].append(querySource[qaList[4]])
                            #print "Answer AN+ %s points to %d" % (qaList[4],querySource[qaList[4]])
                        except:
                            #print "Couldn't find any preceding question for", qaList
                            pass
                    elif (qaList[1] != '(KA)' and qaList[1] != '(AD)' and qaList[1] != '(AD+)'):
                        print "Operation unknown", qaList
                    if (qaList[1] == '(AN)' or qaList[1] == '(AN+)' or qaList[1] == '(AD)' or qaList[1] == '(AD+)'):
                        # Harvest hostname / OS hints from answer rdata.
                        if (qaList[2] == 'HINFO'):
                            ipList[ip][1] = qaList[4]
                            ipList[ip][2] = string.join(qaList[6:])
                            #print ipList[ip][1]
                        elif (qaList[2] == 'AAAA'):
                            # IPv6 answer without HINFO implies OS X 10.3
                            if (ipList[ip][1] == ""):
                                ipList[ip][1] = qaList[4]
                                ipList[ip][2] = "Panther"
                        elif (qaList[2] == 'Addr'):
                            if (ipList[ip][1] == ""):
                                ipList[ip][1] = qaList[4]
                                ipList[ip][2] = "Jaguar"
            else:
                if (line == '\n'):
                    hunt = 'getTime'
                else:
                    hunt = 'skip'
    f.close()
    #print plotPoints
    #print querySource
    #width=20.0*(maxTime-minTime)
    # Clamp to a minimum 10-second span so tiny captures still plot.
    if (maxTime < minTime + 10.0):
        maxTime = minTime + 10.0
    typesize = 12
    width=20.0*(maxTime-minTime)
    pageHeight=(len(ipList)+1) * typesize
    scale = width/(maxTime-minTime)
    leftMargin = typesize * 60
    bottomMargin = typesize
    pageRect = CGRectMake (-leftMargin, -bottomMargin, leftMargin + width, bottomMargin + pageHeight) # landscape
    outFile = "%s.pdf" % (".".join(inFile.split('.')[:-1]))
    c = CGPDFContextCreateWithFilename (outFile, pageRect)
    print "Writing " + outFile
    ourColourSpace = c.getColorSpace()
    # QM/QU red solid/hollow
    # PM/PU orange solid/hollow
    # LQ blue hollow
    # AN/DA green solid/hollow
    #colourLookup = {"L":(0.0,0.0,.75), "Q":(.75,0.0,0.0), "P":(.75,0.5,0.0), "A":(0.0,0.75,0.0), "D":(0.0,0.75,0.0), "?":(.25,0.25,0.25)}
    colourLookup = {"L":(0.0,0.0,1.0), "Q":(1.0,0.0,0.0), "P":(1.0,0.8,0.0), "A":(0.0,1.0,0.0), "D":(0.0,1.0,0.0), "?":(1.0,1.0,1.0)}
    c.beginPage (pageRect)
    c.setRGBFillColor(.75,0.0,0.0,1.0)
    c.setRGBStrokeColor(.25,0.75,0.25,1.0)
    c.setLineWidth(0.25)
    # One circle per logged event; solid for multicast/answers, hollow
    # (stroked) for unicast-reply requests and goodbyes.
    for point in plotPoints:
        #c.addArc((point[0]-minTime)*scale,point[1]*typesize+6,5,0,2*math.pi,1)
        c.addArc((point[0]-minTime)*scale,point[1]*typesize+6,typesize/4,0,2*math.pi,1)
        theColour = colourLookup[(point[2])[0]]
        if (((point[2])[0]) != "L") and (((point[2])[0]) != "Q") and (((point[2])[0]) != "P") and (((point[2])[0]) != "A") and (((point[2])[0]) != "D"):
            print "Unknown", point
        if ((point[2])[-1] == "M" or (point[2])[0]== "A"):
            c.setRGBFillColor(theColour[0],theColour[1],theColour[2],.5)
            c.fillPath()
        else:
            c.setRGBStrokeColor(theColour[0],theColour[1],theColour[2],.5)
            c.setLineWidth(1.0)
            c.strokePath()
            c.setRGBStrokeColor(.25,0.75,0.25,1.0)
            c.setLineWidth(0.25)
        # Draw a line from each answer back to its recorded question.
        for index in point[3:]:
            c.beginPath()
            c.moveToPoint((point[0]-minTime)*scale,point[1]*typesize+6)
            c.addLineToPoint(((plotPoints[index])[0]-minTime)*scale,(plotPoints[index])[1]*typesize+6)
            c.closePath()
            c.strokePath()
    # Per-host baseline plus IP / hostname / OS labels in the left margin.
    c.setRGBFillColor (0,0,0, 1)
    c.setTextDrawingMode (kCGTextFill)
    c.setTextMatrix (CGAffineTransformIdentity)
    c.selectFont ('Gill Sans', typesize, kCGEncodingMacRoman)
    c.setRGBStrokeColor(0.25,0.0,0.0,1.0)
    c.setLineWidth(0.1)
    for ip,[height,hname,hinfo] in ipList.items():
        c.beginPath()
        c.moveToPoint(pageRect.origin.x,height*typesize+6)
        c.addLineToPoint(width,height*typesize+6)
        c.closePath()
        c.strokePath()
        c.showTextAtPoint(pageRect.origin.x + 2, height*typesize + 2, ip, len(ip))
        c.showTextAtPoint(pageRect.origin.x + 2 + typesize*8, height*typesize + 2, hname, len(hname))
        c.showTextAtPoint(pageRect.origin.x + 2 + typesize*25, height*typesize + 2, hinfo, len(hinfo))
    # Vertical gridlines every second; heavier + labelled every 10s.
    for time in range (int(minTime),int(maxTime)+1):
        c.beginPath()
        c.moveToPoint((time-minTime)*scale,pageRect.origin.y)
        c.addLineToPoint((time-minTime)*scale,pageHeight)
        c.closePath()
        if (int(time) % 10 == 0):
            theHours = time/3600
            theMinutes = time/60 % 60
            theSeconds = time % 60
            theTimeString = '%d:%02d:%02d' % (theHours, theMinutes, theSeconds)
            # Should measure string width, but don't know how to do that
            theStringWidth = typesize * 3.5
            c.showTextAtPoint((time-minTime)*scale - theStringWidth/2, pageRect.origin.y + 2, theTimeString, len(theTimeString))
            c.setLineWidth(0.3)
        else:
            c.setLineWidth(0.1)
        c.strokePath()
    c.endPage()
    c.finish()
# Render one PDF per log file named on the command line.
for arg in sys.argv[1:]:
    parselog(arg)
| ghmajx/asuswrt-merlin | release/src/router/mDNSResponder/mDNSPosix/parselog.py | Python | gpl-2.0 | 10,526 | [
"Jaguar"
] | 7332ef800477122807d1a84162b518c59f3a062b94da44f1e5c256e4f823acfd |
'''
Set up Visual Studio to build a specified MPIR configuration
Copyright (C) 2011, 2012, 2013, 2014, 2015 Brian Gladman
'''
from operator import itemgetter
from os import listdir, walk, unlink, makedirs
from os.path import split, splitext, isdir, relpath, join, exists
from os.path import dirname, normpath
from copy import deepcopy
from sys import argv, exit
from filecmp import cmp
from shutil import copy
from re import compile, search
from collections import defaultdict
from uuid import uuid4
from time import sleep
from _msvccompiler import MSVCCompiler
import argparse
parser = argparse.ArgumentParser(description='Build flint tests')
# for script debugging
parser.add_argument('--debug', choices=["True", "False"], default="False")
# what to build
parser.add_argument('--platform', default="x64")
parser.add_argument('--configuration', default="Release")
parser.add_argument('--library-type', choices=["dll", "lib"], default="lib")
parser.add_argument('--interfaces-tests', choices=["True", "False"], default="True")
args = parser.parse_args()
print(args)
# Decode string-valued flags to booleans and build the
# '\<platform>\<configuration>\' path fragment used throughout.
debug = args.debug == "True"
intd = '\\%s\\%s\\' % (args.platform, args.configuration)
library_type = args.library_type
build_interfaces_tests = args.interfaces_tests == "True"
# The path to flint, solution and project directories
script_dir = dirname(__file__)
project_name = 'flint'
build_vc = 'build.vc14'
flint_dir = normpath(join(script_dir, '../../'))
solution_dir = normpath(join(flint_dir, build_vc))
# Python 2 compatibility: make input() behave like raw_input()
try:
    input = raw_input
except NameError:
    pass
# Build-target kinds with their Visual Studio names and extensions
app_type, lib_type, dll_type = 0, 1, 2
app_str = ('Application', 'StaticLibrary', 'DynamicLibrary')
app_ext = ('.exe', '.lib', '.dll')
# copy from file ipath to file opath but avoid copying if
# opath exists and is the same as ipath (this is to avoid
# triggering an unnecessary rebuild).
def write_f(ipath, opath):
    """Copy file *ipath* to *opath*, skipping the copy when *opath*
    already holds identical content (avoids triggering rebuilds)."""
    if not exists(ipath) or isdir(ipath):
        return
    if exists(opath) and (isdir(opath) or cmp(ipath, opath)):
        return
    copy(ipath, opath)
# Directory names never scanned for sources, and the extensions kept.
ignore_dirs = ( '.git', 'doc', 'examples', 'lib', 'exe', 'dll', 'win_hdrs')
req_extns = ( '.h', '.c', '.cc', '.cpp' )
def find_src(path):
    """Walk *path* and classify every C/C++ source and header file.

    Returns seven sorted lists:
      c  -- (directory, relative path) pairs for ordinary C sources
      h  -- relative paths of plain headers
      cx -- relative paths of flintxx C++ sources
      hx -- relative paths of *xx headers
      t  -- (module, relative path) pairs for C test sources
      tx -- (module, relative path) pairs for flintxx test sources
      p  -- (module, relative path) pairs for profiling sources
    Template files, the MSVC build tree, 'tune' directories and
    everything in ignore_dirs are skipped.
    """
    c, h, cx, hx, t, tx, p = [], [], [], [], [], [], []
    for root, dirs, files in walk(path):
        if 'template' in root:
            continue
        _, _t = split(root)
        if _t in ignore_dirs:
            continue
        if 'build.vc' in root:
            # do not descend into the MSVC build tree
            for di in list(dirs):
                dirs.remove(di)
        for di in list(dirs):
            if di in ignore_dirs:
                dirs.remove(di)
            if 'template' in di:
                dirs.remove(di)
        relp = relpath(root, flint_dir)
        if relp == '.':
            relp = ''
        for f in files:
            if 'template' in f:
                continue
            n, x = splitext(f)
            if x not in req_extns:
                continue
            pth, leaf = split(root)
            fp = join(relp, f)
            if leaf == 'tune':
                continue
            if leaf == 'test':
                # module name is the parent of the test directory
                p2, l2 = split(pth)
                l2 = '' if l2 == 'flint2' else l2
                if 'flintxx' in pth:
                    tx += [(l2, fp)]
                else:
                    t += [(l2, fp)]
            elif leaf == 'profile':
                p2, l2 = split(pth)
                l2 = '' if l2 == 'flint2' else l2
                p += [(l2, fp)]
            elif leaf == 'flintxx':
                cx += [fp]
            elif x == '.c':
                c += [(leaf, fp)]
            elif x == '.h':
                if n.endswith('xx'):
                    hx += [fp]
                else:
                    h += [fp]
    for x in (c, h, cx, hx, t, tx, p):
        x.sort()
    return (c, h, cx, hx, t, tx, p)
c, h, cx, hx, t, tx, p = find_src(flint_dir)
# Reference: signatures of the two MSVCCompiler methods used below.
# def compile(self, sources,
#    output_dir=None, macros=None, include_dirs=None, debug=0,
#    extra_preargs=None, extra_postargs=None, depends=None):
# def link(self, target_desc, objects, output_filename, output_dir=None,
#    libraries=None, library_dirs=None, runtime_library_dirs=None,
#    export_symbols=None, debug=0, extra_preargs=None,
#    extra_postargs=None, build_temp=None, target_lang=None):
cc = MSVCCompiler()
error_list = []
# Include paths and link libraries for flint and its dependencies,
# selected by platform/configuration ('intd') and lib vs dll build.
inc_dirs = [
    '..\\',
    '..\\..\\',
    '..\\..\\..\\mpir\\' + library_type + intd,
    '..\\..\\..\\mpfr\\' + library_type + intd,
    '..\\..\\..\\pthreads\\' + library_type + intd
    ]
libs = [
    '..\\..\\' + library_type + intd + library_type + '_flint',
    '..\\..\\..\\mpir\\' + library_type + intd + 'mpir',
    '..\\..\\..\\mpfr\\' + library_type + intd + 'mpfr',
    '..\\..\\..\\pthreads\\' + library_type + intd + 'pthreads'
    ]
# pthreads-win32 needs to know whether it is linked statically or not
if (library_type == "lib"):
    macros = [('PTW32_STATIC_LIB',1)]
else:
    macros = [('PTW32_BUILD',1)]
# Compile and link every C test program; failures are collected and
# reported at the end rather than aborting the whole run.
for l2, fp in t:
    fdn, fx = splitext(fp)
    fd, fn = split(fdn)
    if (not build_interfaces_tests and "interface" in fn):
        continue
    source = [join('..\\..\\', fp)]
    # NOTE(review): rebinds 'p' (previously the profile-source list
    # returned by find_src) to an index -- the list is unused from here on.
    p = fd.rfind('test')
    assert p >= 0
    tmp_dir = 'test\\test'
    outd = '..\\tests\\' + fd[:p] + intd
    try:
        obj = cc.compile(source, output_dir=tmp_dir, include_dirs=inc_dirs,macros=macros)
        cc.link("executable", obj, fn + '.exe', output_dir=outd, libraries=libs)
    except:
        error_list += [(l2, fp)]
print('Build Errors:')
for l2, fp in error_list:
    print(' ', l2, fp)
| jpflori/flint2 | build.vc14/build_tests/build_tests.py | Python | lgpl-2.1 | 5,047 | [
"Brian"
] | bfa88c668be951ca362d3a0c3059c6975eb774695de8a62f5cc87b70da15d03d |
"""
Extended math utilities.
"""
# Authors: G. Varoquaux, A. Gramfort, A. Passos, O. Grisel
# License: BSD
import math
from . import check_random_state
import numpy as np
from scipy import linalg
def norm(v):
    """Euclidean norm of a vector, computed via the BLAS ``nrm2`` routine."""
    arr = np.asarray(v)
    nrm2_func, = linalg.get_blas_funcs(['nrm2'], [arr])
    return nrm2_func(arr)
def _fast_logdet(A):
"""
Compute log(det(A)) for A symmetric
Equivalent to : np.log(np.linalg.det(A))
but more robust
It returns -Inf if det(A) is non positive or is not defined.
"""
# XXX: Should be implemented as in numpy, using ATLAS
# http://projects.scipy.org/numpy/browser/trunk/numpy/linalg/linalg.py#L1559
ld = np.sum(np.log(np.diag(A)))
a = np.exp(ld / A.shape[0])
d = np.linalg.det(A / a)
ld += np.log(d)
if not np.isfinite(ld):
return -np.inf
return ld
def _fast_logdet_numpy(A):
"""
Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A))
but more robust
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
# Numpy >= 1.5 provides a fast, robust logdet (np.linalg.slogdet);
# prefer it and fall back to the hand-rolled version on older releases.
if hasattr(np.linalg, 'slogdet'):
    fast_logdet = _fast_logdet_numpy
else:
    fast_logdet = _fast_logdet
try:
    factorial = math.factorial
except AttributeError:
    # math.factorial is only available in Python >= 2.6
    import operator

    def factorial(x):
        """Return x! (fallback shim for Python < 2.6).

        Negative inputs are folded with abs() and values below 1 are
        clamped to 1, so factorial(0) == 1 as expected.
        """
        # don't use reduce operator or 2to3 will fail.
        # ripped from http://www.joelbdalley.com/page.pl?38
        # Bug fix: the original read an undefined name 'n' instead of the
        # parameter 'x', raising NameError on every call.
        n = abs(int(x))
        if n < 1:
            n = 1
        # Compute n! iteratively
        result = 1
        for i in range(1, n + 1):
            result = i * result
        return result
try:
    import itertools
    combinations = itertools.combinations
except AttributeError:
    # Pre-2.6 Python: itertools exists but has no 'combinations'.
    def combinations(seq, r=None):
        """Generator returning combinations of items from sequence <seq>
        taken <r> at a time. Order is not significant. If <r> is not given,
        the entire sequence is returned.

        NOTE(review): unlike itertools.combinations this fallback yields
        lists, not tuples -- kept for backward compatibility.
        """
        # Idiom fix: identity comparison with None ('is', not '==').
        if r is None:
            r = len(seq)
        if r <= 0:
            yield []
        else:
            for i in xrange(len(seq)):
                for cc in combinations(seq[i + 1:], r - 1):
                    yield [seq[i]] + cc
def density(w, **kwargs):
    """Compute the density (fraction of nonzero entries) of a vector.

    Returns a value between 0 and 1; ``None`` yields 0.  Sparse inputs
    (anything exposing ``tocsr``) count only their stored entries.
    """
    if hasattr(w, "tocsr"):
        return float(w.data.size) / w.size
    if w is None:
        return 0
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    With ``dense_output=True`` a sparse result is converted to a dense
    ndarray before being returned.
    """
    from scipy import sparse
    if not (sparse.issparse(a) or sparse.issparse(b)):
        return np.dot(a, b)
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def fast_svd(M, k, p=None, q=0, transpose='auto', random_state=0):
    """Computes the k-truncated randomized SVD

    Parameters
    ===========
    M: ndarray or sparse matrix
        Matrix to decompose

    k: int
        Number of singular values and vectors to extract.

    p: int (default is k)
        Additional number of samples of the range of M to ensure proper
        conditioning. See the notes below.

    q: int (default is 0)
        Number of power iterations (can be used to deal with very noisy
        problems).

    transpose: True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case).

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior

    Notes
    =====
    This algorithm finds the exact truncated singular values decomposition
    using randomization to speed up the computations. It is particularly
    fast on large matrices on which you wish to extract only a small
    number of components.

    (k + p) should be strictly higher than the rank of M. This can be
    checked by ensuring that the lowest extracted singular value is on
    the order of the machine precision of floating points.

    References
    ==========
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909)

    A randomized algorithm for the decomposition of matrices
    Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    """
    # NOTE(review): 'p == None' should be 'p is None' (PEP 8).
    if p == None:
        p = k
    random_state = check_random_state(random_state)
    n_samples, n_features = M.shape
    if transpose == 'auto' and n_samples > n_features:
        transpose = True
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    # generating random gaussian vectors r with shape: (M.shape[1], k + p)
    r = random_state.normal(size=(M.shape[1], k + p))
    # sampling the range of M by linear projection of r
    Y = safe_sparse_dot(M, r)
    del r
    # apply q power iterations on Y to further 'imprint' the top
    # singular values of M in Y
    for i in xrange(q):
        Y = safe_sparse_dot(M, safe_sparse_dot(M.T, Y))
    # extracting an orthonormal basis of the M range samples
    from .fixes import qr_economic
    Q, R = qr_economic(Y)
    del R
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    from scipy import linalg
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    U = np.dot(Q, Uhat)
    if transpose:
        # transpose back the results according to the input convention
        return V[:k, :].T, s[:k], U[:, :k].T
    else:
        return U[:, :k], s[:k], V[:k, :]
def logsum(arr, axis=0):
    """Compute the sum of ``arr`` along ``axis`` assuming log-domain values.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow by shifting by the maximum before exponentiating.
    """
    shifted = np.rollaxis(arr, axis)
    # Normalizing by the max is what accumulates the least error.
    peak = shifted.max(axis=0)
    return np.log(np.sum(np.exp(shifted - peak), axis=0)) + peak
| joshbohde/scikit-learn | sklearn/utils/extmath.py | Python | bsd-3-clause | 6,785 | [
"Gaussian"
] | 2342ffe7910686469d7a8845ea1b3a0884494c38c77a8ae4d813ddd6be82f262 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author: Jim Dalton
options:
state:
description:
- Create or destroy the ELB
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
require: false
default: None
version_added: "1.6"
health_check:
description:
- An associative array of health check configuration settings (see example)
require: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
      - An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example )
required: false
version_added: "2.0"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocols: http
- load_balancer_port: 80
- instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocols: http
- load_balancer_port: 80
- instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocols: http
- load_balancer_port: 80
- instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
"""
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbManager(object):
"""Handles ELB creation and destruction"""
    def __init__(self, module, name, listeners=None, purge_listeners=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 cross_az_load_balancing=None,
                 stickiness=None, region=None, **aws_connect_params):
        """Record the desired ELB configuration and look up any existing
        load balancer with the same name.

        All arguments mirror the Ansible module parameters.  ``status``
        starts as 'gone' and is updated to 'ok'/'created'/'deleted' as
        the ELB is found, created or destroyed.
        """
        self.module = module
        self.name = name
        self.listeners = listeners
        self.purge_listeners = purge_listeners
        self.zones = zones
        self.purge_zones = purge_zones
        self.security_group_ids = security_group_ids
        self.health_check = health_check
        self.subnets = subnets
        self.purge_subnets = purge_subnets
        self.scheme = scheme
        self.connection_draining_timeout = connection_draining_timeout
        self.cross_az_load_balancing = cross_az_load_balancing
        self.stickiness = stickiness
        self.aws_connect_params = aws_connect_params
        self.region = region

        self.changed = False
        self.status = 'gone'
        self.elb_conn = self._get_elb_connection()
        # boto ELB object for an existing LB of this name, or None
        self.elb = self._get_elb()
    def ensure_ok(self):
        """Create the ELB if absent, otherwise reconcile zones, security
        groups, listeners and subnets; then apply health check, optional
        draining/cross-zone attributes and stickiness policies."""
        if not self.elb:
            # Zones and listeners will be added at creation
            self._create_elb()
        else:
            self._set_zones()
            self._set_security_groups()
            self._set_elb_listeners()
            self._set_subnets()
        self._set_health_check()
        # boto has introduced support for some ELB attributes in
        # different versions, so we check first before trying to
        # set them to avoid errors
        if self._check_attribute_support('connection_draining'):
            self._set_connection_draining_timeout()
        if self._check_attribute_support('cross_zone_load_balancing'):
            self._set_cross_az_load_balancing()
        # add sticky options
        self.select_stickiness_policy()
    def ensure_gone(self):
        """Destroy the ELB if it exists; no-op otherwise."""
        if self.elb:
            self._delete_elb()
    def get_info(self):
        """Return a dict describing the load balancer's current state.

        The ELB is re-fetched from AWS; if it no longer exists only
        'name' and 'status' are reported.  Otherwise DNS name, zones,
        security groups, subnets, scheme, hosted-zone ids, stickiness
        policy names, health check, listeners and (where boto supports
        them) connection-draining / cross-zone attributes are included.
        """
        try:
            check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
        except:
            check_elb = None

        if not check_elb:
            info = {
                'name': self.name,
                'status': self.status
            }
        else:
            # Stickiness policy names, if any are configured.
            try:
                lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
            except:
                lb_cookie_policy = None
            try:
                app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
            except:
                app_cookie_policy = None

            info = {
                'name': check_elb.name,
                'dns_name': check_elb.dns_name,
                'zones': check_elb.availability_zones,
                'security_group_ids': check_elb.security_groups,
                'status': self.status,
                'subnets': self.subnets,
                'scheme': check_elb.scheme,
                'hosted_zone_name': check_elb.canonical_hosted_zone_name,
                'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
                'lb_cookie_policy': lb_cookie_policy,
                'app_cookie_policy': app_cookie_policy
            }

            if check_elb.health_check:
                info['health_check'] = {
                    'target': check_elb.health_check.target,
                    'interval': check_elb.health_check.interval,
                    'timeout': check_elb.health_check.timeout,
                    'healthy_threshold': check_elb.health_check.healthy_threshold,
                    'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
                }

            if check_elb.listeners:
                info['listeners'] = [self._api_listener_as_tuple(l)
                                     for l in check_elb.listeners]
            elif self.status == 'created':
                # When creating a new ELB, listeners don't show in the
                # immediately returned result, so just include the
                # ones that were added
                info['listeners'] = [self._listener_as_tuple(l)
                                     for l in self.listeners]
            else:
                info['listeners'] = []

            if self._check_attribute_support('connection_draining'):
                info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout

            if self._check_attribute_support('cross_zone_load_balancing'):
                is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
                if is_cross_az_lb_enabled:
                    info['cross_az_load_balancing'] = 'yes'
                else:
                    info['cross_az_load_balancing'] = 'no'

            # return stickiness info?

        return info
    def _get_elb(self):
        """Return the existing boto ELB object matching self.name, or
        None (implicitly) when no match exists.

        Side effect: sets self.status to 'ok' when a match is found.
        """
        elbs = self.elb_conn.get_all_load_balancers()
        for elb in elbs:
            if self.name == elb.name:
                self.status = 'ok'
                return elb
    def _get_elb_connection(self):
        """Open a boto ELB connection for the configured region; fail the
        Ansible module on authentication or connection errors."""
        try:
            return connect_to_aws(boto.ec2.elb, self.region,
                                  **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            self.module.fail_json(msg=str(e))
    def _delete_elb(self):
        """Delete the load balancer and record the state change."""
        # True if succeeds, exception raised if not
        result = self.elb_conn.delete_load_balancer(name=self.name)
        if result:
            self.changed = True
            self.status = 'deleted'
    def _create_elb(self):
        """Create the load balancer with the requested zones, security
        groups, listeners, subnets and scheme; record the change."""
        listeners = [self._listener_as_tuple(l) for l in self.listeners]
        self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                      zones=self.zones,
                                                      security_groups=self.security_group_ids,
                                                      complex_listeners=listeners,
                                                      subnets=self.subnets,
                                                      scheme=self.scheme)
        if self.elb:
            self.changed = True
            self.status = 'created'
    def _create_elb_listeners(self, listeners):
        """Takes a list of listener tuples and creates them"""
        # True if succeeds, exception raised if not
        self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
                                                                    complex_listeners=listeners)
    def _delete_elb_listeners(self, listeners):
        """Takes a list of listener tuples and deletes them from the elb"""
        # The API removes listeners by their load-balancer port (tuple[0]).
        ports = [l[0] for l in listeners]
        # True if succeeds, exception raised if not
        self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
                                                                    ports)
    def _set_elb_listeners(self):
        """
        Creates listeners specified by self.listeners; overwrites existing
        listeners on these ports; removes extraneous listeners
        """
        listeners_to_add = []
        listeners_to_remove = []
        listeners_to_keep = []

        # Check for any listeners we need to create or overwrite
        for listener in self.listeners:
            listener_as_tuple = self._listener_as_tuple(listener)

            # First we loop through existing listeners to see if one is
            # already specified for this port
            existing_listener_found = None
            for existing_listener in self.elb.listeners:
                # Since ELB allows only one listener on each incoming port, a
                # single match on the incoming port is all we're looking for
                if existing_listener[0] == listener['load_balancer_port']:
                    existing_listener_found = self._api_listener_as_tuple(existing_listener)
                    break

            if existing_listener_found:
                # Does it match exactly?
                if listener_as_tuple != existing_listener_found:
                    # The ports are the same but something else is different,
                    # so we'll remove the existing one and add the new one
                    listeners_to_remove.append(existing_listener_found)
                    listeners_to_add.append(listener_as_tuple)
                else:
                    # We already have this listener, so we're going to keep it
                    listeners_to_keep.append(existing_listener_found)
            else:
                # We didn't find an existing listener, so just add the new one
                listeners_to_add.append(listener_as_tuple)

        # Check for any extraneous listeners we need to remove, if desired
        if self.purge_listeners:
            for existing_listener in self.elb.listeners:
                existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
                if existing_listener_tuple in listeners_to_remove:
                    # Already queued for removal
                    continue
                if existing_listener_tuple in listeners_to_keep:
                    # Keep this one around
                    continue
                # Since we're not already removing it and we don't need to keep
                # it, let's get rid of it
                listeners_to_remove.append(existing_listener_tuple)

        if listeners_to_remove:
            self._delete_elb_listeners(listeners_to_remove)

        if listeners_to_add:
            self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
listener['load_balancer_port'],
listener['instance_port'],
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
    def _enable_zones(self, zones):
        """Enable *zones* on the ELB; fail the Ansible module on an
        invalid zone name or other server error."""
        try:
            self.elb.enable_zones(zones)
        except boto.exception.BotoServerError, e:
            if "Invalid Availability Zone" in e.error_message:
                self.module.fail_json(msg=e.error_message)
            else:
                self.module.fail_json(msg="an unknown server error occurred, please try again later")
        self.changed = True
    def _disable_zones(self, zones):
        """Disable *zones* on the ELB; fail the Ansible module on an
        invalid zone name or other server error."""
        try:
            self.elb.disable_zones(zones)
        except boto.exception.BotoServerError, e:
            if "Invalid Availability Zone" in e.error_message:
                self.module.fail_json(msg=e.error_message)
            else:
                self.module.fail_json(msg="an unknown server error occurred, please try again later")
        self.changed = True
def _attach_subnets(self, subnets):
    """Attach the given subnets to the ELB and flag that a change was made."""
    self.elb_conn.attach_lb_to_subnets(self.name, subnets)
    self.changed = True
def _detach_subnets(self, subnets):
    """Detach the given subnets from the ELB and flag that a change was made."""
    self.elb_conn.detach_lb_from_subnets(self.name, subnets)
    self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids != None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.Changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
    """Return True when the installed boto version exposes *attr* on
    LbAttributes; used to gate ELB attribute features that require a
    newer boto release."""
    return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
attributes.cross_zone_load_balancing.enabled = True
else:
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
    """Delete the named policy from the given ELB."""
    self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
    """Replace an existing policy: delete it, then recreate it with the
    new parameter value.

    ``policy_attr`` is accepted by callers but unused here.
    """
    self._delete_policy(self.elb.name, policy)
    self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
    """Create or update the stickiness policy named in *policy* and bind
    it to all HTTP(S) listeners.

    policy_attrs carries: 'attr' (name of the policy list on
    elb_info.policies), 'dict_key' (attribute on the policy object that
    holds its parameter), 'param_value' (desired parameter value) and
    'method' (boto connection method used to (re)create the policy).
    """
    for p in getattr(elb_info.policies, policy_attrs['attr']):
        if str(p.__dict__['policy_name']) == str(policy[0]):
            if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value']):
                # Policy exists with a different parameter: unbind it from
                # the listeners, then delete and recreate it.
                self._set_listener_policy(listeners_dict)
                self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                self.changed = True
            break
    else:
        # for/else: runs only when no existing policy matched the name,
        # i.e. the loop finished without hitting break -- create it.
        self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
        self.changed = True
    # Finally (re)bind the policy to the HTTP(S) listeners.
    self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
    """Create, update or remove the cookie-stickiness policy described by
    self.stickiness and (re)bind it to the ELB's HTTP(S) listeners.

    self.stickiness['type'] selects between an LB-generated cookie
    ('loadbalancer', parameterized by 'expiration') and an
    application-generated cookie ('application', parameterized by
    'cookie').
    """
    if self.stickiness:
        if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
            self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
        elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
        # Map load balancer port -> protocol for each listener tuple.
        d = {}
        for listener in elb_info.listeners:
            d[listener[0]] = listener[2]
        listeners_dict = d
        if self.stickiness['type'] == 'loadbalancer':
            policy = []
            policy_type = 'LBCookieStickinessPolicyType'
            if self.stickiness['enabled'] == True:
                if 'expiration' not in self.stickiness:
                    self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                policy_attrs = {
                    'type': policy_type,
                    'attr': 'lb_cookie_stickiness_policies',
                    'method': 'create_lb_cookie_stickiness_policy',
                    'dict_key': 'cookie_expiration_period',
                    'param_value': self.stickiness['expiration']
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif self.stickiness['enabled'] == False:
                # Report a change only when a policy with our generated
                # name actually existed before removal.
                if len(elb_info.policies.lb_cookie_stickiness_policies):
                    if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                else:
                    self.changed = False
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))
        elif self.stickiness['type'] == 'application':
            policy = []
            policy_type = 'AppCookieStickinessPolicyType'
            if self.stickiness['enabled'] == True:
                if 'cookie' not in self.stickiness:
                    self.module.fail_json(msg='cookie must be set when type is application')
                policy_attrs = {
                    'type': policy_type,
                    'attr': 'app_cookie_stickiness_policies',
                    'method': 'create_app_cookie_stickiness_policy',
                    'dict_key': 'cookie_name',
                    'param_value': self.stickiness['cookie']
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif self.stickiness['enabled'] == False:
                if len(elb_info.policies.app_cookie_stickiness_policies):
                    if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                # NOTE(review): unlike the 'loadbalancer' branch there is no
                # else resetting self.changed here -- confirm this asymmetry
                # is intentional.
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))
    else:
        # No stickiness requested: clear any listener policies.
        self._set_listener_policy(listeners_dict)
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
    """Ansible entry point: parse module parameters, validate them, build
    an ElbManager and converge the ELB to the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False},
        connection_draining_timeout={'default': None, 'required': False},
        cross_az_load_balancing={'default': None, 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'}
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    health_check = module.params['health_check']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']

    # An ELB cannot be created without listeners and at least one zone/subnet.
    if state == 'present' and not listeners:
        module.fail_json(msg="At least one port is required for ELB creation")

    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")

    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, cross_az_load_balancing,
                         stickiness,
                         region=region, **aws_connect_params)

    # check for unsupported attributes for this version of boto
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")

    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")

    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()

    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# Fix: guard the entry point. Ansible executes modules as __main__, so
# behavior is unchanged there, but the file becomes importable (e.g. for
# testing) without immediately running main().
if __name__ == '__main__':
    main()
| az7arul/ansible-modules-core | cloud/amazon/ec2_elb_lb.py | Python | gpl-3.0 | 33,856 | [
"Dalton"
] | 4769f3e2da538609f0c7934221a41d5241212f22fe350fa2321a76eca4fff86d |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.regions.ImageSensorExplorers.BaseExplorer import BaseExplorer
class RandomJump(BaseExplorer):

  """
  This explorer randomly selects positions. It does not do any sweeping.
  """

  ############################################################################
  def __init__(self, jumpOffObject=False, numJumpsPerImage=None,
               numVisitsPerImage=None, spaceShape=None, *args, **kwargs):
    """
    Parameters:
    -----------------------------------------------------------------
    jumpOffObject: Whether the sensor can only include a part of the object,
                   as specified by the bounding box. If False, it will only
                   move to positions that include as much of the object as
                   possible.
    numJumpsPerImage: The number of iterations for which RandomJump
                   should dwell on one image before moving on to the next one.
    numVisitsPerImage: The number of times RandomJump should visit each
                   image (and do numJumpsPerImage jumps on it).
    spaceShape:    The (height, width) of the 2-D space to explore. This
                   constrains how far away from the center point an image
                   is allowed to be presented.
    """
    BaseExplorer.__init__(self, *args, **kwargs)

    # Validate parameters before storing them.
    if type(jumpOffObject) not in (bool, int):
      raise RuntimeError("'jumpOffObject' should be a boolean")
    if numJumpsPerImage is not None and type(numJumpsPerImage) is not int:
      raise RuntimeError("'numJumpsPerImage' should be an integer")
    if numVisitsPerImage is not None and type(numVisitsPerImage) is not int:
      raise RuntimeError("'numVisitsPerImage' should be an integer")
    if numVisitsPerImage is not None and numJumpsPerImage is None:
      raise RuntimeError("Must specify 'numJumpsPerImage'"
                         " when using 'numVisitsPerImage'")
    if spaceShape is not None and \
       (len(spaceShape) != 2 or spaceShape[0] < 1 or spaceShape[1] < 1):
      raise RuntimeError("'spaceShape' should be a 2-item tuple specifying the"
                         "(height, width) of the overall space to explore.")

    self.jumpOffObject = jumpOffObject
    self.numJumpsPerImage = numJumpsPerImage
    self.numVisitsPerImage = numVisitsPerImage
    self.spaceShape = spaceShape
    # Keeps track of how many jumps on this image
    self.numJumpsThisImage = 0
    self.lastImageIndex = None

  ############################################################################
  def first(self):
    """
    Set up the position.

    BaseExplorer picks image 0, offset (0,0), etc., but explorers that wish
    to set a different first position should extend this method. Such explorers
    may wish to call BaseExplorer.first(center=False), which initializes the
    position tuple but does not call centerImage() (which could cause
    unnecessary filtering to occur).
    """
    BaseExplorer.first(self, center=False)

    if not self.numImages:
      return

    # Keep jumping until we land on a non-blank position.
    isBlank = True
    while isBlank:
      # Pick a random position
      if not self.numJumpsPerImage or self.lastImageIndex is None or \
         (self.numJumpsThisImage % self.numJumpsPerImage == 0):
        # Pick new image
        image = self.pickRandomImage(self.random)
        self.lastImageIndex = image
        self.numJumpsThisImage = 0
      else:
        # Dwell on the image we were already showing.
        image = self.lastImageIndex
      self.position['image'] = image
      self.position['filters'] = self.pickRandomFilters(self.random)
      filteredImages = self.getFilteredImages()

      # Pick a random offset
      if self.spaceShape is not None:
        self.centerImage()
        # NOTE: self.position['offset'] is (x, y), whereas our spaceShape is
        # (height, width). Also note that the self.position['offset']
        # direction is counter-intuitive: negative numbers move us to the RIGHT
        # and DOWN instead of LEFT and UP.
        xOffset = self.random.randint(-(self.spaceShape[1]//2), self.spaceShape[1]//2)
        yOffset = self.random.randint(-(self.spaceShape[0]//2), self.spaceShape[0]//2)
        #print "(yOffset, xOffset) = ", yOffset, xOffset
        self.position['offset'][0] += xOffset
        self.position['offset'][1] += yOffset
      else:
        # No explicit space: choose any offset inside the effective
        # bounding box for this filtered image.
        ebbox = self._getEffectiveBoundingBox(filteredImages[0])
        self.position['offset'] = [
          self.random.randint(ebbox[0], ebbox[2]-1),
          self.random.randint(ebbox[1], ebbox[3]-1)
        ]

      # Check if the position is blank
      isBlank = self.isBlank(self.jumpOffObject)

    self.position['reset'] = True
    self.numJumpsThisImage += 1

  ############################################################################
  def next(self, seeking=False):
    """
    Go to the next position (next iteration).

    seeking -- Boolean that indicates whether the explorer is calling next()
      from seek(). If True, the explorer should avoid unnecessary computation
      that would not affect the seek command. The last call to next() from
      seek() will be with seeking=False.
    """
    # Every iteration is an independent random jump.
    self.first()

  ############################################################################
  def getNumIterations(self, image):
    """
    Get the number of iterations required to completely explore the input space.

    Explorers that do not wish to support this method should not override it.

    image -- If None, returns the sum of the iterations for all the loaded
      images. Otherwise, image should be an integer specifying the image for
      which to calculate iterations.

    ImageSensor takes care of the input validation.
    """
    # Without numVisitsPerImage the exploration is unbounded (-1).
    if self.numVisitsPerImage is None:
      return -1
    totalPerImage = self.numFilteredVersionsPerImage * self.numJumpsPerImage \
        * self.numVisitsPerImage
    if image is None:
      return totalPerImage * self.numImages
    else:
      return totalPerImage

  ############################################################################
  def _getEffectiveBoundingBox(self, image):
    """
    Calculate the 'effective' bounding box from the image's bounding box,
    taking into account the jumpOffObject parameter.

    The effective bounding box determines which offsets the explorer should
    consider. If 'ebbox' is the bounding box returned from this method, valid
    offsets [x,y] are bounded by:
    ebbox[0] <= x < ebbox[2]
    ebbox[1] <= y < ebbox[3].
    """
    # Bounding box of the image's alpha channel (second band from split()).
    bbox = image.split()[1].getbbox()
    if self.jumpOffObject:
      startX = bbox[0] - self.enabledWidth + 1
      startY = bbox[1] - self.enabledHeight + 1
      endX = bbox[2]
      endY = bbox[3]
    else:
      startX = min(bbox[0], bbox[2] - self.enabledWidth)
      startY = min(bbox[1], bbox[3] - self.enabledHeight)
      endX = max(bbox[0], bbox[2] - self.enabledWidth) + 1
      endY = max(bbox[1], bbox[3] - self.enabledHeight) + 1
    return (startX, startY, endX, endY)
"VisIt"
] | cbaa5ac4e51d3fb740a37c977c23b020658e5660678e3fd94651d6577423a756 |
import sys
import ast
from llvm.core import *
from llvm.ee import *
# Commonly used LLVM types, named after their IR spellings.
i64 = Type.int(64)      # 64-bit integer
i32 = Type.int(32)      # 32-bit integer
i8 = Type.int(8)        # 8-bit integer (a C char)
i8ptr = Type.pointer(i8)  # i8* -- a C string pointer
def c_i32(n):
    """Return an LLVM i32 constant holding *n*."""
    return Constant.int(i32, n)
class Context(object):
    """Bundles the LLVM codegen state threaded through compilation: the
    module, the current basic block, the instruction builder, and the
    main function."""
    def __init__(self, mod, block, builder, main):
        object.__init__(self)
        self.module = mod
        self.block = block
        self.builder = builder
        self.main = main
def fmtstr(value):
    """Return the printf format directive for an LLVM constant.

    Global string constants carry their payload in ``initializer``;
    plain numeric constants are inspected directly.
    """
    formats = {
        ConstantInt: "%d",
        ConstantFP: "%f",
        ConstantArray: "%s",
    }
    # Globals expose the constant via .initializer; fall back to the
    # value itself for bare numeric constants.
    payload = getattr(value, 'initializer', value)
    return formats[type(payload)]
def maybegep(value):
    """Return *value* unchanged for scalar constants; for aggregates
    (e.g. global string arrays) return a GEP to their first element so
    they decay to a pointer suitable for printf."""
    is_scalar = type(value) in (ConstantInt, ConstantFP)
    return value if is_scalar else value.gep([c_i32(0), c_i32(0)])
class LLVMNodeVisitor(ast.NodeVisitor):
    """AST visitor that emits LLVM IR for the tiny subset of Python this
    compiler handles: string/number literals and ``print`` statements."""

    def __init__(self, mod, builder):
        ast.NodeVisitor.__init__(self)
        self.mod = mod
        self.builder = builder
        self.stack = []   # value stack holding emitted expression results
        self._sn = 0      # counter for unique string-constant names
        self._nn = 0      # reserved counter (currently unused)

    def strconst(self, value):
        """Emit *value* as an internal, NUL-terminated global i8 array."""
        i8arr = Type.array(i8, len(value) + 1)
        gv = GlobalVariable.new(self.mod, i8arr, ".str%d" % self._sn)
        gv.initializer = Constant.stringz(value)
        gv.linkage = LINKAGE_INTERNAL
        gv.global_constant = True
        self._sn += 1
        return gv

    def numconst(self, value):
        """Return an LLVM constant for an int/long/float literal."""
        if type(value) is int:
            numty = Type.int(32)
            value = Constant.int(numty, value)
        elif type(value) is long:
            numty = Type.int(64)  # XXX
            value = Constant.int(numty, value)
        elif type(value) is float:
            numty = Type.float(32)
            value = Constant.float(numty, value)
        else:
            # Bug fix: this used to `raise Error()`, an undefined name,
            # which surfaced as a confusing NameError at runtime.
            raise TypeError("unsupported numeric literal type: %r" % type(value))
        return value

    def visit_Str(self, node):
        self.stack.append(self.strconst(node.s))

    def visit_Num(self, node):
        self.stack.append(self.numconst(node.n))

    def visit_Print(self, node):
        """Lower a print statement to a call to the C runtime's printf."""
        nargs = 0
        for expr in node.values:
            self.visit(expr)
            nargs += 1
        args = self.popn(nargs)
        # Build the format string from the argument types, e.g. "%s %d\n".
        fmt = " ".join([fmtstr(arg) for arg in args]) + "\n"
        fmt = self.strconst(fmt)
        args = [maybegep(arg) for arg in args]
        args = [maybegep(fmt)] + args
        printf = self.mod.get_function_named("printf")
        self.builder.call(printf, args)

    def popn(self, n):
        """Pop and return the top *n* stack values, oldest first.

        Bug fix: guard n == 0 -- a slice of ``stack[-0:]`` is the whole
        list, so the old code drained the entire stack for a bare
        ``print`` with no arguments.
        """
        if n == 0:
            return []
        r = self.stack[-n:]
        self.stack = self.stack[:-n]
        return r
def initcontext():
    """Create a fresh module containing a printf declaration and an empty
    main() whose entry block is ready for code emission."""
    module = Module.new("<stdin>")
    # Declare the C runtime's variadic printf: i32 printf(i8*, ...)
    module.add_function(Type.function(i32, [i8ptr], True), "printf")
    main_fn = module.add_function(Type.function(i32, []), "main")
    entry = main_fn.append_basic_block("entry")
    return Context(module, entry, Builder.new(entry), main_fn)
def main():
    """Compile Python source read from stdin to LLVM IR and JIT-execute
    the resulting main() function."""
    root = ast.parse(sys.stdin.read())
    ast.fix_missing_locations(root)
    ctx = initcontext()
    visitor = LLVMNodeVisitor(ctx.module, ctx.builder)
    visitor.visit(root)
    # Terminate main() with `ret i32 0`.
    ctx.builder.ret(Constant.int(i32, 0))
    # print ctx.module
    ee = ExecutionEngine.new(ctx.module)
    ee.run_function(ctx.main, [])

if __name__ == '__main__':
    main()
| thomaslee/snappy | snappy.py | Python | mit | 3,232 | [
"VisIt"
] | 6827a220ae94cf751ddffd7cc71c3922c00e5970c1f840cab08f52d676f6a10b |
# tests.test_features.test_rankd
# Test the rankd feature analysis visualizers
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri Oct 07 12:19:19 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_rankd.py [01d5996] benjamin@bengfort.com $
"""
Test the Rankd feature analysis visualizers
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
import numpy.testing as npt
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from yellowbrick.datasets import load_occupancy
from yellowbrick.features.rankd import *
from yellowbrick.features.rankd import kendalltau
from yellowbrick.features.rankd import RankDBase
from sklearn.datasets import make_regression
from yellowbrick.exceptions import YellowbrickValueError
# pandas is an optional test dependency; tests that need it are skipped
# via `pytest.mark.skipif(pd is None, ...)` when it is unavailable.
try:
    import pandas as pd
except ImportError:
    pd = None
@pytest.fixture(scope='class')
def dataset(request):
    """
    Creates a dataset with 6 gaussian features and 2 categorical features
    for testing the RankD ranking algorithms. The gaussian features have
    different correlations with respect to each other, including strong
    positive and negative correlation and no correlation at all.
    """
    X, _ = make_regression(
        n_samples=100, n_features=6, effective_rank=2, tail_strength=0,
        n_informative=2, noise=.45, random_state=27,
    )

    # Fixed seed keeps the two binomial "categorical" columns reproducible.
    rand = np.random.RandomState(seed=27)
    # Attach to the test class so @pytest.mark.usefixtures exposes it
    # as self.dataset: 6 gaussian columns + 2 binary columns.
    request.cls.dataset = np.concatenate((X, rand.binomial(1, 0.6, (100,2))), axis=1)
##########################################################################
## Kendall-Tau Tests
##########################################################################
@pytest.mark.usefixtures("dataset")
class TestKendallTau(object):
    """
    Test the Kendall-Tau correlation metric
    """

    def test_kendalltau(self):
        """
        Test results returned match expectations
        """
        # 8x8 correlation matrix expected for the fixture dataset.
        expected = np.array([
            [1.0, -0.68, -0.57454545, 0.49858586, 0.07555556, -0.05858586, 0.02387848, 0.11357219],
            [-0.68, 1.0, 0.58666667, -0.69090909, -0.22262626, -0.17171717, -0.05059964, -0.12397575],
            [-0.57454545, 0.58666667, 1.0, -0.61050505, 0.18909091, 0.07515152, 0.00341121, -0.0638663],
            [0.49858586, -0.69090909, -0.61050505, 1.0, 0.11070707, 0.3030303, 0.03013237, 0.07542581],
            [ 0.07555556, -0.22262626, 0.18909091, 0.11070707, 1.0, 0.4610101, 0.01648752, 0.05982047],
            [-0.05858586, -0.17171717, 0.07515152, 0.3030303, 0.4610101, 1.0, 0.03695479, -0.02398599],
            [0.02387848, -0.05059964, 0.00341121, 0.03013237, 0.01648752, 0.03695479, 1.0, 0.18298883],
            [0.11357219, -0.12397575, -0.0638663, 0.07542581, 0.05982047, -0.02398599, 0.18298883, 1.0]
        ])
        npt.assert_almost_equal(expected, kendalltau(self.dataset))

    def test_kendalltau_shape(self):
        """
        Assert that a square correlation matrix is returned
        """
        corr = kendalltau(self.dataset)
        assert corr.shape[0] == corr.shape[1]
        # A correlation matrix must also be symmetric.
        for (i, j), val in np.ndenumerate(corr):
            assert corr[j][i] == pytest.approx(val)

    def test_kendalltau_1D(self):
        """
        Assert that a 2D matrix is required as input
        """
        with pytest.raises(IndexError, match="tuple index out of range"):
            X = 0.1 * np.arange(10)
            kendalltau(X)
##########################################################################
## RankDBase Tests
##########################################################################
@pytest.mark.usefixtures("dataset")
class TestRankDBase(VisualTestCase):
    """
    Test the RankDBase Visualizer
    """

    def test_rankdbase_unknown_algorithm(self):
        """
        Assert that unknown algorithms raise an exception
        """
        with pytest.raises(YellowbrickValueError,
                           match='.* is unrecognized ranking method') as e:
            oz = RankDBase(algorithm='unknown')
            oz.fit_transform(self.dataset)
        assert str(e.value) == "'unknown' is unrecognized ranking method"
##########################################################################
## Rank1D Base Tests
##########################################################################
@pytest.mark.usefixtures("dataset")
class TestRank1D(VisualTestCase):
    """
    Test the Rank1D visualizer
    """

    def test_rank1d_shapiro(self):
        """
        Test Rank1D using shapiro metric
        """
        oz = Rank1D(algorithm='shapiro')
        # fit_transform is a passthrough: the data itself is unchanged.
        npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)

        # Check Ranking: one Shapiro statistic per feature column.
        expected = np.array([
            0.985617, 0.992236, 0.982354, 0.984898,
            0.978514, 0.990372, 0.636401, 0.624511
        ])

        assert hasattr(oz, 'ranks_')
        assert oz.ranks_.shape == (self.dataset.shape[1],)
        npt.assert_array_almost_equal(oz.ranks_, expected)

        # Image similarity comparison
        oz.finalize()
        self.assert_images_similar(oz)

    def test_rank1d_orientation(self):
        """
        Test Rank1D using vertical orientation
        """
        oz = Rank1D(orient='v')
        npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)

        # Image similarity comparison
        oz.finalize()
        self.assert_images_similar(oz)

    @pytest.mark.filterwarnings("ignore:p-value")
    @pytest.mark.skipif(pd is None, reason="test requires pandas")
    def test_rank1d_integrated_pandas(self):
        """
        Test Rank1D on occupancy dataset with pandas DataFrame and Series
        """
        data = load_occupancy(return_dataset=True)
        X, y = data.to_pandas()
        features = data.meta["features"]

        assert isinstance(X, pd.DataFrame)
        assert isinstance(y, pd.Series)

        # Test the visualizer
        oz = Rank1D(features=features, show_feature_names=True)
        assert oz.fit(X, y) is oz
        assert oz.transform(X) is X

        # Image similarity testing
        oz.finalize()
        self.assert_images_similar(oz)

    @pytest.mark.filterwarnings("ignore:p-value")
    def test_rank1d_integrated_numpy(self):
        """
        Test Rank1D on occupancy dataset with default numpy data structures
        """
        data = load_occupancy(return_dataset=True)
        X, y = data.to_numpy()
        features = data.meta["features"]

        assert isinstance(X, np.ndarray)
        assert isinstance(y, np.ndarray)

        # Test the visualizer
        oz = Rank1D(features=features, show_feature_names=True)
        assert oz.fit(X, y) is oz
        assert oz.transform(X) is X

        # Image similarity testing
        oz.finalize()
        self.assert_images_similar(oz)
##########################################################################
## Rank2D Test Cases
##########################################################################
@pytest.mark.usefixtures("dataset")
class TestRank2D(VisualTestCase):
    """
    Test the Rank2D visualizer
    """

    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="font rendering different in OS and/or Python; see #892"
    )
    def test_rank2d_pearson(self):
        """
        Test Rank2D using pearson metric
        """
        oz = Rank2D(algorithm='pearson')
        # fit_transform is a passthrough: the data itself is unchanged.
        npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)

        # Check Ranking: 8x8 Pearson correlation matrix.
        expected = np.array([
            [ 1., -0.86937243, -0.77884764, 0.71424708, 0.10836854, -0.11550965, 0.04494811, 0.1725682 ],
            [-0.86937243, 1. , 0.80436327, -0.9086706 , -0.31117192, -0.26313947, -0.0711807 , -0.16924862],
            [-0.77884764, 0.80436327, 1. , -0.85520468, 0.30940711, 0.10634903, -0.02485686, -0.10230028],
            [ 0.71424708, -0.9086706 , -0.85520468, 1. , 0.12537213, 0.41306822, 0.04704408, 0.1031842 ],
            [ 0.10836854, -0.31117192, 0.30940711, 0.12537213, 1. , 0.671111 , 0.06777278, 0.09513859],
            [-0.11550965, -0.26313947, 0.10634903, 0.41306822, 0.671111 , 1. , 0.04684117, -0.01072631],
            [ 0.04494811, -0.0711807 , -0.02485686, 0.04704408, 0.06777278, 0.04684117, 1. , 0.18298883],
            [ 0.1725682 , -0.16924862, -0.10230028, 0.1031842 , 0.09513859, -0.01072631, 0.18298883, 1. ]
        ])

        assert hasattr(oz, 'ranks_')
        assert oz.ranks_.shape == (self.dataset.shape[1], self.dataset.shape[1])
        npt.assert_array_almost_equal(oz.ranks_, expected)

        # Image similarity comparision
        oz.finalize()
        self.assert_images_similar(oz, tol=0.1)

    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="font rendering different in OS and/or Python; see #892"
    )
    def test_rank2d_covariance(self):
        """
        Test Rank2D using covariance metric
        """
        oz = Rank2D(algorithm='covariance')
        npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)

        # Check Ranking: 8x8 covariance matrix.
        expected = np.array([
            [ 4.09266931e-03, -1.41062431e-03, -2.26778429e-03, 3.13507202e-03, 2.21273274e-04, -5.05566875e-04, 1.44499782e-03, 5.45713163e-03],
            [-1.41062431e-03, 6.43286363e-04, 9.28539346e-04, -1.58126396e-03, -2.51898163e-04, -4.56609749e-04, -9.07228811e-04, -2.12191333e-03],
            [-2.26778429e-03, 9.28539346e-04, 2.07153281e-03, -2.67061756e-03, 4.49467833e-04, 3.31158917e-04, -5.68518509e-04, -2.30156415e-03],
            [ 3.13507202e-03, -1.58126396e-03, -2.67061756e-03, 4.70751209e-03, 2.74548546e-04, 1.93898526e-03, 1.62200836e-03, 3.49952628e-03],
            [ 2.21273274e-04, -2.51898163e-04, 4.49467833e-04, 2.74548546e-04, 1.01869657e-03, 1.46545939e-03, 1.08700151e-03, 1.50099581e-03],
            [-5.05566875e-04, -4.56609749e-04, 3.31158917e-04, 1.93898526e-03, 1.46545939e-03, 4.68073451e-03, 1.61041253e-03, -3.62750059e-04],
            [ 1.44499782e-03, -9.07228811e-04, -5.68518509e-04, 1.62200836e-03, 1.08700151e-03, 1.61041253e-03, 2.52525253e-01, 4.54545455e-02],
            [ 5.45713163e-03, -2.12191333e-03, -2.30156415e-03, 3.49952628e-03, 1.50099581e-03, -3.62750059e-04, 4.54545455e-02, 2.44343434e-01]
        ])

        assert hasattr(oz, 'ranks_')
        assert oz.ranks_.shape == (self.dataset.shape[1], self.dataset.shape[1])
        npt.assert_array_almost_equal(oz.ranks_, expected)

        # Image similarity comparision
        oz.finalize()
        self.assert_images_similar(oz, tol=0.1)

    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="font rendering different in OS and/or Python; see #892"
    )
    def test_rank2d_spearman(self):
        """
        Test Rank2D using spearman metric
        """
        oz = Rank2D(algorithm='spearman')
        npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)

        # Check Ranking: 8x8 Spearman correlation matrix.
        expected = np.array([
            [ 1. , -0.86889889, -0.77551755, 0.68520852, 0.11369937, -0.09489349, 0.02909991, 0.13840665],
            [-0.86889889, 1. , 0.78232223, -0.87065107, -0.33450945, -0.25244524, -0.06166409, -0.15108512],
            [-0.77551755, 0.78232223, 1. , -0.81636964, 0.26846685, 0.10348635, 0.00415713, -0.07783173],
            [ 0.68520852, -0.87065107, -0.81636964, 1. , 0.16316832, 0.45167717, 0.03672131, 0.09191892],
            [ 0.11369937, -0.33450945, 0.26846685, 0.16316832, 1. , 0.63986799, 0.02009279, 0.07290121],
            [-0.09489349, -0.25244524, 0.10348635, 0.45167717, 0.63986799, 1. , 0.04503557, -0.02923092],
            [ 0.02909991, -0.06166409, 0.00415713, 0.03672131, 0.02009279, 0.04503557, 1. , 0.18298883],
            [ 0.13840665, -0.15108512, -0.07783173, 0.09191892, 0.07290121, -0.02923092, 0.18298883, 1. ]
        ])

        assert hasattr(oz, 'ranks_')
        assert oz.ranks_.shape == (self.dataset.shape[1], self.dataset.shape[1])
        npt.assert_array_almost_equal(oz.ranks_, expected)

        # Image similarity comparision
        oz.finalize()
        self.assert_images_similar(oz, tol=0.1)

    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="font rendering different in OS and/or Python; see #892"
    )
    def test_rank2d_kendalltau(self):
        """
        Test Rank2D using kendalltau metric
        """
        oz = Rank2D(algorithm='kendalltau')
        npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)

        # Check Ranking: 8x8 Kendall-Tau correlation matrix.
        expected = np.array([
            [1.0, -0.68, -0.57454545, 0.49858586, 0.07555556, -0.05858586, 0.02387848, 0.11357219],
            [-0.68, 1.0, 0.58666667, -0.69090909, -0.22262626, -0.17171717, -0.05059964, -0.12397575],
            [-0.57454545, 0.58666667, 1.0, -0.61050505, 0.18909091, 0.07515152, 0.00341121, -0.0638663],
            [0.49858586, -0.69090909, -0.61050505, 1.0, 0.11070707, 0.3030303, 0.03013237, 0.07542581],
            [ 0.07555556, -0.22262626, 0.18909091, 0.11070707, 1.0, 0.4610101, 0.01648752, 0.05982047],
            [-0.05858586, -0.17171717, 0.07515152, 0.3030303, 0.4610101, 1.0, 0.03695479, -0.02398599],
            [0.02387848, -0.05059964, 0.00341121, 0.03013237, 0.01648752, 0.03695479, 1.0, 0.18298883],
            [0.11357219, -0.12397575, -0.0638663, 0.07542581, 0.05982047, -0.02398599, 0.18298883, 1.0]
        ])

        assert hasattr(oz, 'ranks_')
        assert oz.ranks_.shape == (self.dataset.shape[1], self.dataset.shape[1])
        npt.assert_array_almost_equal(oz.ranks_, expected)

        # Image similarity comparision
        oz.finalize()
        self.assert_images_similar(oz, tol=0.1)

    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="font rendering different in OS and/or Python; see #892"
    )
    @pytest.mark.skipif(pd is None, reason="test requires pandas")
    def test_rank2d_integrated_pandas(self):
        """
        Test Rank2D on occupancy dataset with pandas DataFrame and Series
        """
        data = load_occupancy(return_dataset=True)
        X, y = data.to_pandas()
        features = data.meta["features"]

        assert isinstance(X, pd.DataFrame)
        assert isinstance(y, pd.Series)

        # Test the visualizer
        oz = Rank2D(features=features, show_feature_names=True)
        assert oz.fit(X, y) is oz
        assert oz.transform(X) is X
        oz.finalize()

        # Image similarity testing
        self.assert_images_similar(oz, tol=0.1)

    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="font rendering different in OS and/or Python; see #892"
    )
    def test_rank2d_integrated_numpy(self):
        """
        Test Rank2D on occupancy dataset with numpy ndarray
        """
        data = load_occupancy(return_dataset=True)
        X, y = data.to_numpy()
        features = data.meta["features"]

        assert isinstance(X, np.ndarray)
        assert isinstance(y, np.ndarray)

        # Test the visualizer
        oz = Rank2D(features=features, show_feature_names=True)
        assert oz.fit(X, y) is oz
        assert oz.transform(X) is X
        oz.finalize()

        # Image similarity testing
        self.assert_images_similar(oz, tol=0.1)
| pdamodaran/yellowbrick | tests/test_features/test_rankd.py | Python | apache-2.0 | 15,507 | [
"Gaussian"
] | 9cd263c9735fb4ef1ab70bcf6d4c96dcf94d7e0a1c40a40b9b8f372ebf078170 |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('search_repositories')
@click.argument("q", type=str)
@click.option(
"--page",
help="page requested",
default="1",
show_default=True,
type=int
)
@click.option(
"--page_size",
help="page size requested",
default="10",
show_default=True,
type=int
)
@pass_context
@custom_exception
@json_output
def cli(ctx, q, page=1, page_size=10):
"""Search for repositories in a Galaxy Tool Shed.
Output:
dictionary containing search hits as well as metadata for the
search.
For example::
{'hits': [{'matched_terms': [],
'repository': {'approved': 'no',
'description': 'Convert export file to fastq',
'full_last_updated': '2015-01-18 09:48 AM',
'homepage_url': '',
'id': 'bdfa208f0cf6504e',
'last_updated': 'less than a year',
'long_description': 'This is a simple too to convert Solexas Export files to FASTQ files.',
'name': 'export_to_fastq',
'remote_repository_url': '',
'repo_owner_username': 'louise',
'times_downloaded': 164},
'score': 4.92},
{'matched_terms': [],
'repository': {'approved': 'no',
'description': 'Convert BAM file to fastq',
'full_last_updated': '2015-04-07 11:57 AM',
'homepage_url': '',
'id': '175812cd7caaf439',
'last_updated': 'less than a month',
'long_description': 'Use Picards SamToFastq to convert a BAM file to fastq. Useful for storing reads as BAM in Galaxy and converting to fastq when needed for analysis.',
'name': 'bam_to_fastq',
'remote_repository_url': '',
'repo_owner_username': 'brad-chapman',
'times_downloaded': 138},
'score': 4.14}],
'hostname': 'https://testtoolshed.g2.bx.psu.edu/',
'page': '1',
'page_size': '2',
'total_results': '64'}
"""
return ctx.ti.repositories.search_repositories(q, page=page, page_size=page_size)
| galaxy-iuc/parsec | parsec/commands/toolshed_repositories/search_repositories.py | Python | apache-2.0 | 2,811 | [
"Galaxy"
] | ae06a7c33c124b98ab4238fd3079e47e2eac6035b2c1e4590bd9b3c10327f007 |
import sys
import os
import numpy as np
import h5py
import multiprocessing
import cPickle
import ephem
import matplotlib.pyplot as plt
import types
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import train_test_split
from sklearn import metrics, linear_model, tree, ensemble
# NOTE: endless empehm warnings
# DeprecationWarning: PyOS_ascii_strtod and PyOS_ascii_atof are deprecated. Use PyOS_string_to_double instead.
# https://github.com/brandon-rhodes/pyephem/issues/18
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# runs on gp4 outputs
fMapper = {
"apcp_sfc" : "Total_precipitation",
"dlwrf_sfc" : "Downward_Long-Wave_Rad_Flux",
"dswrf_sfc" : "Downward_Short-Wave_Rad_Flux",
"pres_msl" : "Pressure",
"pwat_eatm" : "Precipitable_water",
"spfh_2m" : "Specific_humidity_height_above_ground",
"tcdc_eatm" : "Total_cloud_cover",
"tcolc_eatm" : "Total_Column-Integrated_Condensate",
"tmax_2m" : "Maximum_temperature",
"tmin_2m" : "Minimum_temperature",
"tmp_2m" : "Temperature_height_above_ground",
"tmp_sfc" : "Temperature_surface",
"ulwrf_sfc" : "Upward_Long-Wave_Rad_Flux_surface",
"ulwrf_tatm" : "Upward_Long-Wave_Rad_Flux",
"uswrf_sfc" : "Upward_Short-Wave_Rad_Flux"
}
fKeys = ("apcp_sfc", "dlwrf_sfc", "dswrf_sfc", "pres_msl", "pwat_eatm",
"spfh_2m", "tcdc_eatm", "tcolc_eatm", "tmax_2m", "tmin_2m",
"tmp_2m", "tmp_sfc", "ulwrf_sfc", "ulwrf_tatm", "uswrf_sfc")
NPTSt = 5113 # Train
NPTSp = 1796 # Predict
# Minimal script for gaussian process estimation
class Mesonet(object):
dtimet = np.recarray((NPTSt,), dtype={"names": ("time",),
"formats": ("datetime64[D]",)})
dtimep = np.recarray((NPTSp,), dtype={"names": ("time",),
"formats": ("datetime64[D]",)})
def __init__(self, stid, nlat, elon, elev):
self.stid = stid
self.nlat = nlat
self.elon = elon
self.elev = elev
# Measured data
self.datat = np.recarray((NPTSt,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
"formats": (np.int64, np.float64, np.float64)})
self.datap = np.recarray((NPTSp,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
"formats": (np.int64, np.float64, np.float64)})
def setAstro(self, time, data):
sun = ephem.Sun()
moon = ephem.Moon()
obs = ephem.Observer()
obs.lon = (self.elon * np.pi / 180) # need radians
obs.lat = (self.nlat * np.pi / 180) # need radians
obs.elevation = self.elev # meters
for i in range(len(time)):
obs.date = str(time[i])
sun.compute(obs)
moon.compute(obs)
# LOGIT ASTRO TERMS
# Sun Alt goes from 0 to 90
# Moon phase goes from 0 to 1
salt = float(180 / np.pi * sun.transit_alt)
salt /= 90.0
mphase = moon.moon_phase
data["sun_alt"][i] = np.log(salt / (1.0 - salt))
data["moon_phase"][i] = np.log(mphase / (1.0 - mphase))
def regressTest(feattr, featcv, fluxtr, fluxcv):
alphas = np.logspace(-5, 1, 6, base=10)
models = []
for alpha in alphas:
models.append(linear_model.Ridge(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.Lasso(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.LassoLars(normalize=True, fit_intercept=True, alpha=alpha))
models.append(ensemble.RandomForestRegressor())
models.append(ensemble.ExtraTreesRegressor())
models.append(ensemble.AdaBoostRegressor())
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=100))
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=1000))
models.append(tree.DecisionTreeRegressor())
models.append(tree.ExtraTreeRegressor())
maes = []
for m in range(len(models)):
model = models[m]
fit = model.fit(feattr, fluxtr)
preds = fit.predict(featcv)
mae = metrics.mean_absolute_error(fluxcv, preds)
print " MAE %d: %.1f" % (m, mae)
maes.append(mae)
idx = np.argsort(maes)
model = models[idx[0]]
print "BEST", maes[idx[0]], model
return model.fit(np.vstack((feattr, featcv)),
np.hstack((fluxtr, fluxcv))
) # fit all data
def sigclip(data, switch):
mean = np.mean(data, axis=1)
std = np.std(data, axis=1)
idx = np.where(std == 0.0)
std[idx] = 1e10
if switch:
nsig = np.abs(data - mean[:,np.newaxis,:]) / std[:,np.newaxis,:]
else:
nsig = np.abs(data - mean[:,np.newaxis]) / std[:,np.newaxis]
idx = np.where(nsig > 3.0)
ma = np.ma.array(data)
ma[idx] = np.ma.masked
return ma.mean(axis=1).data
if __name__ == "__main__":
suffix = sys.argv[1]
useAstro = 1
trainFile = "gp4_train_%s.pickle" % (suffix)
predFile = "gp4_test_%s.pickle" % (suffix)
if suffix.find("logit") > -1:
buff = open(trainFile, "rb")
train, fmin, fmax = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred, fmin, fmax = cPickle.load(buff)
buff.close()
else:
buff = open(trainFile, "rb")
train = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred = cPickle.load(buff)
buff.close()
# Need to load the positions and times of training data
sdata = np.loadtxt("../station_info.csv", delimiter=",", skiprows=1,
dtype = [("stid", np.str_, 4),
("nlat", np.float64),
("elon", np.float64),
("elev", np.float64)])
fields = np.loadtxt("../train.csv", skiprows=1, delimiter=",", dtype=np.int64)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[:,0]]
Mesonet.dtimet = dates
mesonets = {}
for sidx in range(len(sdata)):
s = sdata[sidx]
station = Mesonet(s[0], s[1], s[2], s[3])
station.datat["flux"] = fields[:,sidx+1]
mesonets[s[0]] = station
# Dates of prediction data
fields = np.loadtxt("../sampleSubmission.csv", skiprows=1, delimiter=",", unpack=True).astype(np.int)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[0]]
Mesonet.dtimep = dates
# Do we do Astro terms?
if useAstro:
for mesonet in mesonets.values():
mesonet.setAstro(mesonet.dtimet, mesonet.datat)
mesonet.setAstro(mesonet.dtimep, mesonet.datap)
nCv = 1000
nTr = NPTSt-nCv
# Regress each Mesonet site on its own
for mKey in mesonets.keys():
print "%s" % (mKey)
feattr = np.empty((nTr, len(fKeys) + 2 * useAstro))
featcv = np.empty((nCv, len(fKeys) + 2 * useAstro))
for f in range(len(fKeys)):
fKey = fKeys[f]
data1 = train[mKey].pdata[fKey]
data2 = np.mean(data1, axis=1) # Just use the average of the 5 terms
feattr[:,f] = data2[:-nCv]
featcv[:,f] = data2[-nCv:]
if useAstro:
feattr[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"][:-nCv]
feattr[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][:-nCv]
featcv[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"][-nCv:]
featcv[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][-nCv:]
fluxtr = mesonets[mKey].datat["flux"][:-nCv]
fluxcv = mesonets[mKey].datat["flux"][-nCv:]
regressTest(feattr, featcv, fluxtr, fluxcv)
##########3
##########3
##########3
##########3
# Now regress all sites at once
print "ALL"
feattr = np.empty((nTr * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxtr = np.empty((nTr * len(mesonets.keys())))
featcv = np.empty((nCv * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxcv = np.empty((nCv * len(mesonets.keys())))
fIdx = 0
for mKey in mesonets.keys():
for f in range(len(fKeys)):
fKey = fKeys[f]
data1 = train[mKey].pdata[fKey]
data2 = np.mean(data1, axis=1) # Just use the average of the 5 terms
feattr[fIdx*nTr:(fIdx*nTr + nTr),f] = data2[:-nCv]
featcv[fIdx*nCv:(fIdx*nCv + nCv),f] = data2[-nCv:]
if useAstro:
feattr[fIdx*nTr:(fIdx*nTr + nTr),len(fKeys)] = mesonets[mKey].datat["sun_alt"][:-nCv]
feattr[fIdx*nTr:(fIdx*nTr + nTr),len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][:-nCv]
featcv[fIdx*nCv:(fIdx*nCv + nCv),len(fKeys)] = mesonets[mKey].datat["sun_alt"][-nCv:]
featcv[fIdx*nCv:(fIdx*nCv + nCv),len(fKeys)+1] = mesonets[mKey].datat["moon_phase"][-nCv:]
fluxtr[fIdx*nTr:(fIdx*nTr + nTr)] = mesonets[mKey].datat["flux"][:-nCv]
fluxcv[fIdx*nCv:(fIdx*nCv + nCv)] = mesonets[mKey].datat["flux"][-nCv:]
fIdx += 1
regressTest(feattr, featcv, fluxtr, fluxcv)
| acbecker/solar | gp4/regress11.py | Python | mit | 9,521 | [
"Gaussian"
] | 4f771d81e879fd3a32a615d13a3367b7a3e3569af38a41220aa6c964b847897e |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptInterfaceHelper, script_interface_register
@script_interface_register
class CylindricalTransformationParameters(ScriptInterfaceHelper):
"""
Class to hold and validate the parameters needed for a cylindrical transformation.
The three parameters are available as attributes but are read-only.
Parameters
----------
center : (3,) array_like of :obj:`float`, default = [0, 0, 0]
Position of the origin of the cylindrical coordinate system.
axis : (3,) array_like of :obj:`float`, default = [0, 0, 1]
Orientation vector of the ``z``-axis of the cylindrical coordinate system.
orientation: (3,) array_like of :obj:`float`, default = [1, 0, 0]
The axis on which ``phi = 0``.
Notes
-----
If you provide no arguments, the defaults above are set.
If you provide only a ``center`` and an ``axis``, an ``orientation`` will be automatically generated that is orthogonal to ``axis``.
"""
_so_name = "CylindricalTransformationParameters"
| espressomd/espresso | src/python/espressomd/math.py | Python | gpl-3.0 | 1,754 | [
"ESPResSo"
] | b6d0c510784557d9288fa2d464dae0841493732b6cb2a494813d2cca739fea9e |
""" :mod: RequestTaskTests
=======================
.. module: RequestTaskTests
:synopsis: test cases for RequestTask class
test cases for RequestTask class
"""
from __future__ import print_function
__RCSID__ = "$Id $"
# #
# @file RequestTaskTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/27 15:59:40
# @brief Definition of RequestTaskTests class.
# # imports
import unittest
import importlib
from mock import Mock, MagicMock
# # SUT
from DIRAC.RequestManagementSystem.private.RequestTask import RequestTask
# # request client
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
ReqClient = Mock( spec = ReqClient )
# # from DIRAC
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
########################################################################
class RequestTaskTests( unittest.TestCase ):
"""
.. class:: RequestTaskTests
"""
def setUp( self ):
""" test case set up """
self.handlersDict = { "ForwardDISET" : "DIRAC/RequestManagementSystem/private/ForwardDISET" }
self.req = Request()
self.req.RequestName = "foobarbaz"
self.req.OwnerGroup = "lhcb_user"
self.req.OwnerDN = "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=chaen/CN=705305/CN=Christophe Haen"
self.op = Operation( { "Type": "ForwardDISET", "Arguments" : "tts10:helloWorldee" } )
self.req.addOperation( self.op )
self.task = None
self.mockRC = MagicMock()
self.mockObjectOps = MagicMock()
self.mockObjectOps.getSections.return_value = {'OK': True,
'Value': ['DataProcessing',
'DataManager']}
self.mockObjectOps.getOptionsDict.return_value = {'OK': True,
'Value': {'Group': 'lhcb_user', 'User': 'fstagni'}}
self.mockOps = MagicMock()
self.mockOps.return_value = self.mockObjectOps
def tearDown( self ):
""" test case tear down """
del self.req
del self.op
del self.task
def testAPI( self ):
""" test API
"""
rt = importlib.import_module( 'DIRAC.RequestManagementSystem.private.RequestTask' )
rt.gMonitor = MagicMock()
rt.Operations = self.mockOps
rt.CS = MagicMock()
self.task = RequestTask( self.req.toJSON()["Value"], self.handlersDict, 'csPath', 'RequestManagement/RequestExecutingAgent',
requestClient = self.mockRC )
self.task.requestClient = Mock( return_value = Mock( spec = ReqClient ) )
self.task.requestClient().updateRequest = Mock()
self.task.requestClient().updateRequest.return_value = { "OK" : True, "Value" : None }
ret = self.task()
self.assertEqual( ret["OK"], True , "call failed" )
ret = self.task.setupProxy()
print(ret)
# # tests execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
requestTaskTests = testLoader.loadTestsFromTestCase( RequestTaskTests )
suite = unittest.TestSuite( [ requestTaskTests ] )
unittest.TextTestRunner( verbosity = 3 ).run( suite )
| fstagni/DIRAC | RequestManagementSystem/private/test/RequestTaskTests.py | Python | gpl-3.0 | 3,166 | [
"DIRAC"
] | 3b40fb17666578a52c222b22b87ab505400ff670aa7b38f0c5d1bbb4933476ed |
#! /usr/bin/env python
from __future__ import print_function
import argparse, gzip, pysam, re, sys
def extract_reference_slices(faf_ref, f_fasta, f_out, f_bed=None):
for coord in get_coords(f_fasta, f_bed):
key="{}:{}-{}".format(*coord)
seq=faf_ref.fetch(region=key)
print(">{}".format(key), file=f_out)
while seq:
print(seq[:60], file=f_out)
seq = seq[60:]
def get_coords(f_fasta, f_bed=None):
coords = []
numSequences =0
# read BED file if available
if f_bed:
for line in f_bed:
if line.strip():
chrom, start, end = line.split()[:3]
coords.append((chrom, int(start), int(end)))
numSequences = len([l for l in f_fasta if l.startswith('>')])
# if no BED available
else:
for name, seq, qual in readfq(f_fasta):
# split on potential delimiters
numSequences += 1
for head in re.split(r'\s+|>|[|]|@', name):
match = re.match(r'^([a-zA-Z0-9_]+):(\d+)-(\d+)$', head)
if match:
coords.append((match.groups()[0],
int(match.groups()[1]),
int(match.groups()[2])))
break
if len(coords) != numSequences:
raise Exception("Number of coordinates provided does not match number "
"of reads. Please specify coordinates in BED format "
"or behind fasta headers.")
return coords
# External code
# taken form lh3
def readfq(fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:], [], None # EDIT: read whole name line
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Prepare reference slices for '
'assemblies to be visualized in maze. If you provide '
'a BED file, coordinates are taken from it in the '
'given order. Without BED, I look for a pattern of '
'chr:start-end in the fasta headers, seperated by '
'white spaces.')
parser.add_argument("-c", "--coords", metavar='BED', default=None,
type=argparse.FileType('r'), required=False,
help="Bed file with coordinates.")
parser.add_argument("-f", "--fasta", metavar='FA',
type=argparse.FileType('rb'), required=True,
help="Fasta containing assemblies/long reads.")
parser.add_argument("-r", "--ref", metavar='FA',
type=pysam.FastaFile, required=True,
help='Reference genome; index required.')
parser.add_argument("-o", "--out", metavar='FA', default='-',
type=argparse.FileType('w'), required=False,
help='Output file.')
args = parser.parse_args()
# .gz support for fasta
if args.fasta != sys.stdin and args.fasta.name.endswith(".gz"):
args.fasta = gzip.open(args.fasta.name, 'r')
extract_reference_slices(args.ref, args.fasta, args.out, args.coords)
| dellytools/maze | extract_reference_slices.py | Python | mit | 4,489 | [
"pysam"
] | dc70dacd3c7c755803c64f469d5cf5c15e5fd7a31f395459f43be153f2dbdc7b |
"""Tests for _sketches.py."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import clarkson_woodruff_transform
from numpy.testing import assert_
def make_random_dense_gaussian_matrix(n_rows, n_columns, mu=0, sigma=0.01):
"""
Make some random data with Gaussian distributed values
"""
np.random.seed(142352345)
res = np.random.normal(mu, sigma, n_rows*n_columns)
return np.reshape(res, (n_rows, n_columns))
class TestClarksonWoodruffTransform(object):
"""
Testing the Clarkson Woodruff Transform
"""
# Big dense matrix dimensions
n_matrix_rows = 2000
n_matrix_columns = 100
# Sketch matrix dimensions
n_sketch_rows = 100
# Error threshold
threshold = 0.1
dense_big_matrix = make_random_dense_gaussian_matrix(n_matrix_rows,
n_matrix_columns)
def test_sketch_dimensions(self):
sketch = clarkson_woodruff_transform(self.dense_big_matrix,
self.n_sketch_rows)
assert_(sketch.shape == (self.n_sketch_rows,
self.dense_big_matrix.shape[1]))
def test_sketch_rows_norm(self):
# Given the probabilistic nature of the sketches
# we run the 'test' multiple times and check that
# we pass all/almost all the tries
n_errors = 0
seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
for seed_ in seeds:
sketch = clarkson_woodruff_transform(self.dense_big_matrix,
self.n_sketch_rows, seed_)
# We could use other norms (like L2)
err = np.linalg.norm(self.dense_big_matrix) - np.linalg.norm(sketch)
if err > self.threshold:
n_errors += 1
assert_(n_errors == 0)
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/linalg/tests/test_sketches.py | Python | mit | 2,000 | [
"Gaussian"
] | e0cbbffae30d1128d9f7cf8a5baedbd0fc8dd5aa792f31cb38263eace482e400 |
# coding: utf-8
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest.mock import patch
from odoo.addons.base.tests.common import HttpCaseWithUserDemo
from odoo.addons.website.tools import MockRequest
from odoo.addons.website.models.website_visitor import WebsiteVisitor
from odoo.tests import common, tagged
class MockVisitor(common.BaseCase):
@contextmanager
def mock_visitor_from_request(self, force_visitor=False):
def _get_visitor_from_request(model, *args, **kwargs):
return force_visitor
with patch.object(WebsiteVisitor, '_get_visitor_from_request',
autospec=True, wraps=WebsiteVisitor,
side_effect=_get_visitor_from_request) as _get_visitor_from_request_mock:
yield
@tagged('-at_install', 'post_install', 'website_visitor')
class WebsiteVisitorTests(MockVisitor, HttpCaseWithUserDemo):
def setUp(self):
super(WebsiteVisitorTests, self).setUp()
self.website = self.env['website'].search([
('company_id', '=', self.env.user.company_id.id)
], limit=1)
self.cookies = {}
untracked_view = self.env['ir.ui.view'].create({
'name': 'UntackedView',
'type': 'qweb',
'arch': '''<t name="Homepage" t-name="website.base_view">
<t t-call="website.layout">
I am a generic page²
</t>
</t>''',
'key': 'test.base_view',
'track': False,
})
tracked_view = self.env['ir.ui.view'].create({
'name': 'TrackedView',
'type': 'qweb',
'arch': '''<t name="Homepage" t-name="website.base_view">
<t t-call="website.layout">
I am a generic page
</t>
</t>''',
'key': 'test.base_view',
'track': True,
})
tracked_view_2 = self.env['ir.ui.view'].create({
'name': 'TrackedView2',
'type': 'qweb',
'arch': '''<t name="OtherPage" t-name="website.base_view">
<t t-call="website.layout">
I am a generic second page
</t>
</t>''',
'key': 'test.base_view',
'track': True,
})
[self.untracked_page, self.tracked_page, self.tracked_page_2] = self.env['website.page'].create([
{
'view_id': untracked_view.id,
'url': '/untracked_view',
'website_published': True,
},
{
'view_id': tracked_view.id,
'url': '/tracked_view',
'website_published': True,
},
{
'view_id': tracked_view_2.id,
'url': '/tracked_view_2',
'website_published': True,
},
])
self.user_portal = self.env['res.users'].search([('login', '=', 'portal')])
self.partner_portal = self.user_portal.partner_id
if not self.user_portal:
self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
self.partner_portal = self.env['res.partner'].create({
'name': 'Joel Willis',
'email': 'joel.willis63@example.com',
})
self.user_portal = self.env['res.users'].create({
'login': 'portal',
'password': 'portal',
'partner_id': self.partner_portal.id,
'groups_id': [(6, 0, [self.env.ref('base.group_portal').id])],
})
def _get_last_visitor(self):
return self.env['website.visitor'].search([], limit=1, order="id DESC")
def assertPageTracked(self, visitor, page):
""" Check a page is in visitor tracking data """
self.assertIn(page, visitor.website_track_ids.page_id)
self.assertIn(page, visitor.page_ids)
def assertVisitorTracking(self, visitor, pages):
""" Check the whole tracking history of a visitor """
for page in pages:
self.assertPageTracked(visitor, page)
self.assertEqual(
len(visitor.website_track_ids),
len(pages)
)
def assertVisitorDeactivated(self, visitor, main_visitor):
""" Temporary method to check that a visitor has been de-activated / merged
with other visitor, notably in case of login (see User.authenticate() as
well as Visitor._link_to_visitor() ).
As final result depends on installed modules (see overrides) due to stable
improvements linked to EventOnline, this method contains a hack to avoid
doing too much overrides just for that behavior. """
if 'parent_id' in self.env['website.visitor']:
self.assertTrue(bool(visitor))
self.assertFalse(visitor.active)
self.assertTrue(main_visitor.active)
self.assertEqual(visitor.parent_id, main_visitor)
else:
self.assertFalse(visitor)
self.assertTrue(bool(main_visitor))
def test_visitor_creation_on_tracked_page(self):
""" Test various flows involving visitor creation and update. """
existing_visitors = self.env['website.visitor'].search([])
existing_tracks = self.env['website.track'].search([])
self.url_open(self.untracked_page.url)
self.url_open(self.tracked_page.url)
self.url_open(self.tracked_page.url)
new_visitor = self.env['website.visitor'].search([('id', 'not in', existing_visitors.ids)])
new_track = self.env['website.track'].search([('id', 'not in', existing_tracks.ids)])
self.assertEqual(len(new_visitor), 1, "1 visitor should be created")
self.assertEqual(len(new_track), 1, "There should be 1 tracked page")
self.assertEqual(new_visitor.visit_count, 1)
self.assertEqual(new_visitor.website_track_ids, new_track)
self.assertVisitorTracking(new_visitor, self.tracked_page)
# ------------------------------------------------------------
# Admin connects
# ------------------------------------------------------------
self.cookies = {'visitor_uuid': new_visitor.access_token}
with MockRequest(self.env, website=self.website, cookies=self.cookies):
self.authenticate(self.user_admin.login, 'admin')
visitor_admin = new_visitor
# visit a page
self.url_open(self.tracked_page_2.url)
# check tracking and visitor / user sync
self.assertVisitorTracking(visitor_admin, self.tracked_page | self.tracked_page_2)
self.assertEqual(visitor_admin.partner_id, self.partner_admin)
self.assertEqual(visitor_admin.name, self.partner_admin.name)
# ------------------------------------------------------------
# Portal connects
# ------------------------------------------------------------
with MockRequest(self.env, website=self.website, cookies=self.cookies):
self.authenticate(self.user_portal.login, 'portal')
self.assertFalse(
self.env['website.visitor'].search([('id', 'not in', (existing_visitors | visitor_admin).ids)]),
"No extra visitor should be created")
# visit a page
self.url_open(self.tracked_page.url)
self.url_open(self.untracked_page.url)
self.url_open(self.tracked_page_2.url)
self.url_open(self.tracked_page_2.url) # 2 time to be sure it does not record twice
# new visitor is created
new_visitors = self.env['website.visitor'].search([('id', 'not in', existing_visitors.ids)])
self.assertEqual(len(new_visitors), 2, "One extra visitor should be created")
visitor_portal = new_visitors[0]
self.assertEqual(visitor_portal.partner_id, self.partner_portal)
self.assertEqual(visitor_portal.name, self.partner_portal.name)
self.assertVisitorTracking(visitor_portal, self.tracked_page | self.tracked_page_2)
# ------------------------------------------------------------
# Back to anonymous
# ------------------------------------------------------------
# portal user disconnects
self.logout()
# visit some pages
self.url_open(self.tracked_page.url)
self.url_open(self.untracked_page.url)
self.url_open(self.tracked_page_2.url)
self.url_open(self.tracked_page_2.url) # 2 time to be sure it does not record twice
# new visitor is created
new_visitors = self.env['website.visitor'].search([('id', 'not in', existing_visitors.ids)])
self.assertEqual(len(new_visitors), 3, "One extra visitor should be created")
visitor_anonymous = new_visitors[0]
self.cookies['visitor_uuid'] = visitor_anonymous.access_token
self.assertFalse(visitor_anonymous.name)
self.assertFalse(visitor_anonymous.partner_id)
self.assertVisitorTracking(visitor_anonymous, self.tracked_page | self.tracked_page_2)
visitor_anonymous_tracks = visitor_anonymous.website_track_ids
# ------------------------------------------------------------
# Admin connects again
# ------------------------------------------------------------
with MockRequest(self.env, website=self.website, cookies=self.cookies):
self.authenticate(self.user_admin.login, 'admin')
# one visitor is deleted
visitor_anonymous = self.env['website.visitor'].with_context(active_test=False).search([('id', '=', visitor_anonymous.id)])
self.assertVisitorDeactivated(visitor_anonymous, visitor_admin)
new_visitors = self.env['website.visitor'].search([('id', 'not in', existing_visitors.ids)])
self.assertEqual(new_visitors, visitor_admin | visitor_portal)
visitor_admin = self.env['website.visitor'].search([('partner_id', '=', self.partner_admin.id)])
# tracks are linked
self.assertTrue(visitor_anonymous_tracks < visitor_admin.website_track_ids)
self.assertEqual(len(visitor_admin.website_track_ids), 4, "There should be 4 tracked page for the admin")
# ------------------------------------------------------------
# Back to anonymous
# ------------------------------------------------------------
# admin disconnects
self.logout()
# visit some pages
self.url_open(self.tracked_page.url)
self.url_open(self.untracked_page.url)
self.url_open(self.tracked_page_2.url)
self.url_open(self.tracked_page_2.url) # 2 time to be sure it does not record twice
# new visitor created
new_visitors = self.env['website.visitor'].search([('id', 'not in', existing_visitors.ids)])
self.assertEqual(len(new_visitors), 3, "One extra visitor should be created")
visitor_anonymous_2 = new_visitors[0]
self.cookies['visitor_uuid'] = visitor_anonymous_2.access_token
self.assertFalse(visitor_anonymous_2.name)
self.assertFalse(visitor_anonymous_2.partner_id)
self.assertVisitorTracking(visitor_anonymous_2, self.tracked_page | self.tracked_page_2)
visitor_anonymous_2_tracks = visitor_anonymous_2.website_track_ids
# ------------------------------------------------------------
# Portal connects again
# ------------------------------------------------------------
with MockRequest(self.env, website=self.website, cookies=self.cookies):
self.authenticate(self.user_portal.login, 'portal')
# one visitor is deleted
new_visitors = self.env['website.visitor'].search([('id', 'not in', existing_visitors.ids)])
self.assertEqual(new_visitors, visitor_admin | visitor_portal)
# tracks are linked
self.assertTrue(visitor_anonymous_2_tracks < visitor_portal.website_track_ids)
self.assertEqual(len(visitor_portal.website_track_ids), 4, "There should be 4 tracked page for the portal user")
# simulate the portal user comes back 30min later
for track in visitor_portal.website_track_ids:
track.write({'visit_datetime': track.visit_datetime - timedelta(minutes=30)})
# visit a page
self.url_open(self.tracked_page.url)
visitor_portal.invalidate_cache(fnames=['website_track_ids'])
# tracks are created
self.assertEqual(len(visitor_portal.website_track_ids), 5, "There should be 5 tracked page for the portal user")
# simulate the portal user comes back 8hours later
visitor_portal.write({'last_connection_datetime': visitor_portal.last_connection_datetime - timedelta(hours=8)})
self.url_open(self.tracked_page.url)
visitor_portal.invalidate_cache(fnames=['visit_count'])
# check number of visits
self.assertEqual(visitor_portal.visit_count, 2, "There should be 2 visits for the portal user")
def test_visitor_archive(self):
    """ Test cron archiving inactive visitors and their re-activation when
    authenticating an user. """
    # Visitors inactive for more than this many days become candidates
    # for archiving by the cron below.
    self.env['ir.config_parameter'].sudo().set_param('website.visitor.live.days', 7)
    partner_demo = self.partner_demo
    old_visitor = self.env['website.visitor'].create({
        'lang_id': self.env.ref('base.lang_en').id,
        'country_id': self.env.ref('base.be').id,
        'website_id': 1,
        'partner_id': partner_demo.id,
    })
    self.assertTrue(old_visitor.active)
    self.assertEqual(partner_demo.visitor_ids, old_visitor, "Visitor and its partner should be synchronized")
    # archive old visitor: fake 8 days of inactivity (beyond the 7-day
    # threshold configured above) and run the archiving cron.
    old_visitor.last_connection_datetime = datetime.now() - timedelta(days=8)
    self.env['website.visitor']._cron_archive_visitors()
    self.assertEqual(old_visitor.active, False, "Visitor should be archived after inactivity")
    # reconnect with new visitor: an anonymous page visit must create a
    # fresh visitor record, unrelated to the archived one.
    self.url_open(self.tracked_page.url)
    new_visitor = self._get_last_visitor()
    self.assertFalse(new_visitor.partner_id)
    self.assertTrue(new_visitor.id > old_visitor.id, "A new visitor should have been created.")
    self.assertVisitorTracking(new_visitor, self.tracked_page)
    # Authenticating as the partner linked to the archived visitor should
    # merge the anonymous visitor into it and re-activate it.
    with self.mock_visitor_from_request(force_visitor=new_visitor):
        self.authenticate('demo', 'demo')
    (new_visitor | old_visitor).flush()
    partner_demo.flush()
    partner_demo.invalidate_cache(fnames=['visitor_ids'])
    self.assertEqual(partner_demo.visitor_ids, old_visitor, "The partner visitor should be back to the 'old' visitor.")
    new_visitor = self.env['website.visitor'].search([('id', '=', new_visitor.id)])
    self.assertEqual(len(new_visitor), 0, "The new visitor should be deleted when visitor authenticate once again.")
    self.assertEqual(old_visitor.active, True, "The old visitor should be reactivated when visitor authenticates once again.")
| ygol/odoo | addons/website/tests/test_website_visitor.py | Python | agpl-3.0 | 15,250 | [
"VisIt"
] | 3b6b7c0f00001b878576f2d7eeb2fa8c05430f9aeca4f181b753f5480b975206 |
# -*- coding: utf-8 -*-
#
# balancedneuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Balanced neuron example
-----------------------
This script simulates a neuron driven by an excitatory and an
inhibitory population of neurons firing Poisson spike trains. The aim
is to find a firing rate for the inhibitory population that will make
the neuron fire at the same rate as the excitatory population.
Optimization is performed using the bisection method from Scipy,
simulating the network repeatedly.
This example is also shown in the article Eppler et al. (2009)
**PyNEST: A convenient interface to the NEST simulator**,
*Front. Neuroinform.* http://dx.doi.org/10.3389/neuro.11.012.2008
'''
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages
from scipy.optimize import bisect
import nest
import nest.voltage_trace
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
# Second, the simulation parameters are assigned to variables.
t_sim = 25000.0  # how long we simulate, in ms (spike counts below are
                 # scaled by 1000.0 / t_sim to yield rates in Hz)
n_ex = 16000  # size of the excitatory population
n_in = 4000   # size of the inhibitory population
r_ex = 5.0    # mean rate of the excitatory population, in Hz
r_in = 20.5   # initial rate of the inhibitory population, in Hz
epsc = 45.0   # peak amplitude of excitatory synaptic currents
ipsc = -45.0  # peak amplitude of inhibitory synaptic currents
d = 1.0       # synaptic delay
lower = 15.0  # lower bound of the search interval, in Hz
upper = 25.0  # upper bound of the search interval, in Hz
prec = 0.01   # how close need the excitatory rates be
# Third, the nodes are created using `Create()`. We store the returned
# handles in variables for later reference.
neuron = nest.Create("iaf_neuron")
noise = nest.Create("poisson_generator", 2)  # [0]: excitatory, [1]: inhibitory
voltmeter = nest.Create("voltmeter")
spikedetector = nest.Create("spike_detector")
# Fourth, the excitatory Poisson generator (`noise[0]`) and the
# voltmeter are configured using `SetStatus()`, which expects a list
# of node handles and a list of parameter dictionaries. The rate of
# the inhibitory Poisson generator is set later. Note that we need not
# set parameters for the neuron and the spike detector, since they
# have satisfactory defaults.
nest.SetStatus(noise, [{"rate": n_ex * r_ex}, {"rate": n_in * r_in}])
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
# Fifth, the neuron is connected to the spike detector and the
# voltmeter, as are the two Poisson generators to the neuron. The
# command `Connect()` has different variants. Plain `Connect()` just
# takes the handles of pre- and post-synaptic nodes and uses the
# default values for weight and delay. `ConvergentConnect()` takes
# four arguments: lists of pre- and post-synaptic nodes and lists of
# weights and delays. Note that the connection direction for the
# voltmeter is reversed compared to the spike detector, because it
# observes the neuron instead of receiving events from it. Thus,
# `Connect()` reflects the direction of signal flow in the simulation
# kernel rather than the physical process of inserting an electrode
# into the neuron. The latter semantics is presently not available in
# NEST.
nest.Connect(neuron, spikedetector)
nest.Connect(voltmeter, neuron)
# NOTE(review): the delay literal 1.0 below duplicates the constant ``d``
# defined above -- presumably they are meant to be the same value.
nest.ConvergentConnect(noise, neuron, [epsc, ipsc], 1.0)
def output_rate(guess):
    """Simulate the network with the inhibitory population firing at
    ``guess`` Hz per neuron and return the resulting firing rate of the
    target neuron, in Hz.
    """
    print("Inhibitory rate estimate: %5.2f Hz" % guess)
    # Total input rate scales with the size of the inhibitory population.
    population_rate = float(abs(n_in * guess))
    nest.SetStatus([noise[1]], "rate", population_rate)
    # Reset the spike counter before each measurement run.
    nest.SetStatus(spikedetector, "n_events", 0)
    nest.Simulate(t_sim)
    n_spikes = nest.GetStatus(spikedetector, "n_events")[0]
    # Convert a spike count over t_sim milliseconds into Hz.
    neuron_rate = n_spikes * 1000.0 / t_sim
    print(" -> Neuron rate: %6.2f Hz (goal: %4.2f Hz)" % (neuron_rate, r_ex))
    return neuron_rate
# The function takes the firing rate of the inhibitory neurons as an
# argument. It scales the rate with the size of the inhibitory
# population and configures the inhibitory Poisson generator
# (`noise[1]`) accordingly. Then, the spike-counter of the spike
# detector is reset to zero. The network is simulated using
# `Simulate()`, which takes the desired simulation time in
# milliseconds and advances the network state by this amount of
# time. During simulation, the spike detector counts the spikes of the
# target neuron and the total number is read out at the end of the
# simulation period. The return value of `output_rate()` is the firing
# rate of the target neuron in Hz.
# Second, the scipy function `bisect()` is used to determine the
# optimal firing rate of the neurons of the inhibitory population.
in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)
# The function `bisect()` takes four arguments: first a function whose
# zero crossing is to be determined. Here, the firing rate of the
# target neuron should equal the firing rate of the neurons of the
# excitatory population. Thus we define an anonymous function (using
# `lambda`) that returns the difference between the actual rate of the
# target neuron and the rate of the excitatory Poisson generator,
# given a rate for the inhibitory neurons. The next two arguments are
# the lower and upper bound of the interval in which to search for the
# zero crossing. The fourth argument of `bisect()` is the desired
# precision of the zero crossing (scipy's ``xtol`` is an absolute,
# not relative, tolerance).
# Finally, we plot the target neuron's membrane potential as a
# function of time.
nest.voltage_trace.from_device(voltmeter)
| INM-6/nest-git-migration | pynest/examples/balancedneuron.py | Python | gpl-2.0 | 6,690 | [
"NEURON"
] | 04799a46e6a8477fefea0cb8d7f55ab00ab8e520ca1c412796e3d91e8cdd33da |
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Author: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, as_float_array, check_random_state, deprecated
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w: array of shape(n), to be orthogonalized
W: array of shape(p, n), null space definition
j: int < p
caveats
-------
assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
K = np.dot(W, W.T)
s, u = linalg.eigh(K)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
W = np.dot(np.dot(np.dot(u, np.diag(1.0 / np.sqrt(s))), u.T), W)
return W
def _ica_def(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function

    Components are estimated one at a time; each new unmixing vector is
    Gram-Schmidt decorrelated against the rows found so far.

    Used internally by FastICA.

    Parameters
    ----------
    X : ndarray
        Data with variables in rows and observations in columns
        (whitened upstream by ``fastica``).
    tol : float
        Convergence tolerance on the direction change of ``w``.
    g : callable
        Nonlinearity; returns ``(g(x), g'(x))`` as a tuple, or just
        ``g(x)`` (deprecated -- then ``gprime`` must be supplied).
    gprime : callable or None
        Deprecated separate derivative of ``g``.
    fun_args : dict
        Extra arguments forwarded to ``g``/``gprime``.
    max_iter : int
        Maximum number of iterations per component.
    w_init : ndarray of shape (n_components, n_components)
        Initial guesses for the rows of the unmixing matrix.

    Returns
    -------
    W : ndarray of shape (n_components, n_components)
        Estimated un-mixing matrix.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=float)
    # j is the index of the extracted component
    for j in range(n_components):
        w = w_init[j, :].copy()
        # Normalize the initial vector to unit length.
        w /= np.sqrt((w ** 2).sum())
        n_iterations = 0
        # we set lim to tol+1 to be sure to enter at least once in next while
        lim = tol + 1
        while ((lim > tol) & (n_iterations < (max_iter - 1))):
            wtx = np.dot(w.T, X)
            nonlin = g(wtx, fun_args)
            if isinstance(nonlin, tuple):
                gwtx, g_wtx = nonlin
            else:
                # Deprecated path: ``g`` returned only the value, so the
                # derivative must come from the separate ``gprime``.
                if not callable(gprime):
                    raise ValueError('The function supplied does not return a '
                                     'tuple. Therefore fun_prime has to be a '
                                     'function, not %s' % str(type(gprime)))
                warnings.warn("Passing g and gprime separately is deprecated "
                              "and will be removed in 0.14.",
                              DeprecationWarning, stacklevel=2)
                gwtx = nonlin
                g_wtx = gprime(wtx, fun_args)
            # One-unit FastICA fixed-point update.
            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
            # Orthogonalize (in place) against previously extracted rows.
            _gs_decorrelation(w1, W, j)
            w1 /= np.sqrt((w1 ** 2).sum())
            # |<w1, w>| close to 1 means the direction has stabilized.
            lim = np.abs(np.abs((w1 * w).sum()) - 1)
            w = w1
            n_iterations = n_iterations + 1
        W[j, :] = w
    return W
def _ica_par(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Run the parallel (symmetric) FastICA update loop.

    All rows of the unmixing matrix are updated simultaneously and
    re-orthonormalized after every iteration via symmetric
    decorrelation.  Used internally by FastICA -- main loop.
    """
    n, p = X.shape
    W = _sym_decorrelation(w_init)
    # Start above the tolerance so the loop body executes at least once.
    convergence = tol + 1
    n_iter = 0
    while ((convergence > tol) and (n_iter < (max_iter - 1))):
        projections = np.dot(W, X)
        nonlin = g(projections, fun_args)
        if isinstance(nonlin, tuple):
            gwtx, g_wtx = nonlin
        else:
            # Deprecated path: derivative supplied separately via gprime.
            if not callable(gprime):
                raise ValueError('The function supplied does not return a '
                                 'tuple. Therefore fun_prime has to be a '
                                 'function, not %s' % str(type(gprime)))
            warnings.warn("Passing g and gprime separately is deprecated "
                          "and will be removed in 0.14.",
                          DeprecationWarning, stacklevel=2)
            gwtx = nonlin
            g_wtx = gprime(projections, fun_args)
        # Symmetric FastICA fixed-point update for all rows at once.
        W_next = (np.dot(gwtx, X.T) / float(p)
                  - np.dot(np.diag(g_wtx.mean(axis=1)), W))
        W_next = _sym_decorrelation(W_next)
        # Convergence: how far the rotation between successive estimates
        # is from the identity.
        convergence = max(abs(abs(np.diag(np.dot(W_next, W.T))) - 1))
        W = W_next
        n_iter += 1
    return W
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
            fun="logcosh", fun_prime='', fun_args=None, max_iter=200,
            tol=1e-04, w_init=None, random_state=None):
    """Perform Fast Independent Component Analysis.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    n_components : int, optional
        Number of components to extract. If None no dimension reduction
        is performed.

    algorithm : {'parallel', 'deflation'}, optional
        Apply a parallel or deflational FASTICA algorithm.

    whiten : boolean, optional
        If True perform an initial whitening of the data.
        If False, the data is assumed to have already been
        preprocessed: it should be centered, normed and white.
        Otherwise you will get incorrect results.
        In this case the parameter n_components will be ignored.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

        Supplying the derivative through the `fun_prime` attribute is
        still supported, but deprecated.

    fun_prime : empty string ('') or function, optional, deprecated.
        See fun.

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If None or empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}

    max_iter : int, optional
        Maximum number of iterations to perform

    tol : float, optional
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged

    w_init : (n_components, n_components) array, optional
        Initial un-mixing array of dimension (n.comp,n.comp).
        If None (default) then an array of normal r.v.'s is used

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    K : (n_components, p) array or None.
        If whiten is 'True', K is the pre-whitening matrix that projects data
        onto the first n.comp principal components. If whiten is 'False', K is
        'None'.

    W : (n_components, n_components) array
        estimated un-mixing matrix
        The mixing matrix can be obtained by::

            w = np.dot(W, K.T)
            A = w.T * (w * w.T).I

    S : (n_components, n) array
        estimated source matrix

    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to `un-mix' the data by estimating an
    un-mixing matrix W where ``S = W K X.``

    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """
    random_state = check_random_state(random_state)
    # The signature previously used a mutable default argument
    # (``fun_args={}``); normalize None to a fresh dict here instead so
    # state can never leak between calls.  This also matches the
    # ``fun_args=None`` convention used by the FastICA estimator class.
    if fun_args is None:
        fun_args = {}
    # make interface compatible with other decompositions
    X = array2d(X).T
    alpha = fun_args.get('alpha', 1.0)
    if (alpha < 1) or (alpha > 2):
        raise ValueError("alpha must be in [1,2]")

    gprime = None
    if isinstance(fun, str):
        # Some standard nonlinear functions
        # XXX: these should be optimized, as they can be a bottleneck.
        if fun == 'logcosh':
            def g(x, fun_args):
                alpha = fun_args.get('alpha', 1.0)  # comment it out?
                gx = np.tanh(alpha * x)
                g_x = alpha * (1 - gx ** 2)
                return gx, g_x
        elif fun == 'exp':
            def g(x, fun_args):
                exp = np.exp(-(x ** 2) / 2)
                gx = x * exp
                g_x = (1 - x ** 2) * exp
                return gx, g_x
        elif fun == 'cube':
            def g(x, fun_args):
                return x ** 3, 3 * x ** 2
        else:
            raise ValueError('fun argument should be one of logcosh, exp or'
                             ' cube')
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)

        if callable(fun_prime):
            # Deprecated: separate derivative function.
            def gprime(x, fun_args):
                return fun_prime(x, **fun_args)
    else:
        raise ValueError('fun argument should be either a string '
                         '(one of logcosh, exp or cube) or a function')

    n, p = X.shape

    if not whiten and n_components is not None:
        n_components = None
        warnings.warn('Ignoring n_components with whiten=False.')

    if n_components is None:
        n_components = min(n, p)
    if (n_components > min(n, p)):
        n_components = min(n, p)
        print("n_components is too large: it will be set to %s" % n_components)

    if whiten:
        # Centering the columns (ie the variables)
        X = X - X.mean(axis=-1)[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)
        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)
        # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be casted to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=True)

    if w_init is None:
        w_init = random_state.normal(size=(n_components, n_components))
    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError("w_init has invalid shape -- should be %(shape)s"
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'gprime': gprime,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    if algorithm == 'parallel':
        W = _ica_par(X1, **kwargs)
    elif algorithm == 'deflation':
        W = _ica_def(X1, **kwargs)
    else:
        raise ValueError('Invalid algorithm: must be either `parallel` or' +
                         ' `deflation`.')
    del X1

    if whiten:
        S = np.dot(np.dot(W, K), X)
        return K, W, S.T
    else:
        S = np.dot(W, X)
        return None, W, S.T
class FastICA(BaseEstimator, TransformerMixin):
    """FastICA; a fast algorithm for Independent Component Analysis

    Parameters
    ----------
    n_components : int, optional
        Number of components to use. If none is passed, all are used.

    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA

    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

        Supplying the derivative through the `fun_prime` attribute is
        still supported, but deprecated.

    fun_prime : empty string ('') or function, optional, deprecated.
        See fun.

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}

    max_iter : int, optional
        Maximum number of iterations during fit

    tol : float, optional
        Tolerance on update at each iteration

    w_init : None of an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `components_` : 2D array, [n_components, n_features]
        The unmixing matrix

    `sources_` : 2D array, [n_samples, n_components]
        The estimated latent sources of the data.

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """

    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_prime='', fun_args=None, max_iter=200,
                 tol=1e-4, w_init=None, random_state=None):
        """Store the estimator parameters; see the class docstring for
        their meaning.  No computation happens until :meth:`fit`."""
        super(FastICA, self).__init__()
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_prime = fun_prime
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model to X and estimate the unmixing matrix.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : ignored
            Present for scikit-learn API compatibility.

        Returns
        -------
        self
        """
        fun_args = {} if self.fun_args is None else self.fun_args
        whitening_, unmixing_, sources_ = fastica(
            X, self.n_components, self.algorithm, self.whiten, self.fun,
            self.fun_prime, fun_args, self.max_iter, self.tol, self.w_init,
            random_state=self.random_state)
        if self.whiten:
            # Fold the whitening (PCA) projection into the unmixing matrix
            # so components_ maps raw data directly to sources.
            self.components_ = np.dot(unmixing_, whitening_)
        else:
            self.components_ = unmixing_
        self.sources_ = sources_
        return self

    def transform(self, X, y=None):
        """Apply un-mixing matrix "W" to X to recover the sources

        S = X * W.T
        """
        X = array2d(X)
        return np.dot(X, self.components_.T)

    def get_mixing_matrix(self):
        """Compute the mixing matrix

        Returns the pseudo-inverse of the unmixing ``components_``.
        """
        return linalg.pinv(self.components_)
| lucidfrontier45/scikit-learn | sklearn/decomposition/fastica_.py | Python | bsd-3-clause | 14,751 | [
"Gaussian"
] | 79903664884c0cbcfb6831342ff56e8c9dfba7f182e45b683a65733d88a3cb05 |
# sql/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
elements, selectable, crud
from .. import util, exc
import itertools
# Words that must be quoted when used as identifiers.
# NOTE(review): this resembles the SQL-92 / PostgreSQL reserved-word
# list -- individual dialects may substitute their own set; confirm
# against the dialects that rely on it.
RESERVED_WORDS = set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where'])

# An identifier made entirely of these characters needs no quoting
# (matched case-insensitively).
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
# Identifiers may not *begin* with a digit or '$'.
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])

# Matches ':name'-style bind markers in textual SQL; the lookbehind
# excludes doubled colons (e.g. casts) and backslash-escaped colons.
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
# Matches a backslash-escaped ':name' so the escape can be stripped.
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)

# Render templates for bind parameters, keyed by DBAPI paramstyle.
# '[_POSITION]' is a placeholder later replaced by an ordinal (see
# SQLCompiler._apply_numbered_params).
BIND_TEMPLATES = {
    'pyformat': "%%(%(name)s)s",
    'qmark': "?",
    'format': "%%s",
    'numeric': ":[_POSITION]",
    'named': ":%(name)s"
}

# Default SQL text for operator objects; dialects may override entries.
OPERATORS = {
    # binary
    operators.and_: ' AND ',
    operators.or_: ' OR ',
    operators.add: ' + ',
    operators.mul: ' * ',
    operators.sub: ' - ',
    operators.div: ' / ',
    operators.mod: ' % ',
    operators.truediv: ' / ',
    operators.neg: '-',
    operators.lt: ' < ',
    operators.le: ' <= ',
    operators.ne: ' != ',
    operators.gt: ' > ',
    operators.ge: ' >= ',
    operators.eq: ' = ',
    operators.concat_op: ' || ',
    operators.match_op: ' MATCH ',
    operators.notmatch_op: ' NOT MATCH ',
    operators.in_op: ' IN ',
    operators.notin_op: ' NOT IN ',
    operators.comma_op: ', ',
    operators.from_: ' FROM ',
    operators.as_: ' AS ',
    operators.is_: ' IS ',
    operators.isnot: ' IS NOT ',
    operators.collate: ' COLLATE ',

    # unary
    operators.exists: 'EXISTS ',
    operators.distinct_op: 'DISTINCT ',
    operators.inv: 'NOT ',

    # modifiers
    operators.desc_op: ' DESC',
    operators.asc_op: ' ASC',
    operators.nullsfirst_op: ' NULLS FIRST',
    operators.nullslast_op: ' NULLS LAST',
}

# Default rendering for "generic" SQL functions; '%(expr)s' receives the
# rendered argument list.
FUNCTIONS = {
    functions.coalesce: 'coalesce%(expr)s',
    functions.current_date: 'CURRENT_DATE',
    functions.current_time: 'CURRENT_TIME',
    functions.current_timestamp: 'CURRENT_TIMESTAMP',
    functions.current_user: 'CURRENT_USER',
    functions.localtime: 'LOCALTIME',
    functions.localtimestamp: 'LOCALTIMESTAMP',
    functions.random: 'random%(expr)s',
    functions.sysdate: 'sysdate',
    functions.session_user: 'SESSION_USER',
    functions.user: 'USER'
}

# Field names accepted for EXTRACT(<field> FROM <expr>); here mapped
# to themselves, dialects may remap.
EXTRACT_MAP = {
    'month': 'month',
    'day': 'day',
    'year': 'year',
    'second': 'second',
    'hour': 'hour',
    'doy': 'doy',
    'minute': 'minute',
    'quarter': 'quarter',
    'dow': 'dow',
    'week': 'week',
    'epoch': 'epoch',
    'milliseconds': 'milliseconds',
    'microseconds': 'microseconds',
    'timezone_hour': 'timezone_hour',
    'timezone_minute': 'timezone_minute'
}

# Keywords rendered between the parts of a compound (set-operation)
# SELECT statement.
COMPOUND_KEYWORDS = {
    selectable.CompoundSelect.UNION: 'UNION',
    selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
    selectable.CompoundSelect.EXCEPT: 'EXCEPT',
    selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
    selectable.CompoundSelect.INTERSECT: 'INTERSECT',
    selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
    """Represent a compiled SQL or DDL expression.

    The ``__str__`` method of the ``Compiled`` object should produce
    the actual text of the statement.  ``Compiled`` objects are
    specific to their underlying database dialect, and also may
    or may not be specific to the columns referenced within a
    particular set of bind parameters.  In no case should the
    ``Compiled`` object be dependent on the actual values of those
    bind parameters, even though it may reference those values as
    defaults.
    """

    # Populated by subclasses that cache result-set metadata.
    _cached_metadata = None

    def __init__(self, dialect, statement, bind=None,
                 compile_kwargs=util.immutabledict()):
        """Construct a new ``Compiled`` object.

        :param dialect: ``Dialect`` to compile against.

        :param statement: ``ClauseElement`` to be compiled.

        :param bind: Optional Engine or Connection to compile this
         statement against.

        :param compile_kwargs: additional kwargs that will be
         passed to the initial call to :meth:`.Compiled.process`.

         .. versionadded:: 0.8

        """
        self.dialect = dialect
        self.bind = bind
        if statement is not None:
            # Compilation happens eagerly here in the constructor; the
            # rendered text is cached on self.string.
            self.statement = statement
            self.can_execute = statement.supports_execution
            self.string = self.process(self.statement, **compile_kwargs)

    @util.deprecated("0.7", ":class:`.Compiled` objects now compile "
                     "within the constructor.")
    def compile(self):
        """Produce the internal string representation of this element.
        """
        # No-op: kept only for backwards compatibility (see decorator).
        pass

    def _execute_on_connection(self, connection, multiparams, params):
        # Dispatch hook used by Connection when executing this object.
        return connection._execute_compiled(self, multiparams, params)

    @property
    def sql_compiler(self):
        """Return a Compiled that is capable of processing SQL expressions.

        If this compiler is one, it would likely just return 'self'.

        """
        raise NotImplementedError()

    def process(self, obj, **kwargs):
        # Visitor-pattern dispatch: the element renders itself using
        # this compiler.
        return obj._compiler_dispatch(self, **kwargs)

    def __str__(self):
        """Return the string text of the generated SQL or DDL."""
        return self.string or ''

    def construct_params(self, params=None):
        """Return the bind params for this compiled object.

        :param params: a dict of string/object pairs whose values will
         override bind values compiled in to the
         statement.
        """
        raise NotImplementedError()

    @property
    def params(self):
        """Return the bind params for this compiled object."""
        return self.construct_params()

    def execute(self, *multiparams, **params):
        """Execute this compiled object."""
        e = self.bind
        if e is None:
            raise exc.UnboundExecutionError(
                "This Compiled object is not bound to any Engine "
                "or Connection.")
        return e._execute_compiled(self, multiparams, params)

    def scalar(self, *multiparams, **params):
        """Execute this compiled object and return the result's
        scalar value."""
        return self.execute(*multiparams, **params).scalar()
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
    """Produces DDL specification for TypeEngine objects."""

    # Pattern naming the visitor methods subject to the EnsureKWArgType
    # metaclass check.  Written as a raw string: the previous plain
    # string 'visit_\w+' contained an unrecognized escape sequence,
    # which newer Python versions flag with a DeprecationWarning (the
    # resulting value is identical).
    ensure_kwarg = r'visit_\w+'

    def __init__(self, dialect):
        self.dialect = dialect

    def process(self, type_, **kw):
        # Double-dispatch into the type object's compiler visitor.
        return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):
    """lightweight label object which acts as an expression.Label."""

    __visit_name__ = 'label'
    # '_alt_names' is assigned in __init__ but was previously missing
    # from __slots__, which defeats the slots optimization (the
    # attribute fell through to the instance __dict__).  Include it so
    # all instance attributes are slot-allocated.
    __slots__ = 'element', 'name', '_alt_names'

    def __init__(self, col, name, alt_names=()):
        self.element = col
        self.name = name
        self._alt_names = (col,) + alt_names

    @property
    def proxy_set(self):
        # Delegate to the wrapped column, as a real Label would.
        return self.element.proxy_set

    @property
    def type(self):
        return self.element.type
class SQLCompiler(Compiled):
    """Default implementation of Compiled.

    Compiles ClauseElements into SQL strings.   Uses a similar visit
    paradigm as visitors.ClauseVisitor but implements its own traversal.
    """

    # Default EXTRACT() field map and compound-SELECT keywords; dialects
    # override these class attributes as needed.
    extract_map = EXTRACT_MAP

    compound_keywords = COMPOUND_KEYWORDS

    isdelete = isinsert = isupdate = False
    """class-level defaults which can be set at the instance
    level to define if this Compiled instance represents
    INSERT/UPDATE/DELETE
    """

    returning = None
    """holds the "returning" collection of columns if
    the statement is CRUD and defines returning columns
    either implicitly or explicitly
    """

    returning_precedes_values = False
    """set to True classwide to generate RETURNING
    clauses before the VALUES or WHERE clause (i.e. MSSQL)
    """

    render_table_with_column_in_update_from = False
    """set to True classwide to indicate the SET clause
    in a multi-table UPDATE statement should qualify
    columns with the table name (i.e. MySQL only)
    """

    ansi_bind_rules = False
    """SQL 92 doesn't allow bind parameters to be used
    in the columns clause of a SELECT, nor does it allow
    ambiguous expressions like "? = ?".  A compiler
    subclass can set this flag to True if the target
    driver/DB enforces this
    """
def __init__(self, dialect, statement, column_keys=None,
             inline=False, **kwargs):
    """Construct a new :class:`.SQLCompiler` object.

    :param dialect: ``Dialect`` to be used
    :param statement: ``ClauseElement`` to be compiled
    :param column_keys: a list of column names to be compiled into an
     INSERT or UPDATE statement.
    :param inline: whether INSERT/UPDATE defaults/sequences should be
     compiled inline (no pre-execute); also picked up from the
     statement itself if set there.
    """
    self.column_keys = column_keys

    # compile INSERT/UPDATE defaults/sequences inlined (no pre-
    # execute)
    self.inline = inline or getattr(statement, 'inline', False)

    # a dictionary of bind parameter keys to BindParameter
    # instances.
    self.binds = {}

    # a dictionary of BindParameter instances to "compiled" names
    # that are actually present in the generated SQL
    self.bind_names = util.column_dict()

    # stack which keeps track of nested SELECT statements
    self.stack = []

    # relates label names in the final SQL to a tuple of local
    # column/label name, ColumnElement object (if any) and
    # TypeEngine. ResultProxy uses this for type processing and
    # column targeting
    self._result_columns = []

    # if False, means we can't be sure the list of entries
    # in _result_columns is actually the rendered order. This
    # gets flipped when we use TextAsFrom, for example.
    self._ordered_columns = True

    # true if the paramstyle is positional
    self.positional = dialect.positional
    if self.positional:
        self.positiontup = []
    self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]

    # lazily initialized by _init_cte_state() when a CTE is seen
    self.ctes = None

    # an IdentifierPreparer that formats the quoting of identifiers
    self.preparer = dialect.identifier_preparer
    self.label_length = dialect.label_length \
        or dialect.max_identifier_length

    # a map which tracks "anonymous" identifiers that are created on
    # the fly here
    self.anon_map = util.PopulateDict(self._process_anon)

    # a map which tracks "truncated" names based on
    # dialect.label_length or dialect.max_identifier_length
    self.truncated_names = {}

    # Compiled.__init__ performs the actual compilation, populating
    # self.string and the collections above.
    Compiled.__init__(self, dialect, statement, **kwargs)

    if self.positional and dialect.paramstyle == 'numeric':
        self._apply_numbered_params()
@util.memoized_instancemethod
def _init_cte_state(self):
    """Initialize collections related to CTEs only if
    a CTE is located, to save on the overhead of
    these collections otherwise.

    Memoized, so it runs at most once per compiler instance.
    """
    # collect CTEs to tack on top of a SELECT
    self.ctes = util.OrderedDict()
    # NOTE(review): presumably keyed by CTE name for duplicate
    # detection -- confirm against the CTE rendering code.
    self.ctes_by_name = {}
    self.ctes_recursive = False
    if self.positional:
        self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
    """special API to support the use case of 'nested result sets'"""
    # Temporarily swap out the result-column accounting so an inner
    # statement can be compiled without disturbing the outer
    # statement's result map; the originals are restored on exit.
    result_columns, ordered_columns = (
        self._result_columns, self._ordered_columns)
    self._result_columns, self._ordered_columns = [], False

    try:
        if self.stack:
            # Flag the enclosing compilation frame so the nested
            # SELECT produces a result map.
            entry = self.stack[-1]
            entry['need_result_map_for_nested'] = True
        else:
            entry = None
        yield self._result_columns, self._ordered_columns
    finally:
        if entry:
            entry.pop('need_result_map_for_nested')
        self._result_columns, self._ordered_columns = (
            result_columns, ordered_columns)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
    @util.memoized_property
    def _bind_processors(self):
        # bind name -> bind processor callable, for those parameter
        # types that define one; computed once per compiled statement
        # and used by the execution layer to convert incoming values.
        return dict(
            (key, value) for key, value in
            ((self.bind_names[bindparam],
              bindparam.type._cached_bind_processor(self.dialect))
             for bindparam in self.bind_names)
            if value is not None
        )
def is_subquery(self):
return len(self.stack) > 1
    @property
    def sql_compiler(self):
        # a SQL-string compiler is its own "sql_compiler"; DDL
        # compilers override this to point at a separate instance
        return self
    def construct_params(self, params=None, _group_number=None, _check=True):
        """return a dictionary of bind parameter keys and values.

        ``params`` - caller-supplied values, keyed either by the
        original bindparam key or by the compiled (truncated) name.
        ``_check`` - when True, raise if a ``required`` parameter has
        no value; ``_group_number`` augments that error for
        executemany-style parameter groups.
        """
        if params:
            pd = {}
            for bindparam in self.bind_names:
                # compiled (possibly truncated) name for this parameter
                name = self.bind_names[bindparam]
                if bindparam.key in params:
                    pd[name] = params[bindparam.key]
                elif name in params:
                    pd[name] = params[name]
                elif _check and bindparam.required:
                    if _group_number:
                        raise exc.InvalidRequestError(
                            "A value is required for bind parameter %r, "
                            "in parameter group %d" %
                            (bindparam.key, _group_number))
                    else:
                        raise exc.InvalidRequestError(
                            "A value is required for bind parameter %r"
                            % bindparam.key)
                elif bindparam.callable:
                    pd[name] = bindparam.effective_value
                else:
                    pd[name] = bindparam.value
            return pd
        else:
            # no caller values: fall back to each parameter's
            # embedded value / callable
            pd = {}
            for bindparam in self.bind_names:
                if _check and bindparam.required:
                    if _group_number:
                        raise exc.InvalidRequestError(
                            "A value is required for bind parameter %r, "
                            "in parameter group %d" %
                            (bindparam.key, _group_number))
                    else:
                        raise exc.InvalidRequestError(
                            "A value is required for bind parameter %r"
                            % bindparam.key)
                if bindparam.callable:
                    pd[self.bind_names[bindparam]] = bindparam.effective_value
                else:
                    pd[self.bind_names[bindparam]] = bindparam.value
            return pd
    @property
    def params(self):
        """Return the bind param dictionary embedded into this
        compiled object, for those values that are present.

        Required-but-unset parameters are skipped rather than raising.
        """
        return self.construct_params(_check=False)
    @util.dependencies("sqlalchemy.engine.result")
    def _create_result_map(self, result):
        """utility method used for unit tests only."""
        # the "result" argument is injected by the util.dependencies
        # decorator, not passed by callers
        return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
    def visit_label_reference(
            self, element, within_columns_clause=False, **kwargs):
        """Render a reference to a Label, e.g. in ORDER BY / GROUP BY.

        When the dialect supports ordering by label name and the label
        resolves within the enclosing SELECT, render the bare label
        name instead of the full expression.
        """
        if self.stack and self.dialect.supports_simple_order_by_label:
            selectable = self.stack[-1]['selectable']
            with_cols, only_froms = selectable._label_resolve_dict
            if within_columns_clause:
                resolve_dict = only_froms
            else:
                resolve_dict = with_cols
            # this can be None in the case that a _label_reference()
            # were subject to a replacement operation, in which case
            # the replacement of the Label element may have changed
            # to something else like a ColumnClause expression.
            order_by_elem = element.element._order_by_label_element
            if order_by_elem is not None and order_by_elem.name in \
                    resolve_dict:
                kwargs['render_label_as_label'] = \
                    element.element._order_by_label_element
        return self.process(
            element.element, within_columns_clause=within_columns_clause,
            **kwargs)
    def visit_textual_label_reference(
            self, element, within_columns_clause=False, **kwargs):
        """Render a string-named label reference, resolving it against
        the enclosing SELECT's columns; falls back to plain text()
        rendering (with a warning) if the name does not resolve."""
        if not self.stack:
            # compiling the element outside of the context of a SELECT
            return self.process(
                element._text_clause
            )
        selectable = self.stack[-1]['selectable']
        with_cols, only_froms = selectable._label_resolve_dict
        try:
            if within_columns_clause:
                col = only_froms[element.element]
            else:
                col = with_cols[element.element]
        except KeyError:
            # treat it like text()
            util.warn_limited(
                "Can't resolve label reference %r; converting to text()",
                util.ellipses_string(element.element))
            return self.process(
                element._text_clause
            )
        else:
            kwargs['render_label_as_label'] = col
            return self.process(
                col, within_columns_clause=within_columns_clause, **kwargs)
    def visit_label(self, label,
                    add_to_result_map=None,
                    within_label_clause=False,
                    within_columns_clause=False,
                    render_label_as_label=None,
                    **kw):
        """Render a Label: ``<expr> AS <name>`` in a columns clause,
        bare name when referenced (e.g. ORDER BY), else just the
        underlying expression."""
        # only render labels within the columns clause
        # or ORDER BY clause of a select.  dialect-specific compilers
        # can modify this behavior.
        render_label_with_as = (within_columns_clause and not
                                within_label_clause)
        render_label_only = render_label_as_label is label
        if render_label_only or render_label_with_as:
            if isinstance(label.name, elements._truncated_label):
                labelname = self._truncated_identifier("colident", label.name)
            else:
                labelname = label.name
        if render_label_with_as:
            if add_to_result_map is not None:
                # record this column for result-set targeting
                add_to_result_map(
                    labelname,
                    label.name,
                    (label, labelname, ) + label._alt_names,
                    label.type
                )
            return label.element._compiler_dispatch(
                self, within_columns_clause=True,
                within_label_clause=True, **kw) + \
                OPERATORS[operators.as_] + \
                self.preparer.format_label(label, labelname)
        elif render_label_only:
            return self.preparer.format_label(label, labelname)
        else:
            return label.element._compiler_dispatch(
                self, within_columns_clause=False, **kw)
    def visit_column(self, column, add_to_result_map=None,
                     include_table=True, **kwargs):
        """Render a column, quoted and optionally table-qualified
        (``schema.table.column``); literal columns are escaped rather
        than quoted."""
        name = orig_name = column.name
        if name is None:
            raise exc.CompileError("Cannot compile Column object until "
                                   "its 'name' is assigned.")
        is_literal = column.is_literal
        if not is_literal and isinstance(name, elements._truncated_label):
            name = self._truncated_identifier("colident", name)
        if add_to_result_map is not None:
            # record this column for result-set targeting
            add_to_result_map(
                name,
                orig_name,
                (column, name, column.key),
                column.type
            )
        if is_literal:
            name = self.escape_literal_column(name)
        else:
            name = self.preparer.quote(name)
        table = column.table
        if table is None or not include_table or not table.named_with_column:
            return name
        else:
            if table.schema:
                schema_prefix = self.preparer.quote_schema(table.schema) + '.'
            else:
                schema_prefix = ''
            tablename = table.name
            if isinstance(tablename, elements._truncated_label):
                tablename = self._truncated_identifier("alias", tablename)
            return schema_prefix + \
                self.preparer.quote(tablename) + \
                "." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw['type_expression'] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
return text
    def visit_textclause(self, textclause, **kw):
        """Render a text() construct, substituting ``:name`` markers
        with bound-parameter renderings and un-escaping ``\\:``."""
        def do_bindparam(m):
            # replace each :name token with either the explicit
            # bindparam attached to the text() or a plain bind string
            name = m.group(1)
            if name in textclause._bindparams:
                return self.process(textclause._bindparams[name], **kw)
            else:
                return self.bindparam_string(name, **kw)
        # un-escape any \:params
        return BIND_PARAMS_ESC.sub(
            lambda m: m.group(1),
            BIND_PARAMS.sub(
                do_bindparam,
                self.post_process_text(textclause.text))
        )
    def visit_text_as_from(self, taf,
                           compound_index=None,
                           asfrom=False,
                           parens=True, **kw):
        """Render a TextAsFrom (text().columns()) construct, populating
        the result map from its declared columns when this is the
        statement being executed."""
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        populate_result_map = toplevel or \
            (
                compound_index == 0 and entry.get(
                    'need_result_map_for_compound', False)
            ) or entry.get('need_result_map_for_nested', False)
        if populate_result_map:
            # textual column order can't be verified against the
            # rendered statement
            self._ordered_columns = False
            for c in taf.column_args:
                self.process(c, within_columns_clause=True,
                             add_to_result_map=self._add_to_result_map)
        text = self.process(taf.element, **kw)
        if asfrom and parens:
            text = "(%s)" % text
        return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
over.func._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
)
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
    def visit_function(self, func, add_to_result_map=None, **kwargs):
        """Render a SQL function, preferring a per-function
        ``visit_<name>_func`` hook, else the FUNCTIONS template or a
        generic ``name(args)`` form, with package-name qualification."""
        if add_to_result_map is not None:
            add_to_result_map(
                func.name, func.name, (), func.type
            )
        # dialect-specific hook, e.g. visit_now_func
        disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
        if disp:
            return disp(func, **kwargs)
        else:
            name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
            return ".".join(list(func.packagenames) + [name]) % \
                {'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
    def visit_compound_select(self, cs, asfrom=False,
                              parens=True, compound_index=0, **kwargs):
        """Render a CompoundSelect (UNION / INTERSECT / EXCEPT etc.),
        joining its member selects with the compound keyword and
        appending GROUP BY / ORDER BY / LIMIT and any CTE clause."""
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        # only the first member select populates the result map
        need_result_map = toplevel or \
            (compound_index == 0
             and entry.get('need_result_map_for_compound', False))
        self.stack.append(
            {
                'correlate_froms': entry['correlate_froms'],
                'asfrom_froms': entry['asfrom_froms'],
                'selectable': cs,
                'need_result_map_for_compound': need_result_map
            })
        keyword = self.compound_keywords.get(cs.keyword)
        text = (" " + keyword + " ").join(
            (c._compiler_dispatch(self,
                                  asfrom=asfrom, parens=False,
                                  compound_index=i, **kwargs)
             for i, c in enumerate(cs.selects))
        )
        group_by = cs._group_by_clause._compiler_dispatch(
            self, asfrom=asfrom, **kwargs)
        if group_by:
            text += " GROUP BY " + group_by
        text += self.order_by_clause(cs, **kwargs)
        # render LIMIT/OFFSET only when either clause is present
        text += (cs._limit_clause is not None
                 or cs._offset_clause is not None) and \
            self.limit_clause(cs, **kwargs) or ""
        if self.ctes and toplevel:
            text = self._render_cte_clause() + text
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
    def visit_unary(self, unary, **kw):
        """Render a unary expression via either its prefix operator or
        its postfix modifier, preferring a ``visit_<name>_unary_*``
        dialect hook over the generic OPERATORS rendering."""
        if unary.operator:
            if unary.modifier:
                raise exc.CompileError(
                    "Unary expression does not support operator "
                    "and modifier simultaneously")
            disp = getattr(self, "visit_%s_unary_operator" %
                           unary.operator.__name__, None)
            if disp:
                return disp(unary, unary.operator, **kw)
            else:
                return self._generate_generic_unary_operator(
                    unary, OPERATORS[unary.operator], **kw)
        elif unary.modifier:
            disp = getattr(self, "visit_%s_unary_modifier" %
                           unary.modifier.__name__, None)
            if disp:
                return disp(unary, unary.modifier, **kw)
            else:
                return self._generate_generic_unary_modifier(
                    unary, OPERATORS[unary.modifier], **kw)
        else:
            raise exc.CompileError(
                "Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op)
    def visit_binary(self, binary, override_operator=None, **kw):
        """Render a binary expression, preferring a
        ``visit_<op>_binary`` hook, else the generic OPERATORS string.

        ``override_operator`` substitutes the rendering operator while
        leaving the expression's own operator intact.
        """
        # don't allow "? = ?" to render
        if self.ansi_bind_rules and \
                isinstance(binary.left, elements.BindParameter) and \
                isinstance(binary.right, elements.BindParameter):
            kw['literal_binds'] = True
        operator_ = override_operator or binary.operator
        disp = getattr(self, "visit_%s_binary" % operator_.__name__, None)
        if disp:
            return disp(binary, operator_, **kw)
        else:
            try:
                opstring = OPERATORS[operator_]
            except KeyError:
                raise exc.UnsupportedCompilationError(self, operator_)
            else:
                return self._generate_generic_binary(binary, opstring, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw)
def _generate_generic_binary(self, binary, opstring, **kw):
return binary.left._compiler_dispatch(self, **kw) + \
opstring + \
binary.right._compiler_dispatch(self, **kw)
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
    @util.memoized_property
    def _like_percent_literal(self):
        # shared "'%'" literal column used to assemble LIKE patterns
        # for contains()/startswith()/endswith()
        return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC "
if symmetric else " BETWEEN ", **kw)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " NOT BETWEEN SYMMETRIC "
if symmetric else " NOT BETWEEN ", **kw)
    def visit_bindparam(self, bindparam, within_columns_clause=False,
                        literal_binds=False,
                        skip_bind_expression=False,
                        **kwargs):
        """Render a BindParameter, either as a bound marker or inline
        literal, tracking it in ``self.binds`` and detecting name
        conflicts between distinct parameters."""
        if not skip_bind_expression and bindparam.type._has_bind_expression:
            # the type wraps its binds in a SQL expression; render that
            # instead, with this bindparam embedded
            bind_expression = bindparam.type.bind_expression(bindparam)
            return self.process(bind_expression,
                                skip_bind_expression=True)
        if literal_binds or \
                (within_columns_clause and
                    self.ansi_bind_rules):
            if bindparam.value is None and bindparam.callable is None:
                raise exc.CompileError("Bind parameter '%s' without a "
                                       "renderable value not allowed here."
                                       % bindparam.key)
            return self.render_literal_bindparam(
                bindparam, within_columns_clause=True, **kwargs)
        name = self._truncate_bindparam(bindparam)
        if name in self.binds:
            existing = self.binds[name]
            if existing is not bindparam:
                # two different parameters compiled to the same name;
                # only an error for unique or crud-reserved params
                if (existing.unique or bindparam.unique) and \
                        not existing.proxy_set.intersection(
                            bindparam.proxy_set):
                    raise exc.CompileError(
                        "Bind parameter '%s' conflicts with "
                        "unique bind parameter of the same name" %
                        bindparam.key
                    )
                elif existing._is_crud or bindparam._is_crud:
                    raise exc.CompileError(
                        "bindparam() name '%s' is reserved "
                        "for automatic usage in the VALUES or SET "
                        "clause of this "
                        "insert/update statement.   Please use a "
                        "name other than column name when using bindparam() "
                        "with insert() or update() (for example, 'b_%s')." %
                        (bindparam.key, bindparam.key)
                    )
        self.binds[bindparam.key] = self.binds[name] = bindparam
        return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
    def _truncated_identifier(self, ident_class, name):
        """Return a deterministic, length-limited identifier for a
        _truncated_label, memoized per (category, name).

        Over-length names are cut to label_length - 6 and suffixed with
        a per-category hex counter to keep them unique.
        """
        if (ident_class, name) in self.truncated_names:
            return self.truncated_names[(ident_class, name)]
        anonname = name.apply_map(self.anon_map)
        if len(anonname) > self.label_length - 6:
            # per-category counter lives under the bare ident_class key
            counter = self.truncated_names.get(ident_class, 1)
            truncname = anonname[0:max(self.label_length - 6, 0)] + \
                "_" + hex(counter)[2:]
            self.truncated_names[ident_class] = counter + 1
        else:
            truncname = anonname
        self.truncated_names[(ident_class, name)] = truncname
        return truncname
    def _anonymize(self, name):
        # interpolate "%(... )s" anonymous-name templates through the
        # self-populating anon_map
        return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
    def visit_cte(self, cte, asfrom=False, ashint=False,
                  fromhints=None,
                  **kwargs):
        """Render a CTE reference, accumulating its WITH-clause text in
        ``self.ctes``; handles recursive CTEs, aliases of CTEs, and
        same-named CTEs related via ``_restates``."""
        self._init_cte_state()
        if isinstance(cte.name, elements._truncated_label):
            cte_name = self._truncated_identifier("alias", cte.name)
        else:
            cte_name = cte.name
        if cte_name in self.ctes_by_name:
            existing_cte = self.ctes_by_name[cte_name]
            # we've generated a same-named CTE that we are enclosed in,
            # or this is the same CTE.  just return the name.
            if cte in existing_cte._restates or cte is existing_cte:
                return self.preparer.format_alias(cte, cte_name)
            elif existing_cte in cte._restates:
                # we've generated a same-named CTE that is
                # enclosed in us - we take precedence, so
                # discard the text for the "inner".
                del self.ctes[existing_cte]
            else:
                raise exc.CompileError(
                    "Multiple, unrelated CTEs found with "
                    "the same name: %r" %
                    cte_name)
        self.ctes_by_name[cte_name] = cte
        if cte._cte_alias is not None:
            # this CTE aliases another; ensure the original is rendered
            orig_cte = cte._cte_alias
            if orig_cte not in self.ctes:
                self.visit_cte(orig_cte, **kwargs)
            cte_alias_name = cte._cte_alias.name
            if isinstance(cte_alias_name, elements._truncated_label):
                cte_alias_name = self._truncated_identifier(
                    "alias", cte_alias_name)
        else:
            orig_cte = cte
            cte_alias_name = None
        if not cte_alias_name and cte not in self.ctes:
            if cte.recursive:
                self.ctes_recursive = True
            text = self.preparer.format_alias(cte, cte_name)
            if cte.recursive:
                # recursive form requires an explicit column list
                if isinstance(cte.original, selectable.Select):
                    col_source = cte.original
                elif isinstance(cte.original, selectable.CompoundSelect):
                    col_source = cte.original.selects[0]
                else:
                    assert False
                recur_cols = [c for c in
                              util.unique_list(col_source.inner_columns)
                              if c is not None]
                text += "(%s)" % (", ".join(
                    self.preparer.format_column(ident)
                    for ident in recur_cols))
            if self.positional:
                # collect this CTE's binds separately so they can be
                # ordered ahead of the main statement's
                kwargs['positional_names'] = self.cte_positional[cte] = []
            text += " AS \n" + \
                cte.original._compiler_dispatch(
                    self, asfrom=True, **kwargs
                )
            if cte._suffixes:
                text += " " + self._generate_prefixes(
                    cte, cte._suffixes, **kwargs)
            self.ctes[cte] = text
        if asfrom:
            if cte_alias_name:
                text = self.preparer.format_alias(cte, cte_alias_name)
                text += self.get_render_as_alias_suffix(cte_name)
            else:
                return self.preparer.format_alias(cte, cte_name)
            return text
    def visit_alias(self, alias, asfrom=False, ashint=False,
                    iscrud=False,
                    fromhints=None, **kwargs):
        """Render an Alias: ``<original> AS <name>`` in a FROM clause,
        the bare name as a hint target, otherwise just the wrapped
        element."""
        if asfrom or ashint:
            if isinstance(alias.name, elements._truncated_label):
                alias_name = self._truncated_identifier("alias", alias.name)
            else:
                alias_name = alias.name
        if ashint:
            return self.preparer.format_alias(alias, alias_name)
        elif asfrom:
            ret = alias.original._compiler_dispatch(self,
                                                   asfrom=True, **kwargs) + \
                self.get_render_as_alias_suffix(
                    self.preparer.format_alias(alias, alias_name))
            if fromhints and alias in fromhints:
                ret = self.format_from_hint_text(ret, alias,
                                                 fromhints[alias], iscrud)
            return ret
        else:
            return alias.original._compiler_dispatch(self, **kwargs)
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
    def _label_select_column(self, select, column,
                             populate_result_map,
                             asfrom, column_clause_args,
                             name=None,
                             within_columns_clause=True):
        """produce labeled columns present in a select().

        Decides whether the column needs a generated label
        (_CompileLabel) for result-set targeting, then dispatches the
        chosen expression with the appropriate result-map callback.
        """
        if column.type._has_column_expression and \
                populate_result_map:
            # the type substitutes its own SELECT-side expression;
            # keep the original column in the result-map objects
            col_expr = column.type.column_expression(column)
            add_to_result_map = lambda keyname, name, objects, type_: \
                self._add_to_result_map(
                    keyname, name,
                    objects + (column,), type_)
        else:
            col_expr = column
            if populate_result_map:
                add_to_result_map = self._add_to_result_map
            else:
                add_to_result_map = None
        if not within_columns_clause:
            result_expr = col_expr
        elif isinstance(column, elements.Label):
            if col_expr is not column:
                result_expr = _CompileLabel(
                    col_expr,
                    column.name,
                    alt_names=(column.element,)
                )
            else:
                result_expr = col_expr
        elif select is not None and name:
            # explicit name supplied by the enclosing select
            result_expr = _CompileLabel(
                col_expr,
                name,
                alt_names=(column._key_label,)
            )
        elif \
                asfrom and \
                isinstance(column, elements.ColumnClause) and \
                not column.is_literal and \
                column.table is not None and \
                not isinstance(column.table, selectable.Select):
            # plain table-bound column in a subquery: label by its name
            result_expr = _CompileLabel(col_expr,
                                        elements._as_truncated(column.name),
                                        alt_names=(column.key,))
        elif (
            not isinstance(column, elements.TextClause) and
            (
                not isinstance(column, elements.UnaryExpression) or
                column.wraps_column_expression
            ) and
            (
                not hasattr(column, 'name') or
                isinstance(column, functions.Function)
            )
        ):
            # anonymous expression: generate an anon label
            result_expr = _CompileLabel(col_expr, column.anon_label)
        elif col_expr is not column:
            # TODO: are we sure "column" has a .name and .key here ?
            # assert isinstance(column, elements.ColumnClause)
            result_expr = _CompileLabel(col_expr,
                                        elements._as_truncated(column.name),
                                        alt_names=(column.key,))
        else:
            result_expr = col_expr
        column_clause_args.update(
            within_columns_clause=within_columns_clause,
            add_to_result_map=add_to_result_map
        )
        return result_expr._compiler_dispatch(
            self,
            **column_clause_args
        )
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
    def _transform_select_for_nested_joins(self, select):
        """Rewrite any "a JOIN (b JOIN c)" expression as
        "a JOIN (select * from b JOIN c) AS anon", to support
        databases that can't parse a parenthesized join correctly
        (i.e. sqlite the main one).

        Works by cloning the statement tree, replacing each
        right-grouped join with a labeled subquery and accumulating a
        per-SELECT column translation map along the way.
        """
        cloned = {}
        column_translate = [{}]
        def visit(element, **kw):
            if element in column_translate[-1]:
                return column_translate[-1][element]
            elif element in cloned:
                return cloned[element]
            newelem = cloned[element] = element._clone()
            if newelem.is_selectable and newelem._is_join and \
                    isinstance(newelem.right, selectable.FromGrouping):
                newelem._reset_exported()
                newelem.left = visit(newelem.left, **kw)
                right = visit(newelem.right, **kw)
                # wrap the grouped right side in a labeled subquery
                selectable_ = selectable.Select(
                    [right.element],
                    use_labels=True).alias()
                for c in selectable_.c:
                    c._key_label = c.key
                    c._label = c.name
                translate_dict = dict(
                    zip(newelem.right.element.c, selectable_.c)
                )
                # translating from both the old and the new
                # because different select() structures will lead us
                # to traverse differently
                translate_dict[right.element.left] = selectable_
                translate_dict[right.element.right] = selectable_
                translate_dict[newelem.right.element.left] = selectable_
                translate_dict[newelem.right.element.right] = selectable_
                # propagate translations that we've gained
                # from nested visit(newelem.right) outwards
                # to the enclosing select here.  this happens
                # only when we have more than one level of right
                # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
                for k, v in list(column_translate[-1].items()):
                    if v in translate_dict:
                        # remarkably, no current ORM tests (May 2013)
                        # hit this condition, only test_join_rewriting
                        # does.
                        column_translate[-1][k] = translate_dict[v]
                column_translate[-1].update(translate_dict)
                newelem.right = selectable_
                newelem.onclause = visit(newelem.onclause, **kw)
            elif newelem._is_from_container:
                # if we hit an Alias, CompoundSelect or ScalarSelect, put a
                # marker in the stack.
                kw['transform_clue'] = 'select_container'
                newelem._copy_internals(clone=visit, **kw)
            elif newelem.is_selectable and newelem._is_select:
                barrier_select = kw.get('transform_clue', None) == \
                    'select_container'
                # if we're still descended from an
                # Alias/CompoundSelect/ScalarSelect, we're
                # in a FROM clause, so start with a new translate collection
                if barrier_select:
                    column_translate.append({})
                kw['transform_clue'] = 'inside_select'
                newelem._copy_internals(clone=visit, **kw)
                if barrier_select:
                    del column_translate[-1]
            else:
                newelem._copy_internals(clone=visit, **kw)
            return newelem
        return visit(select)
    def _transform_result_map_for_nested_joins(
            self, select, transformed_select):
        """Rewrite result-map object references so they point to the
        original (pre-rewrite) select's columns, matching entries by
        ``_key_label``."""
        inner_col = dict((c._key_label, c) for
                         c in transformed_select.inner_columns)
        # transformed column -> original column
        d = dict(
            (inner_col[c._key_label], c)
            for c in select.inner_columns
        )
        self._result_columns = [
            (key, name, tuple([d.get(col, col) for col in objs]), typ)
            for key, name, objs, typ in self._result_columns
        ]
_default_stack_entry = util.immutabledict([
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
    def _display_froms_for_select(self, select, asfrom):
        # utility method to help external dialects
        # get the correct from list for a select.
        # specifically the oracle dialect needs this feature
        # right now.
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        correlate_froms = entry['correlate_froms']
        asfrom_froms = entry['asfrom_froms']
        if asfrom:
            # as a subquery: exclude only froms introduced by this
            # nesting level from correlation
            froms = select._get_display_froms(
                explicit_correlate_froms=correlate_froms.difference(
                    asfrom_froms),
                implicit_correlate_froms=())
        else:
            froms = select._get_display_froms(
                explicit_correlate_froms=correlate_froms,
                implicit_correlate_froms=asfrom_froms)
        return froms
    def visit_select(self, select, asfrom=False, parens=True,
                     fromhints=None,
                     compound_index=0,
                     nested_join_translation=False,
                     select_wraps_for=None,
                     **kwargs):
        """Render a Select statement: columns, FROM list, WHERE,
        GROUP BY etc., maintaining the compiler stack and result-map
        population; performs the nested-join rewrite for dialects that
        can't parse right-nested parenthesized joins."""
        needs_nested_translation = \
            select.use_labels and \
            not nested_join_translation and \
            not self.stack and \
            not self.dialect.supports_right_nested_joins
        if needs_nested_translation:
            # compile a rewritten copy; result map fixed up below
            transformed_select = self._transform_select_for_nested_joins(
                select)
            text = self.visit_select(
                transformed_select, asfrom=asfrom, parens=parens,
                fromhints=fromhints,
                compound_index=compound_index,
                nested_join_translation=True, **kwargs
            )
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        populate_result_map = toplevel or \
            (
                compound_index == 0 and entry.get(
                    'need_result_map_for_compound', False)
            ) or entry.get('need_result_map_for_nested', False)
        # this was first proposed as part of #3372; however, it is not
        # reached in current tests and could possibly be an assertion
        # instead.
        if not populate_result_map and 'add_to_result_map' in kwargs:
            del kwargs['add_to_result_map']
        if needs_nested_translation:
            if populate_result_map:
                self._transform_result_map_for_nested_joins(
                    select, transformed_select)
            return text
        froms = self._setup_select_stack(select, entry, asfrom)
        column_clause_args = kwargs.copy()
        column_clause_args.update({
            'within_label_clause': False,
            'within_columns_clause': False
        })
        text = "SELECT "  # we're off to a good start !
        if select._hints:
            hint_text, byfrom = self._setup_select_hints(select)
            if hint_text:
                text += hint_text + " "
        else:
            byfrom = None
        if select._prefixes:
            text += self._generate_prefixes(
                select, select._prefixes, **kwargs)
        text += self.get_select_precolumns(select, **kwargs)
        # the actual list of columns to print in the SELECT column list.
        inner_columns = [
            c for c in [
                self._label_select_column(
                    select,
                    column,
                    populate_result_map, asfrom,
                    column_clause_args,
                    name=name)
                for name, column in select._columns_plus_names
            ]
            if c is not None
        ]
        if populate_result_map and select_wraps_for is not None:
            # if this select is a compiler-generated wrapper,
            # rewrite the targeted columns in the result map
            wrapped_inner_columns = set(select_wraps_for.inner_columns)
            translate = dict(
                (outer, inner.pop()) for outer, inner in [
                    (
                        outer,
                        outer.proxy_set.intersection(wrapped_inner_columns))
                    for outer in select.inner_columns
                ] if inner
            )
            self._result_columns = [
                (key, name, tuple(translate.get(o, o) for o in obj), type_)
                for key, name, obj, type_ in self._result_columns
            ]
        text = self._compose_select_body(
            text, select, inner_columns, froms, byfrom, kwargs)
        if select._statement_hints:
            per_dialect = [
                ht for (dialect_name, ht)
                in select._statement_hints
                if dialect_name in ('*', self.dialect.name)
            ]
            if per_dialect:
                text += " " + self.get_statement_hint_text(per_dialect)
        if self.ctes and self._is_toplevel_select(select):
            # prepend the accumulated WITH clause
            text = self._render_cte_clause() + text
        if select._suffixes:
            text += " " + self._generate_prefixes(
                select, select._suffixes, **kwargs)
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
def _is_toplevel_select(self, select):
"""Return True if the stack is placed at the given select, and
is also the outermost SELECT, meaning there is either no stack
before this one, or the enclosing stack is a topmost INSERT.
"""
return (
self.stack[-1]['selectable'] is select and
(
len(self.stack) == 1 or self.isinsert and len(self.stack) == 2
and self.statement is self.stack[0]['selectable']
)
)
def _setup_select_hints(self, select):
byfrom = dict([
(from_, hinttext % {
'name': from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom):
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'correlate_froms': all_correlate_froms,
'selectable': select,
}
self.stack.append(new_entry)
return froms
    def _compose_select_body(
            self, text, select, inner_columns, froms, byfrom, kwargs):
        """Append the column list, FROM, WHERE, GROUP BY, HAVING,
        ORDER BY, LIMIT/OFFSET and FOR UPDATE portions of a SELECT
        onto ``text`` and return the result.

        ``byfrom`` is the per-FROM hint mapping produced by
        _setup_select_hints(), or None when the select has no hints.
        """
        text += ', '.join(inner_columns)

        if froms:
            text += " \nFROM "

            if select._hints:
                # pass the hint map through so each FROM element can
                # render its dialect-specific hint text
                text += ', '.join(
                    [f._compiler_dispatch(self, asfrom=True,
                                          fromhints=byfrom, **kwargs)
                     for f in froms])
            else:
                text += ', '.join(
                    [f._compiler_dispatch(self, asfrom=True, **kwargs)
                     for f in froms])
        else:
            # no FROM elements; some dialects require a dummy FROM
            text += self.default_from()

        if select._whereclause is not None:
            t = select._whereclause._compiler_dispatch(self, **kwargs)
            if t:
                text += " \nWHERE " + t

        if select._group_by_clause.clauses:
            group_by = select._group_by_clause._compiler_dispatch(
                self, **kwargs)
            if group_by:
                text += " GROUP BY " + group_by

        if select._having is not None:
            t = select._having._compiler_dispatch(self, **kwargs)
            if t:
                text += " \nHAVING " + t

        if select._order_by_clause.clauses:
            text += self.order_by_clause(select, **kwargs)

        if (select._limit_clause is not None or
                select._offset_clause is not None):
            text += self.limit_clause(select, **kwargs)

        if select._for_update_arg is not None:
            text += self.for_update_clause(select, **kwargs)

        return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = sum([
self.cte_positional[cte]
for cte in self.ctes], []) + \
self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
    def returning_clause(self, stmt, returning_cols):
        # Base compiler has no RETURNING support; dialects that do
        # support it override this hook.
        raise exc.CompileError(
            "RETURNING is not supported by this "
            "dialect's statement compiler.")
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
if use_schema and getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
    def visit_insert(self, insert_stmt, **kw):
        """Compile an INSERT statement, including multi-row VALUES,
        INSERT..SELECT, DEFAULT VALUES, prefixes, hints and RETURNING.
        """
        # INSERT gets its own stack entry with empty correlation sets
        self.stack.append(
            {'correlate_froms': set(),
             "asfrom_froms": set(),
             "selectable": insert_stmt})

        self.isinsert = True
        # list of (column, value-text) pairs to render
        crud_params = crud._get_crud_params(self, insert_stmt, **kw)

        if not crud_params and \
                not self.dialect.supports_default_values and \
                not self.dialect.supports_empty_insert:
            raise exc.CompileError("The '%s' dialect with current database "
                                   "version settings does not support empty "
                                   "inserts." %
                                   self.dialect.name)

        if insert_stmt._has_multi_parameters:
            if not self.dialect.supports_multivalues_insert:
                raise exc.CompileError(
                    "The '%s' dialect with current database "
                    "version settings does not support "
                    "in-place multirow inserts." %
                    self.dialect.name)
            # the column list is taken from the first parameter set
            crud_params_single = crud_params[0]
        else:
            crud_params_single = crud_params

        preparer = self.preparer
        supports_default_values = self.dialect.supports_default_values

        text = "INSERT "

        if insert_stmt._prefixes:
            text += self._generate_prefixes(insert_stmt,
                                            insert_stmt._prefixes, **kw)

        text += "INTO "
        table_text = preparer.format_table(insert_stmt.table)

        if insert_stmt._hints:
            # keep only hints addressed to this dialect (or '*')
            dialect_hints = dict([
                (table, hint_text)
                for (table, dialect), hint_text in
                insert_stmt._hints.items()
                if dialect in ('*', self.dialect.name)
            ])
            if insert_stmt.table in dialect_hints:
                table_text = self.format_from_hint_text(
                    table_text,
                    insert_stmt.table,
                    dialect_hints[insert_stmt.table],
                    True
                )

        text += table_text

        if crud_params_single or not supports_default_values:
            text += " (%s)" % ', '.join([preparer.format_column(c[0])
                                         for c in crud_params_single])

        if self.returning or insert_stmt._returning:
            self.returning = self.returning or insert_stmt._returning
            returning_clause = self.returning_clause(
                insert_stmt, self.returning)

            if self.returning_precedes_values:
                text += " " + returning_clause

        if insert_stmt.select is not None:
            text += " %s" % self.process(self._insert_from_select, **kw)
        elif not crud_params and supports_default_values:
            text += " DEFAULT VALUES"
        elif insert_stmt._has_multi_parameters:
            # one parenthesized tuple of values per parameter set
            text += " VALUES %s" % (
                ", ".join(
                    "(%s)" % (
                        ', '.join(c[1] for c in crud_param_set)
                    )
                    for crud_param_set in crud_params
                )
            )
        else:
            text += " VALUES (%s)" % \
                ', '.join([c[1] for c in crud_params])

        if self.returning and not self.returning_precedes_values:
            text += " " + returning_clause

        self.stack.pop(-1)

        return text
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
return from_table._compiler_dispatch(self, asfrom=True,
iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
    def visit_update(self, update_stmt, **kw):
        """Compile an UPDATE statement, including SET, extra FROM
        tables, hints, WHERE, dialect LIMIT and RETURNING.
        """
        # the statement's own table is both correlatable and a FROM
        self.stack.append(
            {'correlate_froms': set([update_stmt.table]),
             "asfrom_froms": set([update_stmt.table]),
             "selectable": update_stmt})

        self.isupdate = True

        extra_froms = update_stmt._extra_froms

        text = "UPDATE "

        if update_stmt._prefixes:
            text += self._generate_prefixes(update_stmt,
                                            update_stmt._prefixes, **kw)

        table_text = self.update_tables_clause(update_stmt, update_stmt.table,
                                               extra_froms, **kw)

        # (column, value-text) pairs for the SET clause
        crud_params = crud._get_crud_params(self, update_stmt, **kw)

        if update_stmt._hints:
            # keep only hints addressed to this dialect (or '*')
            dialect_hints = dict([
                (table, hint_text)
                for (table, dialect), hint_text in
                update_stmt._hints.items()
                if dialect in ('*', self.dialect.name)
            ])
            if update_stmt.table in dialect_hints:
                table_text = self.format_from_hint_text(
                    table_text,
                    update_stmt.table,
                    dialect_hints[update_stmt.table],
                    True
                )
        else:
            dialect_hints = None

        text += table_text

        text += ' SET '
        # qualify SET columns with the table name only when the dialect
        # requires it for UPDATE..FROM
        include_table = extra_froms and \
            self.render_table_with_column_in_update_from
        text += ', '.join(
            c[0]._compiler_dispatch(self,
                                    include_table=include_table) +
            '=' + c[1] for c in crud_params
        )

        if self.returning or update_stmt._returning:
            if not self.returning:
                self.returning = update_stmt._returning
            if self.returning_precedes_values:
                text += " " + self.returning_clause(
                    update_stmt, self.returning)

        if extra_froms:
            extra_from_text = self.update_from_clause(
                update_stmt,
                update_stmt.table,
                extra_froms,
                dialect_hints, **kw)
            if extra_from_text:
                text += " " + extra_from_text

        if update_stmt._whereclause is not None:
            t = self.process(update_stmt._whereclause)
            if t:
                text += " WHERE " + t

        # dialect hook; MySQL renders UPDATE ... LIMIT here
        limit_clause = self.update_limit_clause(update_stmt)
        if limit_clause:
            text += " " + limit_clause

        if self.returning and not self.returning_precedes_values:
            text += " " + self.returning_clause(
                update_stmt, self.returning)

        self.stack.pop(-1)

        return text
    @util.memoized_property
    def _key_getters_for_crud_column(self):
        # memoized; delegates to the crud module to build the key-getter
        # callables it uses when matching parameters to columns
        return crud._key_getters_for_crud_column(self)
    def visit_delete(self, delete_stmt, **kw):
        """Compile a DELETE statement, including prefixes, hints,
        WHERE and RETURNING.
        """
        self.stack.append({'correlate_froms': set([delete_stmt.table]),
                           "asfrom_froms": set([delete_stmt.table]),
                           "selectable": delete_stmt})
        self.isdelete = True

        text = "DELETE "

        if delete_stmt._prefixes:
            text += self._generate_prefixes(delete_stmt,
                                            delete_stmt._prefixes, **kw)

        text += "FROM "
        table_text = delete_stmt.table._compiler_dispatch(
            self, asfrom=True, iscrud=True)

        if delete_stmt._hints:
            # keep only hints addressed to this dialect (or '*')
            dialect_hints = dict([
                (table, hint_text)
                for (table, dialect), hint_text in
                delete_stmt._hints.items()
                if dialect in ('*', self.dialect.name)
            ])
            if delete_stmt.table in dialect_hints:
                table_text = self.format_from_hint_text(
                    table_text,
                    delete_stmt.table,
                    dialect_hints[delete_stmt.table],
                    True
                )

        else:
            dialect_hints = None

        text += table_text

        if delete_stmt._returning:
            self.returning = delete_stmt._returning
            if self.returning_precedes_values:
                text += " " + self.returning_clause(
                    delete_stmt, delete_stmt._returning)

        if delete_stmt._whereclause is not None:
            t = delete_stmt._whereclause._compiler_dispatch(self)
            if t:
                text += " WHERE " + t

        if self.returning and not self.returning_precedes_values:
            text += " " + self.returning_clause(
                delete_stmt, delete_stmt._returning)

        self.stack.pop(-1)

        return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(Compiled):
    """Compile DDL constructs (CREATE/DROP for tables, indexes,
    sequences, schemas and constraints) into strings.
    """

    @util.memoized_property
    def sql_compiler(self):
        # a statement compiler used to render embedded SQL expressions
        # (defaults, CHECK constraints, index expressions)
        return self.dialect.statement_compiler(self.dialect, None)

    @util.memoized_property
    def type_compiler(self):
        return self.dialect.type_compiler

    @property
    def preparer(self):
        return self.dialect.identifier_preparer

    def construct_params(self, params=None):
        # DDL statements carry no bind parameters
        return None

    def visit_ddl(self, ddl, **kwargs):
        # table events can substitute table and schema name
        context = ddl.context
        if isinstance(ddl.target, schema.Table):
            context = context.copy()

            preparer = self.dialect.identifier_preparer
            path = preparer.format_table_seq(ddl.target)
            if len(path) == 1:
                table, sch = path[0], ''
            else:
                table, sch = path[-1], path[0]

            context.setdefault('table', table)
            context.setdefault('schema', sch)
            context.setdefault('fullname', preparer.format_table(ddl.target))

        return self.sql_compiler.post_process_text(ddl.statement % context)

    def visit_create_schema(self, create):
        schema = self.preparer.format_schema(create.element)
        return "CREATE SCHEMA " + schema

    def visit_drop_schema(self, drop):
        schema = self.preparer.format_schema(drop.element)
        text = "DROP SCHEMA " + schema
        if drop.cascade:
            text += " CASCADE"
        return text

    def visit_create_table(self, create):
        table = create.element
        preparer = self.dialect.identifier_preparer

        text = "\n" + " ".join(['CREATE'] +
                               table._prefixes +
                               ['TABLE',
                                preparer.format_table(table),
                                "("])
        separator = "\n"

        # if only one primary key, specify it along with the column
        first_pk = False
        for create_column in create.columns:
            column = create_column.element
            try:
                processed = self.process(create_column,
                                         first_pk=column.primary_key
                                         and not first_pk)
                if processed is not None:
                    text += separator
                    separator = ", \n"
                    text += "\t" + processed
                if column.primary_key:
                    first_pk = True
            except exc.CompileError as ce:
                # re-raise with table/column context attached
                util.raise_from_cause(
                    exc.CompileError(
                        util.u("(in table '%s', column '%s'): %s") %
                        (table.description, column.name, ce.args[0])
                    ))

        const = self.create_table_constraints(
            table, _include_foreign_key_constraints=
            create.include_foreign_key_constraints)
        if const:
            text += separator + "\t" + const

        text += "\n)%s\n\n" % self.post_create_table(table)
        return text

    def visit_create_column(self, create, first_pk=False):
        column = create.element

        if column.system:
            # system columns (e.g. implicit OIDs) are not rendered
            return None

        text = self.get_column_specification(
            column,
            first_pk=first_pk
        )
        const = " ".join(self.process(constraint)
                         for constraint in column.constraints)
        if const:
            text += " " + const

        return text

    def create_table_constraints(
            self, table,
            _include_foreign_key_constraints=None):

        # On some DB order is significant: visit PK first, then the
        # other constraints (engine.ReflectionTest.testbasic failed on FB2)
        constraints = []
        if table.primary_key:
            constraints.append(table.primary_key)

        all_fkcs = table.foreign_key_constraints
        if _include_foreign_key_constraints is not None:
            omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
        else:
            omit_fkcs = set()

        constraints.extend([c for c in table._sorted_constraints
                            if c is not table.primary_key and
                            c not in omit_fkcs])

        # skip constraints whose _create_rule vetoes inline creation, and
        # use_alter constraints when the dialect supports ALTER
        return ", \n\t".join(
            p for p in
            (self.process(constraint)
             for constraint in constraints
             if (
                 constraint._create_rule is None or
                 constraint._create_rule(self))
             and (
                 not self.dialect.supports_alter or
                 not getattr(constraint, 'use_alter', False)
             )) if p is not None
        )

    def visit_drop_table(self, drop):
        return "\nDROP TABLE " + self.preparer.format_table(drop.element)

    def visit_drop_view(self, drop):
        return "\nDROP VIEW " + self.preparer.format_table(drop.element)

    def _verify_index_table(self, index):
        if index.table is None:
            raise exc.CompileError("Index '%s' is not associated "
                                   "with any table." % index.name)

    def visit_create_index(self, create, include_schema=False,
                           include_table_schema=True):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table,
                                      use_schema=include_table_schema),
                ', '.join(
                    self.sql_compiler.process(
                        expr, include_table=False, literal_binds=True) for
                    expr in index.expressions)
            )
        return text

    def visit_drop_index(self, drop):
        index = drop.element
        return "\nDROP INDEX " + self._prepared_index_name(
            index, include_schema=True)

    def _prepared_index_name(self, index, include_schema=False):
        if include_schema and index.table is not None and index.table.schema:
            schema = index.table.schema
            schema_name = self.preparer.quote_schema(schema)
        else:
            schema_name = None

        ident = index.name
        if isinstance(ident, elements._truncated_label):
            # auto-generated name: truncate to the dialect maximum,
            # disambiguating with an md5 suffix
            max_ = self.dialect.max_index_name_length or \
                self.dialect.max_identifier_length
            if len(ident) > max_:
                ident = ident[0:max_ - 8] + \
                    "_" + util.md5_hex(ident)[-4:]
        else:
            self.dialect.validate_identifier(ident)

        index_name = self.preparer.quote(ident)

        if schema_name:
            index_name = schema_name + "." + index_name
        return index_name

    def visit_add_constraint(self, create):
        return "ALTER TABLE %s ADD %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )

    def visit_create_sequence(self, create):
        text = "CREATE SEQUENCE %s" % \
            self.preparer.format_sequence(create.element)
        if create.element.increment is not None:
            text += " INCREMENT BY %d" % create.element.increment
        if create.element.start is not None:
            text += " START WITH %d" % create.element.start
        if create.element.minvalue is not None:
            text += " MINVALUE %d" % create.element.minvalue
        if create.element.maxvalue is not None:
            text += " MAXVALUE %d" % create.element.maxvalue
        if create.element.nominvalue is not None:
            text += " NO MINVALUE"
        if create.element.nomaxvalue is not None:
            text += " NO MAXVALUE"
        # NOTE(review): CYCLE is emitted whenever ``cycle`` is not None,
        # even if it is False — confirm this matches intended semantics
        if create.element.cycle is not None:
            text += " CYCLE"
        return text

    def visit_drop_sequence(self, drop):
        return "DROP SEQUENCE %s" % \
            self.preparer.format_sequence(drop.element)

    def visit_drop_constraint(self, drop):
        constraint = drop.element
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
        else:
            formatted_name = None

        if formatted_name is None:
            raise exc.CompileError(
                "Can't emit DROP CONSTRAINT for constraint %r; "
                "it has no name" % drop.element)
        return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
            self.preparer.format_table(drop.element.table),
            formatted_name,
            drop.cascade and " CASCADE" or ""
        )

    def get_column_specification(self, column, **kwargs):
        # "<name> <type> [DEFAULT ...] [NOT NULL]"
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(
                column.type, type_expression=column)
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default

        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def post_create_table(self, table):
        # hook for dialects to append text after the closing paren
        return ''

    def get_column_default_string(self, column):
        if isinstance(column.server_default, schema.DefaultClause):
            if isinstance(column.server_default.arg, util.string_types):
                return "'%s'" % column.server_default.arg
            else:
                return self.sql_compiler.process(
                    column.server_default.arg, literal_binds=True)
        else:
            return None

    def visit_check_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
                                                         include_table=False,
                                                         literal_binds=True)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_column_check_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        # sqltext is used verbatim here, not compiled
        text += "CHECK (%s)" % constraint.sqltext
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        text += "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_foreign_key_constraint(self, constraint):
        preparer = self.dialect.identifier_preparer
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        remote_table = list(constraint.elements)[0].column.table
        text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name)
                      for f in constraint.elements),
            self.define_constraint_remote_table(
                constraint, remote_table, preparer),
            ', '.join(preparer.quote(f.column.name)
                      for f in constraint.elements)
        )
        text += self.define_constraint_match(constraint)
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""

        return preparer.format_table(table)

    def visit_unique_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            text += "CONSTRAINT %s " % formatted_name
        text += "UNIQUE (%s)" % (
            ', '.join(self.preparer.quote(c.name)
                      for c in constraint))
        text += self.define_constraint_deferrability(constraint)
        return text

    def define_constraint_cascades(self, constraint):
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete
        if constraint.onupdate is not None:
            text += " ON UPDATE %s" % constraint.onupdate
        return text

    def define_constraint_deferrability(self, constraint):
        text = ""
        if constraint.deferrable is not None:
            if constraint.deferrable:
                text += " DEFERRABLE"
            else:
                text += " NOT DEFERRABLE"
        if constraint.initially is not None:
            text += " INITIALLY %s" % constraint.initially
        return text

    def define_constraint_match(self, constraint):
        text = ""
        if constraint.match is not None:
            text += " MATCH %s" % constraint.match
        return text
class GenericTypeCompiler(TypeCompiler):
    """Render types to their default ANSI-style DDL strings.

    The uppercase ``visit_*`` methods render the SQL-standard type
    names; the lowercase ones map SQLAlchemy's generic types onto
    them.  Dialects override individual methods as needed.
    """

    def _render_numeric_type(self, name, type_):
        # NUMERIC and DECIMAL share the precision/scale rendering rules
        if type_.precision is None:
            return name
        if type_.scale is None:
            return "%s(%s)" % (name, type_.precision)
        return "%s(%s, %s)" % (name, type_.precision, type_.scale)

    def _render_string_type(self, type_, name):
        # optional length and COLLATE suffixes
        pieces = [name]
        if type_.length:
            pieces.append("(%d)" % type_.length)
        if type_.collation:
            pieces.append(' COLLATE "%s"' % type_.collation)
        return "".join(pieces)

    def _render_sized_binary(self, name, type_):
        # length suffix only when a truthy length is present
        if type_.length:
            return "%s(%d)" % (name, type_.length)
        return name

    def visit_FLOAT(self, type_, **kw):
        return "FLOAT"

    def visit_REAL(self, type_, **kw):
        return "REAL"

    def visit_NUMERIC(self, type_, **kw):
        return self._render_numeric_type("NUMERIC", type_)

    def visit_DECIMAL(self, type_, **kw):
        return self._render_numeric_type("DECIMAL", type_)

    def visit_INTEGER(self, type_, **kw):
        return "INTEGER"

    def visit_SMALLINT(self, type_, **kw):
        return "SMALLINT"

    def visit_BIGINT(self, type_, **kw):
        return "BIGINT"

    def visit_TIMESTAMP(self, type_, **kw):
        return 'TIMESTAMP'

    def visit_DATETIME(self, type_, **kw):
        return "DATETIME"

    def visit_DATE(self, type_, **kw):
        return "DATE"

    def visit_TIME(self, type_, **kw):
        return "TIME"

    def visit_CLOB(self, type_, **kw):
        return "CLOB"

    def visit_NCLOB(self, type_, **kw):
        return "NCLOB"

    def visit_CHAR(self, type_, **kw):
        return self._render_string_type(type_, "CHAR")

    def visit_NCHAR(self, type_, **kw):
        return self._render_string_type(type_, "NCHAR")

    def visit_VARCHAR(self, type_, **kw):
        return self._render_string_type(type_, "VARCHAR")

    def visit_NVARCHAR(self, type_, **kw):
        return self._render_string_type(type_, "NVARCHAR")

    def visit_TEXT(self, type_, **kw):
        return self._render_string_type(type_, "TEXT")

    def visit_BLOB(self, type_, **kw):
        return "BLOB"

    def visit_BINARY(self, type_, **kw):
        return self._render_sized_binary("BINARY", type_)

    def visit_VARBINARY(self, type_, **kw):
        return self._render_sized_binary("VARBINARY", type_)

    def visit_BOOLEAN(self, type_, **kw):
        return "BOOLEAN"

    # generic-type delegation onto the uppercase renderers

    def visit_large_binary(self, type_, **kw):
        return self.visit_BLOB(type_, **kw)

    def visit_boolean(self, type_, **kw):
        return self.visit_BOOLEAN(type_, **kw)

    def visit_time(self, type_, **kw):
        return self.visit_TIME(type_, **kw)

    def visit_datetime(self, type_, **kw):
        return self.visit_DATETIME(type_, **kw)

    def visit_date(self, type_, **kw):
        return self.visit_DATE(type_, **kw)

    def visit_big_integer(self, type_, **kw):
        return self.visit_BIGINT(type_, **kw)

    def visit_small_integer(self, type_, **kw):
        return self.visit_SMALLINT(type_, **kw)

    def visit_integer(self, type_, **kw):
        return self.visit_INTEGER(type_, **kw)

    def visit_real(self, type_, **kw):
        return self.visit_REAL(type_, **kw)

    def visit_float(self, type_, **kw):
        return self.visit_FLOAT(type_, **kw)

    def visit_numeric(self, type_, **kw):
        return self.visit_NUMERIC(type_, **kw)

    def visit_string(self, type_, **kw):
        return self.visit_VARCHAR(type_, **kw)

    def visit_unicode(self, type_, **kw):
        return self.visit_VARCHAR(type_, **kw)

    def visit_text(self, type_, **kw):
        return self.visit_TEXT(type_, **kw)

    def visit_unicode_text(self, type_, **kw):
        return self.visit_TEXT(type_, **kw)

    def visit_enum(self, type_, **kw):
        return self.visit_VARCHAR(type_, **kw)

    def visit_null(self, type_, **kw):
        raise exc.CompileError("Can't generate DDL for %r; "
                               "did you forget to specify a "
                               "type on this Column?" % type_)

    def visit_type_decorator(self, type_, **kw):
        return self.process(type_.type_engine(self.dialect), **kw)

    def visit_user_defined(self, type_, **kw):
        return type_.get_col_spec(**kw)
class IdentifierPreparer(object):

    """Handle quoting and case-folding of identifiers based on options."""

    # class-level tables, overridable by dialects
    reserved_words = RESERVED_WORDS

    legal_characters = LEGAL_CHARACTERS

    illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS

    def __init__(self, dialect, initial_quote='"',
                 final_quote=None, escape_quote='"', omit_schema=False):
        """Construct a new ``IdentifierPreparer`` object.

        initial_quote
          Character that begins a delimited identifier.

        final_quote
          Character that ends a delimited identifier. Defaults to
          `initial_quote`.

        escape_quote
          Character used to escape an embedded quote character
          (doubled when escaping).

        omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemae.
        """

        self.dialect = dialect
        self.initial_quote = initial_quote
        self.final_quote = final_quote or self.initial_quote
        self.escape_quote = escape_quote
        self.escape_to_quote = self.escape_quote * 2
        self.omit_schema = omit_schema
        # cache of identifier -> quoted (or unquoted) string
        self._strings = {}

    def _escape_identifier(self, value):
        """Escape an identifier.

        Subclasses should override this to provide database-dependent
        escaping behavior.
        """

        return value.replace(self.escape_quote, self.escape_to_quote)

    def _unescape_identifier(self, value):
        """Canonicalize an escaped identifier.

        Subclasses should override this to provide database-dependent
        unescaping behavior that reverses _escape_identifier.
        """

        return value.replace(self.escape_to_quote, self.escape_quote)

    def quote_identifier(self, value):
        """Quote an identifier.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """

        return self.initial_quote + \
            self._escape_identifier(value) + \
            self.final_quote

    def _requires_quotes(self, value):
        """Return True if the given identifier requires quoting."""
        lc_value = value.lower()
        # quote when reserved, starting with an illegal character,
        # containing illegal characters, or not entirely lower-case
        return (lc_value in self.reserved_words
                or value[0] in self.illegal_initial_characters
                or not self.legal_characters.match(util.text_type(value))
                or (lc_value != value))

    def quote_schema(self, schema, force=None):
        """Conditionally quote a schema.

        Subclasses can override this to provide database-dependent
        quoting behavior for schema names.

        the 'force' flag should be considered deprecated.

        """
        return self.quote(schema, force)

    def quote(self, ident, force=None):
        """Conditionally quote an identifier.

        the 'force' flag should be considered deprecated.
        """

        # NOTE: the ``force`` argument is immediately overridden by the
        # identifier's own ``quote`` attribute when present (or None)
        force = getattr(ident, "quote", None)

        if force is None:
            if ident in self._strings:
                return self._strings[ident]
            else:
                if self._requires_quotes(ident):
                    self._strings[ident] = self.quote_identifier(ident)
                else:
                    self._strings[ident] = ident
                return self._strings[ident]
        elif force:
            return self.quote_identifier(ident)
        else:
            return ident

    def format_sequence(self, sequence, use_schema=True):
        # quoted, optionally schema-qualified sequence name
        name = self.quote(sequence.name)
        if (not self.omit_schema and use_schema and
                sequence.schema is not None):
            name = self.quote_schema(sequence.schema) + "." + name
        return name

    def format_label(self, label, name=None):
        return self.quote(name or label.name)

    def format_alias(self, alias, name=None):
        return self.quote(name or alias.name)

    def format_savepoint(self, savepoint, name=None):
        return self.quote(name or savepoint.ident)

    @util.dependencies("sqlalchemy.sql.naming")
    def format_constraint(self, naming, constraint):
        # deferred names are resolved through the naming convention
        # system; may return None for _defer_none_name
        if isinstance(constraint.name, elements._defer_name):
            name = naming._constraint_name_for_table(
                constraint, constraint.table)
            if name:
                return self.quote(name)
            elif isinstance(constraint.name, elements._defer_none_name):
                return None
        return self.quote(constraint.name)

    def format_table(self, table, use_schema=True, name=None):
        """Prepare a quoted table and schema name."""

        if name is None:
            name = table.name
        result = self.quote(name)
        if not self.omit_schema and use_schema \
                and getattr(table, "schema", None):
            result = self.quote_schema(table.schema) + "." + result
        return result

    def format_schema(self, name, quote=None):
        """Prepare a quoted schema name."""

        return self.quote(name, quote)

    def format_column(self, column, use_table=False,
                      name=None, table_name=None):
        """Prepare a quoted column name."""

        if name is None:
            name = column.name
        if not getattr(column, 'is_literal', False):
            if use_table:
                return self.format_table(
                    column.table, use_schema=False,
                    name=table_name) + "." + self.quote(name)
            else:
                return self.quote(name)
        else:
            # literal textual elements get stuck into ColumnClause a lot,
            # which shouldn't get quoted
            if use_table:
                return self.format_table(
                    column.table, use_schema=False,
                    name=table_name) + '.' + name
            else:
                return name

    def format_table_seq(self, table, use_schema=True):
        """Format table name and schema as a tuple."""

        # Dialects with more levels in their fully qualified references
        # ('database', 'owner', etc.) could override this and return
        # a longer sequence.

        if not self.omit_schema and use_schema and \
                getattr(table, 'schema', None):
            return (self.quote_schema(table.schema),
                    self.format_table(table, use_schema=False))
        else:
            return (self.format_table(table, use_schema=False), )

    @util.memoized_property
    def _r_identifiers(self):
        # regexp matching one dotted component, quoted or bare; used by
        # unformat_identifiers()
        initial, final, escaped_final = \
            [re.escape(s) for s in
             (self.initial_quote, self.final_quote,
              self._escape_identifier(self.final_quote))]
        r = re.compile(
            r'(?:'
            r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
            r'|([^\.]+))(?=\.|$))+' %
            {'initial': initial,
             'final': final,
             'escaped': escaped_final})
        return r

    def unformat_identifiers(self, identifiers):
        """Unpack 'schema.table.column'-like strings into components."""

        r = self._r_identifiers
        return [self._unescape_identifier(i)
                for i in [a or b for a, b in r.findall(identifiers)]]
| thundernet8/WRGameVideos-API | venv/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py | Python | gpl-2.0 | 100,608 | [
"VisIt"
] | ea1cad4d4d1aa54713c07248aee2674b6fac79b7c55cc005077626a742454e94 |
import os
from boxbranding import getMachineBrand, getMachineName
import xml.etree.cElementTree
from datetime import datetime
from time import ctime, time, strftime, localtime, mktime
from bisect import insort
from enigma import eActionMap, quitMainloop
from Components.config import config
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications
from Tools.XMLTools import stringToXML
import timer
import NavigationInstance
#global variables begin
# pending-action flags for the highest-priority shutdown/reboot/restart
# timers (see the priority table below in this file's comments)
DSsave = False
RSsave = False
RBsave = False
aeDSsave = False
# True when the box was woken up by a power timer
wasTimerWakeup = False
try:
	from Screens.InfoBar import InfoBar
except Exception, e:
	print "[PowerTimer] import from 'Screens.InfoBar import InfoBar' failed:", e
	# fall back to a falsy sentinel so later InfoBar checks are safe
	InfoBar = False
#+++
debug = False
#+++
#global variables end
#----------------------------------------------------------------------------------------------------
#Timer shutdown, reboot and restart priority
#1. wakeup
#2. wakeuptostandby -> (same as 1.)
#3. deepstandby -> DSsave
#4. deppstandby after event -> aeDSsave
#5. reboot system -> RBsave
#6. restart gui -> RSsave
#7. standby
#8. autostandby
#9. nothing (no function, only for suppress autodeepstandby timer)
#10. autodeepstandby
#-for overlapping timers, or when the next timer starts within 15 minutes, only the highest-priority timer is executed (for equal types, the next timer is executed)
#-autodeepstandby timer is only effective if no other timer is active or current time is in the time window
#-priority for repeated timers: begin and end times are only shifted temporarily; the end-action priority is higher than the begin-action
#----------------------------------------------------------------------------------------------------
#reset wakeup state after ending timer
def resetTimerWakeup():
	"""Clear the timer-wakeup marker file and the global wakeup flag.

	Called after a power timer has finished, so that a later decision is
	not based on a stale "box was woken by a timer" state.
	"""
	global wasTimerWakeup
	if os.path.exists("/tmp/was_powertimer_wakeup"):
		os.remove("/tmp/was_powertimer_wakeup")
		if debug: print "[POWERTIMER] reset wakeup state"
	wasTimerWakeup = False
# Extract the (begin, end) time pair from an EPG event.
# The end time is derived from the event's duration.
def parseEvent(ev):
	start = ev.getBeginTime()
	stop = start + ev.getDuration()
	return start, stop
class AFTEREVENT:
	"""Enumeration of actions performed after a power timer has ended."""
	NONE = 0
	WAKEUP = 1
	WAKEUPTOSTANDBY = 2
	STANDBY = 3
	DEEPSTANDBY = 4

	def __init__(self):
		pass
class TIMERTYPE:
	"""Enumeration of the power timer kinds, in rough priority order."""
	NONE = 0
	WAKEUP = 1
	WAKEUPTOSTANDBY = 2
	AUTOSTANDBY = 3
	AUTODEEPSTANDBY = 4
	STANDBY = 5
	DEEPSTANDBY = 6
	REBOOT = 7
	RESTART = 8

	def __init__(self):
		pass
# please do not translate log messages
class PowerTimerEntry(timer.TimerEntry, object):
	"""One scheduled power-management action (wakeup, standby, reboot, ...).

	State machine inherited from timer.TimerEntry:
	StateWaiting -> StatePrepared -> StateRunning -> StateEnded.
	activate() is called on each transition and may postpone the timer
	(via do_backoff) when recordings or higher-priority timers interfere.
	"""
	def __init__(self, begin, end, disabled = False, afterEvent = AFTEREVENT.NONE, timerType = TIMERTYPE.WAKEUP, checkOldTimers = False, autosleepdelay = 60):
		timer.TimerEntry.__init__(self, int(begin), int(end))
		if checkOldTimers:
			# a begin more than 14 days (1209600 s) in the past is stale
			if self.begin < time() - 1209600:
				self.begin = int(time())
		if self.end < self.begin:
			self.end = self.begin
		self.dontSave = False
		self.disabled = disabled
		self.timer = None
		self.__record_service = None
		self.start_prepare = 0
		self.timerType = timerType
		self.afterEvent = afterEvent
		self.autoincrease = False
		self.autoincreasetime = 3600 * 24 # 1 day
		# auto(deep)standby options; overwritten by createTimer() on load
		self.autosleepinstandbyonly = 'no'
		self.autosleepdelay = autosleepdelay
		self.autosleeprepeat = 'once'
		self.autosleepwindow = 'no'
		self.autosleepbegin = self.begin
		self.autosleepend = self.end
		# network-activity guards for autodeepstandby
		self.nettraffic = 'no'
		self.trafficlimit = 100	# kbit/s threshold
		self.netip = 'no'
		self.ipadress = "0.0.0.0"	# comma-separated list of IPs to ping
		self.log_entries = []
		self.resetState()
		self.messageBoxAnswerPending = False
		#check autopowertimer
		if (self.timerType == TIMERTYPE.AUTOSTANDBY or self.timerType == TIMERTYPE.AUTODEEPSTANDBY) and not self.disabled and time() > 3600 and self.begin > time():
			self.begin = int(time())	#the begin is in the future -> set to current time = no start delay of this timer

	def __repr__(self):
		"""Human-readable representation used in debug logs."""
		timertype = {
			TIMERTYPE.NONE: "nothing",
			TIMERTYPE.WAKEUP: "wakeup",
			TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
			TIMERTYPE.AUTOSTANDBY: "autostandby",
			TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
			TIMERTYPE.STANDBY: "standby",
			TIMERTYPE.DEEPSTANDBY: "deepstandby",
			TIMERTYPE.REBOOT: "reboot",
			TIMERTYPE.RESTART: "restart"
			}[self.timerType]
		if not self.disabled:
			return "PowerTimerEntry(type=%s, begin=%s)" % (timertype, ctime(self.begin))
		else:
			return "PowerTimerEntry(type=%s, begin=%s Disabled)" % (timertype, ctime(self.begin))

	def log(self, code, msg):
		"""Append a (timestamp, code, message) entry to the timer log."""
		self.log_entries.append((int(time()), code, msg))

	def do_backoff(self):
		"""Compute the retry delay (seconds) for a postponed timer.

		In standby without a timer wakeup, or while any timer was shifted
		(the *save flags), the delay is a flat 300 s; otherwise it grows
		300 -> 600 -> 900 s and is capped at 900 s.
		"""
		if Screens.Standby.inStandby and not wasTimerWakeup or RSsave or RBsave or aeDSsave or DSsave:
			self.backoff = 300
		else:
			if self.backoff == 0:
				self.backoff = 300
			else:
				self.backoff += 300
				if self.backoff > 900:
					self.backoff = 900
		self.log(10, "backoff: retry in %d minutes" % (int(self.backoff)/60))

	def activate(self):
		"""Perform the action for the next state transition.

		Returns True to accept the transition, False to keep the current
		state (the entry has then adjusted begin/end for a retry).
		"""
		global RSsave, RBsave, DSsave, aeDSsave, wasTimerWakeup, InfoBar
		# late import: InfoBar may not have existed at module load time
		if not InfoBar:
			try:
				from Screens.InfoBar import InfoBar
			except Exception, e:
				print "[PowerTimer] import from 'Screens.InfoBar import InfoBar' failed:", e

		isRecTimerWakeup = breakPT = shiftPT = False
		now = time()
		next_state = self.state + 1
		self.log(5, "activating state %d" % next_state)

		# preparation of auto timers: bind key listener and compute the
		# effective sleep window / first trigger time
		if next_state == self.StatePrepared and (self.timerType == TIMERTYPE.AUTOSTANDBY or self.timerType == TIMERTYPE.AUTODEEPSTANDBY):
			eActionMap.getInstance().bindAction('', -0x7FFFFFFF, self.keyPressed)
			if self.autosleepwindow == 'yes':
				# project the configured begin/end clock times onto today
				ltm = localtime(now)
				asb = strftime("%H:%M", localtime(self.autosleepbegin)).split(':')
				ase = strftime("%H:%M", localtime(self.autosleepend)).split(':')
				self.autosleepbegin = int(mktime(datetime(ltm.tm_year, ltm.tm_mon, ltm.tm_mday, int(asb[0]), int(asb[1])).timetuple()))
				self.autosleepend = int(mktime(datetime(ltm.tm_year, ltm.tm_mon, ltm.tm_mday, int(ase[0]), int(ase[1])).timetuple()))
				if self.autosleepend <= self.autosleepbegin:
					self.autosleepbegin -= 86400
			if self.getAutoSleepWindow():
				if now < self.autosleepbegin and now > self.autosleepbegin - self.prepare_time - 3: #begin is in prepare time window
					self.begin = self.end = self.autosleepbegin + int(self.autosleepdelay)*60
				else:
					self.begin = self.end = int(now) + int(self.autosleepdelay)*60
			else:
				return False
			if self.timerType == TIMERTYPE.AUTODEEPSTANDBY:
				self.getNetworkTraffic(getInitialValue = True)

		if (next_state == self.StateRunning or next_state == self.StateEnded) and NavigationInstance.instance.PowerTimer is None:
			#TODO: running/ended timer at system start has no nav instance
			#First fix: crash in getPriorityCheck (NavigationInstance.instance.PowerTimer...)
			#Second fix: suppress the message (A finished powertimer wants to ...)
			if debug: print "*****NavigationInstance.instance.PowerTimer is None*****", self.timerType, self.state, ctime(self.begin), ctime(self.end)
			return True
		# skip actions that are more than 15 minutes away from their slot
		elif next_state == self.StateRunning and abs(self.begin - now) > 900: return True
		elif next_state == self.StateEnded and abs(self.end - now) > 900: return True

		if next_state == self.StateRunning or next_state == self.StateEnded:
			if NavigationInstance.instance.isRecordTimerImageStandard:
				isRecTimerWakeup = NavigationInstance.instance.RecordTimer.isRecTimerWakeup()
			if isRecTimerWakeup:
				wasTimerWakeup = True
			elif os.path.exists("/tmp/was_powertimer_wakeup") and not wasTimerWakeup:
				wasTimerWakeup = int(open("/tmp/was_powertimer_wakeup", "r").read()) and True or False

		if next_state == self.StatePrepared:
			self.log(6, "prepare ok, waiting for begin: %s" % ctime(self.begin))
			self.backoff = 0
			return True

		elif next_state == self.StateRunning:
			# if this timer has been cancelled, just go to "end" state.
			if self.cancelled:
				return True
			if self.failed:
				return True

			if self.timerType == TIMERTYPE.NONE:
				return True

			elif self.timerType == TIMERTYPE.WAKEUP:
				if Screens.Standby.inStandby:
					Screens.Standby.inStandby.Power()
				return True

			elif self.timerType == TIMERTYPE.WAKEUPTOSTANDBY:
				return True

			elif self.timerType == TIMERTYPE.STANDBY:
				if debug: print "self.timerType == TIMERTYPE.STANDBY:"
				prioPT = [TIMERTYPE.WAKEUP,TIMERTYPE.RESTART,TIMERTYPE.REBOOT,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.WAKEUP,AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				if not Screens.Standby.inStandby and not breakPT: # not already in standby
					callback = self.sendStandbyNotification
					message = _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName())
					messageboxtyp = MessageBox.TYPE_YESNO
					timeout = 180
					default = True
					if InfoBar and InfoBar.instance:
						InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
					else:
						Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
				return True

			elif self.timerType == TIMERTYPE.AUTOSTANDBY:
				if debug: print "self.timerType == TIMERTYPE.AUTOSTANDBY:"
				if not self.getAutoSleepWindow():
					return False
				if not Screens.Standby.inStandby and not self.messageBoxAnswerPending: # not already in standby
					self.messageBoxAnswerPending = True
					callback = self.sendStandbyNotification
					message = _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName())
					messageboxtyp = MessageBox.TYPE_YESNO
					timeout = 180
					default = True
					if InfoBar and InfoBar.instance:
						InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
					else:
						Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
					if self.autosleeprepeat == "once":
						eActionMap.getInstance().unbindAction('', self.keyPressed)
						return True
					else:
						# repeating: re-arm for the next sleep interval
						self.begin = self.end = int(now) + int(self.autosleepdelay)*60
				else:
					self.begin = self.end = int(now) + int(self.autosleepdelay)*60

			elif self.timerType == TIMERTYPE.AUTODEEPSTANDBY:
				if debug: print "self.timerType == TIMERTYPE.AUTODEEPSTANDBY:"
				if not self.getAutoSleepWindow():
					return False
				# postpone when anything is busy: recordings, other power
				# timers, network activity, or pingable hosts
				if isRecTimerWakeup or (self.autosleepinstandbyonly == 'yes' and not Screens.Standby.inStandby) \
					or NavigationInstance.instance.PowerTimer.isProcessing() or abs(NavigationInstance.instance.PowerTimer.getNextPowerManagerTime() - now) <= 900 or self.getNetworkAdress() or self.getNetworkTraffic() \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					self.do_backoff()
					# retry
					self.begin = self.end = int(now) + self.backoff
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.autosleeprepeat == "once":
						self.disabled = True
					if Screens.Standby.inStandby or self.autosleepinstandbyonly == 'noquery': # in standby or option 'without query' is enabled
						print "[PowerTimer] quitMainloop #1"
						quitMainloop(1)
						return True
					elif not self.messageBoxAnswerPending:
						self.messageBoxAnswerPending = True
						callback = self.sendTryQuitMainloopNotification
						message = _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName())
						messageboxtyp = MessageBox.TYPE_YESNO
						timeout = 180
						default = True
						if InfoBar and InfoBar.instance:
							InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
						else:
							Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
						if self.autosleeprepeat == "once":
							eActionMap.getInstance().unbindAction('', self.keyPressed)
							return True
					self.begin = self.end = int(now) + int(self.autosleepdelay)*60

			elif self.timerType == TIMERTYPE.RESTART:
				if debug: print "self.timerType == TIMERTYPE.RESTART:"
				#check priority
				prioPT = [TIMERTYPE.RESTART,TIMERTYPE.REBOOT,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if RBsave or aeDSsave or DSsave:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values from other timer
				if False:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				if isRecTimerWakeup or shiftPT or breakPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not RSsave:
						# remember original times before shifting
						self.savebegin = self.begin
						self.saveend = self.end
						RSsave = True
					if not breakPT:
						self.do_backoff()
						#check difference begin to end before shift begin time
						if RSsave and self.end - self.begin > 3 and self.end - now - self.backoff <= 240: breakPT = True
					#breakPT
					if breakPT:
						if self.repeated and RSsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						RSsave = False
						return True
					# retry
					oldbegin = self.begin
					self.begin = int(now) + self.backoff
					if abs(self.end - oldbegin) <= 3:
						self.end = self.begin
					else:
						if not self.repeated and self.end < self.begin + 300:
							self.end = self.begin + 300
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and RSsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #4"
						quitMainloop(3)
					else:
						callback = self.sendTryToRestartNotification
						# NOTE(review): this message has no %s placeholders but is
						# still %-formatted with two values - looks suspicious, but
						# left untouched here (behavior-preserving doc pass)
						message = _("A finished powertimer wants to restart the user interface.\nDo that now?") % (getMachineBrand(), getMachineName())
						messageboxtyp = MessageBox.TYPE_YESNO
						timeout = 180
						default = True
						if InfoBar and InfoBar.instance:
							InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
						else:
							Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
				RSsave = False
				return True

			elif self.timerType == TIMERTYPE.REBOOT:
				if debug: print "self.timerType == TIMERTYPE.REBOOT:"
				#check priority
				prioPT = [TIMERTYPE.REBOOT,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if aeDSsave or DSsave:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values from other timer
				if RSsave:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				if isRecTimerWakeup or shiftPT or breakPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not RBsave:
						self.savebegin = self.begin
						self.saveend = self.end
						RBsave = True
					if not breakPT:
						self.do_backoff()
						#check difference begin to end before shift begin time
						if RBsave and self.end - self.begin > 3 and self.end - now - self.backoff <= 240: breakPT = True
					#breakPT
					if breakPT:
						if self.repeated and RBsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						RBsave = False
						return True
					# retry
					oldbegin = self.begin
					self.begin = int(now) + self.backoff
					if abs(self.end - oldbegin) <= 3:
						self.end = self.begin
					else:
						if not self.repeated and self.end < self.begin + 300:
							self.end = self.begin + 300
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and RBsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #3"
						quitMainloop(2)
					else:
						callback = self.sendTryToRebootNotification
						message = _("A finished powertimer wants to reboot your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName())
						messageboxtyp = MessageBox.TYPE_YESNO
						timeout = 180
						default = True
						if InfoBar and InfoBar.instance:
							InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
						else:
							Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
				RBsave = False
				return True

			elif self.timerType == TIMERTYPE.DEEPSTANDBY:
				if debug: print "self.timerType == TIMERTYPE.DEEPSTANDBY:"
				#check priority
				prioPT = [TIMERTYPE.WAKEUP,TIMERTYPE.WAKEUPTOSTANDBY,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.WAKEUP,AFTEREVENT.WAKEUPTOSTANDBY,AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if False:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values from other timer
				if RSsave or RBsave or aeDSsave:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				if isRecTimerWakeup or shiftPT or breakPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not DSsave:
						self.savebegin = self.begin
						self.saveend = self.end
						DSsave = True
					if not breakPT:
						self.do_backoff()
						#check difference begin to end before shift begin time
						if DSsave and self.end - self.begin > 3 and self.end - now - self.backoff <= 240: breakPT = True
					#breakPT
					if breakPT:
						if self.repeated and DSsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						DSsave = False
						return True
					# retry
					oldbegin = self.begin
					self.begin = int(now) + self.backoff
					if abs(self.end - oldbegin) <= 3:
						self.end = self.begin
					else:
						if not self.repeated and self.end < self.begin + 300:
							self.end = self.begin + 300
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and DSsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #2"
						quitMainloop(1)
					else:
						callback = self.sendTryQuitMainloopNotification
						message = _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName())
						messageboxtyp = MessageBox.TYPE_YESNO
						timeout = 180
						default = True
						if InfoBar and InfoBar.instance:
							InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
						else:
							Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
				DSsave = False
				return True

		elif next_state == self.StateEnded:
			if self.afterEvent == AFTEREVENT.WAKEUP:
				if Screens.Standby.inStandby:
					Screens.Standby.inStandby.Power()
			elif self.afterEvent == AFTEREVENT.STANDBY:
				if not Screens.Standby.inStandby: # not already in standby
					callback = self.sendStandbyNotification
					message = _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName())
					messageboxtyp = MessageBox.TYPE_YESNO
					timeout = 180
					default = True
					if InfoBar and InfoBar.instance:
						InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
					else:
						Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
			elif self.afterEvent == AFTEREVENT.DEEPSTANDBY:
				if debug: print "self.afterEvent == AFTEREVENT.DEEPSTANDBY:"
				#check priority
				prioPT = [TIMERTYPE.WAKEUP,TIMERTYPE.WAKEUPTOSTANDBY,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.WAKEUP,AFTEREVENT.WAKEUPTOSTANDBY,AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if DSsave:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values
				if RSsave or RBsave:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				runningPT = False
				#option: check other powertimer is running (current disabled)
				#runningPT = NavigationInstance.instance.PowerTimer.isProcessing(exceptTimer = TIMERTYPE.NONE, endedTimer = self.timerType)
				if isRecTimerWakeup or shiftPT or breakPT or runningPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not aeDSsave:
						self.savebegin = self.begin
						self.saveend = self.end
						aeDSsave = True
					if not breakPT: self.do_backoff()
					#breakPT
					if breakPT:
						if self.repeated and aeDSsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						aeDSsave = False
						return True
					# retry
					self.end = int(now) + self.backoff
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and aeDSsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #5"
						quitMainloop(1)
					else:
						callback = self.sendTryQuitMainloopNotification
						message = _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName())
						messageboxtyp = MessageBox.TYPE_YESNO
						timeout = 180
						default = True
						if InfoBar and InfoBar.instance:
							InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
						else:
							Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
				aeDSsave = False
			NavigationInstance.instance.PowerTimer.saveTimer()
			resetTimerWakeup()
			return True

	def setAutoincreaseEnd(self, entry = None):
		"""Try to extend this timer's end; returns True when extended.

		Without an entry the end grows by autoincreasetime; otherwise it
		is clamped 30 s before the given entry's begin, subject to a
		TimerSanityCheck against the active timer list.
		"""
		if not self.autoincrease:
			return False
		if entry is None:
			new_end = int(time()) + self.autoincreasetime
		else:
			new_end = entry.begin - 30
		dummyentry = PowerTimerEntry(self.begin, new_end, disabled=True, afterEvent = self.afterEvent, timerType = self.timerType)
		dummyentry.disabled = self.disabled
		timersanitycheck = TimerSanityCheck(NavigationInstance.instance.PowerManager.timer_list, dummyentry)
		if not timersanitycheck.check():
			simulTimerList = timersanitycheck.getSimulTimerList()
			if simulTimerList is not None and len(simulTimerList) > 1:
				new_end = simulTimerList[1].begin
				new_end -= 30	# leave 30 seconds of prepare time
		if new_end <= time():
			return False
		self.end = new_end
		return True

	def sendStandbyNotification(self, answer):
		"""MessageBox callback: enter standby when the user agreed."""
		self.messageBoxAnswerPending = False
		if answer:
			session = Screens.Standby.Standby
			option = None
			if InfoBar and InfoBar.instance:
				InfoBar.instance.openInfoBarSession(session, option)
			else:
				Notifications.AddNotification(session)

	def sendTryQuitMainloopNotification(self, answer):
		"""MessageBox callback: shut down (TryQuitMainloop 1) on consent."""
		self.messageBoxAnswerPending = False
		if answer:
			session = Screens.Standby.TryQuitMainloop
			option = 1
			if InfoBar and InfoBar.instance:
				InfoBar.instance.openInfoBarSession(session, option)
			else:
				Notifications.AddNotification(session, option)

	def sendTryToRebootNotification(self, answer):
		"""MessageBox callback: reboot (TryQuitMainloop 2) on consent."""
		if answer:
			session = Screens.Standby.TryQuitMainloop
			option = 2
			if InfoBar and InfoBar.instance:
				InfoBar.instance.openInfoBarSession(session, option)
			else:
				Notifications.AddNotification(session, option)

	def sendTryToRestartNotification(self, answer):
		"""MessageBox callback: restart GUI (TryQuitMainloop 3) on consent."""
		if answer:
			session = Screens.Standby.TryQuitMainloop
			option = 3
			if InfoBar and InfoBar.instance:
				InfoBar.instance.openInfoBarSession(session, option)
			else:
				Notifications.AddNotification(session, option)

	def keyPressed(self, key, tag):
		"""Any-key hook for auto timers: restart the idle countdown."""
		if self.getAutoSleepWindow():
			self.begin = self.end = int(time()) + int(self.autosleepdelay)*60

	def getAutoSleepWindow(self):
		"""Check/normalize the auto-sleep time window.

		Rolls the window forward day by day when it lies in the past and
		returns False (after unbinding the key hook and resetting the
		state) when 'now' is outside the window.
		"""
		now = time()
		if self.autosleepwindow == 'yes':
			if now < self.autosleepbegin and now < self.autosleepend:
				self.begin = self.autosleepbegin
				self.end = self.autosleepend
			elif now > self.autosleepbegin and now > self.autosleepend:
				# window has passed - move it forward in whole days
				while self.autosleepend < now:
					self.autosleepend += 86400
				while self.autosleepbegin + 86400 < self.autosleepend:
					self.autosleepbegin += 86400
				self.begin = self.autosleepbegin
				self.end = self.autosleepend
			if not (now > self.autosleepbegin - self.prepare_time - 3 and now < self.autosleepend):
				eActionMap.getInstance().unbindAction('', self.keyPressed)
				self.state = 0
				self.timeChanged()
				return False
		return True

	def getPriorityCheck(self,prioPT,prioPTae):
		"""Check upcoming power timers against the given priority lists.

		Returns (shiftPT, breakPT): shiftPT when a 'faketime' placeholder
		entry was found, breakPT when another timer of higher priority
		starts or ends within the next 15 minutes.
		"""
		shiftPT = breakPT = False
		nextPTlist = NavigationInstance.instance.PowerTimer.getNextPowerManagerTime(getNextTimerTyp = True)
		for entry in nextPTlist:
			#check timers that will start or end within the next 15 mins
			if abs(entry[0] - time()) > 900:
				continue
			#faketime
			if entry[1] is None and entry[2] is None and entry[3] is None:
				if debug: print "shift#2 - entry is faketime", ctime(entry[0]), entry
				shiftPT = True
				continue
			#is timer in list itself?
			if entry[0] == self.begin and entry[1] == self.timerType and entry[2] is None and entry[3] == self.state \
				or entry[0] == self.end and entry[1] is None and entry[2] == self.afterEvent and entry[3] == self.state:
				if debug: print "entry is itself", ctime(entry[0]), entry
				nextPTitself = True
			else:
				nextPTitself = False
			if (entry[1] in prioPT or entry[2] in prioPTae) and not nextPTitself:
				if debug: print "break#2 <= 900", ctime(entry[0]), entry
				breakPT = True
				break
		return shiftPT, breakPT

	def getNextActivation(self):
		"""Return the wall-clock time of the next state transition."""
		if self.state == self.StateEnded or self.state == self.StateFailed:
			return self.end
		next_state = self.state + 1
		return {self.StatePrepared: self.start_prepare,
				self.StateRunning: self.begin,
				self.StateEnded: self.end }[next_state]

	def getNextWakeup(self, getNextStbPowerOn = False):
		"""Return the next wakeup time for deep-standby scheduling.

		With getNextStbPowerOn the wakeup/wakeuptostandby cases are
		resolved, including the next weekday occurrence for repeated
		timers (encoded as a bitmask in self.repeated); -1 means none.
		"""
		next_state = self.state + 1
		if getNextStbPowerOn:
			if next_state == 3 and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY or self.afterEvent == AFTEREVENT.WAKEUP or self.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY):
				if self.start_prepare > time() and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY): #timer start time is later as now - begin time was changed while running timer
					return self.start_prepare
				elif self.begin > time() and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY): #timer start time is later as now - begin time was changed while running timer
					return self.begin
				if self.afterEvent == AFTEREVENT.WAKEUP or self.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY:
					return self.end
				next_day = 0
				count_day = 0
				# scan the repeat bitmask (as a binary string) backwards
				# from today's weekday to find the next scheduled day
				wd_timer = datetime.fromtimestamp(self.begin).isoweekday()*-1
				wd_repeated = bin(128+self.repeated)
				for s in range(wd_timer-1,-8,-1):
					count_day +=1
					if int(wd_repeated[s]):
						next_day = s
						break
				if next_day == 0:
					for s in range(-1,wd_timer-1,-1):
						count_day +=1
						if int(wd_repeated[s]):
							next_day = s
							break
				#return self.begin + 86400 * count_day
				return self.start_prepare + 86400 * count_day
			elif next_state == 2 and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY):
				return self.begin
			elif next_state == 1 and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY):
				return self.start_prepare
			elif next_state < 3 and (self.afterEvent == AFTEREVENT.WAKEUP or self.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY):
				return self.end
			else:
				return -1
		if self.state == self.StateEnded or self.state == self.StateFailed:
			return self.end
		return {self.StatePrepared: self.start_prepare,
				self.StateRunning: self.begin,
				self.StateEnded: self.end}[next_state]

	def timeChanged(self):
		"""Recompute start_prepare after begin/end changed; reset backoff."""
		old_prepare = self.start_prepare
		self.start_prepare = self.begin - self.prepare_time
		self.backoff = 0
		if int(old_prepare) > 60 and int(old_prepare) != int(self.start_prepare):
			self.log(15, "time changed, start prepare is now: %s" % ctime(self.start_prepare))

	def getNetworkAdress(self):
		"""Return True when any configured IP answers a single ping.

		Used to keep the box awake while known hosts are online.
		"""
		ret = False
		if self.netip == 'yes':
			try:
				for ip in self.ipadress.split(','):
					# ping exits 0 on success, hence the 'not'
					if not os.system("ping -q -w1 -c1 " + ip):
						ret = True
						break
			except:
				print '[PowerTimer] Error reading ip! -> %s' % self.ipadress
		return ret

	def getNetworkTraffic(self, getInitialValue = False):
		"""Return True while network traffic exceeds the configured limit.

		Reads eth*/wlan* receive+transmit byte counters from
		/proc/net/dev; with getInitialValue the baseline is stored and
		nothing is compared yet.
		"""
		now = time()
		newbytes = 0
		if self.nettraffic == 'yes':
			try:
				if os.path.exists('/proc/net/dev'):
					f = open('/proc/net/dev', 'r')
					temp = f.readlines()
					f.close()
					for lines in temp:
						lisp = lines.split()
						if lisp[0].endswith(':') and (lisp[0].startswith('eth') or lisp[0].startswith('wlan')):
							# columns 1 and 9 are RX and TX bytes
							newbytes += long(lisp[1]) + long(lisp[9])
					if getInitialValue:
						self.netbytes = newbytes
						self.netbytes_time = now
						print '[PowerTimer] Receive/Transmit initialBytes=%d, time is %s' % (self.netbytes, ctime(self.netbytes_time))
						return
					oldbytes = self.netbytes
					seconds = int(now-self.netbytes_time)
					self.netbytes = newbytes
					self.netbytes_time = now
					diffbytes = float(newbytes - oldbytes) * 8 / 1024 / seconds #in kbit/s
					if diffbytes < 0:
						print '[PowerTimer] Receive/Transmit -> overflow interface counter, waiting for next value'
						return True
					else:
						print '[PowerTimer] Receive/Transmit kilobits per second: %0.2f (%0.2f MByte in %d seconds), actualBytes=%d, time is %s' % (diffbytes, diffbytes/8/1024*seconds, seconds, self.netbytes, ctime(self.netbytes_time))
					if diffbytes > self.trafficlimit:
						return True
			except:
				print '[PowerTimer] Receive/Transmit Bytes: Error reading values! Use "cat /proc/net/dev" for testing on command line.'
		return False
def createTimer(xml):
	"""Build a PowerTimerEntry from a <timer> element of pm_timers.xml.

	Most attributes fall back to defaults when absent; 'repeated' has no
	fallback and will raise AttributeError if the attribute is missing
	(NOTE(review): presumably it is always written by saveTimer - verify).
	"""
	timertype = str(xml.get("timertype") or "wakeup")
	timertype = {
		"nothing": TIMERTYPE.NONE,
		"wakeup": TIMERTYPE.WAKEUP,
		"wakeuptostandby": TIMERTYPE.WAKEUPTOSTANDBY,
		"autostandby": TIMERTYPE.AUTOSTANDBY,
		"autodeepstandby": TIMERTYPE.AUTODEEPSTANDBY,
		"standby": TIMERTYPE.STANDBY,
		"deepstandby": TIMERTYPE.DEEPSTANDBY,
		"reboot": TIMERTYPE.REBOOT,
		"restart": TIMERTYPE.RESTART
		}[timertype]
	begin = int(xml.get("begin"))
	end = int(xml.get("end"))
	repeated = xml.get("repeated").encode("utf-8")
	disabled = long(xml.get("disabled") or "0")
	afterevent = str(xml.get("afterevent") or "nothing")
	afterevent = {
		"nothing": AFTEREVENT.NONE,
		"wakeup": AFTEREVENT.WAKEUP,
		"wakeuptostandby": AFTEREVENT.WAKEUPTOSTANDBY,
		"standby": AFTEREVENT.STANDBY,
		"deepstandby": AFTEREVENT.DEEPSTANDBY
		}[afterevent]
	autosleepinstandbyonly = str(xml.get("autosleepinstandbyonly") or "no")
	autosleepdelay = str(xml.get("autosleepdelay") or "0")
	autosleeprepeat = str(xml.get("autosleeprepeat") or "once")
	autosleepwindow = str(xml.get("autosleepwindow") or "no")
	autosleepbegin = int(xml.get("autosleepbegin") or begin)
	autosleepend = int(xml.get("autosleepend") or end)
	nettraffic = str(xml.get("nettraffic") or "no")
	trafficlimit = int(xml.get("trafficlimit") or 100)
	netip = str(xml.get("netip") or "no")
	ipadress = str(xml.get("ipadress") or "0.0.0.0")
	entry = PowerTimerEntry(begin, end, disabled, afterevent, timertype)
	entry.repeated = int(repeated)
	# transfer the optional auto-sleep / network-guard attributes
	entry.autosleepinstandbyonly = autosleepinstandbyonly
	entry.autosleepdelay = int(autosleepdelay)
	entry.autosleeprepeat = autosleeprepeat
	entry.autosleepwindow = autosleepwindow
	entry.autosleepbegin = autosleepbegin
	entry.autosleepend = autosleepend
	entry.nettraffic = nettraffic
	entry.trafficlimit = trafficlimit
	entry.netip = netip
	entry.ipadress = ipadress
	# restore the persisted log entries
	for l in xml.findall("log"):
		ltime = int(l.get("time"))
		code = int(l.get("code"))
		msg = l.text.strip().encode("utf-8")
		entry.log_entries.append((ltime, code, msg))
	return entry
class PowerTimer(timer.Timer):
def __init__(self):
	"""Initialise the power timer list and load pm_timers.xml from config."""
	timer.Timer.__init__(self)
	self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "pm_timers.xml")
	try:
		self.loadTimer()
	except IOError:
		print "unable to load timers from file!"
def doActivate(self, w):
	"""Advance timer entry *w* one state and re-sort it into the lists."""
	# when activating a timer which has already passed,
	# simply abort the timer. don't run through all the stages.
	if w.shouldSkip():
		w.state = PowerTimerEntry.StateEnded
	else:
		# when activate returns true, this means "accepted".
		# otherwise, the current state is kept.
		# the timer entry itself will fix up the delay then.
		if w.activate():
			w.state += 1
	try:
		self.timer_list.remove(w)
	except:
		print '[PowerManager]: Remove list failed'
	# has this timer reached the last state?
	if w.state < PowerTimerEntry.StateEnded:
		# no, sort it into active list
		insort(self.timer_list, w)
	else:
		# yes. Process repeated, and re-add.
		if w.repeated:
			w.processRepeated()
			w.state = PowerTimerEntry.StateWaiting
			self.addTimerEntry(w)
		else:
			# Remove old timers as set in config
			self.cleanupDaily(config.recording.keep_timers.value)
			insort(self.processed_timers, w)
	self.stateChanged(w)
def loadTimer(self):
	"""Parse pm_timers.xml and register every timer it contains.

	A corrupt file is renamed to *_old and reported via popup; a popup is
	also shown (once) when at least one loaded timer overlaps another.
	"""
	# TODO: PATH!
	if not Directories.fileExists(self.Filename):
		return
	try:
		file = open(self.Filename, 'r')
		doc = xml.etree.cElementTree.parse(file)
		file.close()
	except SyntaxError:
		from Tools.Notifications import AddPopup
		from Screens.MessageBox import MessageBox
		AddPopup(_("The timer file (pm_timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
		print "pm_timers.xml failed to load!"
		try:
			import os
			# keep the broken file around for inspection
			os.rename(self.Filename, self.Filename + "_old")
		except (IOError, OSError):
			print "renaming broken timer failed"
		return
	except IOError:
		print "pm_timers.xml not found!"
		return
	root = doc.getroot()
	# put out a message when at least one timer overlaps
	checkit = True
	for timer in root.findall("timer"):
		newTimer = createTimer(timer)
		if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
			from Tools.Notifications import AddPopup
			from Screens.MessageBox import MessageBox
			AddPopup(_("Timer overlap in pm_timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
			checkit = False # at moment it is enough when the message is displayed one time
def saveTimer(self):
savedays = 3600 * 24 * 7 #logs older 7 Days will not saved
list = ['<?xml version="1.0" ?>\n', '<timers>\n']
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' timertype="' + str(stringToXML({
TIMERTYPE.NONE: "nothing",
TIMERTYPE.WAKEUP: "wakeup",
TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
TIMERTYPE.AUTOSTANDBY: "autostandby",
TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
TIMERTYPE.STANDBY: "standby",
TIMERTYPE.DEEPSTANDBY: "deepstandby",
TIMERTYPE.REBOOT: "reboot",
TIMERTYPE.RESTART: "restart"
}[timer.timerType])) + '"')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.WAKEUP: "wakeup",
AFTEREVENT.WAKEUPTOSTANDBY: "wakeuptostandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby"
}[timer.afterEvent])) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' autosleepinstandbyonly="' + str(timer.autosleepinstandbyonly) + '"')
list.append(' autosleepdelay="' + str(timer.autosleepdelay) + '"')
list.append(' autosleeprepeat="' + str(timer.autosleeprepeat) + '"')
list.append(' autosleepwindow="' + str(timer.autosleepwindow) + '"')
list.append(' autosleepbegin="' + str(int(timer.autosleepbegin)) + '"')
list.append(' autosleepend="' + str(int(timer.autosleepend)) + '"')
list.append(' nettraffic="' + str(timer.nettraffic) + '"')
list.append(' trafficlimit="' + str(int(timer.trafficlimit)) + '"')
list.append(' netip="' + str(timer.netip) + '"')
list.append(' ipadress="' + str(timer.ipadress) + '"')
list.append('>\n')
for ltime, code, msg in timer.log_entries:
if ltime > time() - savedays:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(ltime) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def isAutoDeepstandbyEnabled(self):
ret = True
now = time()
for timer in self.timer_list:
if timer.timerType == TIMERTYPE.AUTODEEPSTANDBY:
if timer.begin <= now + 900:
ret = not (timer.getNetworkTraffic() or timer.getNetworkAdress())
elif timer.autosleepwindow == 'yes':
ret = timer.autosleepbegin <= now + 900
if not ret:
break
return ret
def isProcessing(self, exceptTimer = None, endedTimer = None):
isRunning = False
for timer in self.timer_list:
if timer.timerType != TIMERTYPE.AUTOSTANDBY and timer.timerType != TIMERTYPE.AUTODEEPSTANDBY and timer.timerType != exceptTimer and timer.timerType != endedTimer:
if timer.isRunning():
isRunning = True
break
return isRunning
def getNextZapTime(self):
now = time()
for timer in self.timer_list:
if timer.begin < now:
continue
return timer.begin
return -1
def getNextPowerManagerTimeOld(self, getNextStbPowerOn = False):
now = int(time())
nextPTlist = [(-1,None,None,None)]
for timer in self.timer_list:
if timer.timerType != TIMERTYPE.AUTOSTANDBY and timer.timerType != TIMERTYPE.AUTODEEPSTANDBY:
next_act = timer.getNextWakeup(getNextStbPowerOn)
if next_act + 3 < now:
continue
if getNextStbPowerOn and debug:
print "[powertimer] next stb power up", strftime("%a, %Y/%m/%d %H:%M", localtime(next_act))
next_timertype = next_afterevent = None
if nextPTlist[0][0] == -1:
if abs(next_act - timer.begin) <= 30:
next_timertype = timer.timerType
elif abs(next_act - timer.end) <= 30:
next_afterevent = timer.afterEvent
nextPTlist = [(next_act,next_timertype,next_afterevent,timer.state)]
else:
if abs(next_act - timer.begin) <= 30:
next_timertype = timer.timerType
elif abs(next_act - timer.end) <= 30:
next_afterevent = timer.afterEvent
nextPTlist.append((next_act,next_timertype,next_afterevent,timer.state))
nextPTlist.sort()
return nextPTlist
def getNextPowerManagerTime(self, getNextStbPowerOn = False, getNextTimerTyp = False):
#getNextStbPowerOn = True returns tuple -> (timer.begin, set standby)
#getNextTimerTyp = True returns next timer list -> [(timer.begin, timer.timerType, timer.afterEvent, timer.state)]
global DSsave, RSsave, RBsave, aeDSsave
nextrectime = self.getNextPowerManagerTimeOld(getNextStbPowerOn)
faketime = int(time()) + 300
if getNextStbPowerOn:
if config.timeshift.isRecording.value:
if 0 < nextrectime[0][0] < faketime:
return nextrectime[0][0], int(nextrectime[0][1] == 2 or nextrectime[0][2] == 2)
else:
return faketime, 0
else:
return nextrectime[0][0], int(nextrectime[0][1] == 2 or nextrectime[0][2] == 2)
elif getNextTimerTyp:
#check entrys and plausibility of shift state (manual canceled timer has shift/save state not reset)
tt = ae = []
now = time()
if debug: print "+++++++++++++++"
for entry in nextrectime:
if entry[0] < now + 900: tt.append(entry[1])
if entry[0] < now + 900: ae.append(entry[2])
if debug: print ctime(entry[0]), entry
if not TIMERTYPE.RESTART in tt: RSsave = False
if not TIMERTYPE.REBOOT in tt: RBsave = False
if not TIMERTYPE.DEEPSTANDBY in tt: DSsave = False
if not AFTEREVENT.DEEPSTANDBY in ae: aeDSsave = False
if debug: print "RSsave=%s, RBsave=%s, DSsave=%s, aeDSsave=%s, wasTimerWakeup=%s" %(RSsave, RBsave, DSsave, aeDSsave, wasTimerWakeup)
if debug: print "+++++++++++++++"
###
if config.timeshift.isRecording.value:
if 0 < nextrectime[0][0] < faketime:
return nextrectime
else:
nextrectime.append((faketime,None,None,None))
nextrectime.sort()
return nextrectime
else:
return nextrectime
else:
if config.timeshift.isRecording.value:
if 0 < nextrectime[0][0] < faketime:
return nextrectime[0][0]
else:
return faketime
else:
return nextrectime[0][0]
def isNextPowerManagerAfterEventActionAuto(self):
for timer in self.timer_list:
if timer.timerType == TIMERTYPE.WAKEUPTOSTANDBY or timer.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY or timer.timerType == TIMERTYPE.WAKEUP or timer.afterEvent == AFTEREVENT.WAKEUP:
return True
return False
def record(self, entry, ignoreTSC=False, dosave=True): #wird von loadTimer mit dosave=False aufgerufen
entry.timeChanged()
print "[PowerTimer]",str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def removeEntry(self, entry):
print "[PowerTimer] Remove",str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
# print "state: ", entry.state
# print "in processed: ", entry in self.processed_timers
# print "in running: ", entry in self.timer_list
# disable timer first
if entry.state != 3:
entry.disable()
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
if entry in self.processed_timers:
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
| idrogeno/FusionOE | PowerTimer.py | Python | gpl-2.0 | 44,704 | [
"ASE"
] | 16d3f2c2bdd0e2c7793d24bf2dd271eee0830158340b4ba537ac56d57ee0b171 |
#! /usr/bin/env python
# ===========================================================================================#
# This script tests the offset angle dependence of the instrumental response function.
#
# ===========================================================================================#
from gammalib import *
# ====================== #
# Set point source model #
# ====================== #
def ptsrc_model(ra=0.0, dec=0.0):
    """
    Set point source model with a power-law spectrum.

    Parameters:
     ra  - Right Ascension of the source in degrees
     dec - Declination of the source in degrees
    """
    # Set source position
    pos = GSkyDir()
    pos.radec_deg(ra, dec)
    # Set spatial model
    spatial = GModelSpatialPtsrc(pos)
    # Set spectral model (prefactor 1.0, index -2.0)
    spectral = GModelSpectralPlaw(1.0, -2.0)
    # Set sky model
    model = GModelPointSource(spatial, spectral)
    # Return model
    return model
# =============== #
# Set shell model #
# =============== #
def shell_model(ra=0.3, dec=0.3, radius=0.3, width=0.1):
    """
    Build a radial shell model with a power-law spectrum.

    Parameters:
     ra     - Right Ascension of the shell centre in degrees
     dec    - Declination of the shell centre in degrees
     radius - Inner shell radius in degrees
     width  - Shell width in degrees
    """
    # Place the shell centre on the sky
    centre = GSkyDir()
    centre.radec_deg(ra, dec)
    # Shell morphology (last argument False: no small-angle approximation flag)
    morphology = GModelRadialShell(centre, radius, width, False)
    # Power-law spectrum (prefactor 1.0, index -2.0)
    spectrum = GModelSpectralPlaw(1.0, -2.0)
    # Combine morphology and spectrum into an extended-source sky model
    return GModelExtendedSource(morphology, spectrum)
# =============== #
# Set disk model #
# =============== #
def disk_model(ra=359.6, dec=-0.2, radius=0.4):
    """
    Build a radial disk model with a power-law spectrum.

    Parameters:
     ra     - Right Ascension of the disk centre in degrees
     dec    - Declination of the disk centre in degrees
     radius - Disk radius in degrees
    """
    # Place the disk centre on the sky
    centre = GSkyDir()
    centre.radec_deg(ra, dec)
    # Disk morphology and power-law spectrum (prefactor 1.0, index -2.0)
    morphology = GModelRadialDisk(centre, radius)
    spectrum = GModelSpectralPlaw(1.0, -2.0)
    # Combine into an extended-source sky model
    return GModelExtendedSource(morphology, spectrum)
# ================== #
# Set Gaussian model #
# ================== #
def gauss_model(ra=359.6, dec=+0.1, sigma=0.2):
    """
    Build a radial Gaussian model with a power-law spectrum.

    Parameters:
     ra    - Right Ascension of the Gaussian centre in degrees
     dec   - Declination of the Gaussian centre in degrees
     sigma - Gaussian width in degrees
    """
    # Place the Gaussian centre on the sky
    centre = GSkyDir()
    centre.radec_deg(ra, dec)
    # Gaussian morphology and power-law spectrum (prefactor 1.0, index -2.0)
    morphology = GModelRadialGauss(centre, sigma)
    spectrum = GModelSpectralPlaw(1.0, -2.0)
    # Combine into an extended-source sky model
    return GModelExtendedSource(morphology, spectrum)
# ========================== #
# Set binned CTA observation #
# ========================== #
def observation(ra=0.0, dec=0.0, binsz=0.05, npix=200, ebins=10):
    """
    Set up a binned CTA observation.

    Parameters:
     ra    - Pointing Right Ascension in degrees
     dec   - Pointing Declination in degrees
     binsz - Spatial bin size in degrees
     npix  - Number of spatial pixels per axis
     ebins - Number of logarithmic energy bins
    """
    # Allocate observation
    obs = GCTAObservation()
    # Set instrument response function from the local calibration database
    obs.response("kb_E_50h_v3", "../caldb")
    # Set pointing direction (renamed to avoid shadowing the 'dir' builtin)
    pnt_dir = GSkyDir()
    pnt = GCTAPointing()
    pnt_dir.radec_deg(ra, dec)
    pnt.dir(pnt_dir)
    obs.pointing(pnt)
    # Set energy boundaries: 0.1 - 100 TeV with logarithmic binning
    ebounds = GEbounds()
    emin = GEnergy()
    emax = GEnergy()
    emin.TeV(0.1)
    emax.TeV(100.0)
    ebounds.setlog(emin, emax, ebins)
    # Set a single good time interval of 1800 seconds
    gti = GGti()
    tmin = GTime()
    tmax = GTime()
    tmin.met(0.0)
    tmax.met(1800.0)
    gti.append(tmin, tmax)
    # Build the counts cube (renamed to avoid shadowing the 'map' builtin)
    sky_map = GSkymap("CAR", "CEL", ra, dec, -binsz, binsz, npix, npix, ebins)
    cube = GCTAEventCube(sky_map, ebounds, gti)
    obs.events(cube)
    # Optionally show observation
    # print obs
    # Return observation
    return obs
# ================ #
# Create model map #
# ================ #
def modmap(obs, models, phi=0, theta=0, filename="modmap.fits"):
    """
    Fill the event cube of ``obs`` with the summed IRF of ``models`` and
    save it to ``filename``.

    NOTE(review): ``phi`` and ``theta`` are accepted but never used — confirm
    whether an offset-angle-dependent IRF call was intended here.

    Parameters:
     obs      - Binned GCTAObservation whose event cube is filled in place
     models   - Iterable of sky models whose IRFs are summed per bin
     phi      - Unused (kept for interface compatibility)
     theta    - Unused (kept for interface compatibility)
     filename - Output FITS file name (overwritten if it exists)
    """
    # Loop over all bins of the event cube
    for event in obs.events():
        # Cast generic event bin to a CTA event bin
        cta_bin = cast_GCTAEventBin(event)
        # Use the bin's own energy and time as source energy and time,
        # i.e. no energy or time dispersion is applied.
        src_eng = cta_bin.energy()
        src_time = cta_bin.time()
        # Sum IRF contributions of all models, weighted by the bin size
        irf = 0.0
        for model in models:
            irf += obs.response().irf(cta_bin, model, src_eng, src_time, obs) * cta_bin.size()
        # Store the summed IRF as the bin content
        cta_bin.counts(irf)
    # Save observation (clobber existing file)
    obs.save(filename, True)
    # Return
    return
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
    """
    Test offset angle dependence of IRF.
    """
    # Dump header
    print
    print "***************************************"
    print "* Test offset angle dependence of IRF *"
    print "***************************************"
    # Select model set: 1 = point sources, 2 = disks.
    # NOTE(review): 'set' shadows the builtin of the same name.
    set = 2
    # Set CTA observation
    obs = observation()
    print obs
    # Set offset angle range (only 0.0 is currently active)
    # offsets = [0.0, 1.0, 2.0, 3.0]
    offsets = [0.0]
    # Loop over offset angles
    for offset in offsets:
        # Set models: one source at the offset declination plus four sources
        # spread along the RA axis.
        if set == 1:
            model1 = ptsrc_model(ra=0.0, dec=offset)
            model2 = ptsrc_model(ra=1.0, dec=0.0)
            model3 = ptsrc_model(ra=2.0, dec=0.0)
            model4 = ptsrc_model(ra=3.0, dec=0.0)
            model5 = ptsrc_model(ra=4.0, dec=0.0)
            models = [model1, model2, model3, model4, model5]
        elif set == 2:
            model1 = disk_model(ra=0.0, dec=offset)
            model2 = disk_model(ra=1.0, dec=0.0)
            model3 = disk_model(ra=2.0, dec=0.0)
            model4 = disk_model(ra=3.0, dec=0.0)
            model5 = disk_model(ra=4.0, dec=0.0)
            models = [model1, model2, model3, model4, model5]
        # model = shell_model(ra=0.0, dec=offset)
        # model = disk_model(ra=0.0, dec=offset)
        # model = gauss_model(ra=0.0, dec=offset)
        # Print model
        # print model
        # Set filename encoding the offset in tenths of a degree
        filename = "modmap_theta%2.2d.fits" % (int(offset * 10.0))
        # Create model map
        modmap(obs, models, phi=0.0, theta=0.0, filename=filename)
| cdeil/gammalib | inst/cta/test/test_irf_offset.py | Python | gpl-3.0 | 5,601 | [
"Gaussian"
] | 8f40a8a9d0f579792a635e18acc406108490cd94782fa7d35c12b26f8f6ca4e7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.