"""
Boiler Pre-treatment for Process Heating
At the moment, process heat is excluded from the optimization.
Whenever process heat is required, the most competitive alternative is assumed to be a dedicated natural gas boiler.
"""
import pandas as pd
from cea.technologies import boiler
from cea.technologies.constants import BOILER_ETA_HP
from cea.constants import HOURS_IN_YEAR, WH_TO_J
def calc_pareto_Qhp(locator, total_demand, prices, lca):
"""
This function calculates the contribution of process heating to the pareto-optimal results.
:param locator: input locator pointing to the scenario files
:type locator: cea.inputlocator.InputLocator
:param total_demand: dataframe with the building demand totals
:type total_demand: pandas.DataFrame
:param prices: energy prices (the natural gas price is used here)
:param lca: emission and primary-energy conversion factors
:return: hpCosts, hpCO2, hpPrim
:rtype: tuple
"""
hpCosts = 0
hpCO2 = 0
hpPrim = 0
boiler_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="Boiler")
if total_demand["Qhpro_sys_MWhyr"].sum() > 0:
    df = total_demand[total_demand.Qhpro_sys_MWhyr != 0]
    for name in df.Name:
# Extract process heat needs
Qhpro_sys_kWh = pd.read_csv(locator.get_demand_results_file(name), usecols=["Qhpro_sys_kWh"]).Qhpro_sys_kWh.values
Qnom_Wh = 0
Qannual_Wh = 0
# Operation costs / CO2 / Prim
for i in range(HOURS_IN_YEAR):
Qgas_Wh = Qhpro_sys_kWh[i] * 1E3 / BOILER_ETA_HP  # [Wh] gas input at boiler efficiency BOILER_ETA_HP
if Qgas_Wh > Qnom_Wh:
    Qnom_Wh = Qgas_Wh  # track the peak hourly load; this becomes the nominal boiler capacity
Qannual_Wh += Qgas_Wh
hpCosts += Qgas_Wh * prices.NG_PRICE # [CHF]
hpCO2 += Qgas_Wh * WH_TO_J / 1.0E6 * lca.NG_BACKUPBOILER_TO_CO2_STD / 1E3 # [ton CO2]
hpPrim += Qgas_Wh * WH_TO_J / 1.0E6 * lca.NG_BACKUPBOILER_TO_OIL_STD # [MJ-oil-eq]
# Investment costs
Capex_a_hp_USD, Opex_fixed_hp_USD, Capex_hp_USD = boiler.calc_Cinv_boiler(Qnom_Wh, 'BO1', boiler_cost_data)
hpCosts += (Capex_a_hp_USD + Opex_fixed_hp_USD)
else:
hpCosts = hpCO2 = hpPrim = 0
return hpCosts, hpCO2, hpPrim
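# Illustrative sketch (not part of CEA): the loop above keeps the peak hourly
# gas draw as the nominal boiler capacity. With a hypothetical hourly profile:
if __name__ == '__main__':
    demo_Qhpro_kWh = [0.0, 5.0, 12.5, 3.0]  # hypothetical process heat demand [kWh]
    eta = 0.9  # assumed boiler efficiency (stands in for BOILER_ETA_HP)
    Qnom_Wh_demo = max(q * 1E3 / eta for q in demo_Qhpro_kWh)
    print(Qnom_Wh_demo)  # ~13889 Wh: the peak hour sizes the boiler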
|
"""Get storage details for a virtual server."""
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""Get storage details for a virtual server."""
vsi = SoftLayer.VSManager(env.client)
vsi_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
iscsi_storage_data = vsi.get_storage_details(vsi_id, "ISCSI")
nas_storage_data = vsi.get_storage_details(vsi_id, "NAS")
storage_credentials = vsi.get_storage_credentials(vsi_id)
portable_storage = vsi.get_portable_storage(vsi_id)
local_disks = vsi.get_local_disks(vsi_id)
table_credentials = formatting.Table(['Username', 'Password', 'IQN'], title="Block Storage Details \n iSCSI")
if storage_credentials:
table_credentials.add_row([storage_credentials['credential']['username'],
storage_credentials['credential']['password'],
storage_credentials['name']])
table_iscsi = formatting.Table(['LUN name', 'capacity', 'Target address', 'Location', 'Notes'])
for iscsi in iscsi_storage_data:
table_iscsi.add_row([iscsi['username'], iscsi['capacityGb'],
iscsi['serviceResourceBackendIpAddress'],
iscsi['allowedVirtualGuests'][0]['datacenter']['longName'],
iscsi.get('notes', None)])
table_portable = formatting.Table(['Description', 'Capacity'], title="Portable Storage")
for portable in portable_storage:
table_portable.add_row([portable.get('description', None), portable.get('capacity', None)])
table_nas = formatting.Table(['Volume name', 'capacity', 'Host Name', 'Location', 'Notes'],
title="File Storage Details")
for nas in nas_storage_data:
table_nas.add_row([nas['username'], nas['capacityGb'],
nas['serviceResourceBackendIpAddress'],
nas['allowedVirtualGuests'][0]['datacenter']['longName'],
nas.get('notes', None)])
table_local_disks = get_local_storage_table(local_disks)
table_local_disks.title = "Other storage details"
env.fout(table_credentials)
env.fout(table_iscsi)
env.fout(table_portable)
env.fout(table_nas)
env.fout(table_local_disks)
def get_local_type(disks):
"""Returns the virtual server local disk type.
:param disks: virtual server local disks.
"""
disk_type = 'System'
if 'SWAP' in disks.get('diskImage', {}).get('description', []):
disk_type = 'Swap'
return disk_type
def get_local_storage_table(local_disks):
"""Returns a formatting local disk table
:param local_disks: virtual server local disks.
"""
table_local_disks = formatting.Table(['Type', 'Name', 'Drive', 'Capacity'])
for disk in local_disks:
if 'diskImage' in disk:
table_local_disks.add_row([
get_local_type(disk),
disk['mountType'],
disk['device'],
"{capacity} {unit}".format(capacity=disk['diskImage']['capacity'],
unit=disk['diskImage']['units'])
])
return table_local_disks
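# Usage sketch (identifier is hypothetical): in the SoftLayer CLI this command
# is invoked roughly as `slcli vs storage 12345678`. get_local_storage_table()
# expects disk dicts shaped like the API response, e.g.:
#   {'mountType': 'Disk', 'device': '0',
#    'diskImage': {'description': 'os disk', 'capacity': 25, 'units': 'GB'}}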
|
"""
Class for representing hierarchical language structures, such as
syntax trees and morphological trees.
"""
from __future__ import print_function, unicode_literals
import re
from nltk.grammar import Production, Nonterminal
from nltk.probability import ProbabilisticMixIn
from nltk.util import slice_bounds
from nltk.compat import string_types, python_2_unicode_compatible, unicode_repr
from nltk.internals import raise_unorderable_types
@python_2_unicode_compatible
class Tree(list):
"""
A Tree represents a hierarchical grouping of leaves and subtrees.
For example, each constituent in a syntax tree is represented by a single Tree.
A tree's children are encoded as a list of leaves and subtrees,
where a leaf is a basic (non-tree) value; and a subtree is a
nested Tree.
>>> from nltk.tree import Tree
>>> print(Tree(1, [2, Tree(3, [4]), 5]))
(1 2 (3 4) 5)
>>> vp = Tree('VP', [Tree('V', ['saw']),
... Tree('NP', ['him'])])
>>> s = Tree('S', [Tree('NP', ['I']), vp])
>>> print(s)
(S (NP I) (VP (V saw) (NP him)))
>>> print(s[1])
(VP (V saw) (NP him))
>>> print(s[1,1])
(NP him)
>>> t = Tree("(S (NP I) (VP (V saw) (NP him)))")
>>> s == t
True
>>> t[1][1].node = "X"
>>> print(t)
(S (NP I) (VP (V saw) (X him)))
>>> t[0], t[1,1] = t[1,1], t[0]
>>> print(t)
(S (X him) (VP (V saw) (NP I)))
The length of a tree is the number of children it has.
>>> len(t)
2
Any other properties that a Tree defines are known as node
properties, and are used to add information about individual
hierarchical groupings. For example, syntax trees use a NODE
property to label syntactic constituents with phrase tags, such as
"NP" and "VP".
Several Tree methods use "tree positions" to specify
children or descendants of a tree. Tree positions are defined as
follows:
- The tree position *i* specifies a Tree's *i*\ th child.
- The tree position ``()`` specifies the Tree itself.
- If *p* is the tree position of descendant *d*, then
*p+i* specifies the *i*\ th child of *d*.
I.e., every tree position is either a single index *i*,
specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*,
specifying ``tree[i1][i2]...[iN]``.
Construct a new tree. This constructor can be called in one
of two ways:
- ``Tree(node, children)`` constructs a new tree with the
specified node value and list of children.
- ``Tree(s)`` constructs a new tree by parsing the string ``s``.
It is equivalent to calling the class method ``Tree.parse(s)``.
"""
def __init__(self, node_or_str, children=None):
if children is None:
if not isinstance(node_or_str, string_types):
raise TypeError("%s: Expected a node value and child list "
"or a single string" % type(self).__name__)
tree = type(self).parse(node_or_str)
list.__init__(self, tree)
self.node = tree.node
elif isinstance(children, string_types):
raise TypeError("%s() argument 2 should be a list, not a "
"string" % type(self).__name__)
else:
list.__init__(self, children)
self.node = node_or_str
#////////////////////////////////////////////////////////////
# Comparison operators
#////////////////////////////////////////////////////////////
def __eq__(self, other):
return (self.__class__ is other.__class__ and
(self.node, list(self)) == (other.node, list(other)))
def __lt__(self, other):
if not isinstance(other, Tree):
# raise_unorderable_types("<", self, other)
# Sometimes children can be pure strings,
# so we need to be able to compare with non-trees:
return self.__class__.__name__ < other.__class__.__name__
elif self.__class__ is other.__class__:
return (self.node, list(self)) < (other.node, list(other))
else:
return self.__class__.__name__ < other.__class__.__name__
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ne__ = lambda self, other: not self == other
__gt__ = lambda self, other: not (self < other or self == other)
__le__ = lambda self, other: self < other or self == other
__ge__ = lambda self, other: not self < other
#////////////////////////////////////////////////////////////
# Disabled list operations
#////////////////////////////////////////////////////////////
def __mul__(self, v):
raise TypeError('Tree does not support multiplication')
def __rmul__(self, v):
raise TypeError('Tree does not support multiplication')
def __add__(self, v):
raise TypeError('Tree does not support addition')
def __radd__(self, v):
raise TypeError('Tree does not support addition')
#////////////////////////////////////////////////////////////
# Indexing (with support for tree positions)
#////////////////////////////////////////////////////////////
def __getitem__(self, index):
if isinstance(index, (int, slice)):
return list.__getitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
return self
elif len(index) == 1:
return self[index[0]]
else:
return self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __setitem__(self, index, value):
if isinstance(index, (int, slice)):
return list.__setitem__(self, index, value)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError('The tree position () may not be '
'assigned to.')
elif len(index) == 1:
self[index[0]] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __delitem__(self, index):
if isinstance(index, (int, slice)):
return list.__delitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError('The tree position () may not be deleted.')
elif len(index) == 1:
del self[index[0]]
else:
del self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
#////////////////////////////////////////////////////////////
# Basic tree operations
#////////////////////////////////////////////////////////////
def leaves(self):
"""
Return the leaves of the tree.
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.leaves()
['the', 'dog', 'chased', 'the', 'cat']
:return: a list containing this tree's leaves.
The order reflects the order of the
leaves in the tree's hierarchical structure.
:rtype: list
"""
leaves = []
for child in self:
if isinstance(child, Tree):
leaves.extend(child.leaves())
else:
leaves.append(child)
return leaves
def flatten(self):
"""
Return a flat version of the tree, with all non-root non-terminals removed.
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> print(t.flatten())
(S the dog chased the cat)
:return: a tree consisting of this tree's root connected directly to
its leaves, omitting all intervening non-terminal nodes.
:rtype: Tree
"""
return Tree(self.node, self.leaves())
def height(self):
"""
Return the height of the tree.
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.height()
5
>>> print(t[0,0])
(D the)
>>> t[0,0].height()
2
:return: The height of this tree. The height of a tree
containing no children is 1; the height of a tree
containing only leaves is 2; and the height of any other
tree is one plus the maximum of its children's
heights.
:rtype: int
"""
max_child_height = 0
for child in self:
if isinstance(child, Tree):
max_child_height = max(max_child_height, child.height())
else:
max_child_height = max(max_child_height, 1)
return 1 + max_child_height
def treepositions(self, order='preorder'):
"""
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.treepositions() # doctest: +ELLIPSIS
[(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...]
>>> for pos in t.treepositions('leaves'):
... t[pos] = t[pos][::-1].upper()
>>> print(t)
(S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC))))
:param order: One of: ``preorder``, ``postorder``, ``bothorder``,
``leaves``.
"""
positions = []
if order in ('preorder', 'bothorder'): positions.append( () )
for i, child in enumerate(self):
if isinstance(child, Tree):
childpos = child.treepositions(order)
positions.extend((i,)+p for p in childpos)
else:
positions.append( (i,) )
if order in ('postorder', 'bothorder'): positions.append( () )
return positions
def subtrees(self, filter=None):
"""
Generate all the subtrees of this tree, optionally restricted
to trees matching the filter function.
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> for s in t.subtrees(lambda t: t.height() == 2):
... print(s)
(D the)
(N dog)
(V chased)
(D the)
(N cat)
:type filter: function
:param filter: the function to filter all local trees
"""
if not filter or filter(self):
yield self
for child in self:
if isinstance(child, Tree):
for subtree in child.subtrees(filter):
yield subtree
def productions(self):
"""
Generate the productions that correspond to the non-terminal nodes of the tree.
For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the
form P -> C1 C2 ... Cn.
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.productions()
[S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased',
NP -> D N, D -> 'the', N -> 'cat']
:rtype: list(Production)
"""
if not isinstance(self.node, string_types):
raise TypeError('Productions can only be generated from trees having node labels that are strings')
prods = [Production(Nonterminal(self.node), _child_names(self))]
for child in self:
if isinstance(child, Tree):
prods += child.productions()
return prods
def pos(self):
"""
Return a sequence of pos-tagged words extracted from the tree.
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.pos()
[('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')]
:return: a list of tuples containing leaves and pre-terminals (part-of-speech tags).
The order reflects the order of the leaves in the tree's hierarchical structure.
:rtype: list(tuple)
"""
pos = []
for child in self:
if isinstance(child, Tree):
pos.extend(child.pos())
else:
pos.append((child, self.node))
return pos
def leaf_treeposition(self, index):
"""
:return: The tree position of the ``index``-th leaf in this
tree. I.e., if ``tp=self.leaf_treeposition(i)``, then
``self[tp]==self.leaves()[i]``.
:raise IndexError: If this tree contains fewer than ``index+1``
leaves, or if ``index<0``.
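For example:
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.leaf_treeposition(2)
(1, 0, 0)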
"""
if index < 0: raise IndexError('index must be non-negative')
stack = [(self, ())]
while stack:
value, treepos = stack.pop()
if not isinstance(value, Tree):
if index == 0: return treepos
else: index -= 1
else:
for i in range(len(value)-1, -1, -1):
stack.append( (value[i], treepos+(i,)) )
raise IndexError('index must be less than or equal to len(self)')
def treeposition_spanning_leaves(self, start, end):
"""
:return: The tree position of the lowest descendant of this
tree that dominates ``self.leaves()[start:end]``.
:raise ValueError: if ``end <= start``
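For example:
>>> t = Tree("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.treeposition_spanning_leaves(3, 5)
(1, 1)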
"""
if end <= start:
raise ValueError('end must be greater than start')
# Find the tree positions of the start & end leaves, and
# take the longest common subsequence.
start_treepos = self.leaf_treeposition(start)
end_treepos = self.leaf_treeposition(end-1)
# Find the first index where they mismatch:
for i in range(len(start_treepos)):
if i == len(end_treepos) or start_treepos[i] != end_treepos[i]:
return start_treepos[:i]
return start_treepos
#////////////////////////////////////////////////////////////
# Transforms
#////////////////////////////////////////////////////////////
def chomsky_normal_form(self, factor = "right", horzMarkov = None, vertMarkov = 0, childChar = "|", parentChar = "^"):
"""
This method can modify a tree in three ways:
1. Convert a tree into its Chomsky Normal Form (CNF)
equivalent -- Every subtree has either two non-terminals
or one terminal as its children. This process requires
the creation of more "artificial" non-terminal nodes.
2. Markov (vertical) smoothing of children in new artificial
nodes
3. Horizontal (parent) annotation of nodes
:param factor: Right or left factoring method (default = "right")
:type factor: str = [left|right]
:param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings)
:type horzMarkov: int | None
:param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation)
:type vertMarkov: int | None
:param childChar: A string used in construction of the artificial nodes, separating the head of the
original subtree from the child nodes that have yet to be expanded (default = "|")
:type childChar: str
:param parentChar: A string used to separate the node representation from its vertical annotation
:type parentChar: str
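Illustrative sketch (the exact artificial node names depend on the
settings above)::
    (A (B b) (C c) (D d))  ->  (A (B b) (A|<C-D> (C c) (D d)))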
"""
from .treetransforms import chomsky_normal_form
chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar)
def un_chomsky_normal_form(self, expandUnary = True, childChar = "|", parentChar = "^", unaryChar = "+"):
"""
This method modifies the tree in three ways:
1. Transforms a tree in Chomsky Normal Form back to its
original structure (branching greater than two)
2. Removes any parent annotation (if it exists)
3. (optional) expands unary subtrees (if previously
collapsed with collapseUnary(...) )
:param expandUnary: Flag to expand unary or not (default = True)
:type expandUnary: bool
:param childChar: A string separating the head node from its children in an artificial node (default = "|")
:type childChar: str
:param parentChar: A string separating the node label from its parent annotation (default = "^")
:type parentChar: str
:param unaryChar: A string joining two non-terminals in a unary production (default = "+")
:type unaryChar: str
"""
from .treetransforms import un_chomsky_normal_form
un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar)
def collapse_unary(self, collapsePOS = False, collapseRoot = False, joinChar = "+"):
"""
Collapse subtrees with a single child (ie. unary productions)
into a new non-terminal (Tree node) joined by 'joinChar'.
This is useful when working with algorithms that do not allow
unary productions, and completely removing the unary productions
would require loss of useful information. The Tree is modified
directly (since it is passed by reference) and no value is returned.
:param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie.
Part-of-Speech tags) since they are always unary productions
:type collapsePOS: bool
:param collapseRoot: 'False' (default) will not modify the root production
if it is unary. For the Penn WSJ treebank corpus, this corresponds
to the ``TOP -> ...`` productions.
:type collapseRoot: bool
:param joinChar: A string used to connect collapsed node values (default = "+")
:type joinChar: str
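Illustrative sketch (defaults; the root and POS nodes are left alone)::
    (S (A (B (C c) (D d))) (E e))  ->  (S (A+B (C c) (D d)) (E e))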
"""
from .treetransforms import collapse_unary
collapse_unary(self, collapsePOS, collapseRoot, joinChar)
#////////////////////////////////////////////////////////////
# Convert, copy
#////////////////////////////////////////////////////////////
@classmethod
def convert(cls, tree):
"""
Convert a tree between different subtypes of Tree. ``cls`` determines
which class will be used to encode the new tree.
:type tree: Tree
:param tree: The tree that should be converted.
:return: The new Tree.
"""
if isinstance(tree, Tree):
children = [cls.convert(child) for child in tree]
return cls(tree.node, children)
else:
return tree
def copy(self, deep=False):
if not deep: return type(self)(self.node, self)
else: return type(self).convert(self)
def _frozen_class(self): return ImmutableTree
def freeze(self, leaf_freezer=None):
frozen_class = self._frozen_class()
if leaf_freezer is None:
newcopy = frozen_class.convert(self)
else:
newcopy = self.copy(deep=True)
for pos in newcopy.treepositions('leaves'):
newcopy[pos] = leaf_freezer(newcopy[pos])
newcopy = frozen_class.convert(newcopy)
hash(newcopy) # Make sure the leaves are hashable.
return newcopy
#////////////////////////////////////////////////////////////
# Parsing
#////////////////////////////////////////////////////////////
@classmethod
def parse(cls, s, brackets='()', parse_node=None, parse_leaf=None,
node_pattern=None, leaf_pattern=None,
remove_empty_top_bracketing=False):
"""
Parse a bracketed tree string and return the resulting tree.
Trees are represented as nested brackettings, such as::
(S (NP (NNP John)) (VP (V runs)))
:type s: str
:param s: The string to parse
:type brackets: str (length=2)
:param brackets: The bracket characters used to mark the
beginning and end of trees and subtrees.
:type parse_node: function
:type parse_leaf: function
:param parse_node, parse_leaf: If specified, these functions
are applied to the substrings of ``s`` corresponding to
nodes and leaves (respectively) to obtain the values for
those nodes and leaves. They should have the following
signature:
parse_node(str) -> value
For example, these functions could be used to parse nodes
and leaves whose values should be some type other than
string (such as ``FeatStruct``).
Note that by default, node strings and leaf strings are
delimited by whitespace and brackets; to override this
default, use the ``node_pattern`` and ``leaf_pattern``
arguments.
:type node_pattern: str
:type leaf_pattern: str
:param node_pattern, leaf_pattern: Regular expression patterns
used to find node and leaf substrings in ``s``. By
default, both patterns are defined to match any
sequence of non-whitespace non-bracket characters.
:type remove_empty_top_bracketing: bool
:param remove_empty_top_bracketing: If the resulting tree has
an empty node label, and is length one, then return its
single child instead. This is useful for treebank trees,
which sometimes contain an extra level of bracketing.
:return: A tree corresponding to the string representation ``s``.
If this class method is called using a subclass of Tree,
then it will return a tree of that type.
:rtype: Tree
"""
if not isinstance(brackets, string_types) or len(brackets) != 2:
raise TypeError('brackets must be a length-2 string')
if re.search(r'\s', brackets):
    raise TypeError('whitespace brackets not allowed')
# Construct a regexp that will tokenize the string.
open_b, close_b = brackets
open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b))
if node_pattern is None:
    node_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
if leaf_pattern is None:
    leaf_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
token_re = re.compile(r'%s\s*(%s)?|%s|(%s)' % (
    open_pattern, node_pattern, close_pattern, leaf_pattern))
# Walk through each token, updating a stack of trees.
stack = [(None, [])] # list of (node, children) tuples
for match in token_re.finditer(s):
token = match.group()
# Beginning of a tree/subtree
if token[0] == open_b:
if len(stack) == 1 and len(stack[0][1]) > 0:
cls._parse_error(s, match, 'end-of-string')
node = token[1:].lstrip()
if parse_node is not None: node = parse_node(node)
stack.append((node, []))
# End of a tree/subtree
elif token == close_b:
if len(stack) == 1:
if len(stack[0][1]) == 0:
cls._parse_error(s, match, open_b)
else:
cls._parse_error(s, match, 'end-of-string')
node, children = stack.pop()
stack[-1][1].append(cls(node, children))
# Leaf node
else:
if len(stack) == 1:
cls._parse_error(s, match, open_b)
if parse_leaf is not None: token = parse_leaf(token)
stack[-1][1].append(token)
# check that we got exactly one complete tree.
if len(stack) > 1:
cls._parse_error(s, 'end-of-string', close_b)
elif len(stack[0][1]) == 0:
cls._parse_error(s, 'end-of-string', open_b)
else:
assert stack[0][0] is None
assert len(stack[0][1]) == 1
tree = stack[0][1][0]
# If the tree has an extra level with node='', then get rid of
# it. E.g.: "((S (NP ...) (VP ...)))"
if remove_empty_top_bracketing and tree.node == '' and len(tree) == 1:
tree = tree[0]
# return the tree.
return tree
@classmethod
def _parse_error(cls, s, match, expecting):
"""
Display a friendly error message when parsing a tree string fails.
:param s: The string we're parsing.
:param match: regexp match of the problem token.
:param expecting: what we expected to see instead.
"""
# Construct a basic error message
if match == 'end-of-string':
pos, token = len(s), 'end-of-string'
else:
pos, token = match.start(), match.group()
msg = '%s.parse(): expected %r but got %r\n%sat index %d.' % (
cls.__name__, expecting, token, ' '*12, pos)
# Add a display showing the error token itself:
s = s.replace('\n', ' ').replace('\t', ' ')
offset = pos
if len(s) > pos+10:
s = s[:pos+10]+'...'
if pos > 10:
s = '...'+s[pos-10:]
offset = 13
msg += '\n%s"%s"\n%s^' % (' '*16, s, ' '*(17+offset))
raise ValueError(msg)
#////////////////////////////////////////////////////////////
# Visualization & String Representation
#////////////////////////////////////////////////////////////
def draw(self):
"""
Open a new window containing a graphical diagram of this tree.
"""
from nltk.draw.tree import draw_trees
draw_trees(self)
def __repr__(self):
childstr = ", ".join(unicode_repr(c) for c in self)
return '%s(%s, [%s])' % (type(self).__name__, unicode_repr(self.node), childstr)
def __str__(self):
return self.pprint()
def pprint(self, margin=70, indent=0, nodesep='', parens='()', quotes=False):
"""
:return: A pretty-printed string representation of this tree.
:rtype: str
:param margin: The right margin at which to do line-wrapping.
:type margin: int
:param indent: The indentation level at which printing
begins. This number is used to decide how far to indent
subsequent lines.
:type indent: int
:param nodesep: A string that is used to separate the node
from the children. E.g., the default value ``':'`` gives
trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``.
"""
# Try writing it on one line.
s = self._pprint_flat(nodesep, parens, quotes)
if len(s)+indent < margin:
return s
# If it doesn't fit on one line, then write it on multi-lines.
if isinstance(self.node, string_types):
s = '%s%s%s' % (parens[0], self.node, nodesep)
else:
s = '%s%s%s' % (parens[0], unicode_repr(self.node), nodesep)
for child in self:
if isinstance(child, Tree):
s += '\n'+' '*(indent+2)+child.pprint(margin, indent+2,
nodesep, parens, quotes)
elif isinstance(child, tuple):
s += '\n'+' '*(indent+2)+ "/".join(child)
elif isinstance(child, string_types) and not quotes:
s += '\n'+' '*(indent+2)+ '%s' % child
else:
s += '\n'+' '*(indent+2)+ unicode_repr(child)
return s+parens[1]
def pprint_latex_qtree(self):
r"""
Returns a representation of the tree compatible with the
LaTeX qtree package. This consists of the string ``\Tree``
followed by the parse tree represented in bracketed notation.
For example, the following result was generated from a parse tree of
the sentence ``The announcement astounded us``::
\Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
[.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
See http://www.ling.upenn.edu/advice/latex.html for the LaTeX
style file for the qtree package.
:return: A latex qtree representation of this tree.
:rtype: str
"""
reserved_chars = re.compile(r'([#\$%&~_\{\}])')
pprint = self.pprint(indent=6, nodesep='', parens=('[.', ' ]'))
return r'\Tree ' + re.sub(reserved_chars, r'\\\1', pprint)
def _pprint_flat(self, nodesep, parens, quotes):
childstrs = []
for child in self:
if isinstance(child, Tree):
childstrs.append(child._pprint_flat(nodesep, parens, quotes))
elif isinstance(child, tuple):
childstrs.append("/".join(child))
elif isinstance(child, string_types) and not quotes:
childstrs.append('%s' % child)
else:
childstrs.append(unicode_repr(child))
if isinstance(self.node, string_types):
return '%s%s%s %s%s' % (parens[0], self.node, nodesep,
" ".join(childstrs), parens[1])
else:
return '%s%s%s %s%s' % (parens[0], unicode_repr(self.node), nodesep,
" ".join(childstrs), parens[1])
class ImmutableTree(Tree):
def __init__(self, node_or_str, children=None):
super(ImmutableTree, self).__init__(node_or_str, children)
# Precompute our hash value. This ensures that we're really
# immutable. It also means we only have to calculate it once.
try:
self._hash = hash((self.node, tuple(self)))
except (TypeError, ValueError):
raise ValueError("%s: node value and children "
"must be immutable" % type(self).__name__)
def __setitem__(self, index, value):
raise ValueError('%s may not be modified' % type(self).__name__)
def __setslice__(self, i, j, value):
raise ValueError('%s may not be modified' % type(self).__name__)
def __delitem__(self, index):
raise ValueError('%s may not be modified' % type(self).__name__)
def __delslice__(self, i, j):
raise ValueError('%s may not be modified' % type(self).__name__)
def __iadd__(self, other):
raise ValueError('%s may not be modified' % type(self).__name__)
def __imul__(self, other):
raise ValueError('%s may not be modified' % type(self).__name__)
def append(self, v):
raise ValueError('%s may not be modified' % type(self).__name__)
def extend(self, v):
raise ValueError('%s may not be modified' % type(self).__name__)
def pop(self, v=None):
raise ValueError('%s may not be modified' % type(self).__name__)
def remove(self, v):
raise ValueError('%s may not be modified' % type(self).__name__)
def reverse(self):
raise ValueError('%s may not be modified' % type(self).__name__)
def sort(self):
raise ValueError('%s may not be modified' % type(self).__name__)
def __hash__(self):
return self._hash
def _get_node(self):
"""Get the node value"""
return self._node
def _set_node(self, value):
"""
Set the node value. This will only succeed the first time the
node value is set, which should occur in ImmutableTree.__init__().
"""
if hasattr(self, 'node'):
raise ValueError('%s may not be modified' % type(self).__name__)
self._node = value
node = property(_get_node, _set_node)
class AbstractParentedTree(Tree):
"""
An abstract base class for a ``Tree`` that automatically maintains
pointers to parent nodes. These parent pointers are updated
whenever any change is made to a tree's structure. Two subclasses
are currently defined:
- ``ParentedTree`` is used for tree structures where each subtree
has at most one parent. This class should be used in cases
where there is no "sharing" of subtrees.
- ``MultiParentedTree`` is used for tree structures where a
subtree may have zero or more parents. This class should be
used in cases where subtrees may be shared.
Subclassing
===========
The ``AbstractParentedTree`` class redefines all operations that
modify a tree's structure to call two methods, which are used by
subclasses to update parent information:
- ``_setparent()`` is called whenever a new child is added.
- ``_delparent()`` is called whenever a child is removed.
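A minimal subclass sketch (illustrative only; ``ParentedTree`` below is
the real single-parent implementation)::
    class SingleParentSketch(AbstractParentedTree):
        def _setparent(self, child, index, dry_run=False):
            if not dry_run:
                child._parent = self
        def _delparent(self, child, index):
            child._parent = None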
"""
def __init__(self, node_or_str, children=None):
super(AbstractParentedTree, self).__init__(node_or_str, children)
# If children is None, the tree is parsed from node_or_str, and
# all parents will be set during parsing.
if children is not None:
# Otherwise we have to set the parent of the children.
# Iterate over self, and *not* children, because children
# might be an iterator.
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i, dry_run=True)
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i)
#////////////////////////////////////////////////////////////
# Parent management
#////////////////////////////////////////////////////////////
def _setparent(self, child, index, dry_run=False):
"""
Update the parent pointer of ``child`` to point to ``self``. This
method is only called if the type of ``child`` is ``Tree``;
i.e., it is not called when adding a leaf to a tree. This method
is always called before the child is actually added to the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
:raise TypeError: If ``child`` is a tree with an inappropriate
type. Typically, if ``child`` is a tree, then its type needs
to match the type of ``self``. This prevents mixing of
different tree types (single-parented, multi-parented, and
non-parented).
:param dry_run: If true, don't actually set the child's
parent pointer; just check for any error conditions, and
raise an exception if one is found.
"""
raise NotImplementedError()
def _delparent(self, child, index):
"""
Update the parent pointer of ``child`` to not point to self. This
method is only called if the type of ``child`` is ``Tree``; i.e., it
is not called when removing a leaf from a tree. This method
is always called before the child is actually removed from the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
"""
raise NotImplementedError()
#////////////////////////////////////////////////////////////
# Methods that add/remove children
#////////////////////////////////////////////////////////////
# Every method that adds or removes a child must make
# appropriate calls to _setparent() and _delparent().
def __delitem__(self, index):
# del ptree[start:stop]
if isinstance(index, slice):
start, stop, step = slice_bounds(self, index, allow_step=True)
# Clear all the children pointers.
for i in range(start, stop, step):
if isinstance(self[i], Tree):
self._delparent(self[i], i)
# Delete the children from our child list.
super(AbstractParentedTree, self).__delitem__(index)
# del ptree[i]
elif isinstance(index, int):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
# Clear the child's parent pointer.
if isinstance(self[index], Tree):
self._delparent(self[index], index)
# Remove the child from our child list.
super(AbstractParentedTree, self).__delitem__(index)
elif isinstance(index, (list, tuple)):
# del ptree[()]
if len(index) == 0:
raise IndexError('The tree position () may not be deleted.')
# del ptree[(i,)]
elif len(index) == 1:
del self[index[0]]
# del ptree[i1, i2, i3]
else:
del self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __setitem__(self, index, value):
# ptree[start:stop] = value
if isinstance(index, slice):
start, stop, step = slice_bounds(self, index, allow_step=True)
# make a copy of value, in case it's an iterator
if not isinstance(value, (list, tuple)):
value = list(value)
# Check for any error conditions, so we can avoid ending
# up in an inconsistent state if an error does occur.
for i, child in enumerate(value):
if isinstance(child, Tree):
self._setparent(child, start + i*step, dry_run=True)
# clear the child pointers of all parents we're removing
for i in range(start, stop, step):
if isinstance(self[i], Tree):
self._delparent(self[i], i)
# set the child pointers of the new children. We do this
# after clearing *all* child pointers, in case we're e.g.
# reversing the elements in a tree.
for i, child in enumerate(value):
if isinstance(child, Tree):
self._setparent(child, start + i*step)
# finally, update the content of the child list itself.
super(AbstractParentedTree, self).__setitem__(index, value)
# ptree[i] = value
elif isinstance(index, int):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
# if the value is not changing, do nothing.
if value is self[index]:
return
# Set the new child's parent pointer.
if isinstance(value, Tree):
self._setparent(value, index)
# Remove the old child's parent pointer
if isinstance(self[index], Tree):
self._delparent(self[index], index)
# Update our child list.
super(AbstractParentedTree, self).__setitem__(index, value)
elif isinstance(index, (list, tuple)):
# ptree[()] = value
if len(index) == 0:
raise IndexError('The tree position () may not be assigned to.')
# ptree[(i,)] = value
elif len(index) == 1:
self[index[0]] = value
# ptree[i1, i2, i3] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def append(self, child):
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def extend(self, children):
for child in children:
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def insert(self, index, child):
# Handle negative indexes. Note that if index < -len(self),
# we do *not* raise an IndexError, unlike __getitem__. This
# is done for consistency with list.__getitem__ and list.index.
if index < 0: index += len(self)
if index < 0: index = 0
# Set the child's parent, and update our child list.
if isinstance(child, Tree):
self._setparent(child, index)
super(AbstractParentedTree, self).insert(index, child)
def pop(self, index=-1):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
if isinstance(self[index], Tree):
self._delparent(self[index], index)
return super(AbstractParentedTree, self).pop(index)
# n.b.: like `list`, this is done by equality, not identity!
# To remove a specific child, use del ptree[i].
def remove(self, child):
index = self.index(child)
if isinstance(self[index], Tree):
self._delparent(self[index], index)
super(AbstractParentedTree, self).remove(child)
# We need to implement __getslice__ and friends, even though
# they're deprecated, because otherwise list.__getslice__ will get
# called (since we're subclassing from list). Just delegate to
# __getitem__ etc., but use max(0, start) and max(0, stop) because
# because negative indices are already handled *before*
# __getslice__ is called; and we don't want to double-count them.
if hasattr(list, '__getslice__'):
def __getslice__(self, start, stop):
return self.__getitem__(slice(max(0, start), max(0, stop)))
def __delslice__(self, start, stop):
return self.__delitem__(slice(max(0, start), max(0, stop)))
def __setslice__(self, start, stop, value):
return self.__setitem__(slice(max(0, start), max(0, stop)), value)
class ParentedTree(AbstractParentedTree):
"""
A ``Tree`` that automatically maintains parent pointers for
single-parented trees. The following are methods for querying
the structure of a parented tree: ``parent``, ``parent_index``,
``left_sibling``, ``right_sibling``, ``root``, ``treeposition``.
Each ``ParentedTree`` may have at most one parent. In
particular, subtrees may not be shared. Any attempt to reuse a
single ``ParentedTree`` as a child of more than one parent (or
as multiple children of the same parent) will cause a
``ValueError`` exception to be raised.
``ParentedTrees`` should never be used in the same tree as ``Trees``
or ``MultiParentedTrees``. Mixing tree implementations may result
in incorrect parent pointers and in ``TypeError`` exceptions.
"""
def __init__(self, node_or_str, children=None):
self._parent = None
"""The parent of this Tree, or None if it has no parent."""
super(ParentedTree, self).__init__(node_or_str, children)
if children is None:
# If children is None, the tree is parsed from node_or_str.
# After parsing, the parent of the immediate children
# will point to an intermediate tree, not self.
# We fix this by brute force:
for i, child in enumerate(self):
if isinstance(child, Tree):
child._parent = None
self._setparent(child, i)
def _frozen_class(self): return ImmutableParentedTree
#/////////////////////////////////////////////////////////////////
# Methods
#/////////////////////////////////////////////////////////////////
def parent(self):
"""The parent of this tree, or None if it has no parent."""
return self._parent
def parent_index(self):
"""
The index of this tree in its parent. I.e.,
``ptree.parent()[ptree.parent_index()] is ptree``. Note that
``ptree.parent_index()`` is not necessarily equal to
``ptree.parent.index(ptree)``, since the ``index()`` method
returns the first child that is equal to its argument.
"""
if self._parent is None: return None
for i, child in enumerate(self._parent):
if child is self: return i
assert False, 'expected to find self in self._parent!'
def left_sibling(self):
"""The left sibling of this tree, or None if it has none."""
parent_index = self.parent_index()
if self._parent and parent_index > 0:
return self._parent[parent_index-1]
return None # no left sibling
def right_sibling(self):
"""The right sibling of this tree, or None if it has none."""
parent_index = self.parent_index()
if self._parent and parent_index < (len(self._parent)-1):
return self._parent[parent_index+1]
return None # no right sibling
def root(self):
"""
The root of this tree. I.e., the unique ancestor of this tree
whose parent is None. If ``ptree.parent()`` is None, then
``ptree`` is its own root.
"""
root = self
while root.parent() is not None:
root = root.parent()
return root
def treeposition(self):
"""
The tree position of this tree, relative to the root of the
tree. I.e., ``ptree.root[ptree.treeposition] is ptree``.
"""
if self.parent() is None:
return ()
else:
return self.parent().treeposition() + (self.parent_index(),)
#/////////////////////////////////////////////////////////////////
# Parent Management
#/////////////////////////////////////////////////////////////////
def _delparent(self, child, index):
# Sanity checks
assert isinstance(child, ParentedTree)
assert self[index] is child
assert child._parent is self
# Delete child's parent pointer.
child._parent = None
def _setparent(self, child, index, dry_run=False):
# If the child's type is incorrect, then complain.
if not isinstance(child, ParentedTree):
raise TypeError('Can not insert a non-ParentedTree '+
'into a ParentedTree')
# If child already has a parent, then complain.
if child._parent is not None:
raise ValueError('Can not insert a subtree that already '
'has a parent.')
# Set child's parent pointer & index.
if not dry_run:
child._parent = self
class MultiParentedTree(AbstractParentedTree):
"""
A ``Tree`` that automatically maintains parent pointers for
multi-parented trees. The following are methods for querying the
structure of a multi-parented tree: ``parents()``, ``parent_indices()``,
``left_siblings()``, ``right_siblings()``, ``roots``, ``treepositions``.
Each ``MultiParentedTree`` may have zero or more parents. In
particular, subtrees may be shared. If a single
``MultiParentedTree`` is used as multiple children of the same
parent, then that parent will appear multiple times in its
``parents()`` method.
``MultiParentedTrees`` should never be used in the same tree as
``Trees`` or ``ParentedTrees``. Mixing tree implementations may
result in incorrect parent pointers and in ``TypeError`` exceptions.
"""
def __init__(self, node_or_str, children=None):
self._parents = []
"""A list of this tree's parents. This list should not
contain duplicates, even if a parent contains this tree
multiple times."""
super(MultiParentedTree, self).__init__(node_or_str, children)
if children is None:
# If children is None, the tree is parsed from node_or_str.
# After parsing, the parent(s) of the immediate children
# will point to an intermediate tree, not self.
# We fix this by brute force:
for i, child in enumerate(self):
if isinstance(child, Tree):
child._parents = []
self._setparent(child, i)
def _frozen_class(self): return ImmutableMultiParentedTree
#/////////////////////////////////////////////////////////////////
# Methods
#/////////////////////////////////////////////////////////////////
def parents(self):
"""
The set of parents of this tree. If this tree has no parents,
then ``parents`` is the empty set. To check if a tree is used
as multiple children of the same parent, use the
``parent_indices()`` method.
:type: list(MultiParentedTree)
"""
return list(self._parents)
def left_siblings(self):
"""
A list of all left siblings of this tree, in any of its parent
trees. A tree may be its own left sibling if it is used as
multiple contiguous children of the same parent. A tree may
appear multiple times in this list if it is the left sibling
of this tree with respect to multiple parents.
:type: list(MultiParentedTree)
"""
return [parent[index-1]
for (parent, index) in self._get_parent_indices()
if index > 0]
def right_siblings(self):
"""
A list of all right siblings of this tree, in any of its parent
trees. A tree may be its own right sibling if it is used as
multiple contiguous children of the same parent. A tree may
appear multiple times in this list if it is the right sibling
of this tree with respect to multiple parents.
:type: list(MultiParentedTree)
"""
return [parent[index+1]
for (parent, index) in self._get_parent_indices()
if index < (len(parent)-1)]
def _get_parent_indices(self):
return [(parent, index)
for parent in self._parents
for index, child in enumerate(parent)
if child is self]
def roots(self):
"""
The set of all roots of this tree. This set is formed by
tracing all possible parent paths until trees with no parents
are found.
:type: list(MultiParentedTree)
"""
return list(self._get_roots_helper({}).values())
def _get_roots_helper(self, result):
if self._parents:
for parent in self._parents:
parent._get_roots_helper(result)
else:
result[id(self)] = self
return result
def parent_indices(self, parent):
"""
Return a list of the indices where this tree occurs as a child
of ``parent``. If this child does not occur as a child of
``parent``, then the empty list is returned. The following is
always true::
for parent_index in ptree.parent_indices(parent):
parent[parent_index] is ptree
"""
if parent not in self._parents: return []
else: return [index for (index, child) in enumerate(parent)
if child is self]
def treepositions(self, root):
"""
Return a list of all tree positions that can be used to reach
this multi-parented tree starting from ``root``. I.e., the
following is always true::
for treepos in ptree.treepositions(root):
root[treepos] is ptree
"""
if self is root:
return [()]
else:
return [treepos+(index,)
for parent in self._parents
for treepos in parent.treepositions(root)
for (index, child) in enumerate(parent) if child is self]
#/////////////////////////////////////////////////////////////////
# Parent Management
#/////////////////////////////////////////////////////////////////
def _delparent(self, child, index):
# Sanity checks
assert isinstance(child, MultiParentedTree)
assert self[index] is child
assert len([p for p in child._parents if p is self]) == 1
# If the only copy of child in self is at index, then delete
# self from child's parent list.
for i, c in enumerate(self):
if c is child and i != index: break
else:
child._parents.remove(self)
def _setparent(self, child, index, dry_run=False):
# If the child's type is incorrect, then complain.
if not isinstance(child, MultiParentedTree):
raise TypeError('Can not insert a non-MultiParentedTree '+
'into a MultiParentedTree')
# Add self as a parent pointer if it's not already listed.
if not dry_run:
for parent in child._parents:
if parent is self: break
else:
child._parents.append(self)
class ImmutableParentedTree(ImmutableTree, ParentedTree):
pass
class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree):
pass
@python_2_unicode_compatible
class ProbabilisticTree(Tree, ProbabilisticMixIn):
def __init__(self, node_or_str, children=None, **prob_kwargs):
Tree.__init__(self, node_or_str, children)
ProbabilisticMixIn.__init__(self, **prob_kwargs)
# We have to patch up these methods to make them work right:
def _frozen_class(self): return ImmutableProbabilisticTree
def __repr__(self):
return '%s (p=%r)' % (Tree.unicode_repr(self), self.prob())
def __str__(self):
return '%s (p=%.6g)' % (self.pprint(margin=60), self.prob())
def copy(self, deep=False):
if not deep: return type(self)(self.node, self, prob=self.prob())
else: return type(self).convert(self)
@classmethod
def convert(cls, val):
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
if isinstance(val, ProbabilisticMixIn):
return cls(val.node, children, prob=val.prob())
else:
return cls(val.node, children, prob=1.0)
else:
return val
def __eq__(self, other):
return (self.__class__ is other.__class__ and
(self.node, list(self), self.prob()) ==
(other.node, list(other), other.prob()))
def __lt__(self, other):
if not isinstance(other, Tree):
raise_unorderable_types("<", self, other)
if self.__class__ is other.__class__:
return ((self.node, list(self), self.prob()) <
(other.node, list(other), other.prob()))
else:
return self.__class__.__name__ < other.__class__.__name__
@python_2_unicode_compatible
class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn):
def __init__(self, node_or_str, children=None, **prob_kwargs):
ImmutableTree.__init__(self, node_or_str, children)
ProbabilisticMixIn.__init__(self, **prob_kwargs)
self._hash = hash((self.node, tuple(self), self.prob()))
# We have to patch up these methods to make them work right:
def _frozen_class(self): return ImmutableProbabilisticTree
def __repr__(self):
return '%s [%s]' % (Tree.unicode_repr(self), self.prob())
def __str__(self):
return '%s [%s]' % (self.pprint(margin=60), self.prob())
def copy(self, deep=False):
if not deep: return type(self)(self.node, self, prob=self.prob())
else: return type(self).convert(self)
@classmethod
def convert(cls, val):
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
if isinstance(val, ProbabilisticMixIn):
return cls(val.node, children, prob=val.prob())
else:
return cls(val.node, children, prob=1.0)
else:
return val
def _child_names(tree):
names = []
for child in tree:
if isinstance(child, Tree):
names.append(Nonterminal(child.node))
else:
names.append(child)
return names
def bracket_parse(s):
"""
Use Tree.parse(s, remove_empty_top_bracketing=True) instead.
"""
raise NameError("Use Tree.parse(s, remove_empty_top_bracketing=True) instead.")
def sinica_parse(s):
"""
Parse a Sinica Treebank string and return a tree. Trees are represented as nested brackettings,
as shown in the following example (X represents a Chinese character):
S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY)
:return: A tree corresponding to the string representation.
:rtype: Tree
:param s: The string to be converted
:type s: str
"""
tokens = re.split(r'([()| ])', s)
for i in range(len(tokens)):
if tokens[i] == '(':
tokens[i-1], tokens[i] = tokens[i], tokens[i-1] # pull nonterminal inside parens
elif ':' in tokens[i]:
fields = tokens[i].split(':')
if len(fields) == 2: # non-terminal
tokens[i] = fields[1]
else:
tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")"
elif tokens[i] == '|':
tokens[i] = ''
treebank_string = " ".join(tokens)
return Tree.parse(treebank_string, remove_empty_top_bracketing=True)
def demo():
"""
A demonstration showing how Tree objects can be created and used.
This demonstration parses a Tree from a bracketed string and shows
the results of calling several of its methods.
"""
from nltk import tree
# Demonstrate tree parsing.
s = '(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))'
t = Tree(s)
print("Convert bracketed string into tree:")
print(t)
print(t.__repr__())
print("Display tree properties:")
print(t.node) # tree's constituent type
print(t[0]) # tree's first child
print(t[1]) # tree's second child
print(t.height())
print(t.leaves())
print(t[1])
print(t[1,1])
print(t[1,1,0])
# Demonstrate tree modification.
the_cat = t[0]
the_cat.insert(1, tree.Tree.parse('(JJ big)'))
print("Tree modification:")
print(t)
t[1,1,1] = tree.Tree.parse('(NN cake)')
print(t)
print()
# Tree transforms
print("Collapse unary:")
t.collapse_unary()
print(t)
print("Chomsky normal form:")
t.chomsky_normal_form()
print(t)
print()
# Demonstrate probabilistic trees.
pt = tree.ProbabilisticTree('x', ['y', 'z'], prob=0.5)
print("Probabilistic Tree:")
print(pt)
print()
# Demonstrate parsing of treebank output format.
t = tree.Tree.parse(t.pprint())
print("Convert tree to bracketed string and back again:")
print(t)
print()
# Demonstrate LaTeX output
print("LaTeX output:")
print(t.pprint_latex_qtree())
print()
# Demonstrate Productions
print("Production output:")
print(t.productions())
print()
# Demonstrate tree nodes containing objects other than strings
t.node = ('test', 3)
print(t)
__all__ = ['ImmutableProbabilisticTree', 'ImmutableTree', 'ProbabilisticMixIn',
'ProbabilisticTree', 'Tree', 'bracket_parse',
'sinica_parse', 'ParentedTree', 'MultiParentedTree',
'ImmutableParentedTree', 'ImmutableMultiParentedTree']
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
__version__ = '1.3.1'
|
from senmolib.components.fusion import Fusion
import numpy as np
class exampleFusion(Fusion):
    def fuse(self, arr):
        print(len(arr))  # debug: number of samples received from the pipeline
        hr_sp = np.ediff1d(arr)  # spacing between consecutive samples
        bpm_mv_avg = 100 / np.average(hr_sp) * 60  # average spacing -> beats per minute
        return bpm_mv_avg
exampleFusion(10, True).start()
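# Illustrative run (assuming arr holds beat timestamps in units where
# 100 units == 1 second): for arr = [0, 50, 100, 150], ediff1d yields
# [50, 50, 50], the average spacing is 50 units (0.5 s per beat), and
# fuse() returns 100 / 50 * 60 = 120 bpm.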
|
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
Internally ``MediaFile`` uses ``MediaField`` descriptors to access the
data from the tags. In turn ``MediaField`` uses a number of
``StorageStyle`` strategies to handle format specific logic.
"""
from __future__ import division, absolute_import, print_function
import mutagen
import mutagen.id3
import mutagen.mp3
import mutagen.mp4
import mutagen.flac
import mutagen.asf
import mutagen._util
import base64
import binascii
import codecs
import datetime
import enum
import functools
import imghdr
import logging
import math
import os
import re
import six
import struct
import traceback
__version__ = '0.9.0'
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
log = logging.getLogger(__name__)
TYPES = {
'mp3': 'MP3',
'aac': 'AAC',
'alac': 'ALAC',
'ogg': 'OGG',
'opus': 'Opus',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
'asf': 'Windows Media',
'aiff': 'AIFF',
'dsf': 'DSD Stream File',
'wav': 'WAVE',
}
PREFERRED_IMAGE_EXTENSIONS = {'jpeg': 'jpg'}
class UnreadableFileError(Exception):
"""Mutagen is not able to extract information from the file.
"""
def __init__(self, filename, msg):
Exception.__init__(self, msg if msg else repr(filename))
class FileTypeError(UnreadableFileError):
"""Reading this type of file is not supported.
If passed the `mutagen_type` argument this indicates that the
mutagen type is not supported by `MediaFile`.
"""
def __init__(self, filename, mutagen_type=None):
if mutagen_type is None:
msg = u'{0!r}: not in a recognized format'.format(filename)
else:
msg = u'{0}: of mutagen type {1}'.format(
repr(filename), mutagen_type
)
Exception.__init__(self, msg)
class MutagenError(UnreadableFileError):
"""Raised when Mutagen fails unexpectedly---probably due to a bug.
"""
def __init__(self, filename, mutagen_exc):
msg = u'{0}: {1}'.format(repr(filename), mutagen_exc)
Exception.__init__(self, msg)
def mutagen_call(action, filename, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `filename` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(filename, six.text_type(exc))
except UnreadableFileError:
# Reraise our errors without changes.
# Used in case of decorating functions (e.g. by `loadfile`).
raise
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(filename, exc)
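# Hypothetical usage: wrap any Mutagen entry point so callers only ever see
# UnreadableFileError (or its MutagenError subclass), e.g.:
#   mgfile = mutagen_call('open', path, mutagen.File, path, easy=False)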
def loadfile(method=True, writable=False, create=False):
"""A decorator that works like `mutagen._util.loadfile` but with
additional error handling.
Opens a file and passes a `mutagen._util.FileThing` to the
decorated function. Should be used as a decorator for functions
using a `filething` parameter.
"""
def decorator(func):
f = mutagen._util.loadfile(method, writable, create)(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return mutagen_call('loadfile', '', f, *args, **kwargs)
return wrapper
return decorator
def _update_filething(filething):
"""Reopen a `filething` if it's a local file.
A filething that is *not* an actual file is left unchanged; a
filething with a filename is reopened and a new object is returned.
"""
if filething.filename:
return mutagen._util.FileThing(
None, filething.filename, filething.name
)
else:
return filething
def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception.
If the value does not exist, return None. Or, if the value
can't be converted, then a sensible default value is returned.
out_type should be bool, int, or unicode; otherwise, the value
is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
elif not isinstance(val, six.string_types):
val = six.text_type(val)
# Get a number from the front of the string.
match = re.match(r'[\+-]?[0-9]+', val.strip())
return int(match.group(0)) if match else 0
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == six.text_type:
if isinstance(val, bytes):
return val.decode('utf-8', 'ignore')
elif isinstance(val, six.text_type):
return val
else:
return six.text_type(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
else:
val = six.text_type(val)
match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)',
val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val
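# A minimal sketch (an addition, not part of the original module) of how
# `_safe_cast` behaves for each supported `out_type`. The helper below is
# illustrative only and is never called at import time.
def _safe_cast_examples():
    assert _safe_cast(int, None) is None        # missing values stay None
    assert _safe_cast(int, u'12/2') == 12       # leading digits of a string
    assert _safe_cast(int, u'junk') == 0        # unparseable -> default 0
    assert _safe_cast(bool, u'1') is True       # "1"/"0" map to booleans
    assert _safe_cast(float, b'-3.5 dB') == -3.5
    assert _safe_cast(six.text_type, b'abc') == u'abc'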
def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from('<bi', data)
    pos = 5
    # The MIME type and description are null-terminated UTF-16-LE strings;
    # scan two bytes at a time for the 16-bit terminator.
    mime = b''
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = b''
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le"))
def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data
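# An illustrative round-trip sketch (an addition, not original code):
# packing an image with `_pack_asf_image` and unpacking the result with
# `_unpack_asf_image` should preserve all four components.
def _asf_image_roundtrip_example():
    packed = _pack_asf_image(u'image/jpeg', b'\xff\xd8payload', type=3,
                             description=u'front cover')
    mime, data, kind, desc = _unpack_asf_image(packed)
    assert (mime, data, kind, desc) == (u'image/jpeg', b'\xff\xd8payload',
                                        3, u'front cover')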
def _sc_decode(soundcheck):
"""Convert a Sound Check bytestring value to a (gain, peak) tuple as
used by ReplayGain.
"""
# We decode binary data. If one of the formats gives us a text
# string, interpret it as UTF-8.
if isinstance(soundcheck, six.text_type):
soundcheck = soundcheck.encode('utf-8')
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError, binascii.Error):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum and minimum value here, which is equivalent to
# about -18.2dB and 30.0dB.
g1 = int(min(round((10 ** (gain / -10)) * 1000), 65534)) or 1
# Same as above, except our reference level is 2500 units.
g2 = int(min(round((10 ** (gain / -10)) * 2500), 65534)) or 1
    # The purpose of these values is unknown, but they also seem to be
    # unused, so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, int(peak), int(peak), uk, uk)
return (u' %08X' * 10) % values
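# Round-trip sketch (illustrative only, not part of the original module):
# gain/peak values survive an encode/decode cycle up to the rounding
# performed by `_sc_decode` (two decimal places for gain, six for peak).
def _soundcheck_roundtrip_example():
    gain, peak = _sc_decode(_sc_encode(-6.0, 0.9))
    return gain, peak  # -> approximately (-6.0, 0.899994)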
def _imghdr_what_wrapper(data):
"""A wrapper around imghdr.what to account for jpeg files that can only be
identified as such using their magic bytes
See #1545
See https://github.com/file/file/blob/master/magic/Magdir/jpeg#L12
"""
# imghdr.what returns none for jpegs with only the magic bytes, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
return imghdr.what(None, h=data) or _wider_test_jpeg(data)
def _wider_test_jpeg(data):
"""Test for a jpeg file following the UNIX file implementation which
uses the magic bytes rather than just looking for the bytes that
represent 'JFIF' or 'EXIF' at a fixed position.
"""
if data[:2] == b'\xff\xd8':
return 'jpeg'
def image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
# This checks for a jpeg file with only the magic bytes (unrecognized by
# imghdr.what). imghdr.what returns none for that type of file, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
kind = _imghdr_what_wrapper(data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
    elif kind is not None:
        return 'image/x-{0}'.format(kind)
    else:
        # The image type could not be determined at all, so there is no
        # MIME type to report.
        return None
def image_extension(data):
ext = _imghdr_what_wrapper(data)
return PREFERRED_IMAGE_EXTENSIONS.get(ext, ext)
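# Detection sketch (illustrative addition): a JPEG carrying only the SOI
# magic bytes is missed by `imghdr.what` but caught by `_wider_test_jpeg`,
# so both helpers above still yield sensible results for it.
def _jpeg_detection_example():
    bare_jpeg = b'\xff\xd8' + b'\x00' * 30
    assert image_mime_type(bare_jpeg) == 'image/jpeg'
    assert image_extension(bare_jpeg) == 'jpg'  # 'jpeg' remapped to 'jpg'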
class ImageType(enum.Enum):
"""Indicates the kind of an `Image` stored in a file's tag.
"""
other = 0
icon = 1
other_icon = 2
front = 3
back = 4
leaflet = 5
media = 6
lead_artist = 7
artist = 8
conductor = 9
group = 10
composer = 11
lyricist = 12
recording_location = 13
recording_session = 14
performance = 15
screen_capture = 16
fish = 17
illustration = 18
artist_logo = 19
publisher_logo = 20
class Image(object):
"""Structure representing image data and metadata that can be
stored and retrieved from tags.
The structure has four properties.
* ``data`` The binary data of the image
* ``desc`` An optional description of the image
* ``type`` An instance of `ImageType` indicating the kind of image
* ``mime_type`` Read-only property that contains the mime type of
the binary data
"""
def __init__(self, data, desc=None, type=None):
assert isinstance(data, bytes)
if desc is not None:
assert isinstance(desc, six.text_type)
self.data = data
self.desc = desc
if isinstance(type, int):
try:
type = list(ImageType)[type]
except IndexError:
log.debug(u"ignoring unknown image type index %s", type)
type = ImageType.other
self.type = type
@property
def mime_type(self):
if self.data:
return image_mime_type(self.data)
@property
def type_index(self):
if self.type is None:
# This method is used when a tag format requires the type
# index to be set, so we return "other" as the default value.
return 0
return self.type.value
class StorageStyle(object):
"""A strategy for storing a value for a certain tag format (or set
of tag formats). This basic StorageStyle describes simple 1:1
mapping from raw values to keys in a Mutagen file object; subclasses
describe more sophisticated translations or format-specific access
strategies.
MediaFile uses a StorageStyle via three methods: ``get()``,
``set()``, and ``delete()``. It passes a Mutagen file object to
each.
Internally, the StorageStyle implements ``get()`` and ``set()``
using two steps that may be overridden by subtypes. To get a value,
the StorageStyle first calls ``fetch()`` to retrieve the value
corresponding to a key and then ``deserialize()`` to convert the raw
Mutagen value to a consumable Python value. Similarly, to set a
field, we call ``serialize()`` to encode the value and then
``store()`` to assign the result into the Mutagen object.
Each StorageStyle type has a class-level `formats` attribute that is
a list of strings indicating the formats that the style applies to.
MediaFile only uses StorageStyles that apply to the correct type for
a given audio file.
"""
formats = ['FLAC', 'OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
'OggFlac', 'APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio']
"""List of mutagen classes the StorageStyle can handle.
"""
def __init__(self, key, as_type=six.text_type, suffix=None,
float_places=2, read_only=False):
"""Create a basic storage strategy. Parameters:
- `key`: The key on the Mutagen file object used to access the
field's data.
- `as_type`: The Python type that the value is stored as
internally (`unicode`, `int`, `bool`, or `bytes`).
- `suffix`: When `as_type` is a string type, append this before
storing the value.
- `float_places`: When the value is a floating-point number and
encoded as a string, the number of digits to store after the
decimal point.
        - `read_only`: When true, writing to this field is disabled.
          The primary use case is handling misnamed fields gracefully.
          This does not block the `delete` method.
"""
self.key = key
self.as_type = as_type
self.suffix = suffix
self.float_places = float_places
self.read_only = read_only
# Convert suffix to correct string type.
if self.suffix and self.as_type is six.text_type \
and not isinstance(self.suffix, six.text_type):
self.suffix = self.suffix.decode('utf-8')
# Getter.
def get(self, mutagen_file):
"""Get the value for the field using this style.
"""
return self.deserialize(self.fetch(mutagen_file))
def fetch(self, mutagen_file):
"""Retrieve the raw value of for this tag from the Mutagen file
object.
"""
try:
return mutagen_file[self.key][0]
except (KeyError, IndexError):
return None
def deserialize(self, mutagen_value):
"""Given a raw value stored on a Mutagen object, decode and
return the represented value.
"""
if self.suffix and isinstance(mutagen_value, six.text_type) \
and mutagen_value.endswith(self.suffix):
return mutagen_value[:-len(self.suffix)]
else:
return mutagen_value
# Setter.
def set(self, mutagen_file, value):
"""Assign the value for the field using this style.
"""
self.store(mutagen_file, self.serialize(value))
def store(self, mutagen_file, value):
"""Store a serialized value in the Mutagen file object.
"""
mutagen_file[self.key] = [value]
def serialize(self, value):
"""Convert the external Python value to a type that is suitable for
storing in a Mutagen file object.
"""
if isinstance(value, float) and self.as_type is six.text_type:
value = u'{0:.{1}f}'.format(value, self.float_places)
value = self.as_type(value)
elif self.as_type is six.text_type:
if isinstance(value, bool):
# Store bools as 1/0 instead of True/False.
value = six.text_type(int(bool(value)))
elif isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
else:
value = six.text_type(value)
else:
value = self.as_type(value)
if self.suffix:
value += self.suffix
return value
def delete(self, mutagen_file):
"""Remove the tag from the file.
"""
if self.key in mutagen_file:
del mutagen_file[self.key]
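# A hedged sketch of the two-step get/set pipeline described in the class
# docstring above. A plain dict stands in for the Mutagen file object,
# since the base StorageStyle only indexes its argument by key.
def _storage_style_pipeline_example():
    style = StorageStyle('REPLAYGAIN_TRACK_GAIN', float_places=2,
                         suffix=u' dB')
    fake_file = {}
    style.set(fake_file, -3.1)  # serialize(), then store()
    assert fake_file['REPLAYGAIN_TRACK_GAIN'] == [u'-3.10 dB']
    assert style.get(fake_file) == u'-3.10'  # fetch(), then deserialize()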
class ListStorageStyle(StorageStyle):
"""Abstract storage style that provides access to lists.
The ListMediaField descriptor uses a ListStorageStyle via two
methods: ``get_list()`` and ``set_list()``. It passes a Mutagen file
object to each.
    Subclasses may override ``fetch`` and ``store``. ``fetch`` must
    return a (possibly empty) list and ``store`` receives a serialized
    list of values as its second argument.
The `serialize` and `deserialize` methods (from the base
`StorageStyle`) are still called with individual values. This class
handles packing and unpacking the values into lists.
"""
def get(self, mutagen_file):
"""Get the first value in the field's value list.
"""
try:
return self.get_list(mutagen_file)[0]
except IndexError:
return None
def get_list(self, mutagen_file):
"""Get a list of all values for the field using this style.
"""
return [self.deserialize(item) for item in self.fetch(mutagen_file)]
def fetch(self, mutagen_file):
"""Get the list of raw (serialized) values.
"""
try:
return mutagen_file[self.key]
except KeyError:
return []
def set(self, mutagen_file, value):
"""Set an individual value as the only value for the field using
this style.
"""
self.set_list(mutagen_file, [value])
def set_list(self, mutagen_file, values):
"""Set all values for the field using this style. `values`
should be an iterable.
"""
self.store(mutagen_file, [self.serialize(value) for value in values])
def store(self, mutagen_file, values):
"""Set the list of all raw (serialized) values for this field.
"""
mutagen_file[self.key] = values
class SoundCheckStorageStyleMixin(object):
"""A mixin for storage styles that read and write iTunes SoundCheck
analysis values. The object must have an `index` field that
indicates which half of the gain/peak pair---0 or 1---the field
represents.
"""
def get(self, mutagen_file):
data = self.fetch(mutagen_file)
if data is not None:
return _sc_decode(data)[self.index]
def set(self, mutagen_file, value):
data = self.fetch(mutagen_file)
if data is None:
gain_peak = [0, 0]
else:
gain_peak = list(_sc_decode(data))
gain_peak[self.index] = value or 0
data = self.serialize(_sc_encode(*gain_peak))
self.store(mutagen_file, data)
class ASFStorageStyle(ListStorageStyle):
"""A general storage style for Windows Media/ASF files.
"""
formats = ['ASF']
def deserialize(self, data):
if isinstance(data, mutagen.asf.ASFBaseAttribute):
data = data.value
return data
class MP4StorageStyle(StorageStyle):
"""A general storage style for MPEG-4 tags.
"""
formats = ['MP4']
def serialize(self, value):
value = super(MP4StorageStyle, self).serialize(value)
if self.key.startswith('----:') and isinstance(value, six.text_type):
value = value.encode('utf-8')
return value
class MP4TupleStorageStyle(MP4StorageStyle):
"""A style for storing values as part of a pair of numbers in an
MPEG-4 file.
"""
def __init__(self, key, index=0, **kwargs):
super(MP4TupleStorageStyle, self).__init__(key, **kwargs)
self.index = index
def deserialize(self, mutagen_value):
items = mutagen_value or []
packing_length = 2
return list(items) + [0] * (packing_length - len(items))
def get(self, mutagen_file):
value = super(MP4TupleStorageStyle, self).get(mutagen_file)[self.index]
if value == 0:
# The values are always present and saved as integers. So we
# assume that "0" indicates it is not set.
return None
else:
return value
def set(self, mutagen_file, value):
if value is None:
value = 0
items = self.deserialize(self.fetch(mutagen_file))
items[self.index] = int(value)
self.store(mutagen_file, items)
def delete(self, mutagen_file):
if self.index == 0:
super(MP4TupleStorageStyle, self).delete(mutagen_file)
else:
self.set(mutagen_file, None)
class MP4ListStorageStyle(ListStorageStyle, MP4StorageStyle):
pass
class MP4SoundCheckStorageStyle(SoundCheckStorageStyleMixin, MP4StorageStyle):
def __init__(self, key, index=0, **kwargs):
super(MP4SoundCheckStorageStyle, self).__init__(key, **kwargs)
self.index = index
class MP4BoolStorageStyle(MP4StorageStyle):
"""A style for booleans in MPEG-4 files. (MPEG-4 has an atom type
specifically for representing booleans.)
"""
def get(self, mutagen_file):
try:
return mutagen_file[self.key]
except KeyError:
return None
def get_list(self, mutagen_file):
raise NotImplementedError(u'MP4 bool storage does not support lists')
def set(self, mutagen_file, value):
mutagen_file[self.key] = value
def set_list(self, mutagen_file, values):
raise NotImplementedError(u'MP4 bool storage does not support lists')
class MP4ImageStorageStyle(MP4ListStorageStyle):
"""Store images as MPEG-4 image atoms. Values are `Image` objects.
"""
def __init__(self, **kwargs):
super(MP4ImageStorageStyle, self).__init__(key='covr', **kwargs)
def deserialize(self, data):
return Image(data)
def serialize(self, image):
if image.mime_type == 'image/png':
kind = mutagen.mp4.MP4Cover.FORMAT_PNG
elif image.mime_type == 'image/jpeg':
kind = mutagen.mp4.MP4Cover.FORMAT_JPEG
else:
            raise ValueError(u'MP4 files only support PNG and JPEG images')
return mutagen.mp4.MP4Cover(image.data, kind)
class MP3StorageStyle(StorageStyle):
"""Store data in ID3 frames.
"""
formats = ['MP3', 'AIFF', 'DSF', 'WAVE']
def __init__(self, key, id3_lang=None, **kwargs):
"""Create a new ID3 storage style. `id3_lang` is the value for
the language field of newly created frames.
"""
self.id3_lang = id3_lang
super(MP3StorageStyle, self).__init__(key, **kwargs)
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].text[0]
except (KeyError, IndexError):
return None
    def store(self, mutagen_file, value):
        # Encoding 3 is mutagen.id3.Encoding.UTF8.
        frame = mutagen.id3.Frames[self.key](encoding=3, text=[value])
        mutagen_file.tags.setall(self.key, [frame])
class MP3PeopleStorageStyle(MP3StorageStyle):
"""Store list of people in ID3 frames.
"""
def __init__(self, key, involvement='', **kwargs):
self.involvement = involvement
super(MP3PeopleStorageStyle, self).__init__(key, **kwargs)
def store(self, mutagen_file, value):
frames = mutagen_file.tags.getall(self.key)
# Try modifying in place.
found = False
for frame in frames:
if frame.encoding == mutagen.id3.Encoding.UTF8:
for pair in frame.people:
if pair[0].lower() == self.involvement.lower():
pair[1] = value
found = True
# Try creating a new frame.
if not found:
frame = mutagen.id3.Frames[self.key](
encoding=mutagen.id3.Encoding.UTF8,
people=[[self.involvement, value]]
)
mutagen_file.tags.add(frame)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
for pair in frame.people:
if pair[0].lower() == self.involvement.lower():
try:
return pair[1]
except IndexError:
return None
class MP3ListStorageStyle(ListStorageStyle, MP3StorageStyle):
"""Store lists of data in multiple ID3 frames.
"""
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].text
except KeyError:
return []
def store(self, mutagen_file, values):
frame = mutagen.id3.Frames[self.key](encoding=3, text=values)
mutagen_file.tags.setall(self.key, [frame])
class MP3UFIDStorageStyle(MP3StorageStyle):
"""Store string data in a UFID ID3 frame with a particular owner.
"""
def __init__(self, owner, **kwargs):
self.owner = owner
super(MP3UFIDStorageStyle, self).__init__('UFID:' + owner, **kwargs)
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].data
except KeyError:
return None
def store(self, mutagen_file, value):
# This field type stores text data as encoded data.
assert isinstance(value, six.text_type)
value = value.encode('utf-8')
frames = mutagen_file.tags.getall(self.key)
        for frame in frames:
            # Replace existing frame data.
            if frame.owner == self.owner:
                frame.data = value
                break
        else:
            # No matching frame exists yet; create a new one.
            frame = mutagen.id3.UFID(owner=self.owner, data=value)
            mutagen_file.tags.setall(self.key, [frame])
class MP3DescStorageStyle(MP3StorageStyle):
"""Store data in a TXXX (or similar) ID3 frame. The frame is
selected based its ``desc`` field.
``attr`` allows to specify name of data accessor property in the frame.
Most of frames use `text`.
``multispec`` specifies if frame data is ``mutagen.id3.MultiSpec``
which means that the data is being packed in the list.
"""
def __init__(self, desc=u'', key='TXXX', attr='text', multispec=True,
**kwargs):
assert isinstance(desc, six.text_type)
self.description = desc
self.attr = attr
self.multispec = multispec
super(MP3DescStorageStyle, self).__init__(key=key, **kwargs)
def store(self, mutagen_file, value):
frames = mutagen_file.tags.getall(self.key)
if self.multispec:
value = [value]
# Try modifying in place.
found = False
for frame in frames:
if frame.desc.lower() == self.description.lower():
setattr(frame, self.attr, value)
frame.encoding = mutagen.id3.Encoding.UTF8
found = True
# Try creating a new frame.
if not found:
frame = mutagen.id3.Frames[self.key](
desc=self.description,
encoding=mutagen.id3.Encoding.UTF8,
**{self.attr: value}
)
if self.id3_lang:
frame.lang = self.id3_lang
mutagen_file.tags.add(frame)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
if not self.multispec:
return getattr(frame, self.attr)
try:
return getattr(frame, self.attr)[0]
except IndexError:
return None
    def delete(self, mutagen_file):
        found_frame = None
        for frame in mutagen_file.tags.getall(self.key):
            if frame.desc.lower() == self.description.lower():
                found_frame = frame
                break
        if found_frame is not None:
            del mutagen_file[found_frame.HashKey]
class MP3ListDescStorageStyle(MP3DescStorageStyle, ListStorageStyle):
def __init__(self, desc=u'', key='TXXX', split_v23=False, **kwargs):
self.split_v23 = split_v23
super(MP3ListDescStorageStyle, self).__init__(
desc=desc, key=key, **kwargs
)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
if mutagen_file.tags.version == (2, 3, 0) and self.split_v23:
return sum((el.split('/') for el in frame.text), [])
else:
return frame.text
return []
def store(self, mutagen_file, values):
self.delete(mutagen_file)
frame = mutagen.id3.Frames[self.key](
desc=self.description,
text=values,
encoding=mutagen.id3.Encoding.UTF8,
)
if self.id3_lang:
frame.lang = self.id3_lang
mutagen_file.tags.add(frame)
class MP3SlashPackStorageStyle(MP3StorageStyle):
"""Store value as part of pair that is serialized as a slash-
separated string.
"""
def __init__(self, key, pack_pos=0, **kwargs):
super(MP3SlashPackStorageStyle, self).__init__(key, **kwargs)
self.pack_pos = pack_pos
def _fetch_unpacked(self, mutagen_file):
data = self.fetch(mutagen_file)
if data:
items = six.text_type(data).split('/')
else:
items = []
packing_length = 2
return list(items) + [None] * (packing_length - len(items))
def get(self, mutagen_file):
return self._fetch_unpacked(mutagen_file)[self.pack_pos]
def set(self, mutagen_file, value):
items = self._fetch_unpacked(mutagen_file)
items[self.pack_pos] = value
if items[0] is None:
items[0] = ''
if items[1] is None:
items.pop() # Do not store last value
self.store(mutagen_file, '/'.join(map(six.text_type, items)))
def delete(self, mutagen_file):
if self.pack_pos == 0:
super(MP3SlashPackStorageStyle, self).delete(mutagen_file)
else:
self.set(mutagen_file, None)
class MP3ImageStorageStyle(ListStorageStyle, MP3StorageStyle):
"""Converts between APIC frames and ``Image`` instances.
The `get_list` method inherited from ``ListStorageStyle`` returns a
list of ``Image``s. Similarly, the `set_list` method accepts a
list of ``Image``s as its ``values`` argument.
"""
def __init__(self):
super(MP3ImageStorageStyle, self).__init__(key='APIC')
self.as_type = bytes
def deserialize(self, apic_frame):
"""Convert APIC frame into Image."""
return Image(data=apic_frame.data, desc=apic_frame.desc,
type=apic_frame.type)
def fetch(self, mutagen_file):
return mutagen_file.tags.getall(self.key)
def store(self, mutagen_file, frames):
mutagen_file.tags.setall(self.key, frames)
def delete(self, mutagen_file):
mutagen_file.tags.delall(self.key)
def serialize(self, image):
"""Return an APIC frame populated with data from ``image``.
"""
assert isinstance(image, Image)
frame = mutagen.id3.Frames[self.key]()
frame.data = image.data
frame.mime = image.mime_type
frame.desc = image.desc or u''
# For compatibility with OS X/iTunes prefer latin-1 if possible.
# See issue #899
try:
frame.desc.encode("latin-1")
except UnicodeEncodeError:
frame.encoding = mutagen.id3.Encoding.UTF16
else:
frame.encoding = mutagen.id3.Encoding.LATIN1
frame.type = image.type_index
return frame
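# Encoding sketch (an illustrative addition): serializing an `Image`
# produces an APIC frame, and the frame encoding falls back to UTF-16 only
# when the description cannot be represented in Latin-1.
def _apic_encoding_example():
    style = MP3ImageStorageStyle()
    frame = style.serialize(Image(b'\xff\xd8', desc=u'front'))
    assert frame.encoding == mutagen.id3.Encoding.LATIN1
    frame = style.serialize(Image(b'\xff\xd8', desc=u'fr\u2665nt'))
    assert frame.encoding == mutagen.id3.Encoding.UTF16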
class MP3SoundCheckStorageStyle(SoundCheckStorageStyleMixin,
MP3DescStorageStyle):
def __init__(self, index=0, **kwargs):
super(MP3SoundCheckStorageStyle, self).__init__(**kwargs)
self.index = index
class ASFImageStorageStyle(ListStorageStyle):
"""Store images packed into Windows Media/ASF byte array attributes.
Values are `Image` objects.
"""
formats = ['ASF']
def __init__(self):
super(ASFImageStorageStyle, self).__init__(key='WM/Picture')
def deserialize(self, asf_picture):
mime, data, type, desc = _unpack_asf_image(asf_picture.value)
return Image(data, desc=desc, type=type)
def serialize(self, image):
pic = mutagen.asf.ASFByteArrayAttribute()
pic.value = _pack_asf_image(image.mime_type, image.data,
type=image.type_index,
description=image.desc or u'')
return pic
class VorbisImageStorageStyle(ListStorageStyle):
"""Store images in Vorbis comments. Both legacy COVERART fields and
modern METADATA_BLOCK_PICTURE tags are supported. Data is
base64-encoded. Values are `Image` objects.
"""
formats = ['OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
'OggFlac']
def __init__(self):
super(VorbisImageStorageStyle, self).__init__(
key='metadata_block_picture'
)
self.as_type = bytes
def fetch(self, mutagen_file):
images = []
if 'metadata_block_picture' not in mutagen_file:
# Try legacy COVERART tags.
if 'coverart' in mutagen_file:
for data in mutagen_file['coverart']:
images.append(Image(base64.b64decode(data)))
return images
for data in mutagen_file["metadata_block_picture"]:
try:
pic = mutagen.flac.Picture(base64.b64decode(data))
except (TypeError, AttributeError):
continue
images.append(Image(data=pic.data, desc=pic.desc,
type=pic.type))
return images
def store(self, mutagen_file, image_data):
# Strip all art, including legacy COVERART.
if 'coverart' in mutagen_file:
del mutagen_file['coverart']
if 'coverartmime' in mutagen_file:
del mutagen_file['coverartmime']
super(VorbisImageStorageStyle, self).store(mutagen_file, image_data)
def serialize(self, image):
"""Turn a Image into a base64 encoded FLAC picture block.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
# Encoding with base64 returns bytes on both Python 2 and 3.
# Mutagen requires the data to be a Unicode string, so we decode
# it before passing it along.
return base64.b64encode(pic.write()).decode('ascii')
class FlacImageStorageStyle(ListStorageStyle):
"""Converts between ``mutagen.flac.Picture`` and ``Image`` instances.
"""
formats = ['FLAC']
def __init__(self):
super(FlacImageStorageStyle, self).__init__(key='')
def fetch(self, mutagen_file):
return mutagen_file.pictures
def deserialize(self, flac_picture):
return Image(data=flac_picture.data, desc=flac_picture.desc,
type=flac_picture.type)
def store(self, mutagen_file, pictures):
"""``pictures`` is a list of mutagen.flac.Picture instances.
"""
mutagen_file.clear_pictures()
for pic in pictures:
mutagen_file.add_picture(pic)
def serialize(self, image):
"""Turn a Image into a mutagen.flac.Picture.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
return pic
def delete(self, mutagen_file):
"""Remove all images from the file.
"""
mutagen_file.clear_pictures()
class APEv2ImageStorageStyle(ListStorageStyle):
"""Store images in APEv2 tags. Values are `Image` objects.
"""
formats = ['APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio', 'OptimFROG']
TAG_NAMES = {
ImageType.other: 'Cover Art (other)',
ImageType.icon: 'Cover Art (icon)',
ImageType.other_icon: 'Cover Art (other icon)',
ImageType.front: 'Cover Art (front)',
ImageType.back: 'Cover Art (back)',
ImageType.leaflet: 'Cover Art (leaflet)',
ImageType.media: 'Cover Art (media)',
ImageType.lead_artist: 'Cover Art (lead)',
ImageType.artist: 'Cover Art (artist)',
ImageType.conductor: 'Cover Art (conductor)',
ImageType.group: 'Cover Art (band)',
ImageType.composer: 'Cover Art (composer)',
ImageType.lyricist: 'Cover Art (lyricist)',
ImageType.recording_location: 'Cover Art (studio)',
ImageType.recording_session: 'Cover Art (recording)',
ImageType.performance: 'Cover Art (performance)',
ImageType.screen_capture: 'Cover Art (movie scene)',
ImageType.fish: 'Cover Art (colored fish)',
ImageType.illustration: 'Cover Art (illustration)',
ImageType.artist_logo: 'Cover Art (band logo)',
ImageType.publisher_logo: 'Cover Art (publisher logo)',
}
def __init__(self):
super(APEv2ImageStorageStyle, self).__init__(key='')
def fetch(self, mutagen_file):
images = []
for cover_type, cover_tag in self.TAG_NAMES.items():
try:
frame = mutagen_file[cover_tag]
text_delimiter_index = frame.value.find(b'\x00')
if text_delimiter_index > 0:
comment = frame.value[0:text_delimiter_index]
comment = comment.decode('utf-8', 'replace')
else:
comment = None
image_data = frame.value[text_delimiter_index + 1:]
images.append(Image(data=image_data, type=cover_type,
desc=comment))
except KeyError:
pass
return images
def set_list(self, mutagen_file, values):
self.delete(mutagen_file)
for image in values:
image_type = image.type or ImageType.other
comment = image.desc or ''
image_data = comment.encode('utf-8') + b'\x00' + image.data
cover_tag = self.TAG_NAMES[image_type]
mutagen_file[cover_tag] = image_data
def delete(self, mutagen_file):
"""Remove all images from the file.
"""
for cover_tag in self.TAG_NAMES.values():
try:
del mutagen_file[cover_tag]
except KeyError:
pass
class MediaField(object):
"""A descriptor providing access to a particular (abstract) metadata
field.
"""
def __init__(self, *styles, **kwargs):
"""Creates a new MediaField.
:param styles: `StorageStyle` instances that describe the strategy
for reading and writing the field in particular
formats. There must be at least one style for
each possible file format.
:param out_type: the type of the value that should be returned when
getting this property.
"""
self.out_type = kwargs.get('out_type', six.text_type)
self._styles = styles
def styles(self, mutagen_file):
"""Yields the list of storage styles of this field that can
handle the MediaFile's format.
"""
for style in self._styles:
if mutagen_file.__class__.__name__ in style.formats:
yield style
def __get__(self, mediafile, owner=None):
out = None
for style in self.styles(mediafile.mgfile):
out = style.get(mediafile.mgfile)
if out:
break
return _safe_cast(self.out_type, out)
def __set__(self, mediafile, value):
if value is None:
value = self._none_value()
for style in self.styles(mediafile.mgfile):
if not style.read_only:
style.set(mediafile.mgfile, value)
def __delete__(self, mediafile):
for style in self.styles(mediafile.mgfile):
style.delete(mediafile.mgfile)
def _none_value(self):
"""Get an appropriate "null" value for this field's type. This
is used internally when setting the field to None.
"""
if self.out_type == int:
return 0
elif self.out_type == float:
return 0.0
elif self.out_type == bool:
return False
elif self.out_type == six.text_type:
return u''
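# Style-selection sketch (illustrative addition): `styles()` filters on
# the name of the Mutagen class, so a file whose class is named 'FLAC'
# only sees styles whose `formats` list includes 'FLAC'.
def _style_selection_example():
    class FLAC(object):  # stand-in carrying the class name styles match on
        pass
    field = MediaField(MP3StorageStyle('TIT2'), StorageStyle('TITLE'))
    assert [s.key for s in field.styles(FLAC())] == ['TITLE']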
class ListMediaField(MediaField):
"""Property descriptor that retrieves a list of multiple values from
a tag.
    Uses the ``get_list`` and ``set_list`` methods of its
    ``StorageStyle`` strategies to do the actual work.
"""
def __get__(self, mediafile, _):
values = []
for style in self.styles(mediafile.mgfile):
values.extend(style.get_list(mediafile.mgfile))
return [_safe_cast(self.out_type, value) for value in values]
def __set__(self, mediafile, values):
for style in self.styles(mediafile.mgfile):
if not style.read_only:
style.set_list(mediafile.mgfile, values)
def single_field(self):
"""Returns a ``MediaField`` descriptor that gets and sets the
first item.
"""
options = {'out_type': self.out_type}
return MediaField(*self._styles, **options)
class DateField(MediaField):
"""Descriptor that handles serializing and deserializing dates
The getter parses value from tags into a ``datetime.date`` instance
and setter serializes such an instance into a string.
For granular access to year, month, and day, use the ``*_field``
methods to create corresponding `DateItemField`s.
"""
def __init__(self, *date_styles, **kwargs):
"""``date_styles`` is a list of ``StorageStyle``s to store and
retrieve the whole date from. The ``year`` option is an
additional list of fallback styles for the year. The year is
always set on this style, but is only retrieved if the main
storage styles do not return a value.
"""
super(DateField, self).__init__(*date_styles)
year_style = kwargs.get('year', None)
if year_style:
self._year_field = MediaField(*year_style)
def __get__(self, mediafile, owner=None):
year, month, day = self._get_date_tuple(mediafile)
if not year:
return None
try:
return datetime.date(
year,
month or 1,
day or 1
)
except ValueError: # Out of range values.
return None
def __set__(self, mediafile, date):
if date is None:
self._set_date_tuple(mediafile, None, None, None)
else:
self._set_date_tuple(mediafile, date.year, date.month, date.day)
def __delete__(self, mediafile):
super(DateField, self).__delete__(mediafile)
if hasattr(self, '_year_field'):
self._year_field.__delete__(mediafile)
def _get_date_tuple(self, mediafile):
"""Get a 3-item sequence representing the date consisting of a
year, month, and day number. Each number is either an integer or
None.
"""
# Get the underlying data and split on hyphens and slashes.
datestring = super(DateField, self).__get__(mediafile, None)
if isinstance(datestring, six.string_types):
datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))
items = re.split('[-/]', six.text_type(datestring))
else:
items = []
# Ensure that we have exactly 3 components, possibly by
# truncating or padding.
items = items[:3]
if len(items) < 3:
items += [None] * (3 - len(items))
# Use year field if year is missing.
if not items[0] and hasattr(self, '_year_field'):
items[0] = self._year_field.__get__(mediafile)
# Convert each component to an integer if possible.
items_ = []
for item in items:
try:
items_.append(int(item))
except (TypeError, ValueError):
items_.append(None)
return items_
def _set_date_tuple(self, mediafile, year, month=None, day=None):
"""Set the value of the field given a year, month, and day
number. Each number can be an integer or None to indicate an
unset component.
"""
if year is None:
self.__delete__(mediafile)
return
date = [u'{0:04d}'.format(int(year))]
if month:
date.append(u'{0:02d}'.format(int(month)))
if month and day:
date.append(u'{0:02d}'.format(int(day)))
date = map(six.text_type, date)
super(DateField, self).__set__(mediafile, u'-'.join(date))
if hasattr(self, '_year_field'):
self._year_field.__set__(mediafile, year)
def year_field(self):
return DateItemField(self, 0)
def month_field(self):
return DateItemField(self, 1)
def day_field(self):
return DateItemField(self, 2)
class DateItemField(MediaField):
"""Descriptor that gets and sets constituent parts of a `DateField`:
the month, day, or year.
"""
def __init__(self, date_field, item_pos):
self.date_field = date_field
self.item_pos = item_pos
def __get__(self, mediafile, _):
return self.date_field._get_date_tuple(mediafile)[self.item_pos]
def __set__(self, mediafile, value):
items = self.date_field._get_date_tuple(mediafile)
items[self.item_pos] = value
self.date_field._set_date_tuple(mediafile, *items)
def __delete__(self, mediafile):
self.__set__(mediafile, None)
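# Parsing sketch (illustrative addition): the tuple logic of
# `DateField._get_date_tuple` splits on hyphens or slashes after dropping
# any time component, then pads to three items. This mirrors, rather than
# calls, the code above.
def _date_tuple_example(datestring=u'2001-10-03T12:00:00'):
    datestring = re.sub(r'[Tt ].*$', '', datestring)  # drop the time part
    items = re.split('[-/]', datestring)[:3]
    items += [None] * (3 - len(items))
    out = []
    for item in items:
        try:
            out.append(int(item))
        except (TypeError, ValueError):
            out.append(None)
    return out  # -> [2001, 10, 3]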
class CoverArtField(MediaField):
"""A descriptor that provides access to the *raw image data* for the
cover image on a file. This is used for backwards compatibility: the
full `ImageListField` provides richer `Image` objects.
    When there are multiple images, we try to pick the one most likely
    to be a front cover.
"""
def __init__(self):
pass
def __get__(self, mediafile, _):
candidates = mediafile.images
if candidates:
return self.guess_cover_image(candidates).data
else:
return None
@staticmethod
def guess_cover_image(candidates):
if len(candidates) == 1:
return candidates[0]
try:
return next(c for c in candidates if c.type == ImageType.front)
except StopIteration:
return candidates[0]
def __set__(self, mediafile, data):
if data:
mediafile.images = [Image(data=data)]
else:
mediafile.images = []
def __delete__(self, mediafile):
delattr(mediafile, 'images')
class QNumberField(MediaField):
"""Access integer-represented Q number fields.
Access a fixed-point fraction as a float. The stored value is shifted by
`fraction_bits` binary digits to the left and then rounded, yielding a
simple integer.
"""
def __init__(self, fraction_bits, *args, **kwargs):
super(QNumberField, self).__init__(out_type=int, *args, **kwargs)
self.__fraction_bits = fraction_bits
def __get__(self, mediafile, owner=None):
q_num = super(QNumberField, self).__get__(mediafile, owner)
if q_num is None:
return None
return q_num / pow(2, self.__fraction_bits)
def __set__(self, mediafile, value):
q_num = round(value * pow(2, self.__fraction_bits))
q_num = int(q_num) # needed for py2.7
super(QNumberField, self).__set__(mediafile, q_num)
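# Fixed-point sketch (illustrative addition): with `fraction_bits=8` (the
# Q7.8 layout used by the R128 fields below), a float is scaled by 2**8
# and rounded for storage, then divided back out on read.
def _q78_roundtrip_example(value=-2.5):
    stored = int(round(value * 2 ** 8))  # -> -640, the integer in the tag
    restored = stored / 2 ** 8           # -> -2.5 (true division is active)
    return stored, restored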
class ImageListField(ListMediaField):
"""Descriptor to access the list of images embedded in tags.
The getter returns a list of `Image` instances obtained from
the tags. The setter accepts a list of `Image` instances to be
written to the tags.
"""
def __init__(self):
# The storage styles used here must implement the
# `ListStorageStyle` interface and get and set lists of
# `Image`s.
super(ImageListField, self).__init__(
MP3ImageStorageStyle(),
MP4ImageStorageStyle(),
ASFImageStorageStyle(),
VorbisImageStorageStyle(),
FlacImageStorageStyle(),
APEv2ImageStorageStyle(),
out_type=Image,
)
class MediaFile(object):
"""Represents a multimedia file on disk and provides access to its
metadata.
"""
@loadfile()
def __init__(self, filething, id3v23=False):
"""Constructs a new `MediaFile` reflecting the provided file.
`filething` can be a path to a file (i.e., a string) or a
file-like object.
May throw `UnreadableFileError`.
By default, MP3 files are saved with ID3v2.4 tags. You can use
the older ID3v2.3 standard by specifying the `id3v23` option.
"""
self.filething = filething
self.mgfile = mutagen_call(
'open', self.filename, mutagen.File, filething
)
if self.mgfile is None:
# Mutagen couldn't guess the type
raise FileTypeError(self.filename)
elif type(self.mgfile).__name__ in ['M4A', 'MP4']:
info = self.mgfile.info
if info.codec and info.codec.startswith('alac'):
self.type = 'alac'
else:
self.type = 'aac'
elif type(self.mgfile).__name__ in ['ID3', 'MP3']:
self.type = 'mp3'
elif type(self.mgfile).__name__ == 'FLAC':
self.type = 'flac'
elif type(self.mgfile).__name__ == 'OggOpus':
self.type = 'opus'
elif type(self.mgfile).__name__ == 'OggVorbis':
self.type = 'ogg'
elif type(self.mgfile).__name__ == 'MonkeysAudio':
self.type = 'ape'
elif type(self.mgfile).__name__ == 'WavPack':
self.type = 'wv'
elif type(self.mgfile).__name__ == 'Musepack':
self.type = 'mpc'
elif type(self.mgfile).__name__ == 'ASF':
self.type = 'asf'
elif type(self.mgfile).__name__ == 'AIFF':
self.type = 'aiff'
elif type(self.mgfile).__name__ == 'DSF':
self.type = 'dsf'
elif type(self.mgfile).__name__ == 'WAVE':
self.type = 'wav'
else:
raise FileTypeError(self.filename, type(self.mgfile).__name__)
# Add a set of tags if it's missing.
if self.mgfile.tags is None:
self.mgfile.add_tags()
# Set the ID3v2.3 flag only for MP3s.
self.id3v23 = id3v23 and self.type == 'mp3'
@property
def filename(self):
"""The name of the file.
This is the path if this object was opened from the filesystem,
or the name of the file-like object.
"""
return self.filething.name
@filename.setter
def filename(self, val):
"""Silently skips setting filename.
Workaround for `mutagen._util._openfile` setting instance's filename.
"""
pass
@property
def path(self):
"""The path to the file.
This is `None` if the data comes from a file-like object instead
of a filesystem path.
"""
return self.filething.filename
@property
def filesize(self):
"""The size (in bytes) of the underlying file.
"""
if self.filething.filename:
return os.path.getsize(self.filething.filename)
if hasattr(self.filething.fileobj, '__len__'):
return len(self.filething.fileobj)
else:
tell = self.filething.fileobj.tell()
filesize = self.filething.fileobj.seek(0, 2)
self.filething.fileobj.seek(tell)
return filesize
def save(self, **kwargs):
"""Write the object's tags back to the file.
May throw `UnreadableFileError`. Accepts keyword arguments to be
passed to Mutagen's `save` function.
"""
# Possibly save the tags to ID3v2.3.
if self.id3v23:
id3 = self.mgfile
if hasattr(id3, 'tags'):
# In case this is an MP3 object, not an ID3 object.
id3 = id3.tags
id3.update_to_v23()
kwargs['v2_version'] = 3
mutagen_call('save', self.filename, self.mgfile.save,
_update_filething(self.filething), **kwargs)
def delete(self):
"""Remove the current metadata tag from the file. May
throw `UnreadableFileError`.
"""
mutagen_call('delete', self.filename, self.mgfile.delete,
_update_filething(self.filething))
# Convenient access to the set of available fields.
@classmethod
def fields(cls):
"""Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
for property, descriptor in cls.__dict__.items():
if isinstance(descriptor, MediaField):
if isinstance(property, bytes):
# On Python 2, class field names are bytes. This method
# produces text strings.
yield property.decode('utf8', 'ignore')
else:
yield property
@classmethod
def _field_sort_name(cls, name):
"""Get a sort key for a field name that determines the order
fields should be written in.
        Field names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order.
"""
if isinstance(cls.__dict__[name], DateItemField):
name = re.sub('year', 'date0', name)
name = re.sub('month', 'date1', name)
name = re.sub('day', 'date2', name)
return name
@classmethod
def sorted_fields(cls):
"""Get the names of all writable metadata fields, sorted in the
order that they should be written.
This is a lexicographic order, except for instances of
:class:`DateItemField`, which are sorted in year-month-day
order.
"""
for property in sorted(cls.fields(), key=cls._field_sort_name):
yield property
@classmethod
def readable_fields(cls):
"""Get all metadata fields: the writable ones from
:meth:`fields` and also other audio properties.
"""
for property in cls.fields():
yield property
for property in ('length', 'samplerate', 'bitdepth', 'bitrate',
'bitrate_mode', 'channels', 'encoder_info',
'encoder_settings', 'format'):
yield property
@classmethod
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
u'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
u'property "{0}" already exists on MediaFile'.format(name))
setattr(cls, name, descriptor)
def update(self, dict):
"""Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
"""
for field in self.sorted_fields():
if field in dict:
if dict[field] is None:
delattr(self, field)
else:
setattr(self, field, dict[field])
def as_dict(self):
"""Get a dictionary with all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
return dict((x, getattr(self, x)) for x in self.fields())
# Field definitions.
title = MediaField(
MP3StorageStyle('TIT2'),
MP4StorageStyle('\xa9nam'),
StorageStyle('TITLE'),
ASFStorageStyle('Title'),
)
artist = MediaField(
MP3StorageStyle('TPE1'),
MP4StorageStyle('\xa9ART'),
StorageStyle('ARTIST'),
ASFStorageStyle('Author'),
)
artists = ListMediaField(
MP3ListDescStorageStyle(desc=u'ARTISTS'),
MP4ListStorageStyle('----:com.apple.iTunes:ARTISTS'),
ListStorageStyle('ARTISTS'),
ASFStorageStyle('WM/ARTISTS'),
)
album = MediaField(
MP3StorageStyle('TALB'),
MP4StorageStyle('\xa9alb'),
StorageStyle('ALBUM'),
ASFStorageStyle('WM/AlbumTitle'),
)
genres = ListMediaField(
MP3ListStorageStyle('TCON'),
MP4ListStorageStyle('\xa9gen'),
ListStorageStyle('GENRE'),
ASFStorageStyle('WM/Genre'),
)
genre = genres.single_field()
lyricist = MediaField(
MP3StorageStyle('TEXT'),
MP4StorageStyle('----:com.apple.iTunes:LYRICIST'),
StorageStyle('LYRICIST'),
ASFStorageStyle('WM/Writer'),
)
composer = MediaField(
MP3StorageStyle('TCOM'),
MP4StorageStyle('\xa9wrt'),
StorageStyle('COMPOSER'),
ASFStorageStyle('WM/Composer'),
)
composer_sort = MediaField(
MP3StorageStyle('TSOC'),
MP4StorageStyle('soco'),
StorageStyle('COMPOSERSORT'),
ASFStorageStyle('WM/Composersortorder'),
)
arranger = MediaField(
MP3PeopleStorageStyle('TIPL', involvement='arranger'),
MP4StorageStyle('----:com.apple.iTunes:Arranger'),
StorageStyle('ARRANGER'),
ASFStorageStyle('beets/Arranger'),
)
grouping = MediaField(
MP3StorageStyle('TIT1'),
MP4StorageStyle('\xa9grp'),
StorageStyle('GROUPING'),
ASFStorageStyle('WM/ContentGroupDescription'),
)
track = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=0),
MP4TupleStorageStyle('trkn', index=0),
StorageStyle('TRACK'),
StorageStyle('TRACKNUMBER'),
ASFStorageStyle('WM/TrackNumber'),
out_type=int,
)
tracktotal = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=1),
MP4TupleStorageStyle('trkn', index=1),
StorageStyle('TRACKTOTAL'),
StorageStyle('TRACKC'),
StorageStyle('TOTALTRACKS'),
ASFStorageStyle('TotalTracks'),
out_type=int,
)
disc = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=0),
MP4TupleStorageStyle('disk', index=0),
StorageStyle('DISC'),
StorageStyle('DISCNUMBER'),
ASFStorageStyle('WM/PartOfSet'),
out_type=int,
)
disctotal = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=1),
MP4TupleStorageStyle('disk', index=1),
StorageStyle('DISCTOTAL'),
StorageStyle('DISCC'),
StorageStyle('TOTALDISCS'),
ASFStorageStyle('TotalDiscs'),
out_type=int,
)
url = MediaField(
MP3DescStorageStyle(key='WXXX', attr='url', multispec=False),
MP4StorageStyle('\xa9url'),
StorageStyle('URL'),
ASFStorageStyle('WM/URL'),
)
lyrics = MediaField(
MP3DescStorageStyle(key='USLT', multispec=False),
MP4StorageStyle('\xa9lyr'),
StorageStyle('LYRICS'),
ASFStorageStyle('WM/Lyrics'),
)
comments = MediaField(
MP3DescStorageStyle(key='COMM'),
MP4StorageStyle('\xa9cmt'),
StorageStyle('DESCRIPTION'),
StorageStyle('COMMENT'),
ASFStorageStyle('WM/Comments'),
ASFStorageStyle('Description')
)
copyright = MediaField(
MP3StorageStyle('TCOP'),
MP4StorageStyle('cprt'),
StorageStyle('COPYRIGHT'),
ASFStorageStyle('Copyright'),
)
bpm = MediaField(
MP3StorageStyle('TBPM'),
MP4StorageStyle('tmpo', as_type=int),
StorageStyle('BPM'),
ASFStorageStyle('WM/BeatsPerMinute'),
out_type=int,
)
comp = MediaField(
MP3StorageStyle('TCMP'),
MP4BoolStorageStyle('cpil'),
StorageStyle('COMPILATION'),
ASFStorageStyle('WM/IsCompilation', as_type=bool),
out_type=bool,
)
albumartist = MediaField(
MP3StorageStyle('TPE2'),
MP4StorageStyle('aART'),
StorageStyle('ALBUM ARTIST'),
StorageStyle('ALBUMARTIST'),
ASFStorageStyle('WM/AlbumArtist'),
)
albumartists = ListMediaField(
MP3ListDescStorageStyle(desc=u'ALBUMARTISTS'),
MP4ListStorageStyle('----:com.apple.iTunes:ALBUMARTISTS'),
ListStorageStyle('ALBUMARTISTS'),
ASFStorageStyle('WM/AlbumArtists'),
)
albumtype = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Type'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Type'),
StorageStyle('RELEASETYPE'),
StorageStyle('MUSICBRAINZ_ALBUMTYPE'),
ASFStorageStyle('MusicBrainz/Album Type'),
)
label = MediaField(
MP3StorageStyle('TPUB'),
MP4StorageStyle('----:com.apple.iTunes:LABEL'),
MP4StorageStyle('----:com.apple.iTunes:publisher'),
MP4StorageStyle('----:com.apple.iTunes:Label', read_only=True),
StorageStyle('LABEL'),
StorageStyle('PUBLISHER'), # Traktor
ASFStorageStyle('WM/Publisher'),
)
artist_sort = MediaField(
MP3StorageStyle('TSOP'),
MP4StorageStyle('soar'),
StorageStyle('ARTISTSORT'),
ASFStorageStyle('WM/ArtistSortOrder'),
)
albumartist_sort = MediaField(
MP3DescStorageStyle(u'ALBUMARTISTSORT'),
MP4StorageStyle('soaa'),
StorageStyle('ALBUMARTISTSORT'),
ASFStorageStyle('WM/AlbumArtistSortOrder'),
)
asin = MediaField(
MP3DescStorageStyle(u'ASIN'),
MP4StorageStyle('----:com.apple.iTunes:ASIN'),
StorageStyle('ASIN'),
ASFStorageStyle('MusicBrainz/ASIN'),
)
catalognum = MediaField(
MP3DescStorageStyle(u'CATALOGNUMBER'),
MP4StorageStyle('----:com.apple.iTunes:CATALOGNUMBER'),
StorageStyle('CATALOGNUMBER'),
ASFStorageStyle('WM/CatalogNo'),
)
barcode = MediaField(
MP3DescStorageStyle(u'BARCODE'),
MP4StorageStyle('----:com.apple.iTunes:BARCODE'),
StorageStyle('BARCODE'),
StorageStyle('UPC', read_only=True),
StorageStyle('EAN/UPN', read_only=True),
StorageStyle('EAN', read_only=True),
StorageStyle('UPN', read_only=True),
ASFStorageStyle('WM/Barcode'),
)
isrc = MediaField(
MP3StorageStyle(u'TSRC'),
MP4StorageStyle('----:com.apple.iTunes:ISRC'),
StorageStyle('ISRC'),
ASFStorageStyle('WM/ISRC'),
)
disctitle = MediaField(
MP3StorageStyle('TSST'),
MP4StorageStyle('----:com.apple.iTunes:DISCSUBTITLE'),
StorageStyle('DISCSUBTITLE'),
ASFStorageStyle('WM/SetSubTitle'),
)
encoder = MediaField(
MP3StorageStyle('TENC'),
MP4StorageStyle('\xa9too'),
StorageStyle('ENCODEDBY'),
StorageStyle('ENCODER'),
ASFStorageStyle('WM/EncodedBy'),
)
script = MediaField(
MP3DescStorageStyle(u'Script'),
MP4StorageStyle('----:com.apple.iTunes:SCRIPT'),
StorageStyle('SCRIPT'),
ASFStorageStyle('WM/Script'),
)
language = MediaField(
MP3StorageStyle('TLAN'),
MP4StorageStyle('----:com.apple.iTunes:LANGUAGE'),
StorageStyle('LANGUAGE'),
ASFStorageStyle('WM/Language'),
)
country = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Release Country'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz '
'Album Release Country'),
StorageStyle('RELEASECOUNTRY'),
ASFStorageStyle('MusicBrainz/Album Release Country'),
)
albumstatus = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Status'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Status'),
StorageStyle('RELEASESTATUS'),
StorageStyle('MUSICBRAINZ_ALBUMSTATUS'),
ASFStorageStyle('MusicBrainz/Album Status'),
)
media = MediaField(
MP3StorageStyle('TMED'),
MP4StorageStyle('----:com.apple.iTunes:MEDIA'),
StorageStyle('MEDIA'),
ASFStorageStyle('WM/Media'),
)
albumdisambig = MediaField(
# This tag mapping was invented for beets (not used by Picard, etc).
MP3DescStorageStyle(u'MusicBrainz Album Comment'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Comment'),
StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'),
ASFStorageStyle('MusicBrainz/Album Comment'),
)
# Release date.
date = DateField(
MP3StorageStyle('TDRC'),
MP4StorageStyle('\xa9day'),
StorageStyle('DATE'),
ASFStorageStyle('WM/Year'),
year=(StorageStyle('YEAR'),))
year = date.year_field()
month = date.month_field()
day = date.day_field()
# *Original* release date.
original_date = DateField(
MP3StorageStyle('TDOR'),
MP4StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR'),
StorageStyle('ORIGINALDATE'),
ASFStorageStyle('WM/OriginalReleaseYear'))
original_year = original_date.year_field()
original_month = original_date.month_field()
original_day = original_date.day_field()
# Nonstandard metadata.
artist_credit = MediaField(
MP3DescStorageStyle(u'Artist Credit'),
MP4StorageStyle('----:com.apple.iTunes:Artist Credit'),
StorageStyle('ARTIST_CREDIT'),
ASFStorageStyle('beets/Artist Credit'),
)
albumartist_credit = MediaField(
MP3DescStorageStyle(u'Album Artist Credit'),
MP4StorageStyle('----:com.apple.iTunes:Album Artist Credit'),
StorageStyle('ALBUMARTIST_CREDIT'),
ASFStorageStyle('beets/Album Artist Credit'),
)
# Legacy album art field
art = CoverArtField()
# Image list
images = ImageListField()
# MusicBrainz IDs.
mb_trackid = MediaField(
MP3UFIDStorageStyle(owner='http://musicbrainz.org'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Track Id'),
StorageStyle('MUSICBRAINZ_TRACKID'),
ASFStorageStyle('MusicBrainz/Track Id'),
)
mb_releasetrackid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Release Track Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Track Id'),
StorageStyle('MUSICBRAINZ_RELEASETRACKID'),
ASFStorageStyle('MusicBrainz/Release Track Id'),
)
mb_workid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Work Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Work Id'),
StorageStyle('MUSICBRAINZ_WORKID'),
ASFStorageStyle('MusicBrainz/Work Id'),
)
mb_albumid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Id'),
StorageStyle('MUSICBRAINZ_ALBUMID'),
ASFStorageStyle('MusicBrainz/Album Id'),
)
mb_artistids = ListMediaField(
MP3ListDescStorageStyle(u'MusicBrainz Artist Id', split_v23=True),
MP4ListStorageStyle('----:com.apple.iTunes:MusicBrainz Artist Id'),
ListStorageStyle('MUSICBRAINZ_ARTISTID'),
ASFStorageStyle('MusicBrainz/Artist Id'),
)
mb_artistid = mb_artistids.single_field()
mb_albumartistids = ListMediaField(
MP3ListDescStorageStyle(
u'MusicBrainz Album Artist Id',
split_v23=True,
),
MP4ListStorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Artist Id',
),
ListStorageStyle('MUSICBRAINZ_ALBUMARTISTID'),
ASFStorageStyle('MusicBrainz/Album Artist Id'),
)
mb_albumartistid = mb_albumartistids.single_field()
mb_releasegroupid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Release Group Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Group Id'),
StorageStyle('MUSICBRAINZ_RELEASEGROUPID'),
ASFStorageStyle('MusicBrainz/Release Group Id'),
)
# Acoustid fields.
acoustid_fingerprint = MediaField(
MP3DescStorageStyle(u'Acoustid Fingerprint'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Fingerprint'),
StorageStyle('ACOUSTID_FINGERPRINT'),
ASFStorageStyle('Acoustid/Fingerprint'),
)
acoustid_id = MediaField(
MP3DescStorageStyle(u'Acoustid Id'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Id'),
StorageStyle('ACOUSTID_ID'),
ASFStorageStyle('Acoustid/Id'),
)
# ReplayGain fields.
rg_track_gain = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'
),
MP3DescStorageStyle(
u'replaygain_track_gain',
float_places=2, suffix=u' dB'
),
MP3SoundCheckStorageStyle(
key='COMM',
index=0, desc=u'iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_gain',
float_places=2, suffix=' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=0
),
StorageStyle(
u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'
),
ASFStorageStyle(
u'replaygain_track_gain',
float_places=2, suffix=u' dB'
),
out_type=float
)
rg_album_gain = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'
),
MP3DescStorageStyle(
u'replaygain_album_gain',
float_places=2, suffix=u' dB'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_gain',
float_places=2, suffix=' dB'
),
StorageStyle(
u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'
),
ASFStorageStyle(
u'replaygain_album_gain',
float_places=2, suffix=u' dB'
),
out_type=float
)
rg_track_peak = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_TRACK_PEAK',
float_places=6
),
MP3DescStorageStyle(
u'replaygain_track_peak',
float_places=6
),
MP3SoundCheckStorageStyle(
key=u'COMM',
index=1, desc=u'iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_peak',
float_places=6
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle(u'REPLAYGAIN_TRACK_PEAK', float_places=6),
ASFStorageStyle(u'replaygain_track_peak', float_places=6),
out_type=float,
)
rg_album_peak = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_ALBUM_PEAK',
float_places=6
),
MP3DescStorageStyle(
u'replaygain_album_peak',
float_places=6
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_peak',
float_places=6
),
StorageStyle(u'REPLAYGAIN_ALBUM_PEAK', float_places=6),
ASFStorageStyle(u'replaygain_album_peak', float_places=6),
out_type=float,
)
# EBU R128 fields.
r128_track_gain = QNumberField(
8,
MP3DescStorageStyle(
u'R128_TRACK_GAIN'
),
MP4StorageStyle(
'----:com.apple.iTunes:R128_TRACK_GAIN'
),
StorageStyle(
u'R128_TRACK_GAIN'
),
ASFStorageStyle(
u'R128_TRACK_GAIN'
),
)
r128_album_gain = QNumberField(
8,
MP3DescStorageStyle(
u'R128_ALBUM_GAIN'
),
MP4StorageStyle(
'----:com.apple.iTunes:R128_ALBUM_GAIN'
),
StorageStyle(
u'R128_ALBUM_GAIN'
),
ASFStorageStyle(
u'R128_ALBUM_GAIN'
),
)
initial_key = MediaField(
MP3StorageStyle('TKEY'),
MP4StorageStyle('----:com.apple.iTunes:initialkey'),
StorageStyle('INITIALKEY'),
ASFStorageStyle('INITIALKEY'),
)
@property
def length(self):
"""The duration of the audio in seconds (a float)."""
return self.mgfile.info.length
@property
def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
elif self.type == 'opus':
# Opus is always 48kHz internally.
return 48000
return 0
@property
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0
@property
def channels(self):
"""The number of channels in the audio (an int)."""
if hasattr(self.mgfile.info, 'channels'):
return self.mgfile.info.channels
return 0
@property
def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
return int(self.filesize * 8 / self.length)
@property
def bitrate_mode(self):
"""The mode of the bitrate used in the audio coding
(a string, eg. "CBR", "VBR" or "ABR").
Only available for the MP3 file format (empty where unavailable).
"""
if hasattr(self.mgfile.info, 'bitrate_mode'):
return {
mutagen.mp3.BitrateMode.CBR: 'CBR',
mutagen.mp3.BitrateMode.VBR: 'VBR',
mutagen.mp3.BitrateMode.ABR: 'ABR',
}.get(self.mgfile.info.bitrate_mode, '')
else:
return ''
@property
def encoder_info(self):
"""The name and/or version of the encoder used
(a string, eg. "LAME 3.97.0").
Only available for some formats (empty where unavailable).
"""
if hasattr(self.mgfile.info, 'encoder_info'):
return self.mgfile.info.encoder_info
else:
return ''
@property
def encoder_settings(self):
"""A guess of the settings used for the encoder (a string, eg. "-V2").
Only available for the MP3 file format (empty where unavailable).
"""
if hasattr(self.mgfile.info, 'encoder_settings'):
return self.mgfile.info.encoder_settings
else:
return ''
@property
def format(self):
"""A string describing the file format/codec."""
return TYPES[self.type]
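# A minimal usage sketch, assuming this is beets' MediaFile class (the file
# path and values below are illustrative only):
#
#     f = MediaFile('song.mp3')
#     f.rg_track_gain          # -> float, parsed from e.g. "-6.54 dB"
#     f.mb_albumid = '9f0e...' # written to every mapped tag on save
#     f.save()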
|
import asyncio
import contextlib
import copy
import datetime
import io
import logging
import discord
import lifesaver
from discord.ext import commands
from lifesaver.utils import pluralize
from ruamel.yaml import YAML
from dog.formatting import represent
from .converters import UserReference
from .keeper import Keeper
log = logging.getLogger(__name__)
def require_configuration():
def predicate(ctx):
if not ctx.cog.gatekeeper_config(ctx.guild):
raise commands.CheckFailure(
"Gatekeeper must be configured to use this command."
)
return True
return commands.check(predicate)
class Gatekeeper(lifesaver.Cog):
def __init__(self, bot):
super().__init__(bot)
self.yaml = YAML()
self.keepers = {}
async def cog_check(self, ctx: lifesaver.Context):
if not ctx.guild:
raise commands.NoPrivateMessage()
if not ctx.bot.guild_configs.can_edit(ctx.author, ctx.guild):
raise commands.CheckFailure("You aren't allowed to manage Gatekeeper.")
return True
@property
def dashboard_link(self):
return self.bot.config.dashboard_link
def gatekeeper_config(self, guild: discord.Guild):
"""Return Gatekeeper gatekeeper_config for a guild."""
config = self.bot.guild_configs.get(guild, {})
return config.get("gatekeeper", {})
def keeper(self, guild: discord.Guild) -> Keeper:
"""Return a long-lived Keeper instance for a guild.
        The instance is kept in memory so it can hold per-guild state and
        other associated information.
"""
alive_keeper = self.keepers.get(guild.id)
if alive_keeper is not None:
return alive_keeper
# create a new keeper instance for the guild
config = self.gatekeeper_config(guild)
log.debug("creating a new keeper for guild %d (config=%r)", guild.id, config)
keeper = Keeper(guild, config, bot=self.bot)
self.keepers[guild.id] = keeper
return keeper
@lifesaver.Cog.listener()
async def on_guild_config_edit(self, guild: discord.Guild, config):
if guild.id not in self.keepers:
log.debug("received config edit for keeperless guild %d", guild.id)
return
log.debug("updating keeper config for guild %d", guild.id)
self.keepers[guild.id].update_config(config.get("gatekeeper", {}))
@contextlib.asynccontextmanager
async def edit_config(self, guild: discord.Guild):
config = self.bot.guild_configs.get(guild, {})
copied_gatekeeper_config = copy.deepcopy(config["gatekeeper"])
yield copied_gatekeeper_config
with io.StringIO() as buffer:
self.yaml.indent(mapping=4, sequence=6, offset=4)
            self.yaml.dump({**config, "gatekeeper": copied_gatekeeper_config}, buffer)
await self.bot.guild_configs.write(guild, buffer.getvalue())
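    # Usage sketch (illustrative): mutate the yielded copy inside the block;
    # on exit it is re-serialized to YAML and written back for the guild:
    #
    #     async with self.edit_config(guild) as config:
    #         config["quiet"] = True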
def is_being_allowed(self, guild: discord.Guild, user) -> bool:
"""Return whether a user is being specifically allowed."""
return user in self.gatekeeper_config(guild).get("allowed_users", [])
async def allow_user(self, guild: discord.Guild, user):
"""Allow a user to bypass checks in a guild."""
async with self.edit_config(guild) as config:
allowed_users = config.get("allowed_users", [])
            # assign a new list: if the key was missing, .get() returned a
            # fresh [] not stored in config, so in-place appends would be lost
config["allowed_users"] = allowed_users + [user]
async def disallow_user(self, guild: discord.Guild, user):
"""Disallow a user to bypass checks in a guild."""
async with self.edit_config(guild) as config:
allowed_users = config.get("allowed_users", [])
allowed_users.remove(user)
@lifesaver.Cog.listener()
async def on_member_join(self, member: discord.Member):
await self.bot.wait_until_ready()
config = self.gatekeeper_config(member.guild)
if not config.get("enabled", False):
return
# fetch the keeper instance for this guild, which manages gatekeeping,
# check processing, and all of that good stuff.
keeper = self.keeper(member.guild)
overridden = config.get("allowed_users", [])
is_whitelisted = str(member) in overridden or member.id in overridden
if not is_whitelisted:
# this function will do the reporting for us if the user fails any
# checks.
is_allowed = await keeper.check(member)
if not is_allowed:
return
if config.get("quiet", False):
return
embed = discord.Embed(
color=discord.Color.green(),
title=f"{represent(member)} has joined",
description="This user has passed all Gatekeeper checks.",
)
if is_whitelisted:
embed.description = (
"This user has been specifically allowed into this server."
)
embed.set_thumbnail(url=member.avatar_url)
embed.timestamp = datetime.datetime.utcnow()
await keeper.report(embed=embed)
@lifesaver.group(aliases=["gk"], hollow=True)
async def gatekeeper(self, ctx: lifesaver.Context):
"""
Manages Gatekeeper.
        Gatekeeper is an advanced mechanism of Dogbot that lets you screen member joins in real time
        and automatically kick members who don't meet certain criteria. Only users who can ban can use it.
        This is very useful when your server is undergoing raids, unwanted attention, unwanted members, etc.
"""
@gatekeeper.command(name="disallow", aliases=["deallow", "unallow", "unwhitelist"])
@require_configuration()
async def command_disallow(self, ctx: lifesaver.Context, *, user: UserReference):
"""Remove a user from the allowed users list."""
try:
await self.disallow_user(ctx.guild, user)
except ValueError:
await ctx.send(f"{ctx.tick(False)} That user isn't being allowed.")
else:
await ctx.send(f"{ctx.tick()} Disallowed `{user}`.")
@gatekeeper.group(name="allow", aliases=["whitelist"], invoke_without_command=True)
@require_configuration()
async def group_allow(self, ctx: lifesaver.Context, *, user: UserReference):
"""Add a user to the allowed users list.
This will add the user to the allowed_users key in the configuration,
allowing them to bypass checks.
"""
if self.is_being_allowed(ctx.guild, user):
await ctx.send(f"{ctx.tick(False)} That user is already being allowed.")
return
await self.allow_user(ctx.guild, user)
await ctx.send(f"{ctx.tick()} Allowed `{user}`.")
@group_allow.command(name="temp")
@require_configuration()
async def command_allow_temp(
self, ctx: lifesaver.Context, duration: int, *, user: UserReference
):
"""Temporarily allows a user to join for n minutes."""
if duration > 60 * 24:
raise commands.BadArgument("The maximum time is 1 day.")
if duration < 1:
raise commands.BadArgument("Invalid duration.")
await self.allow_user(ctx.guild, user)
minutes = pluralize(minute=duration)
await ctx.send(f"{ctx.tick()} Temporarily allowing `{user}` for {minutes}.")
await asyncio.sleep(duration * 60)
try:
await self.disallow_user(ctx.guild, user)
except ValueError:
# was manually removed from allowed_users... by an admin?
pass
@gatekeeper.command(name="lockdown", aliases=["ld"])
@require_configuration()
async def command_lockdown(self, ctx: lifesaver.Context, *, enabled: bool = True):
"""Enables block_all.
You can also provide "on" or "off" to the command to manually enable
or disable the check as desired.
"""
async with self.edit_config(ctx.guild) as config:
checks = config.get("checks", {})
checks["block_all"] = {"enabled": enabled}
config["checks"] = checks
status = "enabled" if enabled else "disabled"
await ctx.send(f"{ctx.tick()} `block_all` is now {status}.")
@gatekeeper.command(name="enable", aliases=["on"])
@require_configuration()
async def command_enable(self, ctx: lifesaver.Context):
"""Enables Gatekeeper."""
async with self.edit_config(ctx.guild) as config:
config["enabled"] = True
await ctx.send(f"{ctx.tick()} Enabled Gatekeeper.")
@gatekeeper.command(name="disable", aliases=["off"])
@require_configuration()
async def command_disable(self, ctx: lifesaver.Context):
"""Disables Gatekeeper."""
async with self.edit_config(ctx.guild) as config:
config["enabled"] = False
await ctx.send(f"{ctx.tick()} Disabled Gatekeeper.")
@gatekeeper.command(name="toggle", aliases=["flip"])
@require_configuration()
async def command_toggle(self, ctx: lifesaver.Context):
"""Toggles Gatekeeper."""
async with self.edit_config(ctx.guild) as config:
config["enabled"] = not config["enabled"]
state = "enabled" if config["enabled"] else "disabled"
await ctx.send(f"{ctx.tick()} Gatekeeper is now {state}.")
@gatekeeper.command(name="status")
@require_configuration()
async def command_status(self, ctx: lifesaver.Context):
"""Views the current status of Gatekeeper."""
enabled = self.gatekeeper_config(ctx.guild).get("enabled", False)
if enabled:
description = "Incoming members must pass Gatekeeper checks to join."
else:
description = "Anyone can join."
link = f"{self.dashboard_link}/guilds/{ctx.guild.id}"
description += f"\n\nUse [the web dashboard]({link}) to configure gatekeeper."
if enabled:
color = discord.Color.green()
else:
color = discord.Color.red()
embed = discord.Embed(
color=color,
title=f'Gatekeeper is {"on" if enabled else "off"}.',
description=description,
)
await ctx.send(embed=embed)
|
from __future__ import absolute_import # Python 2 only
import os
import os.path
import functools
import warnings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from django.conf import settings
from django.template import loader, RequestContext
from django.utils.html import mark_safe
from jinja2 import Environment
import jinja2.runtime
from jinja2.runtime import Context
from unipath import Path
from .query import QueryFinder, more_like_this, get_document, when
from .filters import selected_filters_for_field, is_filter_selected
from .templates import get_date_string
from .middleware import get_request
from flags.template_functions import flag_enabled, flag_disabled
PERMALINK_REGISTRY = {}
default_app_config = 'sheerlike.apps.SheerlikeConfig'
def register_permalink(sheer_type, url_pattern_name):
PERMALINK_REGISTRY[sheer_type] = url_pattern_name
def global_render_template(name, **kwargs):
request = get_request()
context = RequestContext(request, kwargs or None)
template = loader.get_template(name, using='wagtail-env')
return mark_safe(template.render(context.flatten()))
def url_for(app, filename, site_slug=None):
if app == 'static' and not site_slug:
return staticfiles_storage.url(filename)
elif app == 'static':
return staticfiles_storage.url(site_slug + '/static/' + filename)
else:
raise ValueError("url_for doesn't know about %s" % app)
class SheerlikeContext(Context):
def __init__(self, environment, parent, name, blocks):
        super(SheerlikeContext, self).__init__(environment, parent, name, blocks)
        try:
            self.vars['request'] = get_request()
        except Exception:
            # No active request (e.g. rendering outside the request cycle);
            # leave the context without a 'request' variable.
            pass
jinja2.runtime.Context = SheerlikeContext
class SheerlikeEnvironment(Environment):
def join_path(self, template, parent):
dirname = os.path.dirname(parent)
segments = dirname.split('/')
paths = []
collected = ''
for segment in segments:
collected += segment + '/'
paths.insert(0, collected[:])
for p in paths:
relativepath = os.path.join(p, template)
for search in self.loader.searchpath:
filesystem_path = os.path.join(search, relativepath)
if os.path.exists(filesystem_path):
return relativepath
return template
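    # Example: for parent "foo/bar/page.html" and template "_head.html",
    # candidates are tried most specific first: "foo/bar/_head.html", then
    # "foo/_head.html", falling back to the bare template name.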
def environment(**options):
queryfinder = QueryFinder()
options.setdefault('extensions', []).append('jinja2.ext.do')
site_slug = options.get('site_slug')
if site_slug:
del options['site_slug']
env = SheerlikeEnvironment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url_for': functools.partial(url_for, site_slug=site_slug),
'url': reverse,
'queries': queryfinder,
'more_like_this': more_like_this,
'get_document': get_document,
'selected_filters_for_field': selected_filters_for_field,
'is_filter_selected': is_filter_selected,
'when': when,
'flag_enabled': flag_enabled,
'flag_disabled': flag_disabled,
'global_include': global_render_template,
})
env.filters.update({
'date': get_date_string
})
return env
|
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x05\xc3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x18\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x47\x49\x53\x20\x69\x63\x6f\x6e\
\x20\x74\x68\x65\x6d\x65\x20\x30\x2e\x32\xee\x53\xa0\xa0\x00\x00\
\x00\x18\x74\x45\x58\x74\x41\x75\x74\x68\x6f\x72\x00\x52\x6f\x62\
\x65\x72\x74\x20\x53\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x5f\x56\
\xb1\x08\x00\x00\x00\x27\x74\x45\x58\x74\x44\x65\x73\x63\x72\x69\
\x70\x74\x69\x6f\x6e\x00\x68\x74\x74\x70\x3a\x2f\x2f\x72\x6f\x62\
\x65\x72\x74\x2e\x73\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x2e\x70\
\x6c\x90\x59\x48\x60\x00\x00\x00\x18\x74\x45\x58\x74\x43\x72\x65\
\x61\x74\x69\x6f\x6e\x20\x54\x69\x6d\x65\x00\x32\x30\x30\x38\x2d\
\x31\x32\x2d\x31\x32\x58\x2e\x3b\xbf\x00\x00\x00\x52\x74\x45\x58\
\x74\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x00\x43\x43\x20\x41\x74\
\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x2d\x53\x68\x61\x72\x65\x41\
\x6c\x69\x6b\x65\x20\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\
\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\
\x6c\x69\x63\x65\x6e\x73\x65\x73\x2f\x62\x79\x2d\x73\x61\x2f\x33\
\x2e\x30\x2f\x5e\x83\x5a\xbc\x00\x00\x04\x43\x49\x44\x41\x54\x48\
\x89\x95\x95\x5f\x68\x14\x57\x18\xc5\x7f\x33\xb3\x3b\xbb\x6e\x66\
\x56\x8b\x26\x26\x62\x93\x62\xa9\x25\xab\x25\x62\x31\x6e\x1b\x13\
\xb0\x25\x11\x8b\x6d\x22\xc5\x3f\x49\xa0\x11\xa5\x91\xa2\x46\x05\
\xcd\x4a\x2a\x95\x34\xad\xd4\xb5\x50\xb4\x52\xad\x0f\x01\x05\xab\
\x96\x56\xfa\x27\x85\x6a\xdd\x62\x8c\xd2\xee\x53\xea\x53\xeb\x9a\
\x28\xb4\x86\x8d\x21\x68\xb2\x99\xac\xd9\x3f\x33\xb7\x0f\xba\x71\
\xb3\xd9\x34\xf6\xc0\x7d\x98\x39\xe7\x7e\xe7\x7e\xdf\x99\xcb\x48\
\xeb\x36\xbc\xfd\x23\x12\x05\x4c\x07\x41\xf8\xeb\x73\xdf\xbc\x39\
\xad\x2e\x03\x36\x24\x0a\x36\xd6\x6d\x40\xcf\xd1\x01\x18\x19\x1d\
\xe1\xdc\x57\xe7\xc9\xf2\x6e\xfa\x43\x64\x35\x80\xf1\x42\xd1\xb1\
\xc4\x9c\x78\x52\xc9\x17\x92\xad\x5f\xcf\xd1\x07\x53\xa2\x14\x3f\
\x15\xd6\x6c\x6f\xdd\x8c\x69\xb6\xa0\x28\x07\x3b\x8e\x1d\x68\x4f\
\xe7\xe4\xf4\x87\x87\xb1\x44\xfe\x9c\xdc\x5c\x87\x90\x94\xff\x77\
\x5a\xd3\x3c\xb0\xb7\x79\xef\xf3\x98\x66\x4b\x26\x25\xc3\xa3\x11\
\x00\xcc\x70\xd8\xfb\x6f\xdf\xb9\x03\x20\x45\x8c\x88\x94\x12\xa5\
\xf8\x6c\x58\xb9\xb2\xd5\x96\x30\x93\xb6\x43\x9f\x1c\x02\x45\x39\
\x98\xc9\xdb\x10\x84\xd3\xe6\x3b\x98\xc0\x5e\xe4\x74\x3a\x94\xb3\
\x67\xbf\x9d\x27\x63\xf6\x8d\x2b\x05\xe1\x6c\x06\x4a\x91\xf1\xe1\
\xfc\xf9\xcf\xcd\x0c\xdd\x0c\xd1\x75\xe6\x48\xfb\x24\x81\x10\x62\
\xc2\x5a\x51\xd7\x24\x2e\x87\xee\x8b\xca\x2d\xbe\xd1\xb2\xfa\x1d\
\xde\x4c\x3e\x7d\x95\xd5\xef\xf0\x56\x6e\xf1\x8d\x5e\x0e\xdd\x17\
\x2b\xea\x9a\x44\x36\xcd\x84\x0c\xd6\x6c\x6f\xdd\xac\x6b\x3a\x3d\
\xbd\xbd\xd4\xd6\xd6\xba\x9c\x76\xc7\x85\x95\xeb\xb7\x69\x59\x47\
\xb3\x7e\x9b\xe6\xb4\x3b\x2e\xd4\xd6\xd6\xba\x7a\x7a\x7b\xd1\x35\
\xfd\x51\xd8\x19\x98\x60\x80\x69\xb6\xf8\xf6\xf9\xe8\xea\xbc\x8a\
\xc7\xe3\xa1\xa4\x64\xc9\x2c\x45\xcf\x39\x91\x75\x34\x7a\xce\x89\
\x92\x92\x25\xb3\x3c\x1e\x0f\x5d\x9d\x57\xf1\xed\xf3\x31\x65\xc8\
\x4f\x76\x29\x07\xfd\x7e\xff\xb0\xee\xd6\x92\x00\x35\x6b\x6b\x66\
\x38\x1d\x8e\x9a\x8a\xfa\x9d\xd5\xe9\xb2\x8a\xfa\x9d\xd5\x4e\x87\
\xa3\xa6\x66\x6d\xcd\x0c\x00\xdd\xad\x25\xfd\x7e\xff\x70\xb6\x90\
\x27\x18\x74\x1c\x3b\xd0\x3e\x34\x3a\xb8\x30\x14\xba\x15\x37\x0c\
\x03\x55\x55\x69\x68\xd8\x94\x63\x57\xd5\x53\xde\xba\x5d\x73\x01\
\xbc\x75\xbb\xe6\xda\x55\xf5\x54\x43\xc3\xa6\x1c\x55\x55\x31\x0c\
\x83\x50\xe8\x56\x7c\x68\x74\x70\x61\xe6\x1d\x98\xdc\x01\x70\xfd\
\xf4\xf1\x01\x59\x92\xdb\x03\x81\x40\x0c\xa0\xb0\xb0\x90\x8a\x8a\
\x72\x97\xee\x74\x9c\x97\x24\x49\xd2\x9d\x8e\xf3\x15\x15\xe5\xae\
\xc2\xc2\x42\x00\x02\x81\x40\x4c\x96\xe4\xf6\xeb\xa7\x8f\x0f\x64\
\x1b\xe5\x24\x03\x80\x68\x32\xd2\x16\x0c\x06\x4d\xc3\x30\x00\xa8\
\xaa\x5a\x65\xd7\xdd\x6e\xef\x1b\x5b\xdf\x1f\xd5\xdd\x6e\x6f\x55\
\xd5\x2a\x3b\x80\x61\x18\x04\x83\x41\x33\x9a\x8c\xb4\x65\xab\x03\
\x20\x09\x21\xb2\x12\x95\x9b\x7d\x9f\x2f\x2b\x5d\xf6\x6e\x75\x75\
\xb5\x03\xe0\xe8\x91\xa3\x34\x6e\x6d\xe4\xe4\x97\x27\xd9\xb1\xe9\
\x15\xac\xdb\x1f\x11\x1b\xf8\x4d\x08\xf3\x61\xd4\x61\x8b\x45\x26\
\x57\xa6\xdb\x82\xd6\x29\x0d\xca\xde\x79\x2f\xcf\xa9\x68\x77\x5a\
\x5a\x5a\x5c\x9a\xa6\xf1\xc7\x8d\x1b\x5c\xeb\xea\x62\x45\x79\x39\
\x2f\x59\x1f\x40\xb4\x07\x8b\x5c\xa1\xbb\xe4\x01\xc5\xe6\xb4\x8a\
\xab\x7e\xe1\xcf\x4b\x95\x08\x73\x0c\xcb\x1c\x23\x3a\xd2\x83\x95\
\x18\xea\x9e\xd2\x20\x5b\x17\x29\x98\x9d\x45\xd8\x67\xbe\x2c\x14\
\x49\x89\x3a\xed\x56\x04\xe0\xc5\xd7\x7f\xe0\x66\xe0\xad\x27\x1a\
\x33\x4a\x24\x1c\xc8\x9e\x41\x0a\x99\x59\xa4\x60\x59\x16\xda\xdc\
\xd5\xbc\xf0\xea\xa7\x86\x96\x57\xca\xbc\xc5\x7b\x00\x98\xb7\x78\
\x0f\x5a\x5e\x29\x45\xa5\x9f\x31\xa7\x68\x1d\x30\x45\xc8\x29\x64\
\x7e\x51\x29\x24\xe2\x31\xf1\x70\xe0\x52\x74\xf8\x9f\xef\xad\xfc\
\x45\xcd\xb8\x66\x2f\x65\xa8\xef\x67\x5c\xb3\x97\x92\xbf\xa8\x99\
\x07\x7f\x7f\xc7\x83\xbb\x3f\x4d\x6f\x90\xad\x0b\xc3\x30\x88\x27\
\x12\xa8\x76\xcb\x88\xf4\x77\x82\x10\x8c\xdc\xbb\xc6\xbd\xbf\xbe\
\x60\xe4\xde\x35\x10\x82\x48\x7f\xe7\xf8\xfe\x69\x0d\x32\xbb\x08\
\x04\x02\x31\xbb\x9c\x8c\x0e\xdf\xed\xb0\x14\xd5\x0d\x08\xb4\xdc\
\xe5\x3c\xf3\xec\x1a\xb4\xdc\xe5\x80\x40\x51\xdd\x3c\xb8\xdb\xf1\
\x74\x06\xe9\x5d\x84\xc3\x61\x82\xc1\xa0\xa9\x2a\x71\xa3\xa0\xb8\
\x89\x02\xcf\x6e\x7a\xae\x6c\x64\xb8\xef\x22\x79\x0b\x1b\x19\xee\
\xbb\x48\xcf\x95\x8d\x14\x78\x76\x53\x50\xdc\x04\xfc\xc7\x3d\xc8\
\xc4\xea\xc6\xfd\x01\x45\x91\x5e\x33\x4d\xf1\xeb\xfe\xb2\x8f\x8b\
\x65\xc5\x89\x65\xc5\x41\x58\x00\x78\x1b\x04\xbf\x9f\x7a\xfc\x8f\
\x92\x64\x64\x59\xc5\x32\xc7\x9e\xae\x03\x00\x45\x91\x17\xec\x6d\
\x6e\x46\x51\xe4\x05\x48\x74\x5b\xe6\xd8\x78\xf1\x49\x10\x16\x96\
\x39\x06\x12\xdd\x4f\x6d\x80\xa2\xb4\x1d\xf6\x1f\xee\x45\x51\xda\
\x2c\x68\x45\xa2\x3b\x9d\x1e\x3f\x7d\x0a\x8f\x6f\xf2\xbf\x45\x13\
\x05\xee\xab\x0d\x7d\xa5\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x11\
\x00\x06\xbf\x85\
\x00\x6e\
\x00\x75\x00\x6d\x00\x65\x00\x72\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x44\x00\x69\x00\x67\x00\x69\x00\x74\x00\x69\x00\x7a\x00\x65\
\
\x00\x1a\
\x0f\xcb\xd8\x87\
\x00\x76\
\x00\x65\x00\x63\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x63\x00\x72\x00\x65\x00\x61\x00\x74\x00\x65\x00\x2d\x00\x6b\x00\x65\x00\x79\
\x00\x62\x00\x6f\x00\x61\x00\x72\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
import bpy
import sys
argv = sys.argv
argv = argv[argv.index("--") + 1:] # get all args after "--"
obj_out = argv[0]
if not bpy.ops.export_scene.xml3d:
sys.exit("XML3D Exporter script is not registered.")
bpy.ops.export_scene.xml3d(filepath=obj_out)
|
import project.bar
import project.sub1
import bli
import sub2
import project.alien
import project.alien.alien2
import sub1.test
|
__author__ = "Krios Mane"
__license__ = "GPL - https://opensource.org/licenses/GPL-3.0"
__version__ = "1.0"
import serial
import ConfigParser
import os
from fabtotum.os.paths import CONFIG_INI, SERIAL_INI
from fabtotum.fabui.config import ConfigService
configService = ConfigService()
# Set the highest baud rate available.
def testBaud(port, baud_rate):
    """Return True if the board replies to a G0 probe at this baud rate."""
ser = serial.Serial(port, baud_rate, timeout=0.5)
ser.flushInput()
ser.write("G0\r\n")
serial_reply=ser.readline().rstrip()
ser.close()
return serial_reply != ''
baud_list=[250000, 115200]
accepted_baud=0
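# Probe from fastest to slowest: keep the first baud rate at which the board
# answers a G0 command; if none answers, fall back to 115200 below.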
for baud in baud_list:
    if testBaud(configService.get('serial', 'PORT'), baud):
accepted_baud = baud
break
if accepted_baud > 0:
print "Baud Rate available is: " + str(accepted_baud)
else:
accepted_baud=115200
if not os.path.exists(SERIAL_INI):
file = open(SERIAL_INI, 'w+')
file.write("[serial]\n")
file.close()
config = ConfigParser.ConfigParser()
config.read(SERIAL_INI)
config.set('serial', 'baud', accepted_baud)
config.set('serial', 'port', configService.get('serial', 'PORT'))
with open(SERIAL_INI, 'w') as configfile:
config.write(configfile)
|
import json
import os
import platform
import re
import subprocess
import sys
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
  if files:
return [output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static ' +
'initialization into every file including the header. Instead, ' +
'#include <ostream>. See http://crbug.com/94794',
files)]
return []
def _CheckNoFRIEND_TEST(input_api, output_api):
"""Make sure that gtest's FRIEND_TEST() macro is not used, the
FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be
used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes."""
problems = []
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
if 'FRIEND_TEST(' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('WebRTC\'s code should not use '
'gtest\'s FRIEND_TEST() macro. Include testsupport/gtest_prod_util.h and '
'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
def _CheckApprovedFilesLintClean(input_api, output_api,
source_file_filter=None):
"""Checks that all new or whitelisted .cc and .h files pass cpplint.py.
This check is based on _CheckChangeLintsClean in
depot_tools/presubmit_canned_checks.py but has less filters and only checks
added files."""
result = []
# Initialize cpplint.
import cpplint
# Access to a protected member _XX of a client class
# pylint: disable=W0212
cpplint._cpplint_state.ResetErrorCounts()
# Use the strictest verbosity level for cpplint.py (level 1) which is the
# default when running cpplint.py from command line.
# To make it possible to work with not-yet-converted code, we're only applying
# it to new (or moved/renamed) files and files listed in LINT_FOLDERS.
verbosity_level = 1
files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
# Note that moved/renamed files also count as added.
if f.Action() == 'A':
files.append(f.AbsoluteLocalPath())
for file_name in files:
cpplint.ProcessFile(file_name, verbosity_level)
if cpplint._cpplint_state.error_count > 0:
if input_api.is_committing:
# TODO(kjellander): Change back to PresubmitError below when we're
# confident with the lint settings.
res_type = output_api.PresubmitPromptWarning
else:
res_type = output_api.PresubmitPromptWarning
result = [res_type('Changelist failed cpplint.py check.')]
return result
def _CheckNoRtcBaseDeps(input_api, gyp_files, output_api):
pattern = input_api.re.compile(r"base.gyp:rtc_base\s*'")
violating_files = []
for f in gyp_files:
gyp_exceptions = (
'base_tests.gyp',
'desktop_capture.gypi',
'libjingle.gyp',
'libjingle_tests.gyp',
'p2p.gyp',
'sound.gyp',
'webrtc_test_common.gyp',
'webrtc_tests.gypi',
)
if f.LocalPath().endswith(gyp_exceptions):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
violating_files.append(f)
if violating_files:
return [output_api.PresubmitError(
'Depending on rtc_base is not allowed. Change your dependency to '
'rtc_base_approved and possibly sanitize and move the desired source '
'file(s) to rtc_base_approved.\nChanged GYP files:',
items=violating_files)]
return []
def _CheckNoSourcesAboveGyp(input_api, gyp_files, output_api):
# Disallow referencing source files with paths above the GYP file location.
source_pattern = input_api.re.compile(r'sources.*?\[(.*?)\]',
re.MULTILINE | re.DOTALL)
file_pattern = input_api.re.compile(r"'((\.\./.*?)|(<\(webrtc_root\).*?))'")
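  # file_pattern flags source entries such as '../common/foo.cc' or
  # '<(webrtc_root)/base/bar.cc' inside a sources list.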
violating_gyp_files = set()
violating_source_entries = []
for gyp_file in gyp_files:
contents = input_api.ReadFile(gyp_file)
for source_block_match in source_pattern.finditer(contents):
# Find all source list entries starting with ../ in the source block
# (exclude overrides entries).
for file_list_match in file_pattern.finditer(source_block_match.group(0)):
source_file = file_list_match.group(0)
if 'overrides/' not in source_file:
violating_source_entries.append(source_file)
violating_gyp_files.add(gyp_file)
if violating_gyp_files:
return [output_api.PresubmitError(
'Referencing source files above the directory of the GYP file is not '
'allowed. Please introduce new GYP targets and/or GYP files in the '
'proper location instead.\n'
'Invalid source entries:\n'
'%s\n'
'Violating GYP files:' % '\n'.join(violating_source_entries),
items=violating_gyp_files)]
return []
def _CheckGypChanges(input_api, output_api):
source_file_filter = lambda x: input_api.FilterSourceFile(
x, white_list=(r'.+\.(gyp|gypi)$',))
gyp_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
if f.LocalPath().startswith('webrtc'):
gyp_files.append(f)
result = []
if gyp_files:
result.append(output_api.PresubmitNotifyResult(
'As you\'re changing GYP files: please make sure corresponding '
'BUILD.gn files are also updated.\nChanged GYP files:',
items=gyp_files))
result.extend(_CheckNoRtcBaseDeps(input_api, gyp_files, output_api))
result.extend(_CheckNoSourcesAboveGyp(input_api, gyp_files, output_api))
return result
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# Copied from Chromium's src/PRESUBMIT.py.
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
checkdeps_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'buildtools', 'checkdeps')
if not os.path.exists(checkdeps_path):
return [output_api.PresubmitError(
'Cannot find checkdeps at %s\nHave you run "gclient sync" to '
'download Chromium and setup the symlinks?' % checkdeps_path)]
sys.path.append(checkdeps_path)
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for _, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _RunPythonTests(input_api, output_api):
def join(*args):
return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)
test_directories = [
join('tools', 'autoroller', 'unittests'),
]
tests = []
for directory in test_directories:
tests.extend(
input_api.canned_checks.GetUnitTestsInDirectory(
input_api,
output_api,
directory,
whitelist=[r'.+_test\.py$']))
return input_api.RunTests(tests, parallel=True)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.RunPylint(input_api, output_api,
black_list=(r'^.*gviz_api\.py$',
r'^.*gaeunit\.py$',
# Embedded shell-script fakes out pylint.
r'^build[\\\/].*\.py$',
r'^buildtools[\\\/].*\.py$',
r'^chromium[\\\/].*\.py$',
r'^google_apis[\\\/].*\.py$',
r'^net.*[\\\/].*\.py$',
r'^out.*[\\\/].*\.py$',
r'^testing[\\\/].*\.py$',
r'^third_party[\\\/].*\.py$',
r'^tools[\\\/]find_depot_tools.py$',
r'^tools[\\\/]clang[\\\/].*\.py$',
r'^tools[\\\/]generate_library_loader[\\\/].*\.py$',
r'^tools[\\\/]gn[\\\/].*\.py$',
r'^tools[\\\/]gyp[\\\/].*\.py$',
r'^tools[\\\/]isolate_driver.py$',
r'^tools[\\\/]protoc_wrapper[\\\/].*\.py$',
r'^tools[\\\/]python[\\\/].*\.py$',
r'^tools[\\\/]python_charts[\\\/]data[\\\/].*\.py$',
r'^tools[\\\/]refactoring[\\\/].*\.py$',
r'^tools[\\\/]swarming_client[\\\/].*\.py$',
r'^tools[\\\/]vim[\\\/].*\.py$',
# TODO(phoglund): should arguably be checked.
r'^tools[\\\/]valgrind-webrtc[\\\/].*\.py$',
r'^tools[\\\/]valgrind[\\\/].*\.py$',
r'^tools[\\\/]win[\\\/].*\.py$',
r'^xcodebuild.*[\\\/].*\.py$',),
disabled_warnings=['F0401', # Failed to import x
'E0611', # No package y in x
'W0232', # Class has no __init__ method
],
pylintrc='pylintrc'))
# WebRTC can't use the presubmit_canned_checks.PanProjectChecks function since
# we need to have different license checks in talk/ and webrtc/ directories.
# Instead, hand-picked checks are included below.
# Skip long-lines check for DEPS, GN and GYP files.
long_lines_sources = lambda x: input_api.FilterSourceFile(x,
black_list=(r'.+\.gyp$', r'.+\.gypi$', r'.+\.gn$', r'.+\.gni$', 'DEPS'))
results.extend(input_api.canned_checks.CheckLongLines(
input_api, output_api, maxlen=80, source_file_filter=long_lines_sources))
results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeTodoHasOwner(
input_api, output_api))
results.extend(_CheckApprovedFilesLintClean(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoFRIEND_TEST(input_api, output_api))
results.extend(_CheckGypChanges(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(_RunPythonTests(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(
input_api.canned_checks.CheckGNFormatted(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckOwners(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeWasUploaded(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasBugField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasTestField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://webrtc-status.appspot.com/current?format=json'))
return results
def GetPreferredTryMasters(project, change):
cq_config_path = os.path.join(
change.RepositoryRoot(), 'infra', 'config', 'cq.cfg')
# commit_queue.py below is a script in depot_tools directory, which has a
# 'builders' command to retrieve a list of CQ builders from the CQ config.
is_win = platform.system() == 'Windows'
masters = json.loads(subprocess.check_output(
['commit_queue', 'builders', cq_config_path], shell=is_win))
try_config = {}
for master in masters:
try_config.setdefault(master, {})
for builder in masters[master]:
if 'presubmit' in builder:
# Do not trigger presubmit builders, since they're likely to fail
# (e.g. OWNERS checks before finished code review), and we're running
# local presubmit anyway.
pass
else:
try_config[master][builder] = ['defaulttests']
return try_config
|
import re
LANGUAGES = {
'Afrikaans': 'af',
'Albanian': 'sq',
'Arabic': 'ar',
'Asturian': 'ast',
'Basque': 'eu',
'Belarusian': 'be',
'Bosnian': 'bs',
'Breton': 'br',
'Bulgarian': 'bg',
'Catalan': 'ca',
'Cibemba': 'bem',
'Corsican': 'co',
    'Croatian': 'hr',
'Czech': 'cs',
'Danish': 'da',
'Dutch': 'nl',
'English': 'en',
'Esperanto': 'eo',
'Estonian': 'et',
'Farsi': 'fa',
'Finnish': 'fi',
'French': 'fr',
'Galician': 'gl',
'Georgian': 'ka',
'German': 'de',
'Greek': 'el',
'Hebrew': 'he',
'Hindi': 'hi',
'Hungarian': 'hu',
'Icelandic': 'is',
'Igbo': 'ig',
'Indonesian': 'id',
'Irish': 'ga',
'Italian': 'it',
'Japanese': 'ja',
'Khmer': 'km',
'Korean': 'ko',
'Kurdish': 'ku',
'Latvian': 'lv',
'Lithuanian': 'lt',
'Luxembourgish': 'lb',
'Macedonian': 'mk',
'Malagasy': 'mg',
'Malay': 'ms_MY',
'Mongolian': 'mn',
'Norwegian': 'nb',
'NorwegianNynorsk': 'nn',
'Polish': 'pl',
'Portuguese': 'pt',
'PortugueseBR': 'pt_BR',
'Romanian': 'ro',
'Russian': 'ru',
'ScotsGaelic': 'sco',
'Serbian': 'sr',
# 'SimpChinese': 'zh-Hans',
'SimpChinese': 'zh_CN',
'Slovak': 'sk',
'Slovenian': 'sl',
'Spanish': 'es',
'Swahili': 'sw',
'Swedish': 'sv',
'Tatar': 'tt',
'Thai': 'th',
# 'TradChinese': 'zh-Hant',
'TradChinese': 'zh_TW',
'Turkish': 'tr',
'Ukrainian': 'uk',
'Uzbek': 'uz',
'Vietnamese': 'vi',
'Welsh': 'cy',
'Yoruba': 'yo',
}
_R_LANGUAGES = {code: name for name, code in LANGUAGES.items()}
ESCAPE_CHARS = {
r'$\r': '\r',
r'$\n': '\n',
r'$\t': '\t',
r'$\"': '"',
r'$\'': "'",
r'$\`': '`',
}
RE_LANGSTRING_LINE = re.compile(r'LangString\s+(?P<identifier>[A-Za-z0-9_]+)\s+\${LANG_[A-Z]+}\s+["\'`](?P<text>.*)["\'`]$')
def language_to_code(language):
return LANGUAGES.get(language)
def code_to_language(language_code):
return _R_LANGUAGES.get(language_code)
def escape_string(text):
for escape, char in ESCAPE_CHARS.items():
        if char not in {"'", "`"}:  # strings are "-delimited, so ' and ` need no escaping
text = text.replace(char, escape)
return text
def unescape_string(text):
for escape, char in ESCAPE_CHARS.items():
text = text.replace(escape, char)
return text
def parse_langstring(line):
match = RE_LANGSTRING_LINE.match(line)
if match:
return (
match.group('identifier'),
unescape_string(match.group('text'))
)
else:
return None
def make_langstring(language, identifier, text):
language = language.upper()
text = escape_string(text)
return f'LangString {identifier} ${{LANG_{language}}} "{text}"\n'
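# Round-trip sketch (identifier and text are illustrative):
#
#     line = make_langstring('english', 'Greeting', 'Hello\nworld')
#     # -> 'LangString Greeting ${LANG_ENGLISH} "Hello$\\nworld"\n'
#     parse_langstring(line.rstrip('\n'))
#     # -> ('Greeting', 'Hello\nworld')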
|
from opus_core.resources import Resources
from opus_core.store.storage import Storage
from numpy import asarray, array, where, ndarray, ones, concatenate, maximum, resize, logical_and
from opus_core.misc import ematch, unique
from opus_core.variables.variable_name import VariableName
from opus_core.logger import logger
import sys
class EquationSpecification(object):
# Names of attributes for specification data on storage
field_submodel_id = 'sub_model_id'
field_equation_id = 'equation_id'
field_coefficient_name = 'coefficient_name'
field_variable_name = 'variable_name'
field_fixed_value = 'fixed_value'
dim_field_prefix = 'dim_' # columns with this prefix are considered as additional dimensions of the specification
def __init__(self, variables=None, coefficients=None, equations=None, submodels=None,
fixed_values=None, other_fields=None, specification_dict=None, in_storage=None, out_storage=None):
""" variables - array of names of variables that are to be connected to coefficients.
coefficients (coef. names), equations and submodels are arrays of the same length as variables or empty arrays.
variables[i] is meant to belong to coefficient[i], equations[i], submodels[i].
            fixed_values is an array with coefficient values that should stay constant in the estimation.
other_fields should be a dictionary holding other columns of the specification table.
The actual connection is done by SpecifiedCoefficients.
            If variables is None and specification_dict is not None, the specification is assumed to be in one of the dictionary formats;
            see the doc string for get_specification_attributes_from_dictionary.
"""
if (variables is None) and (specification_dict is not None):
variables, coefficients, equations, submodels, fixed_values, other_fields = \
get_specification_attributes_from_dictionary(specification_dict)
self.variables = tuple(map(lambda x: VariableName(x), self._none_or_array_to_array(variables)))
self.coefficients = self._none_or_array_to_array(coefficients)
if not isinstance(self.coefficients, ndarray):
self.coefficients=array(self.coefficients)
self.equations = self._none_or_array_to_array(equations)
if not isinstance(self.equations, ndarray):
self.equations=array(self.equations)
self.submodels=self._none_or_array_to_array(submodels)
if not isinstance(self.submodels, ndarray):
self.submodels=array(self.submodels)
self.in_storage=in_storage
self.out_storage=out_storage
self.fixed_values=self._none_or_array_to_array(fixed_values)
if not isinstance(self.fixed_values, ndarray):
self.fixed_values=array(self.fixed_values)
if other_fields:
self.other_fields = other_fields
else:
self.other_fields = {}
self.other_dim_field_names = []
self.set_other_dim_field_names()
def _none_or_array_to_array(self, array_or_none):
if array_or_none is None:
result = array([])
else:
result = array_or_none
return result
def load(self, resources=None, in_storage=None, in_table_name=None, variables = []):
local_resources = Resources(resources)
local_resources.merge_with_defaults({
"field_submodel_id":self.field_submodel_id,
"field_equation_id":self.field_equation_id,
"field_coefficient_name":self.field_coefficient_name,
"field_variable_name":self.field_variable_name,
"field_fixed_value":self.field_fixed_value})
        if in_storage is not None:
self.in_storage = in_storage
if not isinstance(self.in_storage, Storage):
logger.log_warning("in_storage is not of type Storage. No EquationSpecification loaded.")
else:
data = self.in_storage.load_table(table_name=in_table_name)
equations=array([-1])
if local_resources["field_equation_id"] in data:
equations = data[local_resources["field_equation_id"]]
vars=data[local_resources["field_variable_name"]]
self.variables=tuple(map(lambda x: VariableName(x), vars))
self.coefficients=data[local_resources["field_coefficient_name"]]
if local_resources["field_submodel_id"] in data:
submodels = data[local_resources["field_submodel_id"]]
else:
submodels = array([-2]*self.coefficients.size, dtype="int32")
self.submodels=submodels
if equations.max() >= 0:
self.equations=equations
if local_resources["field_fixed_value"] in data:
self.fixed_values = data[local_resources["field_fixed_value"]]
for field in data:
if field not in [local_resources["field_submodel_id"], local_resources["field_equation_id"],
local_resources["field_variable_name"], local_resources["field_coefficient_name"],
local_resources["field_fixed_value"]]:
self.other_fields[field] = data[field]
self.set_other_dim_field_names()
if variables:
self.shrink(variables)
def write(self, resources=None, out_storage=None, out_table_name=None):
"""
""" # TODO: insert docstring
local_resources = Resources(resources)
local_resources.merge_with_defaults({
"field_submodel_id":self.field_submodel_id,
"field_equation_id":self.field_equation_id,
"field_coefficient_name":self.field_coefficient_name,
"field_variable_name":self.field_variable_name,
"field_fixed_value":self.field_fixed_value,
"out_table_name":out_table_name})
        if out_storage is not None:
self.out_storage = out_storage
if not isinstance(self.out_storage, Storage):
logger.log_warning("out_storage has to be of type Storage. No EquationSpecifications written.")
return
submodel_ids = self.get_submodels()
if submodel_ids.size == 0:
            submodel_ids = resize(array([-2], dtype="int32"), len(self.get_coefficient_names())) # set sub_model_id = -2 when there are no submodels or only one
equation_ids = self.get_equations()
if equation_ids.size == 0:
equation_ids = resize(array([-2], dtype="int32"), submodel_ids.size)
values = {local_resources["field_submodel_id"]: submodel_ids,
local_resources["field_equation_id"]: equation_ids,
local_resources["field_coefficient_name"]: self.get_coefficient_names(),
local_resources["field_variable_name"]: self.get_long_variable_names()}
if self.fixed_values.size > 0:
values[local_resources["field_fixed_value"]] = self.fixed_values
for field in self.other_fields.keys():
values[field] = self.other_fields[field]
types = {local_resources["field_submodel_id"]: 'integer',
local_resources["field_equation_id"]: 'integer',
local_resources["field_coefficient_name"]: 'text',
local_resources["field_variable_name"]: 'text'}
local_resources.merge({"values":values, 'valuetypes': types, "drop_table_flag":1})
self.out_storage.write_table(table_name = local_resources['out_table_name'],
table_data=local_resources['values']
)
def shrink(self, variables):
""" Shrink all arrays of class attributes to those elements that correspond to given variables.
"""
variables = tuple(variables)
idx_list = []
variable_names = asarray(map(lambda x: x.get_alias(),
self.variables))
for var in variables:
idx = ematch(variable_names, var)
if idx.size > 0:
idx_list.append(idx[0])
idx_array = asarray(idx_list)
self.do_shrink(variable_names, idx_array)
def do_shrink(self, variable_names, idx_array):
new_variables = []
for i in idx_array:
new_variables.append(self.variables[i])
self.variables = tuple(new_variables)
self.coefficients = self.coefficients[idx_array]
if self.submodels.size > 0:
self.submodels = self.submodels[idx_array]
if self.equations.size > 0:
self.equations = self.equations[idx_array]
if self.fixed_values.size > 0:
self.fixed_values = self.fixed_values[idx_array]
for field in self.other_fields.keys():
self.other_fields[field] = self.other_fields[field][idx_array]
def delete(self, variables):
""" Delete given variables from specification."""
variables = tuple(variables)
idx_list = []
variable_names = asarray(map(lambda x: x.get_alias(),
self.variables))
nvariables = variable_names.size
will_not_delete = array(nvariables*[True], dtype='bool8')
for var in variables:
idx = ematch(variable_names, var)
if idx.size > 0:
will_not_delete[idx] = False
self.do_shrink(variable_names, where(will_not_delete)[0])
def add_item(self, variable_name, coefficient_name, submodel=None, equation=None, fixed_value=None, other_fields=None):
if isinstance(variable_name,VariableName):
self.variables = self.variables + (variable_name,)
else:
self.variables = self.variables + (VariableName(variable_name),)
self.coefficients = concatenate((self.coefficients, array([coefficient_name])))
if submodel is not None and self.get_submodels().size > 0:
self.submodels = concatenate((self.submodels, array([submodel], dtype=self.submodels.dtype)))
elif self.get_submodels().size > 0:
self.submodels = concatenate((self.submodels, array([-2], dtype=self.submodels.dtype)))
if equation is not None and self.get_equations().size > 0:
self.equations = concatenate((self.equations, array([equation], dtype=self.equations.dtype)))
elif self.get_equations().size > 0:
self.equations = concatenate((self.equations, array([-2], dtype=self.equations.dtype)))
if fixed_value is not None and self.get_fixed_values().size > 0:
self.fixed_values = concatenate((self.fixed_values, array([fixed_value], dtype=self.fixed_values.dtype)))
elif self.get_fixed_values().size > 0:
self.fixed_values = concatenate((self.fixed_values, array([0], dtype=self.fixed_values.dtype)))
if other_fields is not None:
for field in other_fields.keys():
self.other_fields[field] = concatenate((self.other_fields[field], array([other_fields[field]],
dtype=self.other_fields[field].dtype)))
def summary(self):
logger.log_status("Specification object:")
logger.log_status("size:", len(self.variables))
logger.log_status("variables:")
logger.log_status(map(lambda x: x.get_alias(), self.variables))
logger.log_status("coefficients:")
logger.log_status(self.coefficients)
if self.equations.size > 0:
logger.log_status("equations:")
logger.log_status(self.equations)
if self.submodels.size > 0:
logger.log_status("submodels:")
logger.log_status(self.submodels)
if self.fixed_values.size > 0:
logger.log_status("fixed_values:")
logger.log_status(self.fixed_values)
for field in self.other_fields.keys():
logger.log_status("%s:" % field)
logger.log_status(self.other_fields[field])
def compare_and_try_raise_speclengthexception(self, value, compvalue, name):
if value != compvalue:
try:
raise SpecLengthException(name)
except SpecLengthException, msg:
logger.log_status(msg)
sys.exit(1)
def check_consistency(self):
self.compare_and_try_raise_speclengthexception(len(self.variables),self.coefficients.size,"coefficients")
if self.equations.size > 0:
self.compare_and_try_raise_speclengthexception(len(self.variables),self.equations.size,"equations")
if self.submodels.size > 0:
self.compare_and_try_raise_speclengthexception(len(self.variables),self.submodels.size,"submodels")
for field in self.other_fields.keys():
self.compare_and_try_raise_speclengthexception(len(self.variables),self.other_fields[field].size, field)
def get_variable_names(self):
return array(map(lambda x: x.get_alias(), self.variables))
def get_long_variable_names(self):
return array(map(lambda x: x.get_expression(), self.variables))
def get_variables(self):
return self.variables
def get_coefficient_names(self):
return self.coefficients
def get_distinct_coefficient_names(self):
return unique(self.coefficients)
def get_distinct_variable_names(self):
return unique(self.get_variable_names())
def get_distinct_long_variable_names(self):
return unique(self.get_long_variable_names())
def get_equations(self):
return self.equations
def get_submodels(self):
return self.submodels
def get_fixed_values(self):
return self.fixed_values
def get_coefficient_fixed_values_for_submodel(self, submodel):
"""Return a tuple with two arrays: first one is an array of coefficient names that have fixed values (i.e. <> 0).
The second array are the corresponding fixed values. The fixed values are considered for given submodel."""
fixed_values = self.get_fixed_values()
if fixed_values.size == 0:
return (array([]), array([]))
if self.get_submodels().size > 0:
idx = self.get_submodels() == submodel
else:
idx = ones(fixed_values.size, dtype="bool8")
        idx = logical_and(idx, fixed_values != 0)
return (self.get_coefficient_names()[idx], fixed_values[idx])
def get_nequations(self):
if self.get_equations().size > 0:
return unique(self.get_equations()).size
return 1
def get_number_of_distinct_variables(self):
return self.get_distinct_variable_names().size
def get_distinct_submodels(self):
return unique(self.get_submodels())
def get_nsubmodels(self):
return maximum(1, self.get_distinct_submodels().size)
def get_other_fields(self):
return self.other_fields
def get_other_field_names(self):
return self.other_fields.keys()
def get_other_dim_field_names(self):
return self.other_dim_field_names
def get_other_field(self, name):
return self.other_fields[name]
def get_distinct_values_of_other_field(self, name):
values = self.get_other_field(name)
return unique(values)
def set_variable_prefix(self, prefix):
self.variables = tuple(map(lambda name: VariableName(prefix + name),
self.get_variable_names()))
def set_dataset_name_of_variables(self, dataset_name):
self.set_variable_prefix(dataset_name+".")
def set_other_dim_field_names(self):
""" Choose those names whose prefix correspond to self.dim_field_prefix."""
for field in self.other_fields.keys():
if field[0:len(self.dim_field_prefix)] == self.dim_field_prefix:
self.other_dim_field_names.append(field)
def get_indices_for_submodel(self, submodel):
submodels = self.get_submodels()
return where(submodels==submodel)[0]
def get_equations_for_submodel(self, submodel):
idx = self.get_indices_for_submodel(submodel)
return unique(self.get_equations()[idx])
def get_table_summary(self, submodel_default=-2, equation_default=-2):
first_row = ['Submodel', 'Equation', 'Coefficient Name', 'Variable']
submodels = self.get_submodels()
variables = self.get_long_variable_names()
coefficient_names = self.get_coefficient_names()
equations = self.get_equations()
if equations.size == 0:
equations = equation_default * ones(variables.size, dtype="int32")
if submodels.size == 0:
submodels = submodel_default * ones(variables.size, dtype="int32")
table = [first_row]
for i in range(variables.size):
table += [[submodels[i], equations[i], coefficient_names[i], variables[i]]]
return table
def replace_variables(self, new_variables):
"""new_variables is a dictionary with variable aliases as keys and new expressions as values.
        The method replaces all variables whose alias matches with the new expressions.
"""
current_aliases = self.get_variable_names()
variables = list(self.get_variables())
for alias, new_expr in new_variables.iteritems():
idx = where(current_aliases == alias)[0]
for i in range(idx.size):
variables[idx[i]] = VariableName(new_expr)
self.variables = tuple(variables)
def copy_equations_for_dim_if_needed(self, eqs_ids, dim_name, dim_value):
"""If there are equations equal -2 for the specified dim_name and value, it
is copied for each eqs_id.
"""
idx = where(self.other_fields[dim_name] == dim_value)[0]
if self.get_equations().size == 0:
self.equations = array(len(self.variables)*[-2], dtype='int32')
idx_to_copy = idx[where(self.get_equations()[idx] == -2)[0]]
for i in idx_to_copy:
for eq in eqs_ids:
if eq == eqs_ids[0]: # first call
self.equations[i] = eq
continue
other_fields={}
for of_name, of_values in self.other_fields.iteritems():
other_fields[of_name] = of_values[i]
subm = None
if self.get_submodels().size>0:
subm = self.get_submodels()[i]
fv = None
if self.get_fixed_values().size > 0:
fv = self.get_fixed_values()[i]
self.add_item(self.get_long_variable_names()[i], coefficient_name=self.get_coefficient_names()[i],
submodel=subm, equation=eq, fixed_value=fv, other_fields=other_fields)
class SpecLengthException(Exception):
    def __init__(self, name):
        Exception.__init__(self, "Something is wrong with the size of the specification object " + name + "!")
def get_specification_attributes_from_dictionary(specification_dict):
""" Creates a specification object from a dictionary specification_dict. Keys of the dictionary are submodels. If there
is only one submodel, use -2 as key. A value of specification_dict for each submodel entry is either a list
or a dictionary containing specification for the particular submodel.
If it is a list, each element can be defined in one of the following forms:
- a character string specifying a variable in its fully qualified name or as an expression - in such a case
the coefficient name will be the alias of the variable
- a tuple of length 2: variable name as above, and the corresponding coefficient name
- a tuple of length 3: variable name, coefficient name, fixed value of the coefficient (if the
coefficient should not be estimated)
- a dictionary with pairs variable name, coefficient name
If it is a dictionary, it can contain specification for each equation or for elements of other fields.
It can contain an entry 'name' which specifies the name of the field (by default the name is 'equation').
If it is another name, the values are stored in the dictionary attribute 'other_fields'. Each element of the
submodel dictionary can be again a list (see the previous paragraph), or a dictionary
(like the one described in this paragraph).
specification_dict can contain an entry '_definition_' which should be a list of elements in one of the forms
described in the second paragraph.
In such a case, the entries defined for submodels can contain only the variable aliases. The corresponding
coefficient names and fixed values (if defined) are taken from the definition section.
See examples in unit tests below.
"""
variables = []
coefficients = []
equations = []
submodels = []
fixed_values = []
definition = {}
other_fields = {}
try:
if "_definition_" in specification_dict.keys():
definition["variables"], definition["coefficients"], definition["equations"], dummy1, definition["fixed_values"], dummy2 = \
get_variables_coefficients_equations_for_submodel(specification_dict["_definition_"], "_definition_")
definition["alias"] = map(lambda x: VariableName(x).get_alias(), definition["variables"])
del specification_dict["_definition_"]
for sub_model, submodel_spec in specification_dict.items():
variable, coefficient, equation, submodel, fixed_value, other_field = get_variables_coefficients_equations_for_submodel(
submodel_spec, sub_model, definition)
variables += variable
coefficients += coefficient
equations += equation
submodels += submodel
fixed_values += fixed_value
for key, value in other_field.iteritems():
if key in other_fields:
other_fields[key] = concatenate((other_fields[key], value))
else:
other_fields[key] = array(value)
except Exception, e:
logger.log_stack_trace()
raise ValueError, "Wrong specification format for model specification: %s" % e
    if where(array(fixed_values) != 0)[0].size == 0: # no fixed values defined
fixed_values = []
return (array(variables), array(coefficients), array(equations, dtype="int16"), array(submodels, dtype="int16"),
array(fixed_values), other_fields)
def load_specification_from_dictionary(specification_dict):
"""See the doc string at get_specification_attributes_from_dictionary
"""
variables, coefficients, equations, submodels, fixed_values, other_fields = get_specification_attributes_from_dictionary(specification_dict)
return EquationSpecification(variables=variables, coefficients=coefficients, equations = equations, submodels = submodels,
fixed_values = fixed_values, other_fields=other_fields)
def get_variables_coefficients_equations_for_submodel(submodel_spec, sub_model, definition={}):
variables = []
coefficients = []
fixed_values = []
equations = []
submodels = []
other_fields = {}
error = False
if isinstance(submodel_spec, tuple) or isinstance(submodel_spec, list): # no equations or other fields given
variables, coefficients, fixed_values, error = get_variables_coefficients_from_list(submodel_spec, definition)
elif isinstance(submodel_spec, dict):
name = submodel_spec.get('name', 'equation')
if name.startswith('equation'): # by default the dictionary is on an equation level
variables, coefficients, fixed_values, equations, error = get_variables_coefficients_equations_from_dict(submodel_spec,
definition)
else:
del submodel_spec['name']
other_fields['dim_%s' % name] = []
for other_field_value, spec in submodel_spec.iteritems():
variable, coefficient, equation, submodel, fixed_value, other_field = \
get_variables_coefficients_equations_for_submodel(spec, sub_model, definition)
variables += variable
coefficients += coefficient
equations += equation
submodels += submodel
fixed_values += fixed_value
other_fields['dim_%s' % name] += len(variable)*[other_field_value]
for key, value in other_field.iteritems():
if key in other_fields:
other_fields[key] = concatenate((other_fields[key], value))
else:
other_fields[key] = array(value)
else:
logger.log_error("Error in specification of submodel %s." % sub_model)
return ([],[],[],[],[],{})
if error:
logger.log_error("Error in specification of submodel %s" % sub_model)
submodels = len(variables)*[sub_model]
return (variables, coefficients, equations, submodels, fixed_values, other_fields)
def get_full_variable_specification_for_var_coef(var_coef, definition):
"""get full variable name from definition diectionary by looking up the alias"""
if ("variables" in definition.keys()) and (var_coef in definition["alias"]):
i = definition["alias"].index(var_coef)
variable = definition["variables"][i]
else:
variable = var_coef
i = None
return variable, i
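# Example of the lookup above (a sketch assuming a parsed '_definition_' section):
# with definition["alias"] == ["pop"] and
# definition["variables"] == ["pop = urbansim.gridcell.population"],
# get_full_variable_specification_for_var_coef("pop", definition) returns
# ("pop = urbansim.gridcell.population", 0); an unknown alias is returned
# unchanged together with index None.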
def get_variables_coefficients_equations_from_dict(dict_spec, definition={}):
variables = []
coefficients = []
fixed_values = []
equations = []
error = False
speckeys = dict_spec.keys()
if sum(map(lambda x: isinstance(x, int), speckeys)) == len(speckeys): # keys are the equations
for eq, spec in dict_spec.iteritems():
variable, coefficient, fixed_value, error = get_variables_coefficients_from_list(
spec, definition)
if error:
logger.log_error("Error in specification of equation %s" % eq)
variables += variable
coefficients += coefficient
fixed_values += fixed_value
equations += len(variable)*[eq]
else:
if dict_spec.has_key("equation_ids"):
equation_ids = dict_spec["equation_ids"]
del dict_spec["equation_ids"]
else:
equation_ids = None
for var, coef in dict_spec.items():
if not equation_ids:
var_name, var_index = get_full_variable_specification_for_var_coef(var, definition)
variables.append(var_name)
if var_index is not None:
coefficients.append(definition["coefficients"][var_index])
fixed_values.append(definition["fixed_values"][var_index])
else:
coefficients.append(coef)
fixed_values.append(0)
elif type(coef) is list or type(coef) is tuple:
for i in range(len(coef)):
if coef[i] != 0:
var_name, var_index = get_full_variable_specification_for_var_coef(var, definition)
variables.append(var_name)
if var_index is not None:
fixed_values.append(definition["fixed_values"][var_index])
else:
fixed_values.append(0)
coefficients.append(coef[i])
equations.append(equation_ids[i])
else:
logger.log_error("Wrong specification format for variable %s; \nwith equation_ids provided, coefficients must be a list or tuple of the same length of equation_ids" % var)
error = True
return (variables, coefficients, fixed_values, equations, error)
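# Illustrative sketch of the 'equation_ids' form handled above: a zero coefficient
# means the variable does not enter that equation. For example,
#   {"equation_ids": (1, 2), "pop": ("bpop", 0)}
# yields variable "pop" with coefficient "bpop" in equation 1 only.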
def get_variables_coefficients_from_list(list_spec, definition={}):
variables = []
coefficients = []
fixed_values = []
error = False
for var_coef in list_spec:
if isinstance(var_coef, str):
#var_coef is just variables long names or alias
var_name, var_index = get_full_variable_specification_for_var_coef(var_coef, definition)
variables.append(var_name)
if var_index is not None:
coefficients.append(definition["coefficients"][var_index])
fixed_values.append(definition["fixed_values"][var_index])
else:
coefficients.append(VariableName(var_coef).get_alias())
fixed_values.append(0)
elif isinstance(var_coef, tuple) or isinstance(var_coef, list):
var_name, var_index = get_full_variable_specification_for_var_coef(var_coef[0], definition)
variables.append(var_name)
if len(var_coef) == 1: # coefficient name is created from variable alias
coefficients.append(VariableName(var_coef[0]).get_alias())
fixed_values.append(0)
elif len(var_coef) > 1: # coefficient names explicitly given
coefficients.append(var_coef[1])
if len(var_coef) > 2: # third item is the coefficient fixed value
fixed_values.append(var_coef[2])
else:
fixed_values.append(0)
else:
logger.log_error("Wrong specification format for variable %s" % var_coef)
error = True
elif isinstance(var_coef, dict):
var_name, var_index = get_full_variable_specification_for_var_coef(var_coef.keys()[0], definition)
variables.append(var_name)
coefficients.append(var_coef.values()[0])
fixed_values.append(0)
else:
logger.log_error("Wrong specification format for variable %s" % var_coef)
error = True
return (variables, coefficients, fixed_values, error)
from numpy import alltrue, ma
from opus_core.tests import opus_unittest
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
def test_write_method_for_expressions(self):
variables = ("x = y.disaggregate(mydataset.my_variable, intermediates=[myfaz])",
"z = y.disaggregate(mydataset.my_variable, intermediates=[myfaz, myzone])")
coefficients = ('c1', 'c2')
storage = StorageFactory().get_storage('dict_storage')
specification = EquationSpecification(variables, coefficients)
specification.write(out_storage=storage, out_table_name="spec")
result = storage.load_table(table_name="spec")
self.assert_(result['variable_name'][0] == variables[0], 'Error in equation_specification')
self.assert_(result['variable_name'][1] == variables[1], 'Error in equation_specification')
def test_load_write_specification(self):
in_storage = StorageFactory().get_storage('dict_storage')
out_storage = StorageFactory().get_storage('dict_storage')
spec_data = {
"variable_name": array(["var1", "var2", "var1"]),
"coefficient_name": array(["coef1", "coef2", "coef1"]),
"nest": array([1,1,2]),
"fixed_value": array([0, 1, 0])
}
in_storage.write_table(table_name='my_specification', table_data=spec_data)
specification = EquationSpecification(in_storage=in_storage, out_storage=out_storage)
specification.load(in_table_name='my_specification')
specification.write(out_table_name="out_specification")
result = out_storage.load_table("out_specification", column_names=["nest", "sub_model_id", "fixed_value"])
self.assert_(alltrue(result['nest'] == spec_data["nest"]), 'Error in equation_specification')
self.assert_(alltrue(result['fixed_value'] == spec_data["fixed_value"]), 'Error in equation_specification')
self.assert_(alltrue(result['sub_model_id'] == array([-2, -2, -2], dtype="int32")),
'Error in equation_specification')
def test_replace_variables(self):
variables = ("x = y.disaggregate(mydataset.my_variable, intermediates=[myfaz])",
"z = y.disaggregate(mydataset.my_variable, intermediates=[myfaz, myzone])",
"y = y.some_variable")
coefficients = ('c1', 'c2', 'c3')
specification = EquationSpecification(variables, coefficients)
new_variables = {'z': 'xxx.my_new_variable', 'q': 'xxx.variable_not_to_be_replaced', 'y': 'y.replaces_y'}
specification.replace_variables(new_variables)
result = specification.get_long_variable_names()
self.assert_(alltrue(result == array([variables[0], 'xxx.my_new_variable', 'y.replaces_y'])),
"Error in replace_varibles")
def test_load_specification(self):
specification = {1: [
("urbansim.gridcell.population", "BPOP"),
("urbansim.gridcell.average_income", "BINC"),
],
2: [
("urbansim.gridcell.is_near_arterial", "BART"),
("urbansim.gridcell.is_near_highway", "BHWY"),
],
3: [
("lage = ln(urbansim.gridcell.average_age+1)", "BAGE")
]
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
subm = result.get_submodels()
fixedval = result.get_fixed_values()
self.assert_(alltrue(coefs == array(["BPOP", "BINC", "BART", "BHWY", "BAGE"])),
msg = "Error in test_load_specification (coefficients)")
self.assert_(alltrue(vars ==
array(["population", "average_income", "is_near_arterial", "is_near_highway", "lage"])),
msg = "Error in test_load_specification (variables)")
self.assert_(ma.allequal(subm, array([1, 1, 2, 2, 3])),
msg = "Error in test_load_specification (submodels)")
self.assert_(fixedval.size == 0, msg = "Error in test_load_specification (fixed_values should be empty)")
# add a variable with a fixed value coefficient
specification[3].append(("constant", "C", 1))
result = load_specification_from_dictionary(specification)
fixedval = result.get_fixed_values()
self.assert_(ma.allequal(fixedval, array([0, 0, 0, 0, 0, 1])),
msg = "Error in test_load_specification (fixed_values)")
def test_load_specification_with_definition(self):
specification = {
"_definition_": [
("urbansim.gridcell.population", "BPOP"),
("urbansim.gridcell.average_income", "BINC"),
("urbansim.gridcell.is_near_arterial", "BART"),
("lage = ln(urbansim.gridcell.average_age+1)", "BAGE"),
("constant", "C", 1.5)
],
1: [
"population", "average_income", "lage"
],
2: [
"is_near_arterial",
"constant",
("urbansim.gridcell.is_near_highway", "BHWY"),
],
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
subm = result.get_submodels()
fixedval = result.get_fixed_values()
self.assert_(alltrue(coefs == array(["BPOP", "BINC", "BAGE", "BART", "C", "BHWY"])),
msg = "Error in test_load_specification_with_definition (coefficients)")
self.assert_(alltrue(vars ==
array(["population", "average_income", "lage", "is_near_arterial", "constant",
"is_near_highway"])),
msg = "Error in test_load_specification_with_definition (variables)")
self.assert_(ma.allequal(subm, array([1, 1, 1, 2, 2, 2])),
msg = "Error in test_load_specification_with_definition (submodels)")
self.assert_(ma.allclose(fixedval, array([0, 0, 0, 0, 1.5, 0])),
msg = "Error in test_load_specification_with_definition (fixed_values)")
def test_load_specification_with_definition_with_implicit_coefficients(self):
"""Coeficient names should be aliases of the variables."""
specification = {
"_definition_": [
"urbansim.gridcell.population",
"urbansim.gridcell.average_income",
"urbansim.gridcell.is_near_arterial",
"lage = ln(urbansim.gridcell.average_age+1)",
],
1: [
"population", "average_income", "lage"
],
2: [
"is_near_arterial",
("urbansim.gridcell.is_near_highway", "BHWY"),
],
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
subm = result.get_submodels()
self.assert_(alltrue(coefs == array(["population", "average_income", "lage", "is_near_arterial", "BHWY"])),
msg = "Error in test_load_specification_with_definition_with_implicit_coefficients (coefficients)")
self.assert_(alltrue(vars ==
array(["population", "average_income", "lage", "is_near_arterial", "is_near_highway"])),
msg = "Error in test_load_specification_with_definition_with_implicit_coefficients (variables)")
self.assert_(ma.allequal(subm, array([1, 1, 1, 2, 2])),
msg = "Error in test_load_specification_with_definition_with_implicit_coefficients (submodels)")
# test data type
self.assert_(subm.dtype.name == "int16",
msg = "Error in data type of submodels.")
def test_load_specification_with_definition_with_equations(self):
specification = {
"_definition_": [
"pop = urbansim.gridcell.population",
"inc = urbansim.gridcell.average_income",
"art = urbansim.gridcell.is_near_arterial",
],
-2: {
"equation_ids": (1,2),
"pop": ("bpop",0),
"inc": (0, "binc"),
"art": ("bart", 0),
"constant": ("asc", 0)
}
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
eqs = result.get_equations()
lvars = result.get_long_variable_names()
self.assert_(alltrue(coefs == array(["asc", "bart", "bpop", "binc"])),
msg = "Error in test_load_specification_with_definition_with_equations (coefficients)")
self.assert_(alltrue(vars == array(["constant", "art", "pop", "inc"])),
msg = "Error in test_load_specification_with_definition_with_equations (variables)")
self.assert_(ma.allequal(eqs, array([1,1,1,2])),
msg = "Error in test_load_specification_with_definition_with_equations (equations)")
self.assert_(alltrue(lvars == array(["constant",
"art = urbansim.gridcell.is_near_arterial",
"pop = urbansim.gridcell.population",
"inc = urbansim.gridcell.average_income"])),
msg = "Error in test_load_specification_with_definition_with_equations (long names of variables)")
def test_load_specification_with_definition_with_equations_v2(self):
specification = {
"_definition_": [
("pop = urbansim.gridcell.population", "bpop"),
"inc = urbansim.gridcell.average_income",
"art = urbansim.gridcell.is_near_arterial",
],
-2: {
1: [
"pop",
"inc",
"constant" ],
2: [ "art"]
}
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
eqs = result.get_equations()
lvars = result.get_long_variable_names()
self.assert_(alltrue(coefs == array(["bpop", "inc", "constant", "art",])),
msg = "Error in test_load_specification_with_definition_with_equations_v2 (coefficients)")
self.assert_(alltrue(vars == array(["pop", "inc", "constant", "art"])),
msg = "Error in test_load_specification_with_definition_with_equations (variables)")
self.assert_(ma.allequal(eqs, array([1,1,1,2])),
msg = "Error in test_load_specification_with_definition_with_equations (equations)")
self.assert_(alltrue(lvars == array(["pop = urbansim.gridcell.population",
"inc = urbansim.gridcell.average_income",
"constant",
"art = urbansim.gridcell.is_near_arterial",
])),
msg = "Error in test_load_specification_with_definition_with_equations (long names of variables)")
def test_load_specification_with_definition_nests(self):
specification = {
"_definition_": [
("pop = urbansim.gridcell.population", "bpop"),
"inc = urbansim.gridcell.average_income",
"art = urbansim.gridcell.is_near_arterial",
],
-2: {
'name': 'nest_id',
1: [
"pop",
"inc",
"constant" ],
2: [ "art"]
}
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
other = result.get_other_fields()
self.assert_(alltrue(coefs == array(["bpop", "inc", "constant", "art",])),
msg = "Error in test_load_specification_with_definition_nests (coefficients)")
self.assert_(alltrue(vars == array(["pop", "inc", "constant", "art"])),
msg = "Error in test_load_specification_with_definition_nests (variables)")
self.assert_(ma.allequal(other['dim_nest_id'], array([1,1,1,2])),
msg = "Error in test_load_specification_with_definition_nests (nests)")
def test_load_specification_with_definition_nest_and_equations(self):
specification = {
"_definition_": [
("pop = urbansim.gridcell.population", "bpop"),
"inc = urbansim.gridcell.average_income",
"art = urbansim.gridcell.is_near_arterial",
],
-2: {
'name': 'nest_id',
1: {1: [
"pop",
"inc",
"constant" ],
2: [ "art"]
},
2: {3:["pop",
"inc"
]}
}
}
result = load_specification_from_dictionary(specification)
vars = result.get_variable_names()
coefs = result.get_coefficient_names()
eqs = result.get_equations()
other = result.get_other_fields()
self.assert_(alltrue(coefs == array(["bpop", "inc", "constant", "art", "bpop", "inc"])),
msg = "Error in test_load_specification_with_definition_nest_and_equations (coefficients)")
self.assert_(alltrue(vars == array(["pop", "inc", "constant", "art", "pop", "inc"])),
msg = "Error in test_load_specification_with_definition_nest_and_equations (variables)")
self.assert_(ma.allequal(eqs, array([1,1,1,2,3,3])),
msg = "Error in test_load_specification_with_definition_nest_and_equations (equations)")
self.assert_(ma.allequal(other['dim_nest_id'], array([1,1,1,1,2,2])),
msg = "Error in test_load_specification_with_definition_nest_and_equations (nests)")
if __name__=='__main__':
opus_unittest.main()
|
{
'name': 'partner_id_validation',
'version': '0.1',
'category': 'General',
'description': "Module that validates that sale orders, purchase orders and invoices are assigned a partner that is a company.",
'author': 'Moldeo Interactive',
'website': 'http://business.moldeo.coop/',
'images': [],
'depends': ['sale','account','purchase'],
'demo': [],
'data': [],
'test': [],
'installable': True,
}
|
from Tools.Directories import resolveFilename, SCOPE_SYSETC
from Components.Console import Console
import sys
import time
import re
from boxbranding import getImageVersion, getMachineBrand
from sys import modules
import socket, fcntl, struct
def getVersionString():
return getImageVersion()
def getImageVersionString():
    try:
        file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
        lines = file.readlines()
        file.close()
        for x in lines:
            splitted = x.split('=')
            if splitted[0] == "version":
                return splitted[1].replace('\n','')
        return "unavailable"
    except IOError:
        return "unavailable"
def getImageUrlString():
    try:
        if getMachineBrand() == "GI":
            return "www.xpeed-lx.de"
        elif getMachineBrand() == "Beyonwiz":
            return "www.beyonwiz.com.au"
        else:
            file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
            lines = file.readlines()
            file.close()
            for x in lines:
                splitted = x.split('=')
                if splitted[0] == "url":
                    return splitted[1].replace('\n','')
            return "unavailable"
    except IOError:
        return "unavailable"
def getEnigmaVersionString():
return getImageVersion()
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
f = open("/proc/version","r")
kernelversion = f.read().split(' ', 4)[2].split('-',2)[0]
f.close()
return kernelversion
except:
return _("unknown")
def getLastUpdateString():
    try:
        file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
        lines = file.readlines()
        file.close()
        for x in lines:
            splitted = x.split('=')
            if splitted[0] == "date":
                # the value is formatted YYYYMMDDhhmm, e.g. 200511290116
                string = splitted[1].replace('\n','')
                year = string[0:4]
                month = string[4:6]
                day = string[6:8]
                date = '-'.join((year, month, day))
                hour = string[8:10]
                minute = string[10:12]
                # avoid shadowing the imported time module
                hhmm = ':'.join((hour, minute))
                return ' '.join((date, hhmm))
        return "unavailable"
    except IOError:
        return "unavailable"
class BootLoaderVersionFetcher:
monMap = {
"Jan": "01", "Feb": "02", "Mar": "03",
"Apr": "04", "May": "05", "Jun": "06",
"Jul": "07", "Aug": "08", "Sep": "09",
"Oct": "10", "Nov": "11", "Dec": "12",
}
dateMatch = "(Sun|Mon|Tue|Wed|Thu|Fri|Sat) (" + '|'.join(monMap.keys()) + ") ([ 1-3][0-9]) [0-2][0-9]:[0-5][0-9]:[0-5][0-9] [A-Za-z]+ ([0-9]{4})"
dateMatchRe = re.compile(dateMatch)
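    # Illustrative match (the date string is hypothetical): for the line
    # 'Tue Mar  3 14:05:22 CET 2015' the regex yields groups ('Tue', 'Mar', ' 3', '2015'),
    # which searchBootVerFinished() below normalizes to the tuple ('2015', '03', '03', line).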
def __init__(self):
pass
def searchBootVer(self, appcallback):
self.console = Console()
cmd = "strings -n 28 /dev/mtd3ro | grep ' [0-2][0-9]:[0-5][0-9]:[0-5][0-9] '"
self.console.ePopen(cmd, callback=self.searchBootVerFinished, extra_args=appcallback)
def searchBootVerFinished(self, result, retval, extra_args):
callback = extra_args
latest_date = (0, 0, 0, "Unknown")
for line in result.splitlines():
line = line.strip()
            match = self.dateMatchRe.search(line)
            if not match:
                continue
            groups = match.groups()
if len(groups) == 4:
month = self.monMap[groups[1]]
day = groups[2]
if day[0] == ' ':
day = '0' + day[1:]
year = groups[3]
d = (year, month, day, line)
if latest_date < d:
latest_date = d
if callback:
callback(latest_date[3])
__bootLoaderFetcher = BootLoaderVersionFetcher()
def getBootLoaderVersion(callback):
__bootLoaderFetcher.searchBootVer(callback)
SIOCGIFADDR = 0x8915
SIOCGIFBRDADDR = 0x8919
SIOCSIFHWADDR = 0x8927
SIOCGIFNETMASK = 0x891b
SIOCGIFFLAGS = 0x8913
ifflags = {
"up": 0x1, # interface is up
"broadcast": 0x2, # broadcast address valid
"debug": 0x4, # turn on debugging
"loopback": 0x8, # is a loopback net
"pointopoint": 0x10, # interface is has p-p link
"notrailers": 0x20, # avoid use of trailers
"running": 0x40, # interface RFC2863 OPER_UP
"noarp": 0x80, # no ARP protocol
"promisc": 0x100, # receive all packets
"allmulti": 0x200, # receive all multicast packets
"master": 0x400, # master of a load balancer
"slave": 0x800, # slave of a load balancer
"multicast": 0x1000, # Supports multicast
"portsel": 0x2000, # can set media type
"automedia": 0x4000, # auto media select active
"dynamic": 0x8000, # dialup device with changing addresses
"lower_up": 0x10000, # driver signals L1 up
"dormant": 0x20000, # driver signals dormant
"echo": 0x40000, # echo sent packets
}
def _ifinfo(sock, addr, ifname):
iface = struct.pack('256s', ifname[:15])
info = fcntl.ioctl(sock.fileno(), addr, iface)
if addr == SIOCSIFHWADDR:
return ':'.join(['%02X' % ord(char) for char in info[18:24]])
elif addr == SIOCGIFFLAGS:
return socket.ntohl(struct.unpack("!L", info[16:20])[0])
else:
return socket.inet_ntoa(info[20:24])
def getIfConfig(ifname):
ifreq = {'ifname': ifname}
infos = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# offsets defined in /usr/include/linux/sockios.h on linux 2.6
infos['addr'] = SIOCGIFADDR
infos['brdaddr'] = SIOCGIFBRDADDR
infos['hwaddr'] = SIOCSIFHWADDR
infos['netmask'] = SIOCGIFNETMASK
infos['flags'] = SIOCGIFFLAGS
try:
for k,v in infos.items():
ifreq[k] = _ifinfo(sock, v, ifname)
except:
pass
if 'flags' in ifreq:
flags = ifreq['flags']
ifreq['flags'] = dict([(name, bool(flags & flag)) for name, flag in ifflags.items()])
sock.close()
return ifreq
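# Illustrative result shape (interface name and addresses are hypothetical):
#   getIfConfig('eth0') -> {'ifname': 'eth0', 'addr': '192.168.1.10',
#                           'netmask': '255.255.255.0', 'brdaddr': '192.168.1.255',
#                           'hwaddr': '00:11:22:33:44:55',
#                           'flags': {'up': True, 'running': True, ...}}
# keys whose ioctl failed are simply missing from the result.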
def getAllIfTransferredData():
transData = {}
for line in file("/proc/net/dev").readlines():
flds = line.split(':')
if len(flds) > 1:
ifname = flds[0].strip()
flds = flds[1].strip().split()
rx_bytes, tx_bytes = (flds[0], flds[8])
transData[ifname] = (rx_bytes, tx_bytes)
return transData
def getIfTransferredData(ifname):
for line in file("/proc/net/dev").readlines():
if ifname in line:
data = line.split('%s:' % ifname)[1].split()
rx_bytes, tx_bytes = (data[0], data[8])
return rx_bytes, tx_bytes
return None
def getGateways():
gateways = {}
count = 0
for line in file("/proc/net/route").readlines():
if count > 0:
flds = line.strip().split()
for i in range(1, 4):
flds[i] = int(flds[i], 16)
if flds[3] & 2:
if flds[0] not in gateways:
gateways[flds[0]] = []
gateways[flds[0]].append({
"destination": socket.inet_ntoa(struct.pack("!L", socket.htonl(flds[1]))),
"gateway": socket.inet_ntoa(struct.pack("!L", socket.htonl(flds[2])))
})
count += 1
return gateways
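# Illustrative result shape (addresses are hypothetical): a default route on eth0
# would appear as
#   {'eth0': [{'destination': '0.0.0.0', 'gateway': '192.168.1.1'}]}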
def getIfGateways(ifname):
return getGateways().get(ifname)
def getModelString():
try:
file = open("/proc/stb/info/boxtype", "r")
model = file.readline().strip()
file.close()
return model
except IOError:
return "unknown"
def getChipSetString():
try:
f = open('/proc/stb/info/chipset', 'r')
chipset = f.read()
f.close()
return str(chipset.lower().replace('\n','').replace('bcm','').replace('sti',''))
except IOError:
return "unavailable"
def getCPUSpeedString():
try:
mhz = "unknown"
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("cpu MHz"):
mhz = float(splitted[1].split(' ')[0])
if mhz and mhz >= 1000:
mhz = "%s GHz" % str(round(mhz/1000,1))
else:
mhz = "%s MHz" % str(round(mhz,1))
file.close()
return mhz
except IOError:
return "unavailable"
def getCPUString():
try:
system="unknown"
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("system type"):
system = splitted[1].split(' ')[0]
elif splitted[0].startswith("Processor"):
system = splitted[1].split(' ')[0]
file.close()
return system
except IOError:
return "unavailable"
def getCpuCoresString():
    try:
        cores = 1 # default, so the return below cannot raise NameError
        file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("processor"):
if int(splitted[1]) > 0:
cores = 2
else:
cores = 1
file.close()
return cores
except IOError:
return "unavailable"
def getPythonVersionString():
v = sys.version_info
return "%s.%s.%s" % (v[0], v[1], v[2])
about = sys.modules[__name__]
|
import logging
log = logging.getLogger("Thug")
def SetAction(self, val):
self.__dict__['Action'] = val
if len(val) > 512:
log.ThugLogging.log_exploit_event(self._window.url,
"Myspace UPloader ActiveX",
"Overflow in Action property")
log.DFT.check_shellcode(val)
|
__author__ = 'Markus Weber'
import Communicator
import Sensor
class SensorListe(list, object):
def __init__(self, iterableInput=None):
if not iterableInput:
iterableInput = []
for element in iterableInput:
self.append(element)
            if iterableInput.count(element) > 1: # remove duplicate input
                iterableInput.remove(element)
        iterableInput.sort()
        super(SensorListe,self).__init__(iterableInput) # note: list.__init__ re-initializes the contents from iterableInput
def __contains__(self, item):
"""Gibt True zurueck falls name oder Id der uebereinstimmt"""
try:
if item.getID() in self.getAllIDs():
return True
except:
pass
try:
if item.getName() in self.getAllNames():
return True
except:
pass
if item in self.getAllIDs():
return True
elif item in self.getAllNames():
return True
elif super(SensorListe,self).__contains__(item):
return True
return False
def __str__(self):
"""Gibt einen String des Inhaltes zurueck"""
string = ""
for sensor in self:
string += str(sensor) + "\n"
return string
def append(self, p_object):
"""fuegt Sensor hinzu, falls dessen name noch nicht existiert, ansonsten wird der existierende ueberschrieben"""
# Test ob das Eingangsobjekt von der Klasse Sensor ist
if not p_object.__class__.__name__ == Sensor.Sensor.__name__:
print ("Typen in SensorListe.append passen nicht zusammen: ")
print (Sensor.Sensor.__name__)
print (p_object.__class__.__name__)
raise TypeError
# Falls SensorID bereits vorhanden ist: nicht aufnehmen
if p_object.getID() in self:
oldSensor = self.getSensor(identifier=p_object.getID())
self[self.index(oldSensor)]=p_object
else:
super(SensorListe,self).append(p_object)
def getSensorName(self, identifier = None, sensor = None, name = None):
"""gibt den Namen zu einer SensorId zurueck"""
# Erst absuchen nach normalen Werten
if identifier is not None:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getName()
elif sensor is not None:
for listedSensor in self:
if listedSensor is sensor:
return sensor.getName()
elif name is not None:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getName()
        # Then check whether None itself is being searched for
try:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getName()
except:
pass
try:
for listedSensor in self:
if listedSensor is sensor:
return listedSensor.getName()
except:
pass
try:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getName()
except:
pass
raise KeyError()
def getSensorID(self, identifier = None, sensor = None, name = None):
"""gibt die ID zu einer SensorId zurueck"""
# Erst absuchen nach normalen Werten
if identifier is not None:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getID()
elif sensor is not None:
for listedSensor in self:
if listedSensor is sensor:
return sensor.getID()
elif name is not None:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getID()
        # Then check whether None itself is being searched for
try:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getID()
except:
pass
try:
for listedSensor in self:
if listedSensor is sensor:
return listedSensor.getID()
except:
pass
try:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getID()
except:
pass
raise KeyError()
def getSensor(self, identifier=None, sensor=None, name=None):
"""gibt die ID zu einer SensorId zurueck"""
assert identifier is None or sensor is None or name is None, "Only 1 identification argument allowed"
# Erst absuchen nach normalen Werten
if identifier is not None:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor
elif sensor is not None:
for listedSensor in self:
if listedSensor is sensor:
return sensor
elif name is not None:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor
raise KeyError("Sensor id:{id} name:{name} obj:{obj} not found in sensorlist".format(id=identifier, name=name, obj=sensor))
def getAllIDs(self):
"""gibt alle SensorIDs als Liste zurueck"""
IDList = []
for sensor in self:
IDList.append(sensor.getID())
return IDList
def getAllNames(self):
"""gibt alle SensorNamen als Liste zurueck"""
NameList = []
for sensor in self:
NameList.append(sensor.getName())
return NameList
def getAllSensors(self):
sensorList = []
for sensor in self:
sensorList.append(sensor)
return sensorList
def getAllTemperatures(self):
"""Gibt alle Temperaturen als Liste mit absteigenden werten zurueck"""
# self.refreshAllSensorTemperatures()
TemperaturList = []
for sensor in [sensor for sensor in self if sensor.getTemperatur() is not None] :
try:
TemperaturList.append(float(sensor.getTemperatur()))
except ValueError:
pass
except TypeError as e:
print (str(e), 'at: {sensorwert}'.format(sensorwert=sensor.getTemperatur()))
TemperaturList.sort(reverse=True)
return TemperaturList
def refreshAllSensorTemperatures(self):
"""Diese Funktion muss aufgerufen werden, damit die Sensoren ihre Temperaturen aktualisieren"""
for sensor in self:
sensor.refreshTemperatur()
def html(self):
"""Gibt die Sensor als HTML string zurueck"""
html = ''
for sensor in sorted(self):
html += ('<tr id="{sensorname}">{sensor}</tr>\n'.format(sensorname=sensor.getName() , sensor=sensor.html()))
return html
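# Illustrative usage sketch (the Sensor constructor arguments and the sensor name
# are hypothetical; the method names are those used above):
#   sensors = SensorListe()
#   sensors.append(Sensor.Sensor(...))            # replaces an entry with the same ID
#   sensors.getSensor(name="living_room").getTemperatur()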
def main():
pass
if __name__ == "__main__":
main()
|
import fnmatch
import glob
import optparse
import os
import platform
import re
import subprocess
import sys
import time
from optparse import OptionGroup
from optparse import OptionParser
from sys import stderr
from sys import stdout
class AbortError( Exception ):
def __init__( self, format, *args ):
self.value = format % args
def __str__( self ):
return self.value
class Configure( object ):
OUT_QUIET = 0
OUT_INFO = 1
OUT_VERBOSE = 2
def __init__( self, verbose ):
self._log_info = []
self._log_verbose = []
self._record = False
self.verbose = verbose
self.dir = os.path.dirname( sys.argv[0] )
self.cwd = os.getcwd()
self.build_dir = '.'
## compute src dir which is 2 dirs up from this script
self.src_dir = os.path.normpath( sys.argv[0] )
for i in range( 2 ):
self.src_dir = os.path.dirname( self.src_dir )
if len( self.src_dir ) == 0:
self.src_dir = os.curdir
def _final_dir( self, chdir, dir ):
dir = os.path.normpath( dir )
if not os.path.isabs( dir ):
if os.path.isabs( chdir ):
dir = os.path.normpath( os.path.abspath(dir ))
else:
dir = os.path.normpath( self.relpath( dir, chdir ))
return dir
## output functions
def errln( self, format, *args ):
s = (format % args)
if re.match( '^.*[!?:;.]$', s ):
stderr.write( 'ERROR: %s configure stop.\n' % (s) )
else:
stderr.write( 'ERROR: %s; configure stop.\n' % (s) )
self.record_log()
sys.exit( 1 )
def infof( self, format, *args ):
line = format % args
self._log_verbose.append( line )
if cfg.verbose >= Configure.OUT_INFO:
self._log_info.append( line )
stdout.write( line )
def verbosef( self, format, *args ):
line = format % args
self._log_verbose.append( line )
if cfg.verbose >= Configure.OUT_VERBOSE:
stdout.write( line )
## doc is ready to be populated
def doc_ready( self ):
## compute final paths as they are after chdir into build
self.build_final = os.curdir
self.src_final = self._final_dir( self.build_dir, self.src_dir )
self.prefix_final = self._final_dir( self.build_dir, self.prefix_dir )
cfg.infof( 'compute: makevar SRC/ = %s\n', self.src_final )
cfg.infof( 'compute: makevar BUILD/ = %s\n', self.build_final )
cfg.infof( 'compute: makevar PREFIX/ = %s\n', self.prefix_final )
## perform chdir and enable log recording
def chdir( self ):
if os.path.abspath( self.build_dir ) == os.path.abspath( self.src_dir ):
cfg.errln( 'build (scratch) directory must not be the same as top-level source root!' )
if self.build_dir != os.curdir:
if os.path.exists( self.build_dir ):
if not options.force:
self.errln( 'build directory already exists: %s (use --force to overwrite)', self.build_dir )
else:
self.mkdirs( self.build_dir )
self.infof( 'chdir: %s\n', self.build_dir )
os.chdir( self.build_dir )
## enable logging
self._record = True
def mkdirs( self, dir ):
if len(dir) and not os.path.exists( dir ):
self.infof( 'mkdir: %s\n', dir )
os.makedirs( dir )
def open( self, *args ):
dir = os.path.dirname( args[0] )
if len(args) > 1 and args[1].find('w') != -1:
self.mkdirs( dir )
m = re.match( '^(.*)\.tmp$', args[0] )
if m:
self.infof( 'write: %s\n', m.group(1) )
else:
self.infof( 'write: %s\n', args[0] )
try:
return open( *args )
except Exception, x:
cfg.errln( 'open failure: %s', x )
def record_log( self ):
if not self._record:
return
self._record = False
self.verbose = Configure.OUT_QUIET
file = cfg.open( 'log/config.info.txt', 'w' )
for line in self._log_info:
file.write( line )
file.close()
file = cfg.open( 'log/config.verbose.txt', 'w' )
for line in self._log_verbose:
file.write( line )
file.close()
## Find executable by searching path.
## On success, returns full pathname of executable.
## On fail, returns None.
def findExecutable( self, name ):
if len( os.path.split(name)[0] ):
if os.access( name, os.X_OK ):
return name
return None
if not os.environ.has_key( 'PATH' ) or os.environ[ 'PATH' ] == '':
path = os.defpath
else:
path = os.environ['PATH']
for dir in path.split( os.pathsep ):
f = os.path.join( dir, name )
if os.access( f, os.X_OK ):
return f
return None
## taken from python2.6 -- we need it
def relpath( self, path, start=os.curdir ):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.curdir
return os.path.join(*rel_list)
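    ## e.g. relpath( '/a/b/c', start='/a/d' ) returns '../b/c'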
## update with parsed cli options
def update_cli( self, options ):
self.src_dir = os.path.normpath( options.src )
self.build_dir = os.path.normpath( options.build )
self.prefix_dir = os.path.normpath( options.prefix )
if options.sysroot != None:
self.sysroot_dir = os.path.normpath( options.sysroot )
else:
self.sysroot_dir = ""
if options.minver != None:
self.minver = options.minver
else:
self.minver = ""
## special case if src == build: add build subdir
if os.path.abspath( self.src_dir ) == os.path.abspath( self.build_dir ):
self.build_dir = os.path.join( self.build_dir, 'build' )
class Action( object ):
actions = []
def __init__( self, category, pretext='unknown', abort=False, head=False ):
if self not in Action.actions:
Action.actions.append( self )
self.category = category
self.pretext = pretext
self.abort = abort
self.head = head
self.session = None
self.run_done = False
self.fail = True
self.msg_fail = 'fail'
self.msg_pass = 'pass'
self.msg_end = 'end'
def _actionBegin( self ):
cfg.infof( '%s: %s...', self.category, self.pretext )
def _actionEnd( self ):
if self.fail:
cfg.infof( '(%s) %s\n', self.msg_fail, self.msg_end )
if self.abort:
self._dumpSession( cfg.infof )
cfg.errln( 'unable to continue' )
self._dumpSession( cfg.verbosef )
else:
cfg.infof( '(%s) %s\n', self.msg_pass, self.msg_end )
self._dumpSession( cfg.verbosef )
def _dumpSession( self, printf ):
if self.session and len(self.session):
for line in self.session:
printf( ' : %s\n', line )
else:
printf( ' : <NO-OUTPUT>\n' )
def _parseSession( self ):
pass
def run( self ):
if self.run_done:
return
self.run_done = True
self._actionBegin()
self._action()
if not self.fail:
self._parseSession()
self._actionEnd()
class ShellProbe( Action ):
def __init__( self, pretext, command, abort=False, head=False ):
super( ShellProbe, self ).__init__( 'probe', pretext, abort, head )
self.command = command
def _action( self ):
        ## pipe and redirect stderr to stdout; this affects the communicate() result
pipe = subprocess.Popen( self.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
## read data into memory buffers, only first element (stdout) data is used
data = pipe.communicate()
self.fail = pipe.returncode != 0
if data[0]:
self.session = data[0].splitlines()
else:
self.session = []
if pipe.returncode:
self.msg_end = 'code %d' % (pipe.returncode)
def _dumpSession( self, printf ):
printf( ' + %s\n', self.command )
super( ShellProbe, self )._dumpSession( printf )
class CCProbe( Action ):
def __init__( self, pretext, command, test_file ):
super( CCProbe, self ).__init__( 'probe', pretext )
self.command = command
self.test_file = test_file
def _action( self ):
## write program file
file = open( 'conftest.c', 'w' )
file.write( self.test_file )
file.close()
        ## pipe and redirect stderr to stdout; this affects the communicate() result
pipe = subprocess.Popen( '%s -c -o conftest.o conftest.c' % self.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
## read data into memory buffers, only first element (stdout) data is used
data = pipe.communicate()
self.fail = pipe.returncode != 0
if data[0]:
self.session = data[0].splitlines()
else:
self.session = []
if pipe.returncode:
self.msg_end = 'code %d' % (pipe.returncode)
os.remove( 'conftest.c' )
if not self.fail:
os.remove( 'conftest.o' )
def _dumpSession( self, printf ):
printf( ' + %s\n', self.command )
super( CCProbe, self )._dumpSession( printf )
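## Illustrative usage sketch (the compiler command and test program are hypothetical):
##   probe = CCProbe( 'compiler works', 'gcc', 'int main(){return 0;}' )
##   probe.run()  # writes conftest.c, compiles it, and records pass/fail in probe.fail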
class LDProbe( Action ):
def __init__( self, pretext, command, lib, test_file ):
super( LDProbe, self ).__init__( 'probe', pretext )
self.command = command
self.test_file = test_file
self.lib = lib
def _action( self ):
## write program file
file = open( 'conftest.c', 'w' )
file.write( self.test_file )
file.close()
        ## pipe and redirect stderr to stdout; this affects the communicate() result
pipe = subprocess.Popen( '%s -o conftest conftest.c %s' % (self.command, self.lib), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
## read data into memory buffers, only first element (stdout) data is used
data = pipe.communicate()
self.fail = pipe.returncode != 0
if data[0]:
self.session = data[0].splitlines()
else:
self.session = []
if pipe.returncode:
self.msg_end = 'code %d' % (pipe.returncode)
os.remove( 'conftest.c' )
if not self.fail:
os.remove( 'conftest' )
def _dumpSession( self, printf ):
printf( ' + %s\n', self.command )
super( LDProbe, self )._dumpSession( printf )
class HostTupleProbe( ShellProbe, list ):
GNU_TUPLE_RE = '([^-]+)-?([^-]*)-([^0-9-]+)([^-]*)-?([^-]*)'
def __init__( self ):
super( HostTupleProbe, self ).__init__( 'host tuple', '%s/config.guess' % (cfg.dir), abort=True, head=True )
def _parseSession( self ):
if len(self.session):
self.spec = self.session[0]
else:
self.spec = ''
## grok GNU host tuples
m = re.match( HostTupleProbe.GNU_TUPLE_RE, self.spec )
if not m:
self.fail = True
self.msg_end = 'invalid host tuple: %s' % (self.spec)
return
self.msg_end = self.spec
## assign tuple from regex
self[:] = m.groups()
## for clarity
self.machine = self[0]
self.vendor = self[1]
self.system = self[2]
self.release = self[3]
self.extra = self[4]
## nice formal name for 'system'
self.systemf = platform.system()
if self.match( '*-*-cygwin*' ):
self.systemf = self[2][0].upper() + self[2][1:]
## glob-match against spec
def match( self, *specs ):
for spec in specs:
if fnmatch.fnmatch( self.spec, spec ):
return True
return False
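## Illustrative parse (the tuple value is hypothetical): for the spec
## 'x86_64-unknown-linux-gnu' the regex yields machine='x86_64', vendor='unknown',
## system='linux', release='' and extra='gnu'.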
class BuildAction( Action, list ):
def __init__( self ):
super( BuildAction, self ).__init__( 'compute', 'build tuple', abort=True )
def _action( self ):
## check if --cross spec was used; must maintain 5-tuple compatibility with regex
if options.cross:
self.spec = os.path.basename( options.cross ).rstrip( '-' )
else:
self.spec = arch.mode[arch.mode.mode]
## grok GNU host tuples
m = re.match( HostTupleProbe.GNU_TUPLE_RE, self.spec )
if not m:
self.msg_end = 'invalid host tuple: %s' % (self.spec)
return
self.msg_end = self.spec
## assign tuple from regex
self[:] = m.groups()
## for clarity
self.machine = self[0]
self.vendor = self[1]
self.system = self[2]
self.release = self[3]
self.extra = self[4]
self.systemf = host.systemf
        ## when cross-compiling we need to switch on the target platform
        if options.cross:
            if self.match( '*mingw*' ):
                self.systemf = 'MinGW'
            elif self.systemf:
                ## strings are immutable; rebuild with the first letter capitalized
                self.systemf = self.systemf[0].upper() + self.systemf[1:]
self.title = '%s %s' % (build.systemf,self.machine)
else:
self.title = '%s %s' % (build.systemf,arch.mode.mode)
self.fail = False
## glob-match against spec
def match( self, *specs ):
for spec in specs:
if fnmatch.fnmatch( self.spec, spec ):
return True
return False
class IfHost( object ):
def __init__( self, value, *specs, **kwargs ):
self.value = kwargs.get('none',None)
for spec in specs:
if host.match( spec ):
self.value = value
break
def __nonzero__( self ):
return self.value != None
def __str__( self ):
return self.value
class ForHost( object ):
def __init__( self, default, *tuples ):
self.value = default
for tuple in tuples:
if host.match( tuple[1] ):
self.value = tuple[0]
break
def __str__( self ):
return self.value
class ArchAction( Action ):
def __init__( self ):
super( ArchAction, self ).__init__( 'compute', 'available architectures', abort=True )
self.mode = SelectMode( 'architecture', (host.machine,host.spec) )
def _action( self ):
self.fail = False
## some match on system should be made here; otherwise we signal a warning.
if host.match( '*-*-cygwin*' ):
pass
elif host.match( '*-*-darwin11.*' ):
self.mode['i386'] = 'i386-apple-darwin%s' % (host.release)
self.mode['x86_64'] = 'x86_64-apple-darwin%s' % (host.release)
elif host.match( '*-*-darwin*' ):
self.mode['i386'] = 'i386-apple-darwin%s' % (host.release)
self.mode['x86_64'] = 'x86_64-apple-darwin%s' % (host.release)
self.mode['ppc'] = 'powerpc-apple-darwin%s' % (host.release)
self.mode['ppc64'] = 'powerpc64-apple-darwin%s' % (host.release)
            ## special case: powerpc does not match the gcc -arch value we like to use,
            ## so it has to be removed.
            ## note: we don't know if Apple will release Snow Leopard/ppc64 yet; just a guess.
if 'powerpc' in self.mode:
del self.mode['powerpc']
self.mode.mode = 'ppc'
elif 'powerpc64' in self.mode:
del self.mode['powerpc64']
self.mode.mode = 'ppc64'
elif host.match( '*-*-linux*' ):
pass
elif host.match( '*-*-solaris*' ):
pass
else:
self.msg_pass = 'WARNING'
self.msg_end = self.mode.toString()
## glob-match against spec
def match( self, spec ):
return fnmatch.fnmatch( self.spec, spec )
class CoreProbe( Action ):
def __init__( self ):
super( CoreProbe, self ).__init__( 'probe', 'number of CPU cores' )
self.count = 1
def _action( self ):
if self.fail:
## good for darwin9.6.0 and linux
try:
self.count = os.sysconf( 'SC_NPROCESSORS_ONLN' )
if self.count < 1:
self.count = 1
self.fail = False
except:
pass
if self.fail:
## windows
try:
self.count = int( os.environ['NUMBER_OF_PROCESSORS'] )
if self.count < 1:
self.count = 1
self.fail = False
except:
pass
## clamp
if self.count < 1:
self.count = 1
elif self.count > 32:
self.count = 32
if options.launch:
if options.launch_jobs == 0:
self.jobs = core.count
else:
self.jobs = options.launch_jobs
else:
self.jobs = core.count
self.msg_end = str(self.count)
class SelectMode( dict ):
def __init__( self, descr, *modes, **kwargs ):
super( SelectMode, self ).__init__( modes )
self.descr = descr
self.modes = modes
self.what = kwargs.get('what',' mode')
if modes:
self.default = kwargs.get('default',modes[0][0])
else:
self.default = None
self.mode = self.default
def cli_add_option( self, parser, option ):
parser.add_option( option, default=self.mode, metavar='MODE',
help='select %s%s: %s' % (self.descr,self.what,self.toString()),
action='callback', callback=self.cli_callback, type='str' )
def cli_callback( self, option, opt_str, value, parser, *args, **kwargs ):
if value not in self:
raise optparse.OptionValueError( 'invalid %s%s: %s (choose from: %s)'
% (self.descr,self.what,value,self.toString( True )) )
self.mode = value
def toString( self, nodefault=False ):
keys = self.keys()
keys.sort()
if len(self) == 1:
value = self.mode
elif nodefault:
value = ' '.join( keys )
else:
value = '%s [%s]' % (' '.join( keys ), self.mode )
return value
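## Illustrative usage sketch (the mode names are hypothetical): a two-entry mode
## whose toString() renders as 'fast slow [fast]':
##   speed = SelectMode( 'speed', ('fast','fast-spec'), ('slow','slow-spec') )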
class RepoProbe( ShellProbe ):
def __init__( self ):
svn = 'svn'
        ## The repo may have been created with an svn version incompatible with the
        ## one available in PATH when the probe runs. Work around this by checking for
        ## the file .svn/HANDBRAKE_REPO_PROBE which points to a preferred svn executable.
try:
hrp = os.path.join( cfg.src_dir, '.svn', 'HANDBRAKE_REPO_PROBE' )
if os.path.isfile( hrp ) and os.path.getsize( hrp ) > 0:
file = cfg.open( hrp, 'r' )
line = file.readline().strip()
file.close()
if line:
svn = line
except:
pass
super( RepoProbe, self ).__init__( 'svn info', '%s info %s' % (svn,cfg.src_dir) )
self.url = 'svn://nowhere.com/project/unknown'
self.root = 'svn://nowhere.com/project'
self.branch = 'unknown'
        self.uuid = '00000000-0000-0000-0000-000000000000'
self.rev = 0
self.date = '0000-00-00 00:00:00 -0000'
self.official = 0
self.type = 'unofficial'
def _parseSession( self ):
for line in self.session:
## grok fields
m = re.match( '([^:]+):\\s+(.+)', line )
if not m:
continue
(name,value) = m.groups()
if name == 'URL':
self.url = value
elif name == 'Repository Root':
self.root = value
elif name == 'Repository UUID':
self.uuid = value
elif name == 'Revision':
self.rev = int( value )
elif name == 'Last Changed Date':
# strip chars in parens
                ## str.find returns -1 when the substring is absent
                if value.find( ' (' ) != -1:
                    self.date = value[0:value.find(' (')]
else:
self.date = value
## grok branch
i = self.url.rfind( '/' )
if i != -1 and i < len(self.url)-1:
self.branch = self.url[i+1:]
# type-classification via repository UUID
if self.uuid == 'b64f7644-9d1e-0410-96f1-a4d463321fa5':
self.official = 1
m = re.match( '([^:]+)://([^/]+)/(.+)', self.url )
if m and re.match( '.*tags/.*', m.group( 3 )):
self.type = 'release'
else:
self.type = 'developer'
self.msg_end = self.url
class Project( Action ):
def __init__( self ):
super( Project, self ).__init__( 'compute', 'project data' )
self.name = 'HandBrake'
self.acro_lower = 'hb'
self.acro_upper = 'HB'
self.url_website = 'http://handbrake.fr'
self.url_community = 'http://forum.handbrake.fr'
self.url_irc = 'irc://irc.freenode.net/handbrake'
self.name_lower = self.name.lower()
self.name_upper = self.name.upper()
self.vmajor = 0
self.vminor = 9
self.vpoint = 6
def _action( self ):
## add architecture to URL only for Mac
if fnmatch.fnmatch( build.spec, '*-*-darwin*' ):
url_arch = '.%s' % (arch.mode.mode)
else:
url_arch = ''
if repo.type == 'release':
self.version = '%d.%d.%d' % (self.vmajor,self.vminor,self.vpoint)
url_ctype = ''
url_ntype = 'stable'
self.build = time.strftime('%Y%m%d') + '00'
self.title = '%s %s (%s)' % (self.name,self.version,self.build)
elif repo.type == 'developer':
self.version = '%dsvn' % (repo.rev)
url_ctype = '_unstable'
url_ntype = 'unstable'
self.build = time.strftime('%Y%m%d') + '01'
self.title = '%s svn%d (%s)' % (self.name,repo.rev,self.build)
else:
self.version = 'rev%d' % (repo.rev)
url_ctype = '_unofficial'
url_ntype = 'unofficial'
self.build = time.strftime('%Y%m%d') + '99'
self.title = '%s rev%d (%s)' % (self.name,repo.rev,self.build)
self.url_appcast = 'http://handbrake.fr/appcast%s%s.xml' % (url_ctype,url_arch)
self.url_appnote = 'http://handbrake.fr/appcast/%s.html' % (url_ntype)
self.msg_end = '%s (%s)' % (self.name,repo.type)
self.fail = False
class ToolProbe( Action ):
tools = []
def __init__( self, var, *names, **kwargs ):
super( ToolProbe, self ).__init__( 'find', abort=kwargs.get('abort',True) )
if not self in ToolProbe.tools:
ToolProbe.tools.append( self )
self.var = var
self.names = []
self.kwargs = kwargs
for name in names:
if name:
self.names.append( str(name) )
self.name = self.names[0]
self.pretext = self.name
self.pathname = self.names[0]
def _action( self ):
self.session = []
for i,name in enumerate(self.names):
self.session.append( 'name[%d] = %s' % (i,name) )
for name in self.names:
f = cfg.findExecutable( name )
if f:
self.pathname = f
self.fail = False
self.msg_end = f
break
if self.fail:
self.msg_end = 'not found'
def cli_add_option( self, parser ):
parser.add_option( '--'+self.name, metavar='PROG',
help='[%s]' % (self.pathname),
action='callback', callback=self.cli_callback, type='str' )
def cli_callback( self, option, opt_str, value, parser, *args, **kwargs ):
self.__init__( self.var, value, **self.kwargs )
self.run()
def doc_add( self, doc ):
doc.add( self.var, self.pathname )
class SelectTool( Action ):
selects = []
def __init__( self, var, name, *pool, **kwargs ):
super( SelectTool, self ).__init__( 'select', abort=kwargs.get('abort',True) )
self.pretext = name
if not self in SelectTool.selects:
SelectTool.selects.append( self )
self.var = var
self.name = name
self.pool = pool
self.kwargs = kwargs
def _action( self ):
self.session = []
for i,(name,tool) in enumerate(self.pool):
self.session.append( 'tool[%d] = %s (%s)' % (i,name,tool.pathname) )
for (name,tool) in self.pool:
if not tool.fail:
self.selected = name
self.fail = False
self.msg_end = '%s (%s)' % (name,tool.pathname)
break
if self.fail:
self.msg_end = 'not found'
def cli_add_option( self, parser ):
parser.add_option( '--'+self.name, metavar='MODE',
help='select %s mode: %s' % (self.name,self.toString()),
action='callback', callback=self.cli_callback, type='str' )
def cli_callback( self, option, opt_str, value, parser, *args, **kwargs ):
found = False
for (name,tool) in self.pool:
if name == value:
found = True
self.__init__( self.var, self.name, [name,tool], **kwargs )
self.run()
break
if not found:
raise optparse.OptionValueError( 'invalid %s mode: %s (choose from: %s)'
% (self.name,value,self.toString( True )) )
def doc_add( self, doc ):
doc.add( self.var, self.selected )
def toString( self, nodefault=False ):
if len(self.pool) == 1:
value = self.pool[0][0]
else:
s = ''
for key,value in self.pool:
s += ' ' + key
if nodefault:
value = s[1:]
else:
value = '%s [%s]' % (s[1:], self.selected )
return value
class ConfigDocument:
def __init__( self ):
self._elements = []
def _outputMake( self, file, namelen, name, value, append ):
if append:
if value == None or len(str(value)) == 0:
file.write( '%-*s +=\n' % (namelen, name) )
else:
file.write( '%-*s += %s\n' % (namelen, name, value) )
else:
if value == None or len(str(value)) == 0:
file.write( '%-*s =\n' % (namelen, name) )
else:
file.write( '%-*s = %s\n' % (namelen, name, value) )
def _outputM4( self, file, namelen, name, value ):
namelen += 7
name = '<<__%s>>,' % name.replace( '.', '_' )
file.write( 'define(%-*s <<%s>>)dnl\n' % (namelen, name, value ))
def add( self, name, value, append=False ):
self._elements.append( [name,value,append] )
def addBlank( self ):
self._elements.append( None )
def addComment( self, format, *args ):
self.addMake( '## ' + format % args )
self.addM4( 'dnl ' + format % args )
def addMake( self, line ):
self._elements.append( ('?make',line) )
def addM4( self, line ):
self._elements.append( ('?m4',line) )
def output( self, file, type ):
namelen = 0
for item in self._elements:
if item == None or item[0].find( '?' ) == 0:
continue
if len(item[0]) > namelen:
namelen = len(item[0])
for item in self._elements:
if item == None:
if type == 'm4':
file.write( 'dnl\n' )
else:
file.write( '\n' )
continue
if item[0].find( '?' ) == 0:
if item[0].find( type, 1 ) == 1:
file.write( '%s\n' % (item[1]) )
continue
if type == 'm4':
self._outputM4( file, namelen, item[0], item[1] )
else:
self._outputMake( file, namelen, item[0], item[1], item[2] )
def update( self, name, value ):
for item in self._elements:
if item == None:
continue
if item[0] == name:
item[1] = value
return
raise ValueError( 'element not found: %s' % (name) )
def write( self, type ):
if type == 'make':
fname = 'GNUmakefile'
elif type == 'm4':
fname = os.path.join( 'project', project.name_lower + '.m4' )
else:
raise ValueError, 'unknown file type: ' + type
ftmp = fname + '.tmp'
try:
try:
file = cfg.open( ftmp, 'w' )
self.output( file, type )
finally:
try:
file.close()
except:
pass
except Exception, x:
try:
os.remove( ftmp )
except Exception, x:
pass
cfg.errln( 'failed writing to %s\n%s', ftmp, x )
try:
os.rename( ftmp, fname )
except Exception, x:
cfg.errln( 'failed writing to %s\n%s', fname, x )
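## Illustrative output (the variable name and value are hypothetical):
##   doc.add( 'GCC.gcc', '/usr/bin/gcc' ) renders in the GNUmakefile as
##   'GCC.gcc = /usr/bin/gcc' (name column padded) and in the m4 file as
##   'define(<<__GCC_gcc>>, <</usr/bin/gcc>>)dnl'.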
class Option( optparse.Option ):
conf_args = []
def _conf_record( self, opt, value ):
## filter out non-applicable options
if re.match( '^--(force|launch).*$', opt ):
return
## remove duplicates (last duplicate wins)
for i,arg in enumerate( Option.conf_args ):
if opt == arg[0]:
del Option.conf_args[i]
break
if value:
Option.conf_args.append( [opt,'%s=%s' % (opt,value)] )
else:
Option.conf_args.append( [opt,'%s' % (opt)] )
def take_action( self, action, dest, opt, value, values, parser ):
self._conf_record( opt, value )
return optparse.Option.take_action( self, action, dest, opt, value, values, parser )
def createCLI():
cli = OptionParser( 'usage: %prog [OPTIONS...] [TARGETS...]' )
cli.option_class = Option
cli.description = ''
cli.description += 'Configure %s build system.' % (project.name)
## add hidden options
cli.add_option( '--xcode-driver', default='bootstrap', action='store', help=optparse.SUPPRESS_HELP )
cli.add_option( '--force', default=False, action='store_true', help='overwrite existing build config' )
cli.add_option( '--verbose', default=False, action='store_true', help='increase verbosity' )
## add install options
grp = OptionGroup( cli, 'Directory Locations' )
h = IfHost( 'specify sysroot of SDK for Xcode builds', '*-*-darwin*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--sysroot', default=None, action='store', metavar='DIR',
help=h )
grp.add_option( '--src', default=cfg.src_dir, action='store', metavar='DIR',
help='specify top-level source dir [%s]' % (cfg.src_dir) )
grp.add_option( '--build', default=cfg.build_dir, action='store', metavar='DIR',
help='specify build scratch/output dir [%s]' % (cfg.build_dir) )
grp.add_option( '--prefix', default=cfg.prefix_dir, action='store', metavar='DIR',
help='specify install dir for products [%s]' % (cfg.prefix_dir) )
cli.add_option_group( grp )
## add feature options
grp = OptionGroup( cli, 'Feature Options' )
h = IfHost( 'enable assembly code in non-contrib modules', 'NOMATCH*-*-darwin*', 'NOMATCH*-*-linux*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--enable-asm', default=False, action='store_true', help=h )
h = IfHost( 'disable GTK GUI', '*-*-linux*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--disable-gtk', default=False, action='store_true', help=h )
h = IfHost( 'disable GTK GUI update checks', '*-*-linux*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--disable-gtk-update-checks', default=False, action='store_true', help=h )
h = IfHost( 'enable GTK GUI (mingw)', '*-*-mingw*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--enable-gtk-mingw', default=False, action='store_true', help=h )
h = IfHost( 'disable gstreamer (live preview)', '*-*-linux*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--disable-gst', default=False, action='store_true', help=h )
h = IfHost( 'enable use of ffmpeg mpeg2 decoding', '*-*-*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--enable-ff-mpeg2', default=False, action='store_true', help=h )
cli.add_option_group( grp )
## add launch options
grp = OptionGroup( cli, 'Launch Options' )
grp.add_option( '--launch', default=False, action='store_true',
help='launch build, capture log and wait for completion' )
grp.add_option( '--launch-jobs', default=1, action='store', metavar='N', type='int',
help='allow N jobs at once; 0 to match CPU count [1]' )
grp.add_option( '--launch-args', default=None, action='store', metavar='ARGS',
help='specify additional ARGS for launch command' )
grp.add_option( '--launch-quiet', default=False, action='store_true',
help='do not echo build output while waiting' )
cli.add_option_group( grp )
## add compile options
grp = OptionGroup( cli, 'Compiler Options' )
debugMode.cli_add_option( grp, '--debug' )
optimizeMode.cli_add_option( grp, '--optimize' )
arch.mode.cli_add_option( grp, '--arch' )
grp.add_option( '--cross', default=None, action='store', metavar='SPEC',
help='specify GCC cross-compilation spec' )
h = IfHost( 'specify Mac OS X deployment target for Xcode builds', '*-*-darwin*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--minver', default=None, action='store', metavar='VER',
help=h )
h = IfHost( 'Build and use local yasm', '*-*-*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--enable-local-yasm', default=False, action='store_true', help=h )
h = IfHost( 'Build and use local autotools', '*-*-*', none=optparse.SUPPRESS_HELP ).value
grp.add_option( '--enable-local-autotools', default=False, action='store_true', help=h )
cli.add_option_group( grp )
## add Xcode options
if host.match( '*-*-darwin*' ):
grp = OptionGroup( cli, 'Xcode Options' )
grp.add_option( '--disable-xcode', default=False, action='store_true',
help='disable Xcode' )
grp.add_option( '--xcode-symroot', default='xroot', action='store', metavar='DIR',
help='specify root of the directory hierarchy that contains product files and intermediate build files' )
xcconfigMode.cli_add_option( grp, '--xcode-config' )
cli.add_option_group( grp )
## add tool locations
grp = OptionGroup( cli, 'Tool Basenames and Locations' )
for tool in ToolProbe.tools:
tool.cli_add_option( grp )
cli.add_option_group( grp )
## add tool modes
grp = OptionGroup( cli, 'Tool Options' )
for select in SelectTool.selects:
select.cli_add_option( grp )
cli.add_option_group( grp )
return cli
class Launcher:
def __init__( self, targets ):
# open build logfile
self._file = cfg.open( 'log/build.txt', 'w' )
cmd = '%s -j%d' % (Tools.gmake.pathname,core.jobs)
if options.launch_args:
cmd += ' ' + options.launch_args
if len(targets):
cmd += ' ' + ' '.join(targets)
## record begin
timeBegin = time.time()
self.infof( 'time begin: %s\n', time.asctime() )
self.infof( 'launch: %s\n', cmd )
if options.launch_quiet:
stdout.write( 'building to %s ...\n' % (os.path.abspath( cfg.build_final )))
else:
stdout.write( '%s\n' % ('-' * 79) )
## launch/pipe
try:
pipe = subprocess.Popen( cmd, shell=True, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
except Exception, x:
cfg.errln( 'launch failure: %s', x )
for line in pipe.stdout:
self.echof( '%s', line )
pipe.wait()
## record end
timeEnd = time.time()
elapsed = timeEnd - timeBegin
if pipe.returncode:
result = 'FAILURE (code %d)' % pipe.returncode
else:
result = 'SUCCESS'
## present duration in decent format
seconds = elapsed
hours = int(seconds / 3600)
seconds -= hours * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
        segs = []
        if hours == 1:
            segs.append( '%d hour' % hours )
        elif hours > 1:
            segs.append( '%d hours' % hours )
        if minutes == 1:
            segs.append( '%d minute' % minutes )
        elif len(segs) or minutes > 1:
            segs.append( '%d minutes' % minutes )
        if seconds == 1:
            segs.append( '%d second' % seconds )
        else:
            segs.append( '%d seconds' % seconds )
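        ## illustration: an elapsed value of 3725.0 seconds yields
        ## segs = ['1 hour', '2 minutes', '5 seconds']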
if not options.launch_quiet:
stdout.write( '%s\n' % ('-' * 79) )
self.infof( 'time end: %s\n', time.asctime() )
self.infof( 'duration: %s (%.2fs)\n', ', '.join(segs), elapsed )
self.infof( 'result: %s\n', result )
## cleanup
self._file.close()
def echof( self, format, *args ):
line = format % args
self._file.write( line )
if not options.launch_quiet:
stdout.write( ' : %s' % line )
stdout.flush()
def infof( self, format, *args ):
line = format % args
self._file.write( line )
cfg.infof( '%s', line )
try:
## we need to pre-check argv for -h or --help or --verbose to deal with
## initializing Configure correctly.
verbose = Configure.OUT_INFO
for arg in sys.argv:
if arg == '-h' or arg == '--help':
verbose = Configure.OUT_QUIET
break
if arg == '--verbose':
verbose = Configure.OUT_VERBOSE
## create main objects; actions/probes run() is delayed.
## if any actions must be run earlier (eg: for configure --help purposes)
    ## then run() must be invoked earlier. subsequent run() invocations
## are ignored.
cfg = Configure( verbose )
host = HostTupleProbe(); host.run()
cfg.prefix_dir = ForHost( '/usr/local', ['/Applications','*-*-darwin*'] ).value
build = BuildAction()
arch = ArchAction(); arch.run()
## create remaining main objects
core = CoreProbe()
repo = RepoProbe()
project = Project()
## create tools in a scope
class Tools:
ar = ToolProbe( 'AR.exe', 'ar' )
cp = ToolProbe( 'CP.exe', 'cp' )
curl = ToolProbe( 'CURL.exe', 'curl', abort=False )
gcc = ToolProbe( 'GCC.gcc', 'gcc', IfHost( 'gcc-4', '*-*-cygwin*' ))
if host.match( '*-*-darwin*' ):
gmake = ToolProbe( 'GMAKE.exe', 'make', 'gmake' )
else:
gmake = ToolProbe( 'GMAKE.exe', 'gmake', 'make' )
m4 = ToolProbe( 'M4.exe', 'm4' )
mkdir = ToolProbe( 'MKDIR.exe', 'mkdir' )
patch = ToolProbe( 'PATCH.exe', 'gpatch', 'patch' )
rm = ToolProbe( 'RM.exe', 'rm' )
ranlib = ToolProbe( 'RANLIB.exe', 'ranlib' )
strip = ToolProbe( 'STRIP.exe', 'strip' )
tar = ToolProbe( 'TAR.exe', 'gtar', 'tar' )
wget = ToolProbe( 'WGET.exe', 'wget', abort=False )
yasm = ToolProbe( 'YASM.exe', 'yasm', abort=False )
autoconf = ToolProbe( 'AUTOCONF.exe', 'autoconf', abort=False )
automake = ToolProbe( 'AUTOMAKE.exe', 'automake', abort=False )
libtool = ToolProbe( 'LIBTOOL.exe', 'libtool', abort=False )
xcodebuild = ToolProbe( 'XCODEBUILD.exe', 'xcodebuild', abort=False )
lipo = ToolProbe( 'LIPO.exe', 'lipo', abort=False )
fetch = SelectTool( 'FETCH.select', 'fetch', ['wget',wget], ['curl',curl] )
## run tool probes
for tool in ToolProbe.tools:
tool.run()
for select in SelectTool.selects:
select.run()
debugMode = SelectMode( 'debug', ('none','none'), ('min','min'), ('std','std'), ('max','max') )
optimizeMode = SelectMode( 'optimize', ('none','none'), ('speed','speed'), ('size','size'), default='speed' )
## find xcconfig values
xcconfigMode = SelectMode( 'xcconfig', ('none',None), what='' )
if host.match( '*-*-darwin*' ):
for xc in glob.glob( os.path.join(cfg.dir, '../macosx/xcconfig/*.xcconfig') ):
bname = os.path.basename( xc )
xname = os.path.splitext( bname )
if xname and xname[0]:
xcconfigMode[xname[0]] = bname
        if 'native' not in xcconfigMode:
raise Exception( 'native xcconfig not found' )
xcconfigMode.default = 'native'
xcconfigMode.mode = xcconfigMode.default
## create CLI and parse
cli = createCLI()
(options,args) = cli.parse_args()
## update cfg with cli directory locations
cfg.update_cli( options )
## prepare list of targets and NAME=VALUE args to pass to make
targets = []
exports = []
rx_exports = re.compile( '([^=]+)=(.*)' )
for arg in args:
m = rx_exports.match( arg )
if m:
exports.append( m.groups() )
else:
targets.append( arg )
## re-run tools with cross-compilation needs
if options.cross:
for tool in ( Tools.ar, Tools.gcc, Tools.ranlib, Tools.strip ):
tool.__init__( tool.var, '%s-%s' % (options.cross,tool.name), **tool.kwargs )
tool.run()
## run delayed actions
for action in Action.actions:
action.run()
## enable local yasm when yasm probe fails
if not options.enable_local_yasm and Tools.yasm.fail:
options.enable_local_yasm = True
## enable local autotools when any of { autoconf, automake, libtool } probe fails
if not options.enable_local_autotools and (Tools.autoconf.fail or Tools.automake.fail or Tools.libtool.fail):
options.enable_local_autotools = True
if build.system == 'mingw':
dlfcn_test = """
void fnord() { int i=42;}
int main ()
{
void *self = dlopen (0, RTLD_GLOBAL|RTLD_NOW);
int status = 1;
if (self)
{
if (dlsym (self,"fnord")) status = 0;
else if (dlsym( self,"_fnord")) status = 0;
/* dlclose (self); */
}
else
puts (dlerror ());
return status;
}
"""
dlfcn = LDProbe( 'static dlfcn', '%s -static' % Tools.gcc.pathname, '-ldl', dlfcn_test )
dlfcn.run()
pthread_test = """
int main ()
{
pthread_t thread;
pthread_create (&thread, NULL, NULL, NULL);
return 0;
}
"""
pthread = LDProbe( 'static pthread', '%s -static' % Tools.gcc.pathname, '-lpthreadGC2', pthread_test )
pthread.run()
bz2_test = """
int main ()
{
BZ2_bzReadOpen(NULL, NULL, 0, 0, NULL, 0);
return 0;
}
"""
bz2 = LDProbe( 'static bz2', '%s -static' % Tools.gcc.pathname, '-lbz2', bz2_test )
bz2.run()
libz_test = """
int main ()
{
compress(NULL, NULL, NULL, 0);
return 0;
}
"""
libz = LDProbe( 'static zlib', '%s -static' % Tools.gcc.pathname, '-lz', libz_test )
libz.run()
iconv_test = """
int main ()
{
iconv_open(NULL, NULL);
return 0;
}
"""
iconv = LDProbe( 'static iconv', '%s -static' % Tools.gcc.pathname, '-liconv', iconv_test )
iconv.run()
## cfg hook before doc prep
cfg.doc_ready()
## create document object
doc = ConfigDocument()
doc.addComment( 'generated by configure on %s', time.strftime( '%c' ))
## add configure line for reconfigure purposes
doc.addBlank()
args = []
for arg in Option.conf_args:
args.append( arg[1] )
doc.add( 'CONF.args', ' '.join( args ))
doc.addBlank()
doc.add( 'HB.title', project.title )
doc.add( 'HB.name', project.name )
doc.add( 'HB.name.lower', project.name_lower )
doc.add( 'HB.name.upper', project.name_upper )
doc.add( 'HB.acro.lower', project.acro_lower )
doc.add( 'HB.acro.upper', project.acro_upper )
doc.add( 'HB.url.website', project.url_website )
doc.add( 'HB.url.community', project.url_community )
doc.add( 'HB.url.irc', project.url_irc )
doc.add( 'HB.url.appcast', project.url_appcast )
doc.add( 'HB.url.appnote', project.url_appnote )
doc.add( 'HB.version.major', project.vmajor )
doc.add( 'HB.version.minor', project.vminor )
doc.add( 'HB.version.point', project.vpoint )
doc.add( 'HB.version', project.version )
doc.add( 'HB.version.hex', '%04x%02x%02x%08x' % (project.vmajor,project.vminor,project.vpoint,repo.rev) )
doc.add( 'HB.build', project.build )
doc.add( 'HB.repo.url', repo.url )
doc.add( 'HB.repo.root', repo.root )
doc.add( 'HB.repo.branch', repo.branch )
doc.add( 'HB.repo.uuid', repo.uuid )
doc.add( 'HB.repo.rev', repo.rev )
doc.add( 'HB.repo.date', repo.date )
doc.add( 'HB.repo.official', repo.official )
doc.add( 'HB.repo.type', repo.type )
doc.addBlank()
doc.add( 'HOST.spec', host.spec )
doc.add( 'HOST.machine', host.machine )
doc.add( 'HOST.vendor', host.vendor )
doc.add( 'HOST.system', host.system )
doc.add( 'HOST.systemf', host.systemf )
doc.add( 'HOST.release', host.release )
doc.add( 'HOST.extra', host.extra )
doc.add( 'HOST.title', '%s %s' % (host.systemf,arch.mode.default) )
doc.add( 'HOST.ncpu', core.count )
doc.addBlank()
doc.add( 'BUILD.spec', build.spec )
doc.add( 'BUILD.machine', build.machine )
doc.add( 'BUILD.vendor', build.vendor )
doc.add( 'BUILD.system', build.system )
doc.add( 'BUILD.systemf', build.systemf )
doc.add( 'BUILD.release', build.release )
doc.add( 'BUILD.extra', build.extra )
doc.add( 'BUILD.title', build.title )
doc.add( 'BUILD.ncpu', core.count )
doc.add( 'BUILD.jobs', core.jobs )
doc.add( 'BUILD.cross', int(options.cross != None or arch.mode.mode != arch.mode.default) )
if options.cross:
doc.add( 'BUILD.cross.prefix', '%s-' % (options.cross) )
else:
doc.add( 'BUILD.cross.prefix', '' )
doc.add( 'BUILD.date', time.strftime('%c') )
doc.add( 'BUILD.arch', arch.mode.mode )
doc.addBlank()
doc.add( 'SRC', cfg.src_final )
doc.add( 'SRC/', cfg.src_final + os.sep )
doc.add( 'BUILD', cfg.build_final )
doc.add( 'BUILD/', cfg.build_final + os.sep )
doc.add( 'PREFIX', cfg.prefix_final )
doc.add( 'PREFIX/', cfg.prefix_final + os.sep )
doc.addBlank()
doc.add( 'FEATURE.local_yasm', int( options.enable_local_yasm ))
doc.add( 'FEATURE.local_autotools', int( options.enable_local_autotools ))
doc.add( 'FEATURE.asm', 'disabled' )
doc.add( 'FEATURE.gtk', int( not options.disable_gtk ))
doc.add( 'FEATURE.gtk.update.checks', int( not options.disable_gtk_update_checks ))
doc.add( 'FEATURE.gtk.mingw', int( options.enable_gtk_mingw ))
doc.add( 'FEATURE.gst', int( not options.disable_gst ))
doc.add( 'FEATURE.ff.mpeg2', int( options.enable_ff_mpeg2 ))
doc.add( 'FEATURE.xcode', int( not (Tools.xcodebuild.fail or options.disable_xcode or options.cross) ))
if not Tools.xcodebuild.fail and not options.disable_xcode:
doc.addBlank()
doc.add( 'XCODE.driver', options.xcode_driver )
if os.path.isabs(options.xcode_symroot):
doc.add( 'XCODE.symroot', options.xcode_symroot )
else:
doc.add( 'XCODE.symroot', os.path.abspath(os.path.join(cfg.build_dir,options.xcode_symroot)) )
doc.add( 'XCODE.xcconfig', xcconfigMode[xcconfigMode.mode] )
if build.system == 'mingw':
doc.addBlank()
if not dlfcn.fail:
doc.add( 'HAS.dlfcn', 1 )
if not pthread.fail:
doc.add( 'HAS.pthread', 1 )
if not bz2.fail:
doc.add( 'HAS.bz2', 1 )
if not libz.fail:
doc.add( 'HAS.libz', 1 )
if not iconv.fail:
doc.add( 'HAS.iconv', 1 )
doc.addMake( '' )
doc.addMake( '## define debug mode and optimize before other includes' )
doc.addMake( '## since it is tested in some module.defs' )
doc.add( 'GCC.g', debugMode.mode )
doc.add( 'GCC.O', optimizeMode.mode )
doc.addBlank()
doc.addMake( '## include definitions' )
doc.addMake( 'include $(SRC/)make/include/main.defs' )
doc.addBlank()
for tool in ToolProbe.tools:
tool.doc_add( doc )
doc.addBlank()
for select in SelectTool.selects:
select.doc_add( doc )
doc.addBlank()
if build.match( '*-*-darwin*' ):
doc.add( 'GCC.archs', arch.mode.mode )
doc.add( 'GCC.sysroot', cfg.sysroot_dir )
doc.add( 'GCC.minver', cfg.minver )
else:
doc.add( 'GCC.archs', '' )
doc.add( 'GCC.sysroot', '' )
doc.add( 'GCC.minver', '' )
if options.enable_asm and ( not Tools.yasm.fail or options.enable_local_yasm ):
asm = ''
if build.match( 'i?86-*' ):
asm = 'x86'
doc.add( 'LIBHB.GCC.D', 'HAVE_MMX', append=True )
doc.add( 'LIBHB.YASM.D', 'ARCH_X86', append=True )
if build.match( '*-*-darwin*' ):
doc.add( 'LIBHB.YASM.f', 'macho32' )
else:
doc.add( 'LIBHB.YASM.f', 'elf32' )
doc.add( 'LIBHB.YASM.m', 'x86' )
elif build.match( 'x86_64-*' ):
asm = 'x86'
doc.add( 'LIBHB.GCC.D', 'HAVE_MMX ARCH_X86_64', append=True )
if build.match( '*-*-darwin*' ):
doc.add( 'LIBHB.YASM.D', 'ARCH_X86_64 PIC', append=True )
doc.add( 'LIBHB.YASM.f', 'macho64' )
else:
doc.add( 'LIBHB.YASM.D', 'ARCH_X86_64', append=True )
doc.add( 'LIBHB.YASM.f', 'elf64' )
doc.add( 'LIBHB.YASM.m', 'amd64' )
doc.update( 'FEATURE.asm', asm )
## add exports to make
if len(exports):
doc.addBlank()
doc.addComment( 'overrides via VARIABLE=VALUE on command-line' )
for nv in exports:
doc.add( nv[0], nv[1] )
doc.addMake( '' )
doc.addMake( '## include custom definitions' )
doc.addMake( '-include $(SRC/)custom.defs' )
doc.addMake( '-include $(BUILD/)GNUmakefile.custom.defs' )
doc.addMake( '' )
doc.addMake( '## include rules' )
doc.addMake( 'include $(SRC/)make/include/main.rules' )
doc.addMake( '-include $(SRC/)custom.rules' )
doc.addMake( '-include $(BUILD/)GNUmakefile.custom.rules' )
## chdir
cfg.chdir()
## perform
doc.write( 'make' )
doc.write( 'm4' )
if options.launch:
Launcher( targets )
cfg.record_log()
if os.path.normpath( cfg.build_dir ) == os.curdir:
nocd = True
else:
nocd = False
stdout.write( '%s\n' % ('-' * 79) )
if options.launch:
stdout.write( 'Build is finished!\n' )
if nocd:
stdout.write( 'You may now examine the output.\n' )
else:
stdout.write( 'You may now cd into %s and examine the output.\n' % (cfg.build_dir) )
else:
stdout.write( 'Build is configured!\n' )
if nocd:
stdout.write( 'You may now run make (%s).\n' % (Tools.gmake.pathname) )
else:
stdout.write( 'You may now cd into %s and run make (%s).\n' % (cfg.build_dir,Tools.gmake.pathname) )
except AbortError, x:
stderr.write( 'ERROR: %s\n' % (x) )
try:
cfg.record_log()
except:
pass
sys.exit( 1 )
sys.exit( 0 )
|
"""
This module contains the BlockNotebook class.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from blockstreeview import BlocksTreeView
from harpia.system import System as System
class BlockNotebook(Gtk.Notebook):
"""
    This class contains methods related to the BlockNotebook class.
"""
def __init__(self, main_window):
"""
This method is the constructor.
Parameters:
* **main_window** (:class:`MainWindow<harpia.GUI.mainwindow>`)
"""
Gtk.Notebook.__init__(self)
self.tabs = []
self.main_window = main_window
self.set_scrollable(True)
self.update()
# ----------------------------------------------------------------------
    def update(self):
        languages = []
        while self.get_n_pages() > 0:
            self.remove_page(0)
        # keep self.tabs in sync with the pages, so search() sees every tree view
        self.tabs = []
        System()
        for x in System.plugins:
            instance = System.plugins[x]
            name = instance.language
            name += "/" + instance.framework
            if name in languages:
                continue
            languages.append(name)
        for language in languages:
            treeview = BlocksTreeView(self.main_window, language)
            self.tabs.append(treeview)
            self.append_page(treeview, Gtk.Label(language))
        self.show_all()
# ----------------------------------------------------------------------
    def search(self, query):
        """
        This method searches every tab's blocks tree view for a plugin
        matching the query.
        """
for blocks_tree_view in self.tabs:
blocks_tree_view.search(query)
# ----------------------------------------------------------------------
def get_selected_block(self):
current_tab = None
if self.get_current_page() > -1:
current_tab = self.get_nth_page(self.get_current_page())
if current_tab is None:
return None
return current_tab.get_selected_block()
|
"""empty message
Revision ID: 425d457e8385
Revises: 4bbc6d803aef
Create Date: 2016-06-02 14:05:46.102059
"""
revision = '425d457e8385'
down_revision = '4bbc6d803aef'
from alembic import op
import sqlalchemy as sa
from datetime import date
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Date
def upgrade():
# Create an ad-hoc table to use for the insert statement.
equipment_type = table(
'equipment_type',
column('id', Integer),
column('name', String),
column('code', String)
)
norm_type_table = table(
'norm_type',
column('id', Integer),
column('name', String),
)
norm_table = table(
'norm',
column('id', Integer),
column('norm_type_id', String),
column('name', String),
column('code', String)
)
parameter_table = table(
'norm_parameter',
column('id', Integer),
column('norm_id', Integer),
column('name', String),
)
# parameter_value_table = table(
# 'norm_parameter_value',
# column('id', Integer),
# column('param_id', Integer),
# column('equipment_type_id', Integer),
# column('value_type', String),
# column('value', String)
# )
op.create_table(
'equipment_type',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('code', sa.VARCHAR(length=50), nullable=False),
sa.Column('name', sa.VARCHAR(length=255), nullable=False),
)
op.create_table(
'norm_type',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('name', sa.VARCHAR(length=255), nullable=False),
)
op.create_table(
'norm',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('norm_type_id', sa.VARCHAR(length=50), nullable=False),
sa.Column('code', sa.VARCHAR(length=50), nullable=False),
sa.Column('name', sa.VARCHAR(length=255), nullable=False),
)
op.bulk_insert(
equipment_type, [
{'id': 1, 'name': 'Air circuit breaker', 'code': 'A'},
{'id': 2, 'name': 'Bushing', 'code': 'B'},
{'id': 3, 'name': 'Capacitor', 'code': 'C'},
{'id': 4, 'name': 'Breaker', 'code': 'D'},
{'id': 5, 'name': 'Power Source', 'code': 'E'},
{'id': 6, 'name': 'Cable', 'code': 'G'},
{'id': 7, 'name': 'Switchgear', 'code': 'H'},
{'id': 8, 'name': 'Induction machine', 'code': 'I'},
{'id': 9, 'name': 'Synchronous machine', 'code': 'J'},
{'id': 10, 'name': 'Localization', 'code': 'L'},
{'id': 11, 'name': 'Tap changer', 'code': 'P'},
{'id': 12, 'name': 'Rectifier', 'code': 'R'},
{'id': 13, 'name': 'Site', 'code': 'S'},
{'id': 14, 'name': 'Transformer', 'code': 'T'},
{'id': 15, 'name': 'Tank', 'code': 'Y'},
{'id': 16, 'name': 'Switch', 'code': 'Z'},
])
    # norm_type has no 'code' column, so only id/name are inserted
    op.bulk_insert(
        norm_type_table, [
            {'id': 1, 'name': 'Isolation'},
            {'id': 2, 'name': 'Physical'},
            {'id': 3, 'name': 'Furann'},
            {'id': 4, 'name': 'Gas'},
        ]
    )
    isolation = []  # ids 1-80 are named after their temperature in celsius
    for i in range(1, 81):
        isolation.append({'id': i, 'name': "%s" % i, 'norm_type_id': 1})
    # one extra "0" norm after the loop; i is 80 here, so it gets id 81
    isolation.append({'id': i + 1, 'name': "0", 'norm_type_id': 1})
op.bulk_insert(
norm_table, isolation
)
op.bulk_insert(
norm_table, [
{'id': 82, 'name': 'DEFAULT D', 'norm_type_id': 2},
{'id': 83, 'name': 'DEFAULT P', 'norm_type_id': 2},
{'id': 84, 'name': 'DEFAULT-H T', 'norm_type_id': 2},
{'id': 85, 'name': 'DEFAULT-R T', 'norm_type_id': 2},
{'id': 86, 'name': 'DEFAULT-S T', 'norm_type_id': 2},
{'id': 87, 'name': 'NONE/AUCUN', 'norm_type_id': 2},
{'id': 88, 'name': 'NONE/AUCUN-R', 'norm_type_id': 2},
{'id': 89, 'name': 'NONE/AUCUN-S', 'norm_type_id': 2},
{'id': 90, 'name': 'S.D.MYERS', 'norm_type_id': 2},
# furann norms
{'id': 91, 'name': 'DOBLE', 'norm_type_id': 3},
{'id': 92, 'name': 'NONE/AUCUN', 'norm_type_id': 3},
# gas norms
{'id': 93, 'name': 'C57104', 'norm_type_id': 4},
{'id': 94, 'name': 'C57104-R', 'norm_type_id': 4},
{'id': 95, 'name': 'CNONE/AUCUN', 'norm_type_id': 4},
],
)
op.bulk_insert(
parameter_table, [
{'id': 1, 'name': 'Celcius'},
{'id': 2, 'name': 'Fahrenheit'},
{'id': 3, 'name': 'NotSeal'},
{'id': 4, 'name': 'Seal'},
{'id': 5, 'name': 'name'},
{'id': 6, 'name': 'Acid Min'},
{'id': 7, 'name': 'Acid Max'},
{'id': 8, 'name': 'IFT Min'},
{'id': 9, 'name': 'IFT Max'},
{'id': 10, 'name': 'D1816 Min'},
{'id': 11, 'name': 'D1816 Max'},
{'id': 12, 'name': 'D877 Min'},
{'id': 13, 'name': 'D877 Max'},
{'id': 14, 'name': 'Color Min'},
{'id': 15, 'name': 'Color Max'},
{'id': 16, 'name': 'Density Min'},
{'id': 17, 'name': 'Density Max'},
{'id': 18, 'name': 'PF20 Min'},
{'id': 19, 'name': 'PF20 Max'},
{'id': 20, 'name': 'Water Min'},
{'id': 21, 'name': 'Water Max'},
{'id': 22, 'name': 'FlashPoint Min'},
{'id': 23, 'name': 'FlashPoint Max'},
{'id': 24, 'name': 'PourPoint Min'},
{'id': 25, 'name': 'PourPoint Max'},
{'id': 26, 'name': 'Viscosity Min'},
{'id': 27, 'name': 'Viscosity Max'},
{'id': 28, 'name': 'D1816_2 MIN'},
{'id': 29, 'name': 'D1816_2 MAX'},
{'id': 30, 'name': 'P100 MIN'},
{'id': 31, 'name': 'P100 MAX'},
{'id': 32, 'name': 'FluidType'},
{'id': 33, 'name': 'CEI156 Min'},
{'id': 34, 'name': 'CEI156 Max'},
{'id': 35, 'name': 'name'},
{'id': 36, 'name': 'C1'},
{'id': 37, 'name': 'C2'},
{'id': 38, 'name': 'C3'},
{'id': 39, 'name': 'C4'},
{'id': 40, 'name': 'name'},
{'id': 41, 'name': 'H2'},
{'id': 42, 'name': 'C2H2'},
{'id': 43, 'name': 'C2H4'},
{'id': 44, 'name': 'C2H6'},
{'id': 45, 'name': 'CO'},
{'id': 46, 'name': 'CO2'},
{'id': 47, 'name': 'TDCG'},
{'id': 48, 'name': 'H2'},
{'id': 49, 'name': 'CH4'},
# Level. Any gas above listed level indicate this condition
{'id': 50, 'name': 'Level'},
# FluidType. A different sets of levels exist for different type of insulating fluid
{'id': 51, 'name': 'Fluid Type'},
{'id': 52, 'name': 'name'}
],
)
def downgrade():
op.execute(sql='DROP TABLE norm CASCADE;')
op.execute(sql='DROP TABLE norm_type CASCADE;')
op.execute(sql='DROP TABLE equipment_type CASCADE;')
# op.execute(sql='TRUNCATE TABLE norm_parameter CASCADE;')
|
__all__ = ["cpv_expand"]
from portage.exception import AmbiguousPackageName
from portage.localization import _
from portage.util import writemsg
from portage.versions import _pkgsplit
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
"""Given a string (packagename or virtual) expand it into a valid
cat/package string. Virtuals use the mydb to determine which provided
virtual is a valid choice and defaults to the first element when there
are no installed/available candidates."""
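	# Illustration (hypothetical db): cpv_expand("bash-4.2", mydb=portdb)
	# searches portdb.categories and returns e.g. "app-shells/bash-4.2",
	# while a name matching no category falls back to "null/bash-4.2".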
myslash=mycpv.split("/")
mysplit = _pkgsplit(myslash[-1])
if settings is None:
settings = globals()["settings"]
virts = settings.getvirtuals()
virts_p = settings.get_virts_p()
if len(myslash)>2:
# this is illegal case.
mysplit=[]
mykey=mycpv
elif len(myslash)==2:
if mysplit:
mykey=myslash[0]+"/"+mysplit[0]
else:
mykey=mycpv
if mydb and virts and mykey in virts:
writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
if hasattr(mydb, "cp_list"):
if not mydb.cp_list(mykey, use_cache=use_cache):
writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
mykey_orig = mykey[:]
for vkey in virts[mykey]:
# The virtuals file can contain a versioned atom, so
# it may be necessary to remove the operator and
# version from the atom before it is passed into
# dbapi.cp_list().
if mydb.cp_list(vkey.cp):
mykey = str(vkey)
writemsg(_("virts chosen: %s\n") % (mykey), 1)
break
if mykey == mykey_orig:
mykey = str(virts[mykey][0])
writemsg(_("virts defaulted: %s\n") % (mykey), 1)
#we only perform virtual expansion if we are passed a dbapi
else:
#specific cpv, no category, ie. "foo-1.0"
if mysplit:
myp=mysplit[0]
else:
# "foo" ?
myp=mycpv
mykey=None
matches=[]
if mydb and hasattr(mydb, "categories"):
for x in mydb.categories:
if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
matches.append(x+"/"+myp)
if len(matches) > 1:
virtual_name_collision = False
if len(matches) == 2:
for x in matches:
if not x.startswith("virtual/"):
# Assume that the non-virtual is desired. This helps
# avoid the ValueError for invalid deps that come from
# installed packages (during reverse blocker detection,
# for example).
mykey = x
else:
virtual_name_collision = True
if not virtual_name_collision:
# AmbiguousPackageName inherits from ValueError,
# for backward compatibility with calling code
# that already handles ValueError.
raise AmbiguousPackageName(matches)
elif matches:
mykey=matches[0]
if not mykey and not isinstance(mydb, list):
if myp in virts_p:
mykey=virts_p[myp][0]
#again, we only perform virtual expansion if we have a dbapi (not a list)
if not mykey:
mykey="null/"+myp
if mysplit:
if mysplit[2]=="r0":
return mykey+"-"+mysplit[1]
else:
return mykey+"-"+mysplit[1]+"-"+mysplit[2]
else:
return mykey
|
"""unused import"""
import xml.etree # [unused-import]
import xml.sax # [unused-import]
import os.path as test # [unused-import]
from sys import argv as test2 # [unused-import]
from sys import flags # [unused-import]
from collections import deque, OrderedDict, Counter
DATA = Counter()
from fake import SomeName, SomeOtherName # [unused-import]
class SomeClass(object):
SomeName = SomeName # https://bitbucket.org/logilab/pylint/issue/475
SomeOtherName = 1
SomeOtherName = SomeOtherName
from never import __all__
import typing
from typing import TYPE_CHECKING
if typing.TYPE_CHECKING:
import collections
if TYPE_CHECKING:
import itertools
def get_ordered_dict() -> 'collections.OrderedDict':
return []
def get_itertools_obj() -> 'itertools.count':
return []
|
from math import *
import copy
class Heap(object):
    """Max binary heap implementation."""
    def __init__(self, *arg):
        super(Heap, self).__init__()
        self.__array = list(arg)
        self.length = len(arg)
        # the height of a binary heap is floor(log2(n)); guard the empty case
        self.height = int(log(self.length, 2)) if self.length else 0
        self.__build_heap()
@classmethod
def make_heap(cls, li):
return Heap(*li)
    def __build_heap(self):
        """
        Note: the complexity is O(n), not O(n lg n): the work per node is
        bounded by its height, and the sum of heights over all nodes of a
        complete binary tree is O(n).
        """
        for x in reversed(xrange(0, self.length / 2)):
            self.loop_heapify(x)
    def heapify(self, parent):
        """
        Basic idea: starting from one element, if it violates the max-heap
        property, promote the larger child into the parent slot. This is a
        recursive process that only exits once the max-heap property holds or
        a leaf of the heap is reached.
        Note: heapify assumes both subtrees of parent are already max heaps;
        keep that precondition in mind when calling it.
        """
        largest = parent
        left = parent * 2 + 1
        right = parent * 2 + 2
        # compare left and right against length so we never index past the
        # end of the array once we reach the leaves
        if left < self.length and self.__array[parent] < self.__array[left]:
            largest = left
        if right < self.length and self.__array[largest] < self.__array[right]:
            largest = right
        # avoid moving elements when the parent already holds the largest value
        if largest != parent:
            self.__array[largest], self.__array[parent] = self.__array[parent], self.__array[largest]
            self.heapify(largest)
    def loop_heapify(self, parent):
        """
        Complexity: O(lg n).
        "while True ... break" is a convenient way to turn the recursion into
        a loop: the while itself tests nothing, every check lives at the
        break, which avoids packing a complex condition into the loop header.
        """
        while True:
            largest = parent
            left = parent * 2 + 1
            right = parent * 2 + 2
            # compare left and right against length so we never index past
            # the end of the array once we reach the leaves
            if left < self.length and self.__array[parent] < self.__array[left]:
                largest = left
            if right < self.length and self.__array[largest] < self.__array[right]:
                largest = right
            # avoid moving elements when the parent already holds the largest value
            if largest != parent:
                self.__array[largest], self.__array[parent] = self.__array[parent], self.__array[largest]
                parent = largest
            else:
                break
def __left(self, parent):
return parent * 2 + 1
def __right(self, parent):
return parent * 2 + 2
def __wide_walk_through(self, func, start=0):
for x in xrange(start, self.length):
func(self.__array[x])
    def __deep_walk_through(self, func, start=0):
        # stop at the leaves; visit the element itself, then both subtrees
        if start >= self.length:
            return
        func(self.__array[start])
        left = start * 2 + 1
        right = start * 2 + 2
        self.__deep_walk_through(func, left)
        self.__deep_walk_through(func, right)
def __str__(self):
title = "Heap Length: %s\n" % self.length
content_list = [title]
self.__wide_walk_through(lambda s: content_list.append(str(s)))
return '\t'.join(content_list)
def __copy__(self):
newone = type(self)(*self.__array)
newone.__dict__.update(self.__dict__)
return newone
    def __deepcopy__(self, memo=None):
newone = type(self)(*self.__array)
newone.__dict__.update(self.__dict__)
for x in self.__dict__:
newone.__dict__[x] = copy.deepcopy(self.__dict__[x])
return newone
def main():
heap = Heap(16, 4, 10, 14, 7, 9, 3, 2, 8, 1)
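    # after __build_heap the backing array is expected to be
    # [16, 14, 10, 8, 7, 9, 3, 2, 4, 1] (a valid max heap)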
print heap
new_heap = copy.copy(heap)
print new_heap
if __name__ == '__main__':
main()
|
'''
Created on 27 Feb 2014
@author: Éric Piel
Copyright © 2014 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import csv
import logging
import numpy
from odemis import model
from odemis.util import spectrum, img, find_closest, almost_equal
def get_ar_data(das):
"""
Finds the DataArray that contains the Angular Resolved data for calibration
(typically, a "background" image, ie, an image taken without ebeam).
:param das: (list of DataArrays): all the DA into which to look for.
:return: (DataArray): if polarimetry, return list of bg DAs, else the first DA that seems good.
:raises: LookupError: if no DA can be found
"""
# TODO: also allow to pass an expected resolution, in order to support
# a calibration file with multiple calibrations resolution?
# expect the worst: multiple AR data (per polarization mode)
ar_data = {} # pol-mode -> list of DAs
for da in das:
# AR data is simple to distinguish: it has a AR_POLE metadata
if model.MD_AR_POLE in da.metadata:
polmode = da.metadata.get(model.MD_POL_MODE)
ar_data.setdefault(polmode, []).append(da)
if not ar_data:
raise LookupError("Failed to find any AR data within the %d data acquisitions" %
(len(das)))
# For a polarization mode, if there is more than one data, average them.
# They all should be very similar, so this just reduces the noise.
    # Note: this is typical, as often after an AR acquisition the user re-acquires
    # the same area with the same settings, except that the e-beam is blanked. So we
    # end up with MxN AR background data.
for polmode, ldas in ar_data.items():
if len(ldas) > 1:
# Just a check that all AR is the same shape (and if not, just pick
# the ones which are the same as the first found).
ar_same_shape = [da for da in ldas if da.shape == ldas[0].shape]
logging.warning("AR calibration file contained %d AR data, "
"will use the average of %d", len(ldas), len(ar_same_shape))
ar_mean = numpy.mean(ar_same_shape, axis=0)
ar_data[polmode] = [model.DataArray(ar_mean, ar_same_shape[0].metadata)]
# Merge all the data together as a single
ar_data = list(ldas[0] for ldas in ar_data.values())
if len(ar_data) == 1:
ar_data = ar_data[0]
return ar_data
def get_spectrum_data(das):
"""
Finds the DataArray that contains a spectrum data.
(typically, a "background" image, ie, an image taken without ebeam).
:param das: (list of DataArrays): all the DA into which to look for.
:return: (DataArray of shape C1111): The first data array that seems good, if e.g. multiple
streams were acquired. If only one bg stream, but multiple ebeam positions, returns the
averaged image from all ebeam positions (reduces noise).
:raises: LookupError: If no DA can be found.
"""
# TODO: also allow to pass an expected resolution/wavelength polynomial, in
# order to support a calibration file with multiple calibrations resolution?
# TODO: also allow to pass a file with both a efficiency correction data and
# various background data without mixing them up (maybe rely on the fact
# it should have an acquisition date or position?)
    # expect the worst: multiple spectrum data
specs = []
for da in das:
# What we are looking for is very specific: has MD_WL_LIST and has C > 1.
# Actually, we even check for C > 3 (as a spectrum with less than 4
# points would be very weird).
if (model.MD_WL_LIST in da.metadata and len(da.shape) == 5
and da.shape[-5] > 4 and da.shape[-4:] == (1, 1, 1, 1)):
specs.append(da)
if not specs:
# be more flexible, and allow X/Y shape > 1, which permits to directly
# use multiple acquisitions and average them to remove the noise
for da in das:
if model.MD_WL_LIST in da.metadata and len(da.shape) == 5 and da.shape[-5] > 4:
# take the average for each wavelength (accumulated with a float64)
dam = da.reshape((da.shape[0], -1)).mean(axis=1) # TODO replace with da.mean(axis=(1, 2, 3, 4))?
dam = dam.astype(da.dtype) # put back into original dtype
dam.shape += (1, 1, 1, 1)
specs.append(dam)
if not specs:
raise LookupError("Failed to find any Spectrum data within the %d data acquisitions" %
(len(das)))
elif len(specs) == 1:
return specs[0]
else:
# look for the first one (in terms of time), hoping that it's the one
# the user expects to be representing the background
logging.warning("Spectrum file contained %d spectrum data, "
"will pick the earliest acquired", len(das))
earliest = min(specs,
key=lambda d: d.metadata.get(model.MD_ACQ_DATE, float("inf")))
return earliest
def get_temporalspectrum_data(das):
"""
Finds the DataArray that contains temporal spectrum data.
(typically, a "background" image, ie, an image taken with a blanket ebeam).
:param das: (list of DataArrays): All the data arrays into which to look for a bg image.
:return: (DataArray of shape CT111): The first data array that seems good, if e.g. multiple
streams were acquired. If only one bg stream, but multiple ebeam positions, returns the
averaged image from all ebeam positions (reduces noise).
:raises: LookupError: If no suitable background data array can be found.
"""
    # expect the worst: multiple temporal spectrum data (e.g. multiple streams acquired in one bg file)
temporalspecs = []
for da in das:
# What we are looking for is very specific:
# Need to have C >1 and T > 1.
# Actually, we even check for C > 4 and T > 4
# (as a spectrum and time info with less than 4 points would be very weird).
# Note: We check for same shape, streak mode (MD_STREAK_MODE), time range (MD_STREAK_TIMERANGE),
# wavelength (MD_WL_LIST) and time (MD_TIME_LIST) info etc. in bg setter.
if len(da.shape) == 5 and da.shape[-5] > 4 and da.shape[-4] > 4 and da.shape[-3:] == (1, 1, 1):
temporalspecs.append(da)
if not temporalspecs:
# Be more flexible, and allow X/Y shape > 1 (multiple ebeam positions), which permits to directly
# use multiple acquisitions and average them to remove the noise.
for da in das:
if len(da.shape) == 5 and da.shape[-5] > 4 and da.shape[-4] > 4:
# take the average image of all ebeam pos (accumulated with a float64)
da_mean = da.mean(axis=(2, 3, 4))
da_mean = da_mean.astype(da.dtype) # put back into original dtype
da_mean.shape += (1, 1, 1)
temporalspecs.append(da_mean)
if not temporalspecs:
raise LookupError("Failed to find any temporal spectrum data within the %d data acquisitions" %
(len(das)))
elif len(temporalspecs) == 1:
return temporalspecs[0]
else:
# look for the first one (in terms of time), hoping that it's the one
# the user expects to be representing the background
logging.warning("Temporal spectrum file contained %d temporal spectrum data, "
"will pick the earliest acquired", len(das))
earliest = min(temporalspecs,
key=lambda d: d.metadata.get(model.MD_ACQ_DATE, float("inf")))
return earliest
def get_spectrum_efficiency(das):
"""
Finds the DataArray that contains the spectrum efficiency compensation data
(a "spectrum" which contains factors for each given wavelength).
:param das: (list of DataArrays): all the DA into which to look for.
:return: (DataArray of shape C1111): the first DA that seems good.
:raises: LookupError: if no DA can be found
"""
    # expect the worst: multiple DAs available
specs = []
for da in das:
# What we are looking for is very specific: has MD_WL_LIST and has C > 1.
# Actually, we even check for C > 3 (as a spectrum with less than 4
# points would be very weird).
if (model.MD_WL_LIST in da.metadata and len(da.shape) == 5
and da.shape[-5] > 4 and da.shape[-4:] == (1, 1, 1, 1)):
specs.append(da)
if not specs:
raise LookupError("Failed to find any spectrum efficiency correction "
"data within the %d data acquisitions" %
(len(das)))
elif len(specs) == 1:
ret = specs[0]
else:
logging.warning("Spectrum efficiency file contained %d spectrum data, "
"will pick one randomly.", len(das))
ret = specs[0]
# do a few more checks on the data: at least it should be float > 0
if not ret.dtype.kind == "f":
logging.warning("Spectrum efficiency correction data is not of type float, "
"but %s.", ret.dtype.name)
if numpy.any(ret < 0):
logging.warning("Spectrum efficiency correction data contains "
"non-positive values.")
# wavelength should be in meter, so detect wavelength recorded in nm
if any(wl > 50e-6 for wl in ret.metadata[model.MD_WL_LIST]):
raise ValueError("Spectrum efficiency correction data very large "
"wavelength, probably not in meters.")
return ret
def apply_spectrum_corrections(data, bckg=None, coef=None):
"""
Apply the background correction and the spectrum efficiency compensation
factors to the given data if applicable.
If the wavelength of the calibration doesn't cover the whole data wavelength,
the missing wavelength is filled by the same value as the border. Wavelength
in-between points is linearly interpolated.
:param data: (DataArray of at least 5 dims) The original data.
Spectrum data can be of two types:
- mirror (no wl info)
- grating (wl info)
Temporal spectrum data can be of four types:
- mirror + focus mode (no wl and time info)
- mirror + operate mode (no wl but time info)
- grating + focus mode (wl but no time info)
- grating + operate mode (wl and time info)
Chronograph data can be only of one type, with no wl but time info. So far no bg correction is
supported for chronograph data. Spectrum efficiency correction do not apply for this type of data.
:param bckg: (None or DataArray of at least 5 dims) The background data, with
CTZYX = C1111 (spectrum), CTZYX = CT111 (temporal spectrum) or CTZYX = 1T111 (time correlator).
:param coef: (None or DataArray of at least 5 dims) The coefficient data, with CTZXY = C1111.
:returns: (DataArray) Same shape as original data. Can have dtype=float.
"""
# handle time correlator data (chronograph) data
# -> no spectrum efficiency compensation and bg correction supported
if data.shape[-5] <= 1 and data.shape[-4] > 1:
raise ValueError("Do not support any background correction or spectrum efficiency "
"compensation for time correlator (chronograph) data")
# TODO: use MD_BASELINE as a fallback?
if bckg is not None:
# Check that the bg matches the data.
# TODO: support if the data is binned?
if data.shape[0:2] != bckg.shape[0:2]:
raise ValueError("Background should have the same shape as the data, but got %s != %s" %
(bckg.shape[0:2], data.shape[0:2]))
# If temporal spectrum data, check for time range and streak mode.
if model.MD_STREAK_MODE in data.metadata.keys():
# Check that the data and the bg image were acquired with the same streak mode.
if data.metadata[model.MD_STREAK_MODE] != bckg.metadata[model.MD_STREAK_MODE]:
raise ValueError("Background should have the same streak mode as the data, but got %d != %d" %
(bckg.metadata[model.MD_STREAK_MODE], data.metadata[model.MD_STREAK_MODE]))
# Check that the time range of the data matches with the bg image.
if data.metadata[model.MD_STREAK_TIMERANGE] != bckg.metadata[model.MD_STREAK_TIMERANGE]:
raise ValueError("Background should have the same time range as the data, but got %s != %s" %
(bckg.metadata[model.MD_STREAK_TIMERANGE], data.metadata[model.MD_STREAK_TIMERANGE]))
# Check if we have any wavelength information.
if model.MD_WL_LIST not in data.metadata:
# temporal spectrum data, but acquired in mirror mode (with/without time info)
# spectrum data, but acquired in mirror mode
# check that bg data also doesn't contain wl info
if model.MD_WL_LIST in bckg.metadata:
raise ValueError("Found MD_WL_LIST metadata in background image, but "
"data does not provide any wavelength information")
data = img.Subtract(data, bckg)
else:
# temporal spectrum with wl info (with/without time info)
# spectrum data with wl info
# Need to get the calibration data for each wavelength of the data
wl_data = spectrum.get_wavelength_per_pixel(data)
# Check that bg data also contains wl info.
try:
wl_bckg = spectrum.get_wavelength_per_pixel(bckg)
except KeyError:
raise ValueError("Found no spectrum metadata (MD_WL_LIST) in the background image.")
# Warn if not the same wavelength
if not numpy.allclose(wl_bckg, wl_data):
logging.warning("Spectrum background is between %g->%g nm, "
"while the spectrum is between %g->%g nm.",
wl_bckg[0] * 1e9, wl_bckg[-1] * 1e9,
wl_data[0] * 1e9, wl_data[-1] * 1e9)
data = img.Subtract(data, bckg)
if coef is not None:
# Check if we have any wavelength information in data.
if model.MD_WL_LIST not in data.metadata:
raise ValueError("Cannot apply spectrum correction as "
"data does not provide any wavelength information.")
if coef.shape[1:] != (1, 1, 1, 1):
raise ValueError("Spectrum efficiency compensation should have shape C1111.")
# Need to get the calibration data for each wavelength of the data
wl_data = spectrum.get_wavelength_per_pixel(data)
wl_coef = spectrum.get_wavelength_per_pixel(coef)
# Warn if the calibration is not enough for the data
if wl_coef[0] > wl_data[0] or wl_coef[-1] < wl_data[-1]:
logging.warning("Spectrum efficiency compensation is only between "
"%g->%g nm, while the spectrum is between %g->%g nm.",
wl_coef[0] * 1e9, wl_coef[-1] * 1e9,
wl_data[0] * 1e9, wl_data[-1] * 1e9)
# Interpolate the calibration data for each wl_data
calib_fitted = numpy.interp(wl_data, wl_coef, coef[:, 0, 0, 0, 0])
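        # note: outside the calibrated range numpy.interp clamps to the first/last
        # coefficient, which provides the border-fill behaviour described in the
        # docstring above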
calib_fitted.shape += (1, 1, 1, 1) # put TZYX dims
# Compensate the data
data = data * calib_fitted # will keep metadata from data
return data
def write_trigger_delay_csv(filename, trig_delays):
"""
Store the MD_TIME_RANGE_TO_DELAY into a CSV file
filename (str): the path to file (if it already exists, it will be overwritten)
trig_delays (dict float -> float): time range to trigger delay info
"""
with open(filename, 'w', newline='') as csvfile:
calibFile = csv.writer(csvfile, delimiter=':')
for time_range, trig_delay in trig_delays.items():
calibFile.writerow([time_range, trig_delay])
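# Illustrative file content (values made up): one "timeRange:triggerDelay"
# pair per line, ':' separated, e.g.
#   1e-09:3.2e-07
#   2e-09:4.1e-07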
def read_trigger_delay_csv(filename, time_choices, trigger_delay_range):
"""
Read the MD_TIME_RANGE_TO_DELAY from a CSV file, and check its validity based on the hardware
filename (str): the path to file
time_choices (set): choices possible for timeRange VA
trigger_delay_range (float, float): min/max value of the trigger delay
return (dict float -> float): new dictionary containing the loaded time range to trigger delay info
raise ValueError: if the data of the CSV file cannot be parsed or doesn't fit the hardware
raise IOError: if the file doesn't exist
"""
tr2d = {}
with open(filename, 'r', newline='') as csvfile:
calibFile = csv.reader(csvfile, delimiter=':')
for time_range, delay in calibFile:
try:
time_range = float(time_range)
delay = float(delay)
except ValueError:
raise ValueError("Trigger delay %s and/or time range %s is not of type float. "
"Please check calibration file for trigger delay." % (delay, time_range))
# check delay in range allowed
if not trigger_delay_range[0] <= delay <= trigger_delay_range[1]:
raise ValueError("Trigger delay %s corresponding to time range %s is not in range %s. "
"Please check the calibration file for the trigger delay." %
(delay, time_range, trigger_delay_range))
# check timeRange is in possible choices for timeRange on HW
time_range_hw = find_closest(time_range, time_choices)
if not almost_equal(time_range, time_range_hw):
raise ValueError("Time range % s found in calibration file is not a possible choice "
"for the time range of the streak unit. "
"Please modify CSV file so it fits the possible choices for the "
"time range of the streak unit. "
"Values in file must be of format timeRange:triggerDelay (per line)."
% time_range)
tr2d[time_range_hw] = delay
# check all time ranges are there
if len(tr2d) != len(time_choices):
raise ValueError("The total number of %s time ranges in the loaded calibration file does not "
"match the requested number of %s time ranges."
% (len(tr2d), len(time_choices)))
return tr2d
|
"""
Copyright (C) 2013, Digium, Inc.
Kevin Harwell <kharwell@digium.com>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
"""
URL = 'deviceStates'
DEVICE = 'Stasis:Test'
INITIAL_STATE = 'NOT_INUSE'
CHANGED_STATE = 'INUSE'
def on_start(ari, event, obj):
# add a device state
ari.put(URL, DEVICE, deviceState=INITIAL_STATE)
# subscribe to device
ari.post("applications", "testsuite", "subscription",
eventSource="deviceState:%s" % DEVICE)
    # subscribe to device a second time (duplicate subscription)
ari.post("applications", "testsuite", "subscription",
eventSource="deviceState:%s" % DEVICE)
# change the device state
ari.put(URL, DEVICE, deviceState=CHANGED_STATE)
# unsubscribe from device
ari.delete("applications", "testsuite", "subscription",
eventSource="deviceState:%s" % DEVICE)
# remove device
ari.delete(URL, DEVICE)
ari.delete('channels', event['channel']['id'])
return True
def on_state_change(ari, event, obj):
assert event['device_state']['name'] == DEVICE
assert event['device_state']['state'] == CHANGED_STATE
obj.stop_reactor()
return True
|
from mutagen.id3 import ID3, Frames, Frames_2_2, TextFrame
class TCMP(TextFrame):
pass
class TSO2(TextFrame):
pass
class TSOC(TextFrame):
pass
class XDOR(TextFrame):
pass
class XSOP(TextFrame):
pass
class CompatID3(ID3):
"""
Additional features over mutagen.id3.ID3:
* iTunes' TCMP frame
* Allow some v2.4 frames also in v2.3
"""
PEDANTIC = False
def __init__(self, *args, **kwargs):
if args:
known_frames = dict(Frames)
known_frames.update(dict(Frames_2_2))
known_frames["TCMP"] = TCMP
known_frames["TSO2"] = TSO2
known_frames["TSOC"] = TSOC
known_frames["XDOR"] = XDOR
known_frames["XSOP"] = XSOP
kwargs["known_frames"] = known_frames
super(CompatID3, self).__init__(*args, **kwargs)
def update_to_v23(self):
        # leave TSOP, TSOA, TSOT and TSST even though they are officially defined
        # only in ID3v2.4, because most applications use them also in ID3v2.3
frames = []
for key in ["TSOP", "TSOA", "TSOT", "TSST"]:
frames.extend(self.getall(key))
super(CompatID3, self).update_to_v23()
for frame in frames:
self.add(frame)
|
from enigma import eConsoleAppContainer
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
class Console(Screen):
#TODO move this to skin.xml
skin = """
<screen position="100,100" size="550,400" title="Command execution..." >
<widget name="text" position="0,0" size="550,400" font="Console;14" />
</screen>"""
def __init__(self, session, title = "Console", cmdlist = None, finishedCallback = None, closeOnSuccess = False):
Screen.__init__(self, session)
self.finishedCallback = finishedCallback
self.closeOnSuccess = closeOnSuccess
self["text"] = ScrollLabel("")
self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
{
"ok": self.cancel,
"back": self.cancel,
"up": self["text"].pageUp,
"down": self["text"].pageDown
}, -1)
self.cmdlist = cmdlist
self.newtitle = title
self.onShown.append(self.updateTitle)
self.container = eConsoleAppContainer()
self.run = 0
self.container.appClosed.append(self.runFinished)
self.container.dataAvail.append(self.dataAvail)
        self.onLayoutFinish.append(self.startRun) # don't start before the GUI is finished
def updateTitle(self):
self.setTitle(self.newtitle)
def startRun(self):
self["text"].setText(_("Execution Progress:") + "\n\n")
print "Console: executing in run", self.run, " the command:", self.cmdlist[self.run]
if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
self.runFinished(-1) # so we must call runFinished manual
def runFinished(self, retval):
self.run += 1
if self.run != len(self.cmdlist):
if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
self.runFinished(-1) # so we must call runFinished manual
else:
            lastpage = self["text"].isAtLastPage()
            text = self["text"].getText()
            text += _("Execution finished!!")
            self["text"].setText(text)
            if lastpage:
                self["text"].lastPage()
if self.finishedCallback is not None:
self.finishedCallback()
if not retval and self.closeOnSuccess:
self.cancel()
def cancel(self):
if self.run == len(self.cmdlist):
self.close()
self.container.appClosed.remove(self.runFinished)
self.container.dataAvail.remove(self.dataAvail)
    def dataAvail(self, data):
        lastpage = self["text"].isAtLastPage()
        self["text"].setText(self["text"].getText() + data)
        if lastpage:
            self["text"].lastPage()
|
from glob import glob
import os
from astrometry.util.file import *
import numpy as np
from legacypipe.bits import IN_BLOB
indir = '/global/cscratch1/sd/dstn/dr9.3'
outdir = '/global/cscratch1/sd/dstn/dr9.3.1'
fns = glob(os.path.join(indir, 'checkpoints', '*', 'checkpoint-*.pickle'))
fns.sort()
for fn in fns:
outfn = fn.replace(indir, outdir)
chk = unpickle_from_file(fn)
print(len(chk), fn, '->', outfn)
keep = []
for c in chk:
r = c['result']
if r is None or len(r) == 0:
keep.append(c)
continue
if np.any(r.brightblob & IN_BLOB['BRIGHT']):
#print('Skipping blob with BRIGHT bits')
continue
keep.append(c)
trymakedirs(outfn, dir=True)
pickle_to_file(keep, outfn)
print('Wrote', len(keep), 'of', len(chk), 'to', outfn)
|
from glob import glob
import os
import sys
import tempfile
import logging
import xkit.xutils
import xkit.xorgparser
import Quirks.quirkreader
import Quirks.quirkinfo
class QuirkChecker:
def __init__(self, handler, path='/usr/share/jockey/quirks'):
self._handler = handler
self.quirks_path = path
self._quirks = []
self.get_quirks_from_path()
self._system_info = self.get_system_info()
self._xorg_conf_d_path = '/usr/share/X11/xorg.conf.d'
def get_quirks_from_path(self):
'''check all the files in a directory looking for quirks'''
self._quirks = []
if os.path.isdir(self.quirks_path):
for f in glob(os.path.join(self.quirks_path, '*')):
if os.path.isfile(f):
logging.debug('Parsing %s' % f)
quirks = self.get_quirks_from_file(f)
self._quirks += quirks
else:
logging.debug('%s does not exist' % self.quirks_path)
return self._quirks
def get_quirks_from_file(self, quirk_file):
        '''read a single file and return the quirks it defines'''
# read other blacklist files (which we will not touch, but evaluate)
quirk_file = Quirks.quirkreader.ReadQuirk(quirk_file)
return quirk_file.get_quirks()
def get_system_info(self):
'''Get system info for the quirk'''
quirk_info = Quirks.quirkinfo.QuirkInfo()
return quirk_info.get_dmi_info()
def matches_tags(self, quirk):
'''See if tags match system info'''
result = True
for tag in quirk.match_tags.keys():
for val in quirk.match_tags[tag]:
if (self._system_info.get(tag) and self._system_info.get(tag) != val
and len(quirk.match_tags[tag]) <= 1):
logging.debug('Failure to match %s with %s' %
(self._system_info.get(tag), val))
return False
logging.debug('Success')
return result
def _check_quirks(self, enable=True):
'''Process quirks and do something with them'''
for quirk in self._quirks:
if self._handler.lower() in [x.lower().strip() for x in quirk.handler]:
logging.debug('Processing quirk %s' % quirk.id)
if self.matches_tags(quirk):
# Do something here
if enable:
logging.info('Applying quirk %s' % quirk.id)
self._apply_quirk(quirk)
else:
logging.info('Unapplying quirk %s' % quirk.id)
self._unapply_quirk(quirk)
else:
logging.debug('Quirk doesn\'t match')
def enable_quirks(self):
'''Enable all quirks for a handler'''
self._check_quirks(True)
def disable_quirks(self):
'''Disable all quirks for a handler'''
self._check_quirks(False)
def _get_destination_path(self, quirk):
'''Return the path to the X config file'''
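        # illustrative: handler 'nvidia' with quirk.id 'Some Quirk' yields
        # '/usr/share/X11/xorg.conf.d/10-nvidia-some-quirk.conf'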
return '%s/10-%s-%s.conf' % (self._xorg_conf_d_path,
self._handler, quirk.id.lower().replace(' ', '-'))
def _apply_quirk(self, quirk):
'''Get the xorg snippet and apply it'''
# Get the relevant x_snippet
# Write conf file to /usr/share/X11/xorg.conf.d/file.conf
destination = self._get_destination_path(quirk)
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
tmp_file.write(quirk.x_snippet)
tmp_file.close()
tmp_xkit = xkit.xorgparser.Parser(tmp_file.name)
# TODO: REMOVE THIS
logging.debug(tmp_xkit.globaldict)
os.unlink(tmp_file.name)
try:
logging.debug('Creating %s' % destination)
tmp_xkit.write(destination)
except IOError:
logging.exception('Error during write()')
return False
return True
def _unapply_quirk(self, quirk):
'''Remove the file with the xorg snippet'''
# Get the relevant x_snippet
# Write conf file to /usr/share/X11/xorg.conf.d/file.conf
destination = self._get_destination_path(quirk)
logging.debug('Removing %s ...' % destination)
try:
os.unlink(destination)
except (OSError, IOError):
logging.exception('Cannot unlink destination')
return False
return True
def main():
a = QuirkChecker('nvidia', path='/home/alberto/oem/jockey/quirks')
a.enable_quirks()
a.disable_quirks()
print(os.path.abspath( __file__ ))
#quirk_file = ReadQuirk("quirk_snippet.txt")
#quirks = quirk_file.get_quirks()
#for quirk in quirks:
#print 'Quirk id: "%s"' % quirk.id
#for tag in quirk.match_tags.keys():
#print 'Matching "%s" with value "%s"' % (tag, quirk.match_tags[tag])
#print quirk.x_snippet
#tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
#tmp_file.write(quirk.x_snippet)
#tmp_file.close()
#tmp_xkit = xkit.xorgparser.Parser(tmp_file.name)
#print tmp_xkit.globaldict
#os.unlink(tmp_file.name)
return 0
#main()
|
from virtaal.models import StoreModel
from test_scaffolding import TestScaffolding
class TestStoreModel(TestScaffolding):
def test_load(self):
self.model = StoreModel(self.testfile[1], None) # We can pass "None" as the controller, because it does not have an effect on this test
self.model.load_file(self.testfile[1])
assert len(self.model) <= len(self.trans_store.units)
assert self.model.get_filename() == self.testfile[1]
|
import pytest
class TestDcop:
@pytest.mark.complete("dcop ", require_cmd=True)
def test_1(self, completion):
assert completion
|
__author__ = 'simranjitsingh'
from nltk.probability import FreqDist
from nltk.corpus import stopwords
import nltk.tag, nltk.util, nltk.stem
import re
import math
from decimal import *
import mysql.connector
import sys
import json
import string
from ConfigParser import SafeConfigParser
from nltk.tokenize import WhitespaceTokenizer
from CleanTokenize import CleanAndTokenize
from TextStatistics import TextStatistics
stopword_list = stopwords.words('english')
porter = nltk.PorterStemmer()
word_features = []
vocab_freq = {}
nDocuments = 0
vocab_json_data = open("apidata/vocab_freq.json")
vocab_freq = json.load(vocab_json_data)
document_count_json = open("apidata/document_count.json")
count_read = json.load(document_count_json)
nDocuments = int(count_read["document_count"])
with open("apidata/personal.txt") as f:
personal_words = f.read().splitlines()
parser = SafeConfigParser()
parser.read('apidata/database.ini')
user = parser.get('credentials', 'user')
password = parser.get('credentials', 'password')
host = parser.get('credentials', 'host')
database = parser.get('credentials', 'database')
def NormalizeVector(vector):
length = ComputeVectorLength(vector)
for (k,v) in vector.items():
if length > 0:
vector[k] = v / length
return vector
def ComputeVectorLength(vector):
    length = 0
for d in vector.values():
length += d * d
length = math.sqrt(length)
return length
# Assuming that both vectors are normalized
def ComputeCosineSimilarity(v1, v2):
dotproduct = 0
for (key1,val1) in v1.items():
if key1 in v2:
dotproduct += val1 * v2[key1]
    return dotproduct
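# Illustration: for normalized vectors the dot product is the cosine similarity:
#   v1 = NormalizeVector({'a': 3.0, 'b': 4.0})  # -> {'a': 0.6, 'b': 0.8}
#   v2 = NormalizeVector({'a': 3.0, 'c': 4.0})  # -> {'a': 0.6, 'c': 0.8}
#   ComputeCosineSimilarity(v1, v2)             # -> 0.36 (only 'a' overlaps)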
def escape_string(string):
res = string
res = res.replace('\\','\\\\')
res = res.replace('\n','\\n')
res = res.replace('\r','\\r')
res = res.replace('\047','\134\047') # single quotes
res = res.replace('\042','\134\042') # double quotes
res = res.replace('\032','\134\032') # for Win32
return res
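# e.g. escape_string("it's") returns "it\'s" (backslash-escaped quote), intended
# for embedding between quotes in a MySQL statement string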
def error_name():
exc_type, exc_obj, exc_tb = sys.exc_info()
msg = str(exc_type)
error = re.split(r'[.]',msg)
error = re.findall(r'\w+',error[1])
error_msg = str(error[0])
return error_msg
def ComputeCommentArticleRelevance(comment_text, ID, operation):
    cnx = mysql.connector.connect(user=user, password=password, host=host, database=database)
    cursor = cnx.cursor()
    if operation == 'add':
        articleID = ID
        # parameterized queries avoid SQL injection
        cursor.execute("select full_text from articles where articleID = %s", (articleID,))
        article_data = cursor.fetchall()
    elif operation == 'update':
        commentID = ID
        cursor.execute("select articleID from comments where commentID = %s", (commentID,))
        fetch_data = cursor.fetchall()
        if len(fetch_data) > 0:
            articleID = fetch_data[0][0]
        else:
            cnx.close()
            ArticleRelevance = 0.0
            return ArticleRelevance
        cursor.execute("select full_text from articles where articleID = %s", (articleID,))
        article_data = cursor.fetchall()
    else:
        cnx.close()
        ArticleRelevance = 0.0
        return ArticleRelevance
    cnx.close()
if len(article_data) < 1:
ArticleRelevance = 0.0
return ArticleRelevance
for data in article_data:
article_text = data[0]
comment_text = escape_string(comment_text.strip())
# clean and tokenize the comment text and article text, also exclude the stopwords
token_list = CleanAndTokenize(comment_text)
token_list = [word for word in token_list if word not in stopword_list]
comment_stemmed_tokens = [porter.stem(token) for token in token_list]
comment_stemmed_tokens_fd = FreqDist(comment_stemmed_tokens)
token_list = CleanAndTokenize(article_text)
token_list = [word for word in token_list if word not in stopword_list]
article_stemmed_tokens = [porter.stem(token) for token in token_list]
article_stemmed_tokens_fd = FreqDist(article_stemmed_tokens)
# now create the feature vectors for article and comment
article_features = {}
comment_features = {}
# Calculate weight for each word in the comment with tf-idf
for w in vocab_freq:
df = vocab_freq[w]
log_fraction = (nDocuments / df)
if log_fraction < 1:
log_fraction = Decimal(nDocuments) / Decimal(df)
if w in article_stemmed_tokens:
article_features[w] = article_stemmed_tokens_fd[w] * math.log(log_fraction)
else:
article_features[w] = 0.0
if w in comment_stemmed_tokens:
comment_features[w] = comment_stemmed_tokens_fd[w] * math.log(log_fraction)
else:
comment_features[w] = 0.0
# normalize vectors
article_features = NormalizeVector(article_features)
comment_features = NormalizeVector(comment_features)
    comment_article_similarity = ComputeCosineSimilarity(article_features, comment_features)
return comment_article_similarity
def ComputeCommentConversationalRelevance(comment_text, ID, operation):
    cnx = mysql.connector.connect(user=user, password=password, host=host, database=database)
    cursor = cnx.cursor()
    if operation == 'add':
        articleID = ID
        cursor.execute("select commentBody from comments where articleID = %s", (articleID,))
        comment_data = cursor.fetchall()
    elif operation == 'update':
        commentID = ID
        cursor.execute("select articleID from comments where commentID = %s", (commentID,))
        fetch_data = cursor.fetchall()
        if len(fetch_data) > 0:
            articleID = fetch_data[0][0]
        else:
            cnx.close()
            ConversationalRelevance = 0.0
            return ConversationalRelevance
        cursor.execute("select commentBody from comments "
                       "where articleID = %s and commentID < %s", (articleID, commentID))
        comment_data = cursor.fetchall()
    else:
        cnx.close()
        ConversationalRelevance = 0.0
        return ConversationalRelevance
    cnx.close()
if len(comment_data) < 2:
ConversationalRelevance = 0.0
return ConversationalRelevance
centroid_comment_stemmed_tokens = []
centroid_comment_features = {}
# clean and tokenize the all the comments text and also exclude the stopwords
    comment_list = [row[0] for row in comment_data]
for comment in comment_list:
token_list = CleanAndTokenize(comment)
token_list = [word for word in token_list if word not in stopword_list]
# Update and compute the centroid
centroid_comment_stemmed_tokens.extend([porter.stem(token) for token in token_list])
centroid_comment_stemmed_tokens_fd = FreqDist(centroid_comment_stemmed_tokens)
# Calculate weight for each word in all the comments with tf-idf
for w in vocab_freq:
log_fraction = (nDocuments / vocab_freq[w])
if log_fraction < 1:
log_fraction = Decimal(nDocuments) / Decimal(vocab_freq[w])
if w in centroid_comment_stemmed_tokens:
centroid_comment_features[w] = centroid_comment_stemmed_tokens_fd[w] * math.log(log_fraction)
else:
centroid_comment_features[w] = 0.0
# normalize vector
centroid_comment_features = NormalizeVector(centroid_comment_features)
# Now compute distance to comment
comment_stemmed_tokens = []
comment_features = {}
comment_text = escape_string(comment_text.strip())
token_list = CleanAndTokenize(comment_text)
token_list = [word for word in token_list if word not in stopword_list]
comment_stemmed_tokens.extend([porter.stem(token) for token in token_list])
comment_stemmed_tokens_fd = FreqDist(comment_stemmed_tokens)
# Calculate weight for each word in the comment with tf-idf
for w in vocab_freq:
log_fraction = (nDocuments / vocab_freq[w])
if log_fraction < 1:
log_fraction = Decimal(nDocuments) / Decimal(vocab_freq[w])
if w in comment_stemmed_tokens:
comment_features[w] = comment_stemmed_tokens_fd[w] * math.log(log_fraction)
else:
comment_features[w] = 0.0
comment_features = NormalizeVector(comment_features)
    comment_originality = ComputeCosineSimilarity(centroid_comment_features, comment_features)
return comment_originality
def calcPersonalXPScores(comment_text):
# comment_text = comment_text.decode("utf-8")
# tokenizer = WhitespaceTokenizer()
personal_xp_score = 0
text = comment_text.lower()
#filter out punctuations
punctuations = string.punctuation # includes following characters: !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
excluded_punctuations = ["$", "%", "'"]
for p in punctuations:
if p not in excluded_punctuations:
text = text.replace(p, " ")
    # tokenize the punctuation-stripped text
    token_list = CleanAndTokenize(text)
    text_tokens = token_list
# comment_stemmed_tokens = [porter.stem(token) for token in token_list]
# if the tokens are in the personal_words List then increment score
for tok in text_tokens:
tok_stem = porter.stem(tok)
if tok_stem in personal_words:
personal_xp_score = personal_xp_score + 1
# normalize by number of tokens
if len(text_tokens) > 0:
personal_xp_score = float(personal_xp_score) / float(len(text_tokens))
else:
personal_xp_score = 0.0
return personal_xp_score
def calcReadability(comment_text):
textstat = TextStatistics("")
text = comment_text.lower()
#filter out punctuations
punctuations = string.punctuation # includes following characters: !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
excluded_punctuations = ["$", "%", "'"]
for p in punctuations:
if p not in excluded_punctuations:
text = text.replace(p, " ")
readability_score = textstat.smog_index(text=text)
return readability_score
def calLength(comment_text):
token = CleanAndTokenize(comment_text)
return len(token)
def updateComment(comment_text,commentID):
operation = "update"
ArticleRelevance = ComputeCommentArticleRelevance(comment_text,commentID,operation)
ConversationalRelevance = ComputeCommentConversationalRelevance(comment_text,commentID,operation)
PersonalXP = calcPersonalXPScores(comment_text)
Readability = calcReadability(comment_text)
Length = calLength(comment_text)
return (ArticleRelevance,ConversationalRelevance,PersonalXP,Readability,Length)
def addComment(comment_text,articleID):
operation = "add"
ArticleRelevance = ComputeCommentArticleRelevance(comment_text,articleID,operation)
ConversationalRelevance = ComputeCommentConversationalRelevance(comment_text,articleID,operation)
PersonalXP = calcPersonalXPScores(comment_text)
Readability = calcReadability(comment_text)
Length = calLength(comment_text)
return (ArticleRelevance,ConversationalRelevance,PersonalXP,Readability,Length)
|
import threading
from subprocess import Popen, PIPE
import os
import shutil
import common as c
class TaskGenerator(threading.Thread):
"""
Thread to generate unique tasks using task generator scripts.
"""
####
# init
####
def __init__(self, name, queues, dbs, submission_mail, tasks_dir, \
course_mode, allow_requests):
"""
Constructor for the thread.
"""
threading.Thread.__init__(self)
self.name = name
self.queues = queues
self.dbs = dbs
self.submission_mail = submission_mail
self.tasks_dir = tasks_dir
self.course_mode = course_mode
self.allow_requests = allow_requests
####
# get_scriptinfo
####
def get_scriptinfo(self, task_nr):
"""
Get the path to the generator script and chosen language for task task_nr.
Returns None if not proper config.
"""
curc, conc = c.connect_to_db(self.dbs["course"], self.queues["logger"], self.name)
data = {'task_nr': task_nr}
sql_cmd = ("SELECT TaskName, GeneratorExecutable, Language FROM TaskConfiguration "
"WHERE TaskNr == :task_nr")
curc.execute(sql_cmd, data)
res = curc.fetchone()
        if not res:
            logmsg = ("Failed to fetch Configuration for TaskNr {0} from the "
                      "database! Table TaskConfiguration corrupted?").format(task_nr)
            c.log_a_msg(self.queues["logger"], self.name, logmsg, "ERROR")
            scriptpath = None
            language = None
else:
task_name = res[0]
generator_name = res[1]
language = res[2]
scriptpath = self.tasks_dir + "/" + task_name + "/" + generator_name
conc.close()
return (scriptpath, language)
####
# delete_usertask
####
def delete_usertask(self, user_id, task_nr):
"""
Delete existing usertask and its structures
"""
# delete db entry
curs, cons = c.connect_to_db(self.dbs["semester"], self.queues["logger"], self.name)
data = {"user_id": user_id,
"task_nr": task_nr}
sql_cmd = ("DELETE FROM UserTasks "
"WHERE TaskNr == :task_nr AND UserId == :user_id")
# remove directory
usertask_dir = 'users/' + str(user_id) + "/Task"+str(task_nr)
shutil.rmtree(usertask_dir)
curs.execute(sql_cmd, data)
cons.commit()
cons.close()
####
# generator_loop
####
def generator_loop(self):
"""
Loop code for the generator thread
"""
# blocking wait on gen_queue
next_gen_msg = self.queues["generator"].get(True)
logmsg = "gen_queue content:" + str(next_gen_msg)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "DEBUG")
task_nr = next_gen_msg.get('task_nr')
user_id = next_gen_msg.get('user_id')
user_email = next_gen_msg.get('user_email')
message_id = next_gen_msg.get('message_id')
# requested task is a valid task?
if not c.is_valid_task_nr(self.dbs["course"], task_nr, \
self.queues["logger"], self.name):
logmsg = ("Generator was given the task to create non valid TaskNr {0}. "
"This should not happen!").format(task_nr)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "ERROR")
return
# check if user already got this task
already_received = c.user_received_task(self.dbs["semester"], user_id, \
task_nr, self.queues["logger"], \
self.name)
if already_received:
if self.allow_requests == "multiple":
logmsg = ("User with Id {0} TaskNr {1} already got this task, "
"deleting it to make place for new").format(user_id, task_nr)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "INFO")
self.delete_usertask(user_id, task_nr)
else:
logmsg = ("User with Id {0} TaskNr {1} already got this task, "
"multiple request not allowed for this course").format(user_id, task_nr)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "INFO")
c.send_email(self.queues["sender"], str(user_email), str(user_id), \
"NoMultipleRequest", str(task_nr), "", str(message_id))
return
# generate the directory for the task in the space of the user
usertask_dir = 'users/' + str(user_id) + "/Task"+str(task_nr)
c.check_dir_mkdir(usertask_dir, self.queues["logger"], self.name)
# generate the folder for the task description
desc_dir = usertask_dir + "/desc"
c.check_dir_mkdir(desc_dir, self.queues["logger"], self.name)
# get the path to the generator script
scriptpath, language = self.get_scriptinfo(task_nr)
# check the path
if not scriptpath or not os.path.isfile(scriptpath):
logmsg = "Could not find generator script for task{0}".format(task_nr)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "ERROR")
return
command = [scriptpath, str(user_id), str(task_nr), self.submission_mail,\
str(self.course_mode), self.dbs["semester"], str(language)]
logmsg = "generator command with arguments: {0} ".format(command)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "DEBUG")
process = Popen(command, stdout=PIPE, stderr=PIPE)
generator_msg, generator_error = process.communicate()
generator_msg = generator_msg.decode('UTF-8')
generator_error = generator_error.decode('UTF-8')
generator_res = process.returncode
log_src = "Generator{0}({1})".format(str(task_nr), str(user_id))
if generator_msg:
c.log_task_msg(self.queues["logger"], log_src, generator_msg, "INFO")
if generator_error:
c.log_task_error(self.queues["logger"], log_src, generator_error, "ERROR")
        # Error during task generation
        if generator_res != 0:  # generator returned a non-zero exit code
            logmsg = "Failed executing the generator script, return value: " + \
                     str(generator_res)
            c.log_a_msg(self.queues["logger"], self.name, logmsg, "ERROR")
            # alert the admin
            c.send_email(self.queues["sender"], "", user_id, \
                         "TaskAlert", task_nr, "", message_id)
            # notify the user
            c.send_email(self.queues["sender"], user_email, user_id, \
                         "TaskErrorNotice", task_nr, "", message_id)
            return
logmsg = "Generated individual task for user/task_nr:" + str(user_id) \
+ "/" + str(task_nr)
c.log_a_msg(self.queues["logger"], self.name, logmsg, "INFO")
c.send_email(self.queues["sender"], str(user_email), str(user_id), \
"Task", str(task_nr), "Your personal example", str(message_id))
####
# run
####
def run(self):
"""
Thread code for the generator thread.
"""
c.log_a_msg(self.queues["logger"], self.name, "Task Generator thread started", "INFO")
while True:
self.generator_loop()
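# A minimal wiring sketch (illustrative only; the queue and database names
# below are hypothetical and depend on the surrounding project):
#   queues = {"logger": Queue(), "generator": Queue(), "sender": Queue()}
#   dbs = {"course": "course.db", "semester": "semester.db"}
#   gen = TaskGenerator("Generator", queues, dbs, "submissions@example.com",
#                       "tasks", "normal", "multiple")
#   gen.daemon = True
#   gen.start()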
|
"""
Least recently used algorithm
"""
class Node(object):
"""
Node to be stored in the LRU structure
"""
def __init__(self, prev, value):
self.prev = prev
self.value = value
self.next = None
class LRU(object):
"""
Implementation of a length-limited O(1) LRU cache
"""
def __init__(self, count):
self.count = max(count, 2)
self.data = {}
self.first = None
self.last = None
def __contains__(self, obj):
"""
Return True if the object is contained in the LRU
"""
return obj in self.data
    def __getitem__(self, obj):
        """
        Return the item associated with obj. Note that a lookup does not
        refresh recency; eviction follows insertion order unless the caller
        re-sets the key.
        """
        return self.data[obj].value[1]
def __setitem__(self, obj, val):
"""
Set the item in the LRU, removing an old entry if needed
"""
if obj in self.data:
del self[obj]
nobj = Node(self.last, (obj, val))
if self.first is None:
self.first = nobj
if self.last:
self.last.next = nobj
self.last = nobj
self.data[obj] = nobj
if len(self.data) > self.count:
if self.first == self.last:
self.first = None
self.last = None
return
lnk = self.first
lnk.next.prev = None
self.first = lnk.next
lnk.next = None
if lnk.value[0] in self.data:
del self.data[lnk.value[0]]
del lnk
def __delitem__(self, obj):
"""
Delete the object from the LRU
"""
nobj = self.data[obj]
if nobj.prev:
nobj.prev.next = nobj.next
else:
self.first = nobj.next
if nobj.next:
nobj.next.prev = nobj.prev
else:
self.last = nobj.prev
del self.data[obj]
def __iter__(self):
"""
Iterate over the LRU
"""
cur = self.first
while cur is not None:
cur2 = cur.next
yield cur.value[1]
cur = cur2
def iteritems(self):
"""
Return items in the LRU using a generator
"""
cur = self.first
while cur is not None:
cur2 = cur.next
yield cur.value
cur = cur2
def iterkeys(self):
"""
Return keys in the LRU using a generator
"""
return iter(self.data)
    def itervalues(self):
        """
        Return values in the LRU using a generator
        """
        for data in self.iteritems():
            yield data[1]
    def keys(self):
        """
        Return all keys
        """
        return [data[0] for data in self.iteritems()]
    def values(self):
        """
        Return all values
        """
        return [data[1] for data in self.iteritems()]
    def items(self):
        """
        Return all (key, value) pairs
        """
        return [data for data in self.iteritems()]
def clear(self):
"""
Empties LRU
"""
        # Step through the doubly linked list, setting prev and next to None.
        # This ensures that each node is unreachable and therefore eligible for
        # garbage collection. "del" is also called for each node, but it is
        # unclear whether this actually has any effect, or just removes the
        # binding to nobj
nobj = self.first
# The references first and last are removed so that the nodes are not
# reachable from these
self.first = None
self.last = None
# The references from self.data are removed
self.data.clear()
while nobj is not None and nobj.next is not None:
# each node except the last is processed
nobj.next.prev = None
nextobj = nobj.next
nobj.next = None
del nobj
nobj = nextobj
if nobj is not None:
# The last node is processed
del nobj
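if __name__ == '__main__':
    # A minimal sketch of the eviction behaviour (illustrative only).
    cache = LRU(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3          # evicts 'a', the oldest entry
    print('a' in cache)     # False
    print(cache.keys())     # ['b', 'c'] in insertion order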
|
a="hello"
print a.x
|
"""
Shared resources for storing a possibly unlimited amount of objects supporting
requests for specific objects.
The :class:`Store` operates in a FIFO (first-in, first-out) order. Objects are
retrieved from the store in the order they were put in. The *get* requests of a
:class:`FilterStore` can be customized by a filter to only retrieve objects
matching a given criterion.
"""
from simpy.core import BoundClass
from simpy.resources import base
class StorePut(base.Put):
"""Request to put *item* into the *store*. The request is triggered once
there is space for the item in the store.
"""
def __init__(self, store, item):
self.item = item
"""The item to put into the store."""
super(StorePut, self).__init__(store)
class StoreGet(base.Get):
"""Request to get an *item* from the *store*. The request is triggered
once there is an item available in the store.
"""
pass
class FilterStoreGet(StoreGet):
"""Request to get an *item* from the *store* matching the *filter*. The
request is triggered once there is such an item available in the store.
*filter* is a function receiving one item. It should return ``True`` for
items matching the filter criterion. The default function returns ``True``
for all items, which makes the request to behave exactly like
:class:`StoreGet`.
"""
def __init__(self, resource, filter=lambda item: True):
self.filter = filter
"""The filter function to filter items in the store."""
super(FilterStoreGet, self).__init__(resource)
class Store(base.BaseResource):
"""Resource with *capacity* slots for storing arbitrary objects. By
default, the *capacity* is unlimited and objects are put and retrieved from
the store in a first-in first-out order.
The *env* parameter is the :class:`~simpy.core.Environment` instance the
container is bound to.
"""
def __init__(self, env, capacity=float('inf')):
if capacity <= 0:
raise ValueError('"capacity" must be > 0.')
super(Store, self).__init__(env, capacity)
self.items = []
"""List of the items available in the store."""
put = BoundClass(StorePut)
"""Request to put *item* into the store."""
get = BoundClass(StoreGet)
"""Request to get an *item* out of the store."""
def _do_put(self, event):
if len(self.items) < self._capacity:
self.items.append(event.item)
event.succeed()
def _do_get(self, event):
if self.items:
event.succeed(self.items.pop(0))
class FilterStore(Store):
"""Resource with *capacity* slots for storing arbitrary objects supporting
filtered get requests. Like the :class:`Store`, the *capacity* is unlimited
by default and objects are put and retrieved from the store in a first-in
first-out order.
Get requests can be customized with a filter function to only trigger for
items for which said filter function returns ``True``.
.. note::
In contrast to :class:`Store`, get requests of a :class:`FilterStore`
won't necessarily be triggered in the same order they were issued.
    *Example:* The store is empty. *Process 1* tries to get an item of type
    *a*, *Process 2* an item of type *b*. Another process puts one item of
    type *b* into the store. Though *Process 2* made its request after
    *Process 1*, it will receive that new item because *Process 1* doesn't
    want it.
"""
    put = BoundClass(StorePut)
    """Request to put *item* into the store."""
    get = BoundClass(FilterStoreGet)
    """Request to get an *item*, for which *filter* returns ``True``, out of
    the store."""
def _do_get(self, event):
for item in self.items:
if event.filter(item):
self.items.remove(item)
event.succeed(item)
break
return True
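# A minimal usage sketch of the FIFO Store defined above (illustrative only;
# assumes this module is used as part of the installed `simpy` package):
if __name__ == '__main__':
    from simpy.core import Environment

    def producer(env, store):
        for i in range(3):
            yield store.put('item %d' % i)  # triggers once there is space
            yield env.timeout(1)

    def consumer(env, store):
        while True:
            item = yield store.get()  # triggers once an item is available
            print('t=%s: got %s' % (env.now, item))

    env = Environment()
    store = Store(env, capacity=2)
    env.process(producer(env, store))
    env.process(consumer(env, store))
    env.run()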
|
import wx
class ButtonFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, 'Button Example',
size=(300, 100))
panel = wx.Panel(self, -1)
self.button = wx.Button(panel, -1, "Hello", pos=(50, 20))
        self.Bind(wx.EVT_BUTTON, self.OnClick, self.button)
self.button.SetDefault()
def OnClick(self, event):
self.button.SetLabel("Clicked")
self.button.SetBackgroundColour('blue')
if __name__ == '__main__':
    app = wx.App(False)
frame = ButtonFrame()
frame.Show()
app.MainLoop()
|
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
THIS FILE IS DEPRECATED AND MAY BE REMOVED WITHOUT WARNING!
DO NOT CALL THESE FUNCTIONS IN YOUR CODE!
"""
import cvxpy.interface as intf
import cvxpy.lin_ops.lin_op as lo
import copy
import numpy as np
from scipy.signal import fftconvolve
def mul(lin_op, val_dict, is_abs=False):
"""Multiply the expression tree by a vector.
Parameters
----------
lin_op : LinOp
The root of an expression tree.
val_dict : dict
A map of variable id to value.
is_abs : bool, optional
Multiply by the absolute value of the matrix?
Returns
-------
NumPy matrix
The result of the multiplication.
"""
# Look up the value for a variable.
if lin_op.type is lo.VARIABLE:
if lin_op.data in val_dict:
# Use absolute value of variable.
if is_abs:
return np.abs(val_dict[lin_op.data])
else:
return val_dict[lin_op.data]
# Defaults to zero if no value given.
else:
return np.mat(np.zeros(lin_op.shape))
# Return all zeros for NO_OP.
elif lin_op.type is lo.NO_OP:
return np.mat(np.zeros(lin_op.shape))
else:
eval_args = []
for arg in lin_op.args:
eval_args.append(mul(arg, val_dict, is_abs))
if is_abs:
return op_abs_mul(lin_op, eval_args)
else:
return op_mul(lin_op, eval_args)
def tmul(lin_op, value, is_abs=False):
"""Multiply the transpose of the expression tree by a vector.
Parameters
----------
lin_op : LinOp
The root of an expression tree.
value : NumPy matrix
The vector to multiply by.
is_abs : bool, optional
Multiply by the absolute value of the matrix?
Returns
-------
dict
A map of variable id to value.
"""
# Store the value as the variable.
if lin_op.type is lo.VARIABLE:
return {lin_op.data: value}
# Do nothing for NO_OP.
elif lin_op.type is lo.NO_OP:
return {}
else:
if is_abs:
result = op_abs_tmul(lin_op, value)
else:
result = op_tmul(lin_op, value)
result_dicts = []
for arg in lin_op.args:
result_dicts.append(tmul(arg, result, is_abs))
# Sum repeated ids.
return sum_dicts(result_dicts)
def sum_dicts(dicts):
"""Sums the dictionaries entrywise.
Parameters
----------
dicts : list
A list of dictionaries with numeric entries.
Returns
-------
dict
A dict with the sum.
"""
# Sum repeated entries.
sum_dict = {}
for val_dict in dicts:
for id_, value in val_dict.items():
if id_ in sum_dict:
sum_dict[id_] = sum_dict[id_] + value
else:
sum_dict[id_] = value
return sum_dict
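# For example (illustrative):
#   sum_dicts([{'x': 1}, {'x': 2, 'y': 3}])  ->  {'x': 3, 'y': 3}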
def op_mul(lin_op, args):
"""Applies the linear operator to the arguments.
Parameters
----------
lin_op : LinOp
A linear operator.
args : list
The arguments to the operator.
Returns
-------
NumPy matrix or SciPy sparse matrix.
The result of applying the linear operator.
"""
# Constants convert directly to their value.
if lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]:
result = lin_op.data
# No-op is not evaluated.
elif lin_op.type is lo.NO_OP:
return None
# For non-leaves, recurse on args.
elif lin_op.type is lo.SUM:
result = sum(args)
elif lin_op.type is lo.NEG:
result = -args[0]
elif lin_op.type is lo.MUL:
coeff = mul(lin_op.data, {})
result = coeff*args[0]
elif lin_op.type is lo.DIV:
divisor = mul(lin_op.data, {})
result = args[0]/divisor
elif lin_op.type is lo.SUM_ENTRIES:
result = np.sum(args[0])
elif lin_op.type is lo.INDEX:
row_slc, col_slc = lin_op.data
result = args[0][row_slc, col_slc]
elif lin_op.type is lo.TRANSPOSE:
result = args[0].T
elif lin_op.type is lo.CONV:
result = conv_mul(lin_op, args[0])
elif lin_op.type is lo.PROMOTE:
result = np.ones(lin_op.shape)*args[0]
elif lin_op.type is lo.DIAG_VEC:
val = intf.from_2D_to_1D(args[0])
result = np.diag(val)
else:
raise Exception("Unknown linear operator.")
return result
def op_abs_mul(lin_op, args):
"""Applies the absolute value of the linear operator to the arguments.
Parameters
----------
lin_op : LinOp
A linear operator.
args : list
The arguments to the operator.
Returns
-------
NumPy matrix or SciPy sparse matrix.
The result of applying the linear operator.
"""
# Constants convert directly to their absolute value.
if lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]:
result = np.abs(lin_op.data)
elif lin_op.type is lo.NEG:
result = args[0]
# Absolute value of coefficient.
elif lin_op.type is lo.MUL:
coeff = mul(lin_op.data, {}, True)
result = coeff*args[0]
elif lin_op.type is lo.DIV:
divisor = mul(lin_op.data, {}, True)
result = args[0]/divisor
elif lin_op.type is lo.CONV:
result = conv_mul(lin_op, args[0], is_abs=True)
else:
result = op_mul(lin_op, args)
return result
def op_tmul(lin_op, value):
"""Applies the transpose of the linear operator to the arguments.
Parameters
----------
lin_op : LinOp
A linear operator.
value : NumPy matrix
A numeric value to apply the operator's transpose to.
Returns
-------
NumPy matrix or SciPy sparse matrix.
The result of applying the linear operator.
"""
if lin_op.type is lo.SUM:
result = value
elif lin_op.type is lo.NEG:
result = -value
elif lin_op.type is lo.MUL:
coeff = mul(lin_op.data, {})
# Scalar coefficient, no need to transpose.
if np.isscalar(coeff):
result = coeff*value
else:
result = coeff.T*value
elif lin_op.type is lo.DIV:
divisor = mul(lin_op.data, {})
result = value/divisor
elif lin_op.type is lo.SUM_ENTRIES:
result = np.mat(np.ones(lin_op.args[0].shape))*value
elif lin_op.type is lo.INDEX:
row_slc, col_slc = lin_op.data
result = np.mat(np.zeros(lin_op.args[0].shape))
result[row_slc, col_slc] = value
elif lin_op.type is lo.TRANSPOSE:
result = value.T
elif lin_op.type is lo.PROMOTE:
result = np.ones(lin_op.shape[0]).dot(value)
elif lin_op.type is lo.DIAG_VEC:
# The return type in numpy versions < 1.10 was ndarray.
result = np.diag(value)
if isinstance(result, np.matrix):
result = result.A[0]
elif lin_op.type is lo.CONV:
result = conv_mul(lin_op, value, transpose=True)
else:
raise Exception("Unknown linear operator.")
return result
def op_abs_tmul(lin_op, value):
"""Applies the linear operator |A.T| to the arguments.
Parameters
----------
lin_op : LinOp
A linear operator.
value : NumPy matrix
A numeric value to apply the operator's transpose to.
Returns
-------
NumPy matrix or SciPy sparse matrix.
The result of applying the linear operator.
"""
if lin_op.type is lo.NEG:
result = value
# Absolute value of coefficient.
elif lin_op.type is lo.MUL:
coeff = mul(lin_op.data, {}, True)
# Scalar coefficient, no need to transpose.
if np.isscalar(coeff):
result = coeff*value
else:
result = coeff.T*value
elif lin_op.type is lo.DIV:
divisor = mul(lin_op.data, {}, True)
result = value/divisor
elif lin_op.type is lo.CONV:
result = conv_mul(lin_op, value, True, True)
else:
result = op_tmul(lin_op, value)
return result
def conv_mul(lin_op, rh_val, transpose=False, is_abs=False):
"""Multiply by a convolution operator.
    Parameters
----------
lin_op : LinOp
The root linear operator.
rh_val : NDArray
The vector being convolved.
transpose : bool
Is the transpose of convolution being applied?
is_abs : bool
Is the absolute value of convolution being applied?
Returns
-------
NumPy NDArray
The convolution.
"""
constant = mul(lin_op.data, {}, is_abs)
# Convert to 2D
constant, rh_val = map(intf.from_1D_to_2D, [constant, rh_val])
if transpose:
constant = np.flipud(constant)
# rh_val always larger than constant.
return fftconvolve(rh_val, constant, mode='valid')
else:
# First argument must be larger.
if constant.size >= rh_val.size:
return fftconvolve(constant, rh_val, mode='full')
else:
return fftconvolve(rh_val, constant, mode='full')
def get_constant(lin_op):
"""Returns the constant term in the expression.
Parameters
----------
lin_op : LinOp
The root linear operator.
Returns
-------
NumPy NDArray
The constant term as a flattened vector.
"""
constant = mul(lin_op, {})
const_size = constant.shape[0]*constant.shape[1]
return np.reshape(constant, const_size, 'F')
def get_constr_constant(constraints):
"""Returns the constant term for the constraints matrix.
Parameters
----------
constraints : list
The constraints that form the matrix.
Returns
-------
NumPy NDArray
The constant term as a flattened vector.
"""
# TODO what if constraints is empty?
constants = [get_constant(c.expr) for c in constraints]
return np.hstack(constants)
def prune_constants(constraints):
"""Returns a new list of constraints with constant terms removed.
Parameters
----------
constraints : list
The constraints that form the matrix.
Returns
-------
list
The pruned constraints.
"""
pruned_constraints = []
for constr in constraints:
constr_type = type(constr)
expr = copy.deepcopy(constr.expr)
is_constant = prune_expr(expr)
# Replace a constant root with a NO_OP.
if is_constant:
expr = lo.LinOp(lo.NO_OP, expr.shape, [], None)
pruned = constr_type(expr, constr.constr_id, constr.shape)
pruned_constraints.append(pruned)
return pruned_constraints
def prune_expr(lin_op):
"""Prunes constant branches from the expression.
Parameters
----------
lin_op : LinOp
The root linear operator.
Returns
-------
bool
Were all the expression's arguments pruned?
"""
if lin_op.type is lo.VARIABLE:
return False
elif lin_op.type in [lo.SCALAR_CONST,
lo.DENSE_CONST,
lo.SPARSE_CONST,
lo.PARAM]:
return True
pruned_args = []
is_constant = True
for arg in lin_op.args:
arg_constant = prune_expr(arg)
if not arg_constant:
is_constant = False
pruned_args.append(arg)
# Overwrite old args with only non-constant args.
lin_op.args[:] = pruned_args[:]
return is_constant
|
from nose.tools import eq_, timed
from pysimavr.avr import Avr
def test_avr():
avr = Avr(mcu='atmega48', f_cpu=8000000)
eq_(avr.f_cpu, 8000000)
eq_(avr.mcu, 'atmega48')
eq_(avr.pc, 0)
avr.step(1)
eq_(avr.pc, 2)
|
../../../../share/pyshared/dbus/glib.py
|
import os, sys
sys.path.append(os.environ['PYTHON_LIBPATH'])
DEBUG = os.getenv('DEBUG') or os.getenv('VERBOSE') or 0
DEBUG = int(DEBUG)
xtab_version = '0.1.3'
import proxy.tokenizer as tokenizer
xtab_error_status = 0
return_xtab_query = False
xtab_help_messages = (
('xtab - version ' + xtab_version + ' - (C) MySQL AB 2007' ,) ,
('Syntax: ' ,) ,
(' - ' ,) ,
('XTAB table_name row_field col_field operation operation_field [summary]',),
('"table_name" can be a table or a view' ,) ,
('"row_field" is the field to be used as row header' ,) ,
('"col_field" is the field whose distinct values will become column headers',),
('"operation" is the required operation (COUNT|SUM|AVG|MAX|MIN)' ,) ,
('"operation_field" is the field to which the operation is applied' ,) ,
(' - ' ,) ,
('If the "summary" option is used, then a "WITH ROLLUP" clause ' ,),
('is added to the query.' ,) ,
(' - ' ,) ,
('Other commands:' ,) ,
('XTAB QUERY - the XTAB query is returned instead of its result' ,) ,
('XTAB NOQUERY - the XTAB result is returned (default)' ,) ,
('XTAB version - shows current version' ,) ,
('XTAB help - shows this help' ,) ,
('Created by Giuseppe Maxia' ,) ,
)
allowed_operators = ('count', 'sum', 'avg', 'min', 'max' )
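# Example (illustrative): for a hypothetical table sales(region, quarter, amount),
#   XTAB sales region quarter SUM amount
# is rewritten into a pivot query along the lines of:
#   SELECT region,
#          SUM(if( `quarter`= 'Q1',`amount`,null)) as `quarter_Q1`,
#          SUM(if( `quarter`= 'Q2',`amount`,null)) as `quarter_Q2`,
#          ...,
#          SUM(`amount`) AS total
#   FROM sales GROUP BY region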
xtab_help_resultset = {
'fields' : (
('XTAB help', proxy.MYSQL_TYPE_STRING),
),
'rows' : xtab_help_messages,
}
xtab_version_resultset = {
'fields' : (
('XTAB version', proxy.MYSQL_TYPE_STRING),
),
'rows' : (
(xtab_version,) ,
)
}
xtab_query_resultset = {
'fields' : (
( 'XTAB query ', proxy.MYSQL_TYPE_STRING),
),
'rows' : (
('Setting XTAB QUERY, the next XTAB command will return ' ,),
('the query text instead of its result.' ,),
('' ,),
('Setting XTAB NOQUERY (default), the XTAB command' ,),
('executes the query and returns its result.' ,),
('' ,),
)
}
xtab_unknown_resultset = {
'fields' : (
( 'XTAB ERROR', proxy.MYSQL_TYPE_STRING),
),
'rows' : (
('unknown command. Enter "XTAB HELP" for help',),
)
}
xtab_unknown_operator = {
'fields' : (
( 'XTAB ERROR', proxy.MYSQL_TYPE_STRING),
),
'rows' : (
('unknown operator.',),
( 'Accepted operators: COUNT, SUM, AVG, MIN, MAX', ),
( 'Enter "XTAB HELP" for help', ),
)
}
xtab_params = {}
xtab_id_before = 1024
xtab_id_start = 2048
xtab_id_exec = 4096
def read_query( proxy, packet ):
if ord(packet[0]) != proxy.COM_QUERY :
return
    global xtab_error_status
    global xtab_params
    global return_xtab_query
    xtab_params = {}
xtab_error_status = 0
query = packet[1:]
    #-
    #- simple tokenizing of the query, looking for an accepted pattern
    #-
    #local option, table_name, row_field, col_field, op, op_col, summary
query_tokens = tokenizer.tokenize(query)
START_TOKEN = 0
if ( query_tokens[0].text.lower() == 'xtab' ):
START_TOKEN = 1
option = query_tokens[1].text
elif ( query_tokens[0].text.lower() == 'select'\
and\
query_tokens[1].text.lower() == 'xtab' ):
START_TOKEN = 2
option = query_tokens[2].text
else:
return
print_debug('received query ' + query)
if len(query_tokens) == START_TOKEN + 1:
        if option.lower() == 'help':
            proxy.response.resultset = xtab_help_resultset
elif option.lower() == 'version' :
proxy.response.resultset = xtab_version_resultset
elif option.lower() == 'query' :
#xtab_query_resultset['rows'].append(( 'Current setting: returns a query', ))
#proxy.response.resultset = xtab_query_resultset
proxy.response.resultset = {
'fields' : xtab_query_resultset['fields'],
'rows' : xtab_query_resultset['rows'] + (('Current setting: returns a query', ), ),
}
return_xtab_query = True
elif option.lower() == 'noquery' :
#xtab_query_resultset['rows'].append(( 'Current setting: returns a result set', ))
#proxy.response.resultset = xtab_query_resultset
proxy.response.resultset = {
'fields' : xtab_query_resultset['fields'],
'rows' : xtab_query_resultset['rows'] + (('Current setting: returns a result set', ), ),
}
return_xtab_query = False
else:
proxy.response.resultset = xtab_unknown_resultset
proxy.response.type = proxy.MYSQLD_PACKET_OK
return proxy.PROXY_SEND_RESULT
#-
#- parsing the query for a xtab recognized command
#-
table_name = option
row_field = query_tokens[START_TOKEN + 1 ].text
col_field = query_tokens[START_TOKEN + 2 ].text
op = query_tokens[START_TOKEN + 3 ].text
op_col = query_tokens[START_TOKEN + 4 ].text
    print_debug('Query_tokens: ' + str(query_tokens), 2)
    print_debug('START_TOKEN: ' + str(START_TOKEN), 2)
if (len(query_tokens) == START_TOKEN + 7) :
summary = query_tokens[START_TOKEN + 5 ].text
else:
summary = ''
if op_col :
print_debug ("<xtab> <%s> (%s) (%s) [%s] [%s]" % \
(table_name, row_field, col_field, op, op_col ))
else:
return
recognized_operator = op.lower() in allowed_operators
if not recognized_operator:
print_debug('unknown operator ' + op)
proxy.response.type = proxy.MYSQLD_PACKET_OK
proxy.response.resultset = xtab_unknown_operator
return proxy.PROXY_SEND_RESULT
xtab_params['table_name'] = table_name
xtab_params['row_header'] = row_field
xtab_params['col_header'] = col_field
xtab_params['operation'] = op
xtab_params['op_col'] = op_col
xtab_params['summary'] = summary.lower() == 'summary'
print_debug('summary: ' + str(xtab_params['summary']))
proxy.queries.append(xtab_id_before,
chr(proxy.COM_QUERY) + "set group_concat_max_len = 1024*1024", True)
proxy.queries.append(xtab_id_start,
chr(proxy.COM_QUERY) +
'''
select group_concat( distinct concat(
'%s(if( `%s`= ', quote(%s),',`%s`,null)) as `%s_',%s,'`' )
order by `%s` ) from `%s` order by `%s`''' %\
(op,
col_field,
col_field,
op_col,
col_field,
col_field,
col_field,
table_name,
col_field, ), True)
return proxy.PROXY_SEND_QUERY
def read_query_result(proxy, inj):
global xtab_error_status
print_debug('injection id ' + str(inj.id) + ' error status: ' +\
str(xtab_error_status))
if xtab_error_status > 0 :
print_debug('ignoring resultset ' + str(inj.id) + ' for previous error')
return proxy.PROXY_IGNORE_RESULT
res = inj.resultset
#-
#- on error, empty the query queue and return the error message
#-
if res.query_status and res.query_status < 0:
xtab_error_status = 1
        print_debug('sending result ' + str(inj.id) + ' on error')
proxy.queries.reset()
return
#-
#- ignoring the preparatory queries
#-
if (inj.id >= xtab_id_before) and (inj.id < xtab_id_start) :
print_debug ('ignoring preparatory query from xtab ' + str(inj.id ))
return proxy.PROXY_IGNORE_RESULT
#-
#- creating the XTAB query
#-
if (inj.id == xtab_id_start) :
print_debug ('getting columns resultset from xtab ' + str(inj.id))
col_query = ''
for row in inj.resultset.rows:
col_query = col_query + row[0]
print_debug ('column values : ' + col_query)
        print_debug('-' * 50 + ' ' + str(xtab_params), 2)
        # put each generated column on its own line for readability
        col_query = col_query.replace(',' + xtab_params['operation'],
                                      '\n, ' + xtab_params['operation'])
xtab_query = '''
SELECT
%s ,
%s ,
%s(`%s`) AS total
FROM %s
GROUP BY %s
''' % \
(xtab_params['row_header'],
col_query,
xtab_params['operation'],
xtab_params['op_col'],
xtab_params['table_name'],
xtab_params['row_header'])
if xtab_params['summary'] == True :
xtab_query = xtab_query + ' WITH ROLLUP '
#-
#- if the query was requested, it is returned immediately
#-
if (return_xtab_query == True) :
proxy.queries.reset()
proxy.response.type = proxy.MYSQLD_PACKET_OK
            proxy.response.resultset = {
                'fields' : (
                    ('XTAB query', proxy.MYSQL_TYPE_STRING),
                ),
                'rows' : [
                    [ xtab_query, ],
                ]
            }
return proxy.PROXY_SEND_RESULT
#-
#- The XTAB query is executed
#-
proxy.queries.append(xtab_id_exec, chr(proxy.COM_QUERY) + xtab_query, True)
print_debug (xtab_query, 2)
return proxy.PROXY_IGNORE_RESULT
#-
#- Getting the final xtab result
#-
if (inj.id == xtab_id_exec) :
print_debug ('getting final xtab result ' + str(inj.id ))
#-
#- Replacing the default NULL value provided by WITH ROLLUP
#- with a more human readable value
#-
        if xtab_params['summary'] == True :
            updated_rows = []
            updated_fields = []
            for row in inj.resultset.rows:
                row = list(row)
                if row[0] is None:
                    # WITH ROLLUP marks the summary row with a NULL row header
                    row[0] = 'Grand Total'
                updated_rows.append(row)
            for f in inj.resultset.fields:
                updated_fields.append((f.name, f.type))
            proxy.response.resultset = {
                'fields' : updated_fields,
                'rows' : updated_rows
            }
proxy.response.type = proxy.MYSQLD_PACKET_OK
return proxy.PROXY_SEND_RESULT
def print_debug (msg, min_level=0):
    if DEBUG and (DEBUG >= min_level):
        print msg
|
import os
import unittest
from django.conf import settings
from django.contrib.staticfiles.finders import AppDirectoriesFinder
class TestStaticfiles(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# FIXME: Why test the configuration of django_staticfiles in the locks app?
def test_staticfiles(self):
"""
Test whether django-staticfiles is properly configured.
There are various reasons why this could fail:
* App not loaded (not in get_apps())
* models.py missing
* Addon not appended to STATICFILES_PREPEND_LABEL_APPS
* STATIC_ROOT is not absolute path
* STATICFILES_MEDIA_DIRNAMES doesn't include 'media'
"""
suffix = 'css/icons.css'
for addons_root in settings.ADDONS_ROOTS:
ref = os.path.realpath('%s/locks/static/locks/%s' % (addons_root, suffix))
if os.path.exists(ref):
break
path = 'locks/%s' % suffix
r = AppDirectoriesFinder()
self.assertEqual(ref, r.find(path))
|
"""Classes and functions for interfacing with te923 weather stations.
These stations were made by Hideki and branded as Honeywell, Meade, IROX Pro X,
Mebus TE923, and TFA Nexus. They date back to at least 2007 and are still
sold (sparsely in the US, more commonly in Europe) as of 2013.
Apparently there are at least two different memory sizes. One version can
store about 200 records, a newer version can store about 3300 records.
The firmware version of each component can be read by talking to the station,
assuming that the component has a wireless connection to the station, of
course.
To force connection between station and sensors, press and hold DOWN button.
To reset all station parameters:
- press and hold SNOOZE and UP for 4 seconds
- press SET button; main unit will beep
- wait until beeping stops
- remove batteries and wait 10 seconds
- reinstall batteries
From the Meade TE9233W manual (TE923W-M_IM(ENG)_BK_010511.pdf):
Remote temperature/humidity sampling interval: 10 seconds
Remote temperature/humidity transmit interval: about 47 seconds
Indoor temperature/humidity sampling interval: 10 seconds
Indoor pressure sampling interval: 20 minutes
Rain counter transmitting interval: 183 seconds
Wind direction transmitting interval: 33 seconds
Wind/Gust speed display update interval: 33 seconds
Wind/Gust sampling interval: 11 seconds
UV transmitting interval: 300 seconds
Rain counter resolution: 0.03 in (0.6578 mm)
(but console shows instead: 1/36 in (0.705556 mm))
Battery status of each sensor is checked every hour
This implementation polls the station for data. Use the polling_interval to
control the frequency of polling. Default is 10 seconds.
The manual claims that a single bucket tip is 0.03 inches or 0.6578 mm but
neither matches the console display. In reality, a single bucket tip is
between 0.02 and 0.03 in (0.508 to 0.762 mm). This driver uses a value of 1/36
inch as observed in 36 bucket tips per 1.0 inches displayed on the console.
1/36 = 0.02777778 inch = 0.705555556 mm, or 1.0725989 times larger than the
0.02589 inch = 0.6578 mm that was used prior to version 0.41.1.
The station has altitude, latitude, longitude, and time.
Setting the time does not persist. If you set the station time using weewx,
the station initially indicates that it is set to the new time, but then it
reverts.
Notes From/About Other Implementations
Apparently te923tool came first, then wview copied a bit from it. te923tool
provides more detail about the reason for invalid values, for example, values
out of range versus no link with sensors. However, these error states have not
yet been corroborated.
There are some disagreements between the wview and te923tool implementations.
From the te923tool:
- reading from usb in 8 byte chunks instead of all at once
- length of buffer is 35, but reads are 32-byte blocks
- windspeed and windgust state can never be -1
- index 29 in rain count, also in wind dir
From wview:
- wview does the 8-byte reads using interruptRead
- wview ignores the windchill value from the station
- wview treats the pressure reading as barometer (SLP), then calculates the
station pressure and altimeter pressure
Memory Map
0x020000 - Last sample:
[00] = Month (Bits 0-3), Weekday (1 = Monday) (Bits 7:4)
[01] = Day
[02] = Hour
[03] = Minute
[04] ... reading as below
0x020001 - Current readings:
[00] = Temp In Low BCD
[01] = Temp In High BCD (Bit 5 = 0.05 deg, Bit 7 = -ve)
[02] = Humidity In
[03] = Temp Channel 1 Low (No link = Xa)
[04] = Temp Channel 1 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[05] = Humidity Channel 1 (No link = Xa)
[06] = Temp Channel 2 Low (No link = Xa)
[07] = Temp Channel 2 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[08] = Humidity Channel 2 (No link = Xa)
[09] = Temp Channel 3 Low (No link = Xa)
[10] = Temp Channel 3 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[11] = Humidity Channel 3 (No link = Xa)
[12] = Temp Channel 4 Low (No link = Xa)
[13] = Temp Channel 4 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[14] = Humidity Channel 4 (No link = Xa)
[15] = Temp Channel 5 Low (No link = Xa)
[16] = Temp Channel 5 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[17] = Humidity Channel 5 (No link = Xa)
[18] = UV Low (No link = ff)
[19] = UV High (No link = ff)
[20] = Sea-Level Pressure Low
[21] = Sea-Level Pressure High
[22] = Forecast (Bits 0-2) Storm (Bit 3)
[23] = Wind Chill Low (No link = ff)
[24] = Wind Chill High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve, No link = ff)
[25] = Gust Low (No link = ff)
[26] = Gust High (No link = ff)
[27] = Wind Low (No link = ff)
[28] = Wind High (No link = ff)
[29] = Wind Dir (Bits 0-3)
[30] = Rain Low
[31] = Rain High
(1) Memory map values related to sensors use same coding as above
(2) Checksums are computed via subtraction: 0x100 - sum of all values, then add
0x100 until positive, e.g. 0x100 - 0x70 - 0x80 - 0x28 = -0x18; -0x18 + 0x100 = 0xE8
SECTION 1: Date & Local location
0x000000 - Unknown - changes if date section is modified but still changes if
same data is written so not a checksum
0x000001 - Unknown (always 0)
0x000002 - Day (Reverse BCD) (Changes at midday!)
0x000003 - Unknown
0x000004 - Year (Reverse BCD)
0x000005 - Month (Bits 7:4), Weekday (Bits 3:1)
0x000006 - Latitude (degrees) (reverse BCD)
0x000007 - Latitude (minutes) (reverse BCD)
0x000008 - Longitude (degrees) (reverse BCD)
0x000009 - Longitude (minutes) (reverse BCD)
0x00000A - Bit 7 - Set if Latitude southerly
Bit 6 - Set if Longitude easterly
Bit 4 - Set if DST is always on
Bit 3 - Set if -ve TZ
Bits 0 & 1 - Set if half-hour TZ
0x00000B - Longitude (100 degrees) (Bits 7:4), DST zone (Bits 3:0)
0x00000C - City code (High) (Bits 7:4)
Language (Bits 3:0)
0 - English
1 - German
2 - French
3 - Italian
4 - Spanish
6 - Dutch
0x00000D - Timezone (hour) (Bits 7:4), City code (Low) (Bits 3:0)
0x00000E - Bit 2 - Set if 24hr time format
Bit 1 - Set if 12hr time format
0x00000F - Checksum of 00:0E
SECTION 2: Time Alarms
0x000010 - Weekday alarm (hour) (reverse BCD)
Bit 3 - Set if single alarm active
Bit 2 - Set if weekday-alarm active
0x000011 - Weekday alarm (minute) (reverse BCD)
0x000012 - Single alarm (hour) (reverse BCD) (Bit 3 - Set if pre-alarm active)
0x000013 - Single alarm (minute) (reverse BCD)
0x000014 - Bits 7-4: Pre-alarm (1-5 = 15,30,45,60 or 90 mins)
Bits 3-0: Snooze value
0x000015 - Checksum of 10:14
SECTION 3: Alternate Location
0x000016 - Latitude (degrees) (reverse BCD)
0x000017 - Latitude (minutes) (reverse BCD)
0x000018 - Longitude (degrees) (reverse BCD)
0x000019 - Longitude (minutes) (reverse BCD)
0x00001A - Bit 7 - Set if Latitude southerly
Bit 6 - Set if Longitude easterly
Bit 4 - Set if DST is always on
Bit 3 - Set if -ve TZ
Bits 0 & 1 - Set if half-hour TZ
0x00001B - Longitude (100 degrees) (Bits 7:4), DST zone (Bits 3:0)
0x00001C - City code (High) (Bits 7:4), Unknown (Bits 3:0)
0x00001D - Timezone (hour) (Bits 7:4), City code (Low) (Bits 3:0)
0x00001E - Checksum of 16:1D
SECTION 4: Temperature Alarms
0x00001F:20 - High Temp Alarm Value
0x000021:22 - Low Temp Alarm Value
0x000023 - Checksum of 1F:22
SECTION 5: Min/Max 1
0x000024:25 - Min In Temp
0x000026:27 - Max in Temp
0x000028 - Min In Humidity
0x000029 - Max In Humidity
0x00002A:2B - Min Channel 1 Temp
0x00002C:2D - Max Channel 1 Temp
0x00002E - Min Channel 1 Humidity
0x00002F - Max Channel 1 Humidity
0x000030:31 - Min Channel 2 Temp
0x000032:33 - Max Channel 2 Temp
0x000034 - Min Channel 2 Humidity
0x000035 - Max Channel 2 Humidity
0x000036:37 - Min Channel 3 Temp
0x000038:39 - Max Channel 3 Temp
0x00003A - Min Channel 3 Humidity
0x00003B - Max Channel 3 Humidity
0x00003C:3D - Min Channel 4 Temp
0x00003F - Checksum of 24:3E
SECTION 6: Min/Max 2
0x00003E,40 - Max Channel 4 Temp
0x000041 - Min Channel 4 Humidity
0x000042 - Max Channel 4 Humidity
0x000043:44 - Min Channel 5 Temp
0x000045:46 - Max Channel 5 Temp
0x000047 - Min Channel 5 Humidity
0x000048 - Max Channel 5 Humidity
0x000049 - ? Values rising/falling ?
Bit 5 : Chan 1 temp falling
Bit 2 : In temp falling
0x00004A:4B - 0xFF (Unused)
0x00004C - Battery status
Bit 7: Rain
Bit 6: Wind
Bit 5: UV
Bits 4:0: Channel 5:1
0x00004D:58 - 0xFF (Unused)
0x000059 - Checksum of 3E:58
SECTION 7: Altitude
0x00005A:5B - Altitude (Low:High)
0x00005C - Bit 3 - Set if altitude negative
Bit 2 - Pressure falling?
Bit 1 - Always set
0X00005D - Checksum of 5A:5C
0x00005E:5F - Unused (0xFF)
SECTION 8: Pressure 1
0x000060 - Month of last reading (Bits 0-3), Weekday (1 = Monday) (Bits 7:4)
0x000061 - Day of last reading
0x000062 - Hour of last reading
0x000063 - Minute of last reading
0x000064:65 - T -0 Hours
0x000066:67 - T -1 Hours
0x000068:69 - T -2 Hours
0x00006A:6B - T -3 Hours
0x00006C:6D - T -4 Hours
0x00006E:6F - T -5 Hours
0x000070:71 - T -6 Hours
0x000072:73 - T -7 Hours
0x000074:75 - T -8 Hours
0x000076:77 - T -9 Hours
0x000078:79 - T -10 Hours
0x00007B - Checksum of 60:7A
SECTION 9: Pressure 2
0x00007A,7C - T -11 Hours
0x00007D:7E - T -12 Hours
0x00007F:80 - T -13 Hours
0x000081:82 - T -14 Hours
0x000083:84 - T -15 Hours
0x000085:86 - T -16 Hours
0x000087:88 - T -17 Hours
0x000089:8A - T -18 Hours
0x00008B:8C - T -19 Hours
0x00008D:8E - T -20 Hours
0x00008f:90 - T -21 Hours
0x000091:92 - T -22 Hours
0x000093:94 - T -23 Hours
0x000095:96 - T -24 Hours
0x000097 - Checksum of 7C:96
SECTION 10: Versions
0x000098 - firmware versions (barometer)
0x000099 - firmware versions (uv)
0x00009A - firmware versions (rcc)
0x00009B - firmware versions (wind)
0x00009C - firmware versions (system)
0x00009D - Checksum of 98:9C
0x00009E:9F - 0xFF (Unused)
SECTION 11: Rain/Wind Alarms 1
0x0000A0 - Alarms
Bit2 - Set if rain alarm active
Bit 1 - Set if wind alarm active
Bit 0 - Set if gust alarm active
0x0000A1:A2 - Rain alarm value (High:Low) (BCD)
0x0000A3 - Unknown
0x0000A4:A5 - Wind speed alarm value
0x0000A6 - Unknown
0x0000A7:A8 - Gust alarm value
0x0000A9 - Checksum of A0:A8
SECTION 12: Rain/Wind Alarms 2
0x0000AA:AB - Max daily wind speed
0x0000AC:AD - Max daily gust speed
0x0000AE:AF - Rain bucket count (yesterday) (Low:High)
0x0000B0:B1 - Rain bucket count (week) (Low:High)
0x0000B2:B3 - Rain bucket count (month) (Low:High)
0x0000B4 - Checksum of AA:B3
0x0000B5:E0 - 0xFF (Unused)
SECTION 13: Unknown
0x0000E1:F9 - 0x15 (Unknown)
0x0000FA - Checksum of E1:F9
SECTION 14: Archiving
0x0000FB - Unknown
0x0000FC - Memory size (0 = 0x1fff, 2 = 0x20000)
0x0000FD - Number of records (High)
0x0000FE - Archive interval
1-11 = 5, 10, 20, 30, 60, 90, 120, 180, 240, 360, 1440 mins
0x0000FF - Number of records (Low)
0x000100 - Checksum of FB:FF
0x000101 - Start of historical records:
[00] = Month (Bits 0-3), Weekday (1 = Monday) (Bits 7:4)
[01] = Day
[02] = Hour
[03] = Minute
[04] = Temp In Low BCD
[05] = Temp In High BCD (Bit 5 = 0.05 deg, Bit 7 = -ve)
[06] = Humidity In
[07] = Temp Channel 1 Low (No link = Xa)
[08] = Temp Channel 1 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[09] = Humidity Channel 1 (No link = Xa)
[10] = Temp Channel 2 Low (No link = Xa)
[11] = Temp Channel 2 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[12] = Humidity Channel 2 (No link = Xa)
[13] = Temp Channel 3 Low (No link = Xa)
[14] = Temp Channel 3 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[15] = Checksum of bytes 0:14
[16] = Humidity Channel 3 (No link = Xa)
[17] = Temp Channel 4 Low (No link = Xa)
[18] = Temp Channel 4 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[19] = Humidity Channel 4 (No link = Xa)
[20] = Temp Channel 5 Low (No link = Xa)
[21] = Temp Channel 5 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[22] = Humidity Channel 5 (No link = Xa)
[23] = UV Low (No link = ff)
[24] = UV High (No link = ff)
[25] = Sea-Level Pressure Low
[26] = Sea-Level Pressure High
[27] = Forecast (Bits 0-2) Storm (Bit 3)
[28] = Wind Chill Low (No link = ff)
[29] = Wind Chill High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve, No link = ee)
[30] = Gust Low (No link = ff)
[31] = Gust High (No link = ff)
[32] = Wind Low (No link = ff)
[33] = Wind High (No link = ff)
[34] = Wind Dir (Bits 0-3)
[35] = Rain Low
[36] = Rain High
[37] = Checksum of bytes 16:36
USB Protocol
The station shows up on the USB as a HID. Control packet is 8 bytes.
Read from station:
0x05 (Length)
0xAF (Read)
Addr (Bit 17:16), Addr (Bits 15:8), Addr (Bits 7:0), CRC, Unused, Unused
Read acknowledge:
0x24 (Ack)
0xAF (Read)
Addr (Bit 17:16), Addr (Bits 15:8), Addr (Bits 7:0), CRC, Unused, Unused
Write to station:
0x07 (Length)
0xAE (Write)
Addr (Bit 17:16), Addr (Bits 15:8), Addr (Bits 7:0), Data1, Data2, Data3
... Data continue with 3 more packets of length 7 then ...
0x02 (Length), Data32, CRC, Unused, Unused, Unused, Unused, Unused, Unused
Reads returns 32 bytes. Write expects 32 bytes as well, but address must be
aligned to a memory-map section start address and will only write to that
section.
Schema Additions
The station emits more sensor data than the default schema (wview schema) can
handle. This driver includes a mapping between the sensor data and the wview
schema, plus additional fields. To use the default mapping with the wview
schema, these are the additional fields that must be added to the schema:
('extraTemp4', 'REAL'),
('extraHumid3', 'REAL'),
('extraHumid4', 'REAL'),
('extraBatteryStatus1', 'REAL'),
('extraBatteryStatus2', 'REAL'),
('extraBatteryStatus3', 'REAL'),
('extraBatteryStatus4', 'REAL'),
('windLinkStatus', 'REAL'),
('rainLinkStatus', 'REAL'),
('uvLinkStatus', 'REAL'),
('outLinkStatus', 'REAL'),
('extraLinkStatus1', 'REAL'),
('extraLinkStatus2', 'REAL'),
('extraLinkStatus3', 'REAL'),
('extraLinkStatus4', 'REAL'),
('forecast', 'REAL'),
('storm', 'REAL'),
"""
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
import usb
import weewx.drivers
import weewx.wxformulas
from weeutil.weeutil import timestamp_to_string
log = logging.getLogger(__name__)
DRIVER_NAME = 'TE923'
DRIVER_VERSION = '0.41.1'
def loader(config_dict, engine): # @UnusedVariable
return TE923Driver(**config_dict[DRIVER_NAME])
def configurator_loader(config_dict): # @UnusedVariable
return TE923Configurator()
def confeditor_loader():
return TE923ConfEditor()
DEBUG_READ = 1
DEBUG_WRITE = 1
DEBUG_DECODE = 0
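# A minimal sketch of the checksum rule described in the memory map above
# (illustrative helper, not part of the driver API):
def _checksum_sketch(values):
    """0x100 minus the sum of the bytes, plus 0x100 until non-negative."""
    crc = 0x100 - sum(values)
    while crc < 0:
        crc += 0x100
    return crc
# e.g. _checksum_sketch([0x70, 0x80, 0x28]) == 0xE8, matching the example above.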
DEFAULT_MAP = {
'windLinkStatus': 'link_wind',
'windBatteryStatus': 'bat_wind',
'rainLinkStatus': 'link_rain',
'rainBatteryStatus': 'bat_rain',
'uvLinkStatus': 'link_uv',
'uvBatteryStatus': 'bat_uv',
'inTemp': 't_in',
'inHumidity': 'h_in',
'outTemp': 't_1',
'outHumidity': 'h_1',
'outTempBatteryStatus': 'bat_1',
'outLinkStatus': 'link_1',
'extraTemp1': 't_2',
'extraHumid1': 'h_2',
'extraBatteryStatus1': 'bat_2',
'extraLinkStatus1': 'link_2',
'extraTemp2': 't_3',
'extraHumid2': 'h_3',
'extraBatteryStatus2': 'bat_3',
'extraLinkStatus2': 'link_3',
'extraTemp3': 't_4',
'extraHumid3': 'h_4',
'extraBatteryStatus3': 'bat_4',
'extraLinkStatus3': 'link_4',
'extraTemp4': 't_5',
'extraHumid4': 'h_5',
'extraBatteryStatus4': 'bat_5',
'extraLinkStatus4': 'link_5'
}
class TE923ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[TE923]
# This section is for the Hideki TE923 series of weather stations.
# The station model, e.g., 'Meade TE923W' or 'TFA Nexus'
model = TE923
# The driver to use:
driver = weewx.drivers.te923
# The default configuration associates the channel 1 sensor with outTemp
# and outHumidity. To change this, or to associate other channels with
# specific columns in the database schema, use the following map.
#[[sensor_map]]
%s
""" % "\n".join([" # %s = %s" % (x, DEFAULT_MAP[x]) for x in DEFAULT_MAP])
class TE923Configurator(weewx.drivers.AbstractConfigurator):
LOCSTR = "CITY|USR,LONG_DEG,LONG_MIN,E|W,LAT_DEG,LAT_MIN,N|S,TZ,DST"
ALMSTR = "WEEKDAY,SINGLE,PRE_ALARM,SNOOZE,MAXTEMP,MINTEMP,RAIN,WIND,GUST"
idx_to_interval = {
1: "5 min", 2: "10 min", 3: "20 min", 4: "30 min", 5: "60 min",
6: "90 min", 7: "2 hour", 8: "3 hour", 9: "4 hour", 10: "6 hour",
11: "1 day"}
interval_to_idx = {
"5m": 1, "10m": 2, "20m": 3, "30m": 4, "60m": 5, "90m": 6,
"2h": 7, "3h": 8, "4h": 9, "6h": 10, "1d": 11}
forecast_dict = {
0: 'heavy snow',
1: 'light snow',
2: 'heavy rain',
3: 'light rain',
4: 'heavy clouds',
5: 'light clouds',
6: 'sunny',
}
dst_dict = {
0: ["NO", 'None'],
1: ["SA", 'Australian'],
2: ["SB", 'Brazilian'],
3: ["SC", 'Chilian'],
4: ["SE", 'European'],
5: ["SG", 'Eqyptian'],
6: ["SI", 'Cuban'],
7: ["SJ", 'Iraq and Syria'],
8: ["SK", 'Irkutsk and Moscow'],
9: ["SM", 'Uruguayan'],
10: ["SN", 'Nambian'],
11: ["SP", 'Paraguayan'],
12: ["SQ", 'Iranian'],
13: ["ST", 'Tasmanian'],
14: ["SU", 'American'],
15: ["SZ", 'New Zealand'],
}
city_dict = {
0: ["ADD", 3, 0, 9, 1, "N", 38, 44, "E", "Addis Ababa, Ethiopia"],
1: ["ADL", 9.5, 1, 34, 55, "S", 138, 36, "E", "Adelaide, Australia"],
2: ["AKR", 2, 4, 39, 55, "N", 32, 55, "E", "Ankara, Turkey"],
3: ["ALG", 1, 0, 36, 50, "N", 3, 0, "E", "Algiers, Algeria"],
4: ["AMS", 1, 4, 52, 22, "N", 4, 53, "E", "Amsterdam, Netherlands"],
5: ["ARN", 1, 4, 59, 17, "N", 18, 3, "E", "Stockholm Arlanda, Sweden"],
6: ["ASU", -3, 11, 25, 15, "S", 57, 40, "W", "Asuncion, Paraguay"],
7: ["ATH", 2, 4, 37, 58, "N", 23, 43, "E", "Athens, Greece"],
8: ["ATL", -5, 14, 33, 45, "N", 84, 23, "W", "Atlanta, Ga."],
9: ["AUS", -6, 14, 30, 16, "N", 97, 44, "W", "Austin, Tex."],
10: ["BBU", 2, 4, 44, 25, "N", 26, 7, "E", "Bucharest, Romania"],
11: ["BCN", 1, 4, 41, 23, "N", 2, 9, "E", "Barcelona, Spain"],
12: ["BEG", 1, 4, 44, 52, "N", 20, 32, "E", "Belgrade, Yugoslavia"],
13: ["BEJ", 8, 0, 39, 55, "N", 116, 25, "E", "Beijing, China"],
14: ["BER", 1, 4, 52, 30, "N", 13, 25, "E", "Berlin, Germany"],
15: ["BHM", -6, 14, 33, 30, "N", 86, 50, "W", "Birmingham, Ala."],
16: ["BHX", 0, 4, 52, 25, "N", 1, 55, "W", "Birmingham, England"],
17: ["BKK", 7, 0, 13, 45, "N", 100, 30, "E", "Bangkok, Thailand"],
18: ["BNA", -6, 14, 36, 10, "N", 86, 47, "W", "Nashville, Tenn."],
19: ["BNE", 10, 0, 27, 29, "S", 153, 8, "E", "Brisbane, Australia"],
20: ["BOD", 1, 4, 44, 50, "N", 0, 31, "W", "Bordeaux, France"],
21: ["BOG", -5, 0, 4, 32, "N", 74, 15, "W", "Bogota, Colombia"],
22: ["BOS", -5, 14, 42, 21, "N", 71, 5, "W", "Boston, Mass."],
23: ["BRE", 1, 4, 53, 5, "N", 8, 49, "E", "Bremen, Germany"],
24: ["BRU", 1, 4, 50, 52, "N", 4, 22, "E", "Brussels, Belgium"],
25: ["BUA", -3, 0, 34, 35, "S", 58, 22, "W", "Buenos Aires, Argentina"],
26: ["BUD", 1, 4, 47, 30, "N", 19, 5, "E", "Budapest, Hungary"],
27: ["BWI", -5, 14, 39, 18, "N", 76, 38, "W", "Baltimore, Md."],
28: ["CAI", 2, 5, 30, 2, "N", 31, 21, "E", "Cairo, Egypt"],
29: ["CCS", -4, 0, 10, 28, "N", 67, 2, "W", "Caracas, Venezuela"],
30: ["CCU", 5.5, 0, 22, 34, "N", 88, 24, "E", "Calcutta, India (as Kolkata)"],
31: ["CGX", -6, 14, 41, 50, "N", 87, 37, "W", "Chicago, IL"],
32: ["CLE", -5, 14, 41, 28, "N", 81, 37, "W", "Cleveland, Ohio"],
33: ["CMH", -5, 14, 40, 0, "N", 83, 1, "W", "Columbus, Ohio"],
34: ["COR", -3, 0, 31, 28, "S", 64, 10, "W", "Cordoba, Argentina"],
35: ["CPH", 1, 4, 55, 40, "N", 12, 34, "E", "Copenhagen, Denmark"],
36: ["CPT", 2, 0, 33, 55, "S", 18, 22, "E", "Cape Town, South Africa"],
37: ["CUU", -6, 14, 28, 37, "N", 106, 5, "W", "Chihuahua, Mexico"],
38: ["CVG", -5, 14, 39, 8, "N", 84, 30, "W", "Cincinnati, Ohio"],
39: ["DAL", -6, 14, 32, 46, "N", 96, 46, "W", "Dallas, Tex."],
40: ["DCA", -5, 14, 38, 53, "N", 77, 2, "W", "Washington, D.C."],
41: ["DEL", 5.5, 0, 28, 35, "N", 77, 12, "E", "New Delhi, India"],
42: ["DEN", -7, 14, 39, 45, "N", 105, 0, "W", "Denver, Colo."],
43: ["DKR", 0, 0, 14, 40, "N", 17, 28, "W", "Dakar, Senegal"],
44: ["DTW", -5, 14, 42, 20, "N", 83, 3, "W", "Detroit, Mich."],
45: ["DUB", 0, 4, 53, 20, "N", 6, 15, "W", "Dublin, Ireland"],
46: ["DUR", 2, 0, 29, 53, "S", 30, 53, "E", "Durban, South Africa"],
47: ["ELP", -7, 14, 31, 46, "N", 106, 29, "W", "El Paso, Tex."],
48: ["FIH", 1, 0, 4, 18, "S", 15, 17, "E", "Kinshasa, Congo"],
49: ["FRA", 1, 4, 50, 7, "N", 8, 41, "E", "Frankfurt, Germany"],
50: ["GLA", 0, 4, 55, 50, "N", 4, 15, "W", "Glasgow, Scotland"],
51: ["GUA", -6, 0, 14, 37, "N", 90, 31, "W", "Guatemala City, Guatemala"],
52: ["HAM", 1, 4, 53, 33, "N", 10, 2, "E", "Hamburg, Germany"],
53: ["HAV", -5, 6, 23, 8, "N", 82, 23, "W", "Havana, Cuba"],
54: ["HEL", 2, 4, 60, 10, "N", 25, 0, "E", "Helsinki, Finland"],
55: ["HKG", 8, 0, 22, 20, "N", 114, 11, "E", "Hong Kong, China"],
56: ["HOU", -6, 14, 29, 45, "N", 95, 21, "W", "Houston, Tex."],
57: ["IKT", 8, 8, 52, 30, "N", 104, 20, "E", "Irkutsk, Russia"],
58: ["IND", -5, 0, 39, 46, "N", 86, 10, "W", "Indianapolis, Ind."],
59: ["JAX", -5, 14, 30, 22, "N", 81, 40, "W", "Jacksonville, Fla."],
60: ["JKT", 7, 0, 6, 16, "S", 106, 48, "E", "Jakarta, Indonesia"],
61: ["JNB", 2, 0, 26, 12, "S", 28, 4, "E", "Johannesburg, South Africa"],
62: ["KIN", -5, 0, 17, 59, "N", 76, 49, "W", "Kingston, Jamaica"],
63: ["KIX", 9, 0, 34, 32, "N", 135, 30, "E", "Osaka, Japan"],
64: ["KUL", 8, 0, 3, 8, "N", 101, 42, "E", "Kuala Lumpur, Malaysia"],
65: ["LAS", -8, 14, 36, 10, "N", 115, 12, "W", "Las Vegas, Nev."],
66: ["LAX", -8, 14, 34, 3, "N", 118, 15, "W", "Los Angeles, Calif."],
67: ["LIM", -5, 0, 12, 0, "S", 77, 2, "W", "Lima, Peru"],
68: ["LIS", 0, 4, 38, 44, "N", 9, 9, "W", "Lisbon, Portugal"],
69: ["LON", 0, 4, 51, 32, "N", 0, 5, "W", "London, England"],
70: ["LPB", -4, 0, 16, 27, "S", 68, 22, "W", "La Paz, Bolivia"],
71: ["LPL", 0, 4, 53, 25, "N", 3, 0, "W", "Liverpool, England"],
72: ["LYO", 1, 4, 45, 45, "N", 4, 50, "E", "Lyon, France"],
73: ["MAD", 1, 4, 40, 26, "N", 3, 42, "W", "Madrid, Spain"],
74: ["MEL", 10, 1, 37, 47, "S", 144, 58, "E", "Melbourne, Australia"],
75: ["MEM", -6, 14, 35, 9, "N", 90, 3, "W", "Memphis, Tenn."],
76: ["MEX", -6, 14, 19, 26, "N", 99, 7, "W", "Mexico City, Mexico"],
77: ["MIA", -5, 14, 25, 46, "N", 80, 12, "W", "Miami, Fla."],
78: ["MIL", 1, 4, 45, 27, "N", 9, 10, "E", "Milan, Italy"],
79: ["MKE", -6, 14, 43, 2, "N", 87, 55, "W", "Milwaukee, Wis."],
80: ["MNL", 8, 0, 14, 35, "N", 120, 57, "E", "Manila, Philippines"],
81: ["MOW", 3, 8, 55, 45, "N", 37, 36, "E", "Moscow, Russia"],
82: ["MRS", 1, 4, 43, 20, "N", 5, 20, "E", "Marseille, France"],
83: ["MSP", -6, 14, 44, 59, "N", 93, 14, "W", "Minneapolis, Minn."],
84: ["MSY", -6, 14, 29, 57, "N", 90, 4, "W", "New Orleans, La."],
85: ["MUC", 1, 4, 48, 8, "N", 11, 35, "E", "Munich, Germany"],
86: ["MVD", -3, 9, 34, 53, "S", 56, 10, "W", "Montevideo, Uruguay"],
87: ["NAP", 1, 4, 40, 50, "N", 14, 15, "E", "Naples, Italy"],
88: ["NBO", 3, 0, 1, 25, "S", 36, 55, "E", "Nairobi, Kenya"],
89: ["NKG", 8, 0, 32, 3, "N", 118, 53, "E", "Nanjing (Nanking), China"],
90: ["NYC", -5, 14, 40, 47, "N", 73, 58, "W", "New York, N.Y."],
91: ["ODS", 2, 4, 46, 27, "N", 30, 48, "E", "Odessa, Ukraine"],
92: ["OKC", -6, 14, 35, 26, "N", 97, 28, "W", "Oklahoma City, Okla."],
93: ["OMA", -6, 14, 41, 15, "N", 95, 56, "W", "Omaha, Neb."],
94: ["OSL", 1, 4, 59, 57, "N", 10, 42, "E", "Oslo, Norway"],
95: ["PAR", 1, 4, 48, 48, "N", 2, 20, "E", "Paris, France"],
96: ["PDX", -8, 14, 45, 31, "N", 122, 41, "W", "Portland, Ore."],
97: ["PER", 8, 0, 31, 57, "S", 115, 52, "E", "Perth, Australia"],
98: ["PHL", -5, 14, 39, 57, "N", 75, 10, "W", "Philadelphia, Pa."],
99: ["PHX", -7, 0, 33, 29, "N", 112, 4, "W", "Phoenix, Ariz."],
100: ["PIT", -5, 14, 40, 27, "N", 79, 57, "W", "Pittsburgh, Pa."],
101: ["PRG", 1, 4, 50, 5, "N", 14, 26, "E", "Prague, Czech Republic"],
102: ["PTY", -5, 0, 8, 58, "N", 79, 32, "W", "Panama City, Panama"],
103: ["RGN", 6.5, 0, 16, 50, "N", 96, 0, "E", "Rangoon, Myanmar"],
104: ["RIO", -3, 2, 22, 57, "S", 43, 12, "W", "Rio de Janeiro, Brazil"],
105: ["RKV", 0, 0, 64, 4, "N", 21, 58, "W", "Reykjavik, Iceland"],
106: ["ROM", 1, 4, 41, 54, "N", 12, 27, "E", "Rome, Italy"],
107: ["SAN", -8, 14, 32, 42, "N", 117, 10, "W", "San Diego, Calif."],
108: ["SAT", -6, 14, 29, 23, "N", 98, 33, "W", "San Antonio, Tex."],
109: ["SCL", -4, 3, 33, 28, "S", 70, 45, "W", "Santiago, Chile"],
110: ["SEA", -8, 14, 47, 37, "N", 122, 20, "W", "Seattle, Wash."],
111: ["SFO", -8, 14, 37, 47, "N", 122, 26, "W", "San Francisco, Calif."],
112: ["SHA", 8, 0, 31, 10, "N", 121, 28, "E", "Shanghai, China"],
113: ["SIN", 8, 0, 1, 14, "N", 103, 55, "E", "Singapore, Singapore"],
114: ["SJC", -8, 14, 37, 20, "N", 121, 53, "W", "San Jose, Calif."],
115: ["SOF", 2, 4, 42, 40, "N", 23, 20, "E", "Sofia, Bulgaria"],
116: ["SPL", -3, 2, 23, 31, "S", 46, 31, "W", "Sao Paulo, Brazil"],
117: ["SSA", -3, 0, 12, 56, "S", 38, 27, "W", "Salvador, Brazil"],
118: ["STL", -6, 14, 38, 35, "N", 90, 12, "W", "St. Louis, Mo."],
119: ["SYD", 10, 1, 34, 0, "S", 151, 0, "E", "Sydney, Australia"],
120: ["TKO", 9, 0, 35, 40, "N", 139, 45, "E", "Tokyo, Japan"],
121: ["TPA", -5, 14, 27, 57, "N", 82, 27, "W", "Tampa, Fla."],
122: ["TRP", 2, 0, 32, 57, "N", 13, 12, "E", "Tripoli, Libya"],
123: ["USR", 0, 0, 0, 0, "N", 0, 0, "W", "User defined city"],
124: ["VAC", -8, 14, 49, 16, "N", 123, 7, "W", "Vancouver, Canada"],
125: ["VIE", 1, 4, 48, 14, "N", 16, 20, "E", "Vienna, Austria"],
126: ["WAW", 1, 4, 52, 14, "N", 21, 0, "E", "Warsaw, Poland"],
127: ["YMX", -5, 14, 45, 30, "N", 73, 35, "W", "Montreal, Que., Can."],
128: ["YOW", -5, 14, 45, 24, "N", 75, 43, "W", "Ottawa, Ont., Can."],
129: ["YTZ", -5, 14, 43, 40, "N", 79, 24, "W", "Toronto, Ont., Can."],
130: ["YVR", -8, 14, 49, 13, "N", 123, 6, "W", "Vancouver, B.C., Can."],
131: ["YYC", -7, 14, 51, 1, "N", 114, 1, "W", "Calgary, Alba., Can."],
132: ["ZRH", 1, 4, 47, 21, "N", 8, 31, "E", "Zurich, Switzerland"]
}
@property
def version(self):
return DRIVER_VERSION
def add_options(self, parser):
super(TE923Configurator, self).add_options(parser)
parser.add_option("--info", dest="info", action="store_true",
help="display weather station configuration")
parser.add_option("--current", dest="current", action="store_true",
help="get the current weather conditions")
parser.add_option("--history", dest="nrecords", type=int, metavar="N",
help="display N history records")
parser.add_option("--history-since", dest="recmin",
type=int, metavar="N",
help="display history records since N minutes ago")
parser.add_option("--minmax", dest="minmax", action="store_true",
help="display historical min/max data")
parser.add_option("--get-date", dest="getdate", action="store_true",
help="display station date")
parser.add_option("--set-date", dest="setdate",
type=str, metavar="YEAR,MONTH,DAY",
help="set station date")
parser.add_option("--sync-date", dest="syncdate", action="store_true",
help="set station date using system clock")
parser.add_option("--get-location-local", dest="loc_local",
action="store_true",
help="display local location and timezone")
parser.add_option("--set-location-local", dest="setloc_local",
type=str, metavar=self.LOCSTR,
help="set local location and timezone")
parser.add_option("--get-location-alt", dest="loc_alt",
action="store_true",
help="display alternate location and timezone")
parser.add_option("--set-location-alt", dest="setloc_alt",
type=str, metavar=self.LOCSTR,
help="set alternate location and timezone")
parser.add_option("--get-altitude", dest="getalt", action="store_true",
help="display altitude")
parser.add_option("--set-altitude", dest="setalt", type=int,
metavar="ALT", help="set altitude (meters)")
parser.add_option("--get-alarms", dest="getalarms",
action="store_true", help="display alarms")
parser.add_option("--set-alarms", dest="setalarms", type=str,
metavar=self.ALMSTR, help="set alarm state")
parser.add_option("--get-interval", dest="getinterval",
action="store_true", help="display archive interval")
parser.add_option("--set-interval", dest="setinterval",
type=str, metavar="INTERVAL",
help="set archive interval (minutes)")
parser.add_option("--format", dest="format",
type=str, metavar="FORMAT", default='table',
help="formats include: table, dict")
def do_options(self, options, parser, config_dict, prompt): # @UnusedVariable
if (options.format.lower() != 'table' and
options.format.lower() != 'dict'):
parser.error("Unknown format '%s'. Known formats include 'table' and 'dict'." % options.format)
with TE923Station() as station:
if options.info is not None:
self.show_info(station, fmt=options.format)
elif options.current is not None:
self.show_current(station, fmt=options.format)
elif options.nrecords is not None:
self.show_history(station, count=options.nrecords,
fmt=options.format)
elif options.recmin is not None:
ts = int(time.time()) - options.recmin * 60
self.show_history(station, ts=ts, fmt=options.format)
elif options.minmax is not None:
self.show_minmax(station)
elif options.getdate is not None:
self.show_date(station)
elif options.setdate is not None:
self.set_date(station, options.setdate)
elif options.syncdate:
self.set_date(station, None)
elif options.loc_local is not None:
self.show_location(station, 0)
elif options.setloc_local is not None:
self.set_location(station, 0, options.setloc_local)
elif options.loc_alt is not None:
self.show_location(station, 1)
elif options.setloc_alt is not None:
self.set_location(station, 1, options.setloc_alt)
elif options.getalt is not None:
self.show_altitude(station)
elif options.setalt is not None:
self.set_altitude(station, options.setalt)
elif options.getalarms is not None:
self.show_alarms(station)
elif options.setalarms is not None:
self.set_alarms(station, options.setalarms)
elif options.getinterval is not None:
self.show_interval(station)
elif options.setinterval is not None:
self.set_interval(station, options.setinterval)
@staticmethod
def show_info(station, fmt='dict'):
print('Querying the station for the configuration...')
data = station.get_config()
TE923Configurator.print_data(data, fmt)
@staticmethod
def show_current(station, fmt='dict'):
print('Querying the station for current weather data...')
data = station.get_readings()
TE923Configurator.print_data(data, fmt)
@staticmethod
def show_history(station, ts=0, count=None, fmt='dict'):
print("Querying the station for historical records...")
for r in station.gen_records(ts, count):
TE923Configurator.print_data(r, fmt)
@staticmethod
def show_minmax(station):
print("Querying the station for historical min/max data")
data = station.get_minmax()
print("Console Temperature Min : %s" % data['t_in_min'])
print("Console Temperature Max : %s" % data['t_in_max'])
print("Console Humidity Min : %s" % data['h_in_min'])
print("Console Humidity Max : %s" % data['h_in_max'])
for i in range(1, 6):
print("Channel %d Temperature Min : %s" % (i, data['t_%d_min' % i]))
print("Channel %d Temperature Max : %s" % (i, data['t_%d_max' % i]))
print("Channel %d Humidity Min : %s" % (i, data['h_%d_min' % i]))
print("Channel %d Humidity Max : %s" % (i, data['h_%d_max' % i]))
print("Wind speed max since midnight : %s" % data['windspeed_max'])
print("Wind gust max since midnight : %s" % data['windgust_max'])
print("Rain yesterday : %s" % data['rain_yesterday'])
print("Rain this week : %s" % data['rain_week'])
print("Rain this month : %s" % data['rain_month'])
print("Last Barometer reading : %s" % time.strftime(
"%Y %b %d %H:%M", time.localtime(data['barometer_ts'])))
for i in range(25):
print(" T-%02d Hours : %.1f" % (i, data['barometer_%d' % i]))
@staticmethod
def show_date(station):
ts = station.get_date()
tt = time.localtime(ts)
print("Date: %02d/%02d/%d" % (tt[2], tt[1], tt[0]))
TE923Configurator.print_alignment()
@staticmethod
def set_date(station, datestr):
if datestr is not None:
date_list = datestr.split(',')
if len(date_list) != 3:
print("Bad date '%s', format is YEAR,MONTH,DAY" % datestr)
return
if int(date_list[0]) < 2000 or int(date_list[0]) > 2099:
print("Year must be between 2000 and 2099 inclusive")
return
if int(date_list[1]) < 1 or int(date_list[1]) > 12:
print("Month must be between 1 and 12 inclusive")
return
if int(date_list[2]) < 1 or int(date_list[2]) > 31:
print("Day must be between 1 and 31 inclusive")
return
tt = time.localtime()
offset = 1 if tt[3] < 12 else 0
ts = time.mktime((int(date_list[0]), int(date_list[1]), int(date_list[2]) - offset, 0, 0, 0, 0, 0, 0))
else:
ts = time.time()
station.set_date(ts)
TE923Configurator.print_alignment()
def show_location(self, station, loc_type):
data = station.get_loc(loc_type)
print("City : %s (%s)" % (self.city_dict[data['city_time']][9],
self.city_dict[data['city_time']][0]))
        degree_sign = u'\N{DEGREE SIGN}'
print("Location : %03d%s%02d'%s %02d%s%02d'%s" % (
data['long_deg'], degree_sign, data['long_min'], data['long_dir'],
data['lat_deg'], degree_sign, data['lat_min'], data['lat_dir']))
if data['dst_always_on']:
print("DST : Always on")
else:
print("DST : %s (%s)" % (self.dst_dict[data['dst']][1],
self.dst_dict[data['dst']][0]))
def set_location(self, station, loc_type, location):
dst_on = 1
dst_index = 0
location_list = location.split(',')
if len(location_list) == 1 and location_list[0] != "USR":
city_index = None
for idx in range(len(self.city_dict)):
if self.city_dict[idx][0] == location_list[0]:
city_index = idx
break
if city_index is None:
print("City code '%s' not recognized - consult station manual for valid city codes" % location_list[0])
return
long_deg = self.city_dict[city_index][6]
long_min = self.city_dict[city_index][7]
long_dir = self.city_dict[city_index][8]
lat_deg = self.city_dict[city_index][3]
lat_min = self.city_dict[city_index][4]
lat_dir = self.city_dict[city_index][5]
tz_hr = int(self.city_dict[city_index][1])
tz_min = 0 if self.city_dict[city_index][1] == int(self.city_dict[city_index][1]) else 30
dst_on = 0
dst_index = self.city_dict[city_index][2]
elif len(location_list) == 9 and location_list[0] == "USR":
if int(location_list[1]) < 0 or int(location_list[1]) > 180:
print("Longitude degrees must be between 0 and 180 inclusive")
return
            if int(location_list[2]) < 0 or int(location_list[2]) > 59:
                print("Longitude minutes must be between 0 and 59 inclusive")
return
if location_list[3] != "E" and location_list[3] != "W":
print("Longitude direction must be E or W")
return
            if int(location_list[4]) < 0 or int(location_list[4]) > 90:
                print("Latitude degrees must be between 0 and 90 inclusive")
return
            if int(location_list[5]) < 0 or int(location_list[5]) > 59:
                print("Latitude minutes must be between 0 and 59 inclusive")
return
            if location_list[6] != "N" and location_list[6] != "S":
                print("Latitude direction must be N or S")
return
tz_list = location_list[7].split(':')
if len(tz_list) != 2:
print("Bad timezone '%s', format is HOUR:MINUTE" % location_list[7])
return
if int(tz_list[0]) < -12 or int(tz_list[0]) > 12:
print("Timezone hour must be between -12 and 12 inclusive")
return
if int(tz_list[1]) != 0 and int(tz_list[1]) != 30:
print("Timezone minute must be 0 or 30")
return
if location_list[8].lower() != 'on':
dst_on = 0
dst_index = None
for idx in range(16):
if self.dst_dict[idx][0] == location_list[8]:
dst_index = idx
break
if dst_index is None:
print("DST code '%s' not recognized - consult station manual for valid DST codes" % location_list[8])
return
else:
dst_on = 1
dst_index = 0
city_index = 123 # user-defined city
long_deg = int(location_list[1])
long_min = int(location_list[2])
long_dir = location_list[3]
lat_deg = int(location_list[4])
lat_min = int(location_list[5])
lat_dir = location_list[6]
tz_hr = int(tz_list[0])
tz_min = int(tz_list[1])
else:
print("Bad location '%s'" % location)
print("Location format is: %s" % self.LOCSTR)
return
station.set_loc(loc_type, city_index, dst_on, dst_index, tz_hr, tz_min,
lat_deg, lat_min, lat_dir,
long_deg, long_min, long_dir)
@staticmethod
def show_altitude(station):
altitude = station.get_alt()
print("Altitude: %d meters" % altitude)
@staticmethod
def set_altitude(station, altitude):
if altitude < -200 or altitude > 5000:
print("Altitude must be between -200 and 5000 inclusive")
return
station.set_alt(altitude)
@staticmethod
def show_alarms(station):
data = station.get_alarms()
print("Weekday alarm : %02d:%02d (%s)" % (
data['weekday_hour'], data['weekday_min'], data['weekday_active']))
print("Single alarm : %02d:%02d (%s)" % (
data['single_hour'], data['single_min'], data['single_active']))
print("Pre-alarm : %s (%s)" % (
data['prealarm_period'], data['prealarm_active']))
if data['snooze'] > 0:
print("Snooze : %d mins" % data['snooze'])
else:
print("Snooze : Invalid")
print("Max Temperature Alarm : %s" % data['max_temp'])
print("Min Temperature Alarm : %s" % data['min_temp'])
print("Rain Alarm : %d mm (%s)" % (
data['rain'], data['rain_active']))
print("Wind Speed Alarm : %s (%s)" % (
data['windspeed'], data['windspeed_active']))
print("Wind Gust Alarm : %s (%s)" % (
data['windgust'], data['windgust_active']))
@staticmethod
def set_alarms(station, alarm):
alarm_list = alarm.split(',')
if len(alarm_list) != 9:
print("Bad alarm '%s'" % alarm)
print("Alarm format is: %s" % TE923Configurator.ALMSTR)
return
weekday = alarm_list[0]
if weekday.lower() != 'off':
weekday_list = weekday.split(':')
if len(weekday_list) != 2:
print("Bad alarm '%s', expected HOUR:MINUTE or OFF" % weekday)
return
if int(weekday_list[0]) < 0 or int(weekday_list[0]) > 23:
print("Alarm hours must be between 0 and 23 inclusive")
return
if int(weekday_list[1]) < 0 or int(weekday_list[1]) > 59:
print("Alarm minutes must be between 0 and 59 inclusive")
return
single = alarm_list[1]
if single.lower() != 'off':
single_list = single.split(':')
if len(single_list) != 2:
print("Bad alarm '%s', expected HOUR:MINUTE or OFF" % single)
return
if int(single_list[0]) < 0 or int(single_list[0]) > 23:
print("Alarm hours must be between 0 and 23 inclusive")
return
if int(single_list[1]) < 0 or int(single_list[1]) > 59:
print("Alarm minutes must be between 0 and 59 inclusive")
return
if alarm_list[2].lower() != 'off' and alarm_list[2] not in ['15', '30', '45', '60', '90']:
print("Prealarm must be 15, 30, 45, 60, 90 or OFF")
return
if int(alarm_list[3]) < 1 or int(alarm_list[3]) > 15:
print("Snooze must be between 1 and 15 inclusive")
return
if float(alarm_list[4]) < -50 or float(alarm_list[4]) > 70:
print("Temperature alarm must be between -50 and 70 inclusive")
return
if float(alarm_list[5]) < -50 or float(alarm_list[5]) > 70:
print("Temperature alarm must be between -50 and 70 inclusive")
return
if alarm_list[6].lower() != 'off' and (int(alarm_list[6]) < 1 or int(alarm_list[6]) > 9999):
            print("Rain alarm must be between 1 and 9999 inclusive or OFF")
return
if alarm_list[7].lower() != 'off' and (float(alarm_list[7]) < 1 or float(alarm_list[7]) > 199):
print("Wind alarm must be between 1 and 199 inclusive or OFF")
return
if alarm_list[8].lower() != 'off' and (float(alarm_list[8]) < 1 or float(alarm_list[8]) > 199):
print("Wind alarm must be between 1 and 199 inclusive or OFF")
return
station.set_alarms(alarm_list[0], alarm_list[1], alarm_list[2],
alarm_list[3], alarm_list[4], alarm_list[5],
alarm_list[6], alarm_list[7], alarm_list[8])
print("Temperature alarms can only be modified via station controls")
@staticmethod
def show_interval(station):
idx = station.get_interval()
print("Interval: %s" % TE923Configurator.idx_to_interval.get(idx, 'unknown'))
@staticmethod
def set_interval(station, interval):
"""accept 30s|2h|1d format or raw minutes, but only known intervals"""
idx = TE923Configurator.interval_to_idx.get(interval)
if idx is None:
try:
                ival = int(interval) * 60
for i in TE923Station.idx_to_interval_sec:
if ival == TE923Station.idx_to_interval_sec[i]:
idx = i
except ValueError:
pass
if idx is None:
print("Bad interval '%s'" % interval)
print("Valid intervals are %s" % ','.join(list(TE923Configurator.interval_to_idx.keys())))
return
station.set_interval(idx)
@staticmethod
def print_data(data, fmt):
if fmt.lower() == 'table':
TE923Configurator.print_table(data)
else:
print(data)
@staticmethod
def print_table(data):
for key in sorted(data):
print("%s: %s" % (key.rjust(16), data[key]))
@staticmethod
def print_alignment():
print(" If computer time is not aligned to station time then date")
print(" may be incorrect by 1 day")
class TE923Driver(weewx.drivers.AbstractDevice):
"""Driver for Hideki TE923 stations."""
def __init__(self, **stn_dict):
"""Initialize the station object.
polling_interval: How often to poll the station, in seconds.
[Optional. Default is 10]
model: Which station model is this?
[Optional. Default is 'TE923']
"""
log.info('driver version is %s' % DRIVER_VERSION)
global DEBUG_READ
DEBUG_READ = int(stn_dict.get('debug_read', DEBUG_READ))
global DEBUG_WRITE
DEBUG_WRITE = int(stn_dict.get('debug_write', DEBUG_WRITE))
global DEBUG_DECODE
DEBUG_DECODE = int(stn_dict.get('debug_decode', DEBUG_DECODE))
self._last_rain_loop = None
self._last_rain_archive = None
self._last_ts = None
self.model = stn_dict.get('model', 'TE923')
self.max_tries = int(stn_dict.get('max_tries', 5))
self.retry_wait = int(stn_dict.get('retry_wait', 3))
self.read_timeout = int(stn_dict.get('read_timeout', 10))
self.polling_interval = int(stn_dict.get('polling_interval', 10))
log.info('polling interval is %s' % str(self.polling_interval))
self.sensor_map = dict(DEFAULT_MAP)
if 'sensor_map' in stn_dict:
self.sensor_map.update(stn_dict['sensor_map'])
log.info('sensor map is %s' % self.sensor_map)
self.station = TE923Station(max_tries=self.max_tries,
retry_wait=self.retry_wait,
read_timeout=self.read_timeout)
self.station.open()
log.info('logger capacity %s records' % self.station.get_memory_size())
ts = self.station.get_date()
now = int(time.time())
log.info('station time is %s, computer time is %s' % (ts, now))
def closePort(self):
if self.station is not None:
self.station.close()
self.station = None
@property
def hardware_name(self):
return self.model
def genLoopPackets(self):
while True:
data = self.station.get_readings()
status = self.station.get_status()
packet = self.data_to_packet(data, status=status,
last_rain=self._last_rain_loop,
sensor_map=self.sensor_map)
self._last_rain_loop = packet['rainTotal']
yield packet
time.sleep(self.polling_interval)
# same as genStartupRecords, but insert battery status on the last record.
    # when record_generation is hardware, this results in a full suite of sensor
# data, but with the archive interval calculations done by the hardware.
# there is no battery status for historical records.
def genStartupRecords(self, since_ts=0):
log.info("reading records from logger since %s" % since_ts)
cnt = 0
for data in self.station.gen_records(since_ts):
packet = self.data_to_packet(data, status=None,
last_rain=self._last_rain_archive,
sensor_map=self.sensor_map)
self._last_rain_archive = packet['rainTotal']
if self._last_ts:
packet['interval'] = (packet['dateTime'] - self._last_ts) // 60
if packet['interval'] > 0:
cnt += 1
yield packet
else:
                    log.info("skip packet with duplicate timestamp: %s" % packet)
self._last_ts = packet['dateTime']
if cnt % 50 == 0:
log.info("read %s records from logger" % cnt)
log.info("read %s records from logger" % cnt)
@staticmethod
def data_to_packet(data, status, last_rain, sensor_map):
"""convert raw data to format and units required by weewx
station weewx (metric)
temperature degree C degree C
humidity percent percent
uv index unitless unitless
slp mbar mbar
wind speed mile/h km/h
wind gust mile/h km/h
wind dir degree degree
rain mm cm
rain rate cm/h
"""
packet = dict()
packet['usUnits'] = weewx.METRIC
packet['dateTime'] = data['dateTime']
# include the link status - 0 indicates ok, 1 indicates no link
data['link_wind'] = 0 if data['windspeed_state'] == STATE_OK else 1
data['link_rain'] = 0 if data['rain_state'] == STATE_OK else 1
data['link_uv'] = 0 if data['uv_state'] == STATE_OK else 1
data['link_1'] = 0 if data['t_1_state'] == STATE_OK else 1
data['link_2'] = 0 if data['t_2_state'] == STATE_OK else 1
data['link_3'] = 0 if data['t_3_state'] == STATE_OK else 1
data['link_4'] = 0 if data['t_4_state'] == STATE_OK else 1
data['link_5'] = 0 if data['t_5_state'] == STATE_OK else 1
# map extensible sensors to database fields
for label in sensor_map:
if sensor_map[label] in data:
packet[label] = data[sensor_map[label]]
elif status is not None and sensor_map[label] in status:
packet[label] = int(status[sensor_map[label]])
        # handle unit conversions
packet['windSpeed'] = data.get('windspeed')
if packet['windSpeed'] is not None:
packet['windSpeed'] *= 1.60934 # speed is mph; weewx wants km/h
packet['windDir'] = data.get('winddir')
if packet['windDir'] is not None:
            packet['windDir'] *= 22.5  # station reports a 16-point compass index; weewx wants degrees
packet['windGust'] = data.get('windgust')
if packet['windGust'] is not None:
packet['windGust'] *= 1.60934 # speed is mph; weewx wants km/h
packet['rainTotal'] = data['rain']
if packet['rainTotal'] is not None:
packet['rainTotal'] *= 0.0705555556 # weewx wants cm (1/36 inch)
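            # e.g. 36 bucket tips * 0.0705555556 cm/tip = 2.54 cm (one inch) of rain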
packet['rain'] = weewx.wxformulas.calculate_rain(
packet['rainTotal'], last_rain)
# some stations report uv
packet['UV'] = data['uv']
# station calculates windchill
packet['windchill'] = data['windchill']
        # station reports barometer (SLP)
packet['barometer'] = data['slp']
# forecast and storm fields use the station's algorithms
packet['forecast'] = data['forecast']
packet['storm'] = data['storm']
return packet
STATE_OK = 'ok'
STATE_INVALID = 'invalid'
STATE_NO_LINK = 'no_link'
def _fmt(buf):
if buf:
return ' '.join(["%02x" % x for x in buf])
return ''
def bcd2int(bcd):
return int(((bcd & 0xf0) >> 4) * 10) + int(bcd & 0x0f)
def rev_bcd2int(bcd):
return int((bcd & 0xf0) >> 4) + int((bcd & 0x0f) * 10)
def int2bcd(num):
return int(num / 10) * 0x10 + (num % 10)
def rev_int2bcd(num):
return (num % 10) * 0x10 + int(num / 10)
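# Worked BCD examples (illustrative only): the station packs two decimal
# digits per byte; the "rev" variants keep the tens digit in the low nibble.
#   bcd2int(0x25)   -> 25      rev_bcd2int(0x25) -> 52
#   int2bcd(25)     -> 0x25    rev_int2bcd(25)   -> 0x52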
def decode(buf):
data = dict()
for i in range(6): # console plus 5 remote channels
data.update(decode_th(buf, i))
data.update(decode_uv(buf))
data.update(decode_pressure(buf))
data.update(decode_forecast(buf))
data.update(decode_windchill(buf))
data.update(decode_wind(buf))
data.update(decode_rain(buf))
return data
def decode_th(buf, i):
if i == 0:
tlabel = 't_in'
hlabel = 'h_in'
else:
tlabel = 't_%d' % i
hlabel = 'h_%d' % i
tstate = '%s_state' % tlabel
hstate = '%s_state' % hlabel
offset = i * 3
if DEBUG_DECODE:
log.debug("TH%d BUF[%02d]=%02x BUF[%02d]=%02x BUF[%02d]=%02x" %
(i, 0 + offset, buf[0 + offset], 1 + offset, buf[1 + offset],
2 + offset, buf[2 + offset]))
data = dict()
data[tlabel], data[tstate] = decode_temp(buf[0 + offset], buf[1 + offset],
i != 0)
data[hlabel], data[hstate] = decode_humid(buf[2 + offset])
if DEBUG_DECODE:
log.debug("TH%d %s %s %s %s" % (i, data[tlabel], data[tstate],
data[hlabel], data[hstate]))
return data
def decode_temp(byte1, byte2, remote):
"""decode temperature. result is degree C."""
if bcd2int(byte1 & 0x0f) > 9:
if byte1 & 0x0f == 0x0a:
return None, STATE_NO_LINK
else:
return None, STATE_INVALID
if byte2 & 0x40 != 0x40 and remote:
return None, STATE_INVALID
value = bcd2int(byte1) / 10.0 + bcd2int(byte2 & 0x0f) * 10.0
if byte2 & 0x20 == 0x20:
value += 0.05
if byte2 & 0x80 != 0x80:
value *= -1
return value, STATE_OK
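# Worked example (not from the original source): decode_temp(0x54, 0xc2, True)
# reads BCD digits 5.4 from byte1 plus the tens digit 2 from byte2's low
# nibble; bit 0x80 of byte2 marks a positive value, so it returns (25.4, STATE_OK).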
def decode_humid(byte):
"""decode humidity. result is percentage."""
if bcd2int(byte & 0x0f) > 9:
if byte & 0x0f == 0x0a:
return None, STATE_NO_LINK
else:
return None, STATE_INVALID
return bcd2int(byte), STATE_OK
def decode_uv(buf):
"""decode data from uv sensor"""
data = dict()
if DEBUG_DECODE:
log.debug("UVX BUF[18]=%02x BUF[19]=%02x" % (buf[18], buf[19]))
if ((buf[18] == 0xaa and buf[19] == 0x0a) or
(buf[18] == 0xff and buf[19] == 0xff)):
data['uv_state'] = STATE_NO_LINK
data['uv'] = None
elif bcd2int(buf[18]) > 99 or bcd2int(buf[19]) > 99:
data['uv_state'] = STATE_INVALID
data['uv'] = None
else:
data['uv_state'] = STATE_OK
data['uv'] = bcd2int(buf[18] & 0x0f) / 10.0 \
+ bcd2int((buf[18] & 0xf0) >> 4) \
+ bcd2int(buf[19] & 0x0f) * 10.0
if DEBUG_DECODE:
log.debug("UVX %s %s" % (data['uv'], data['uv_state']))
return data
def decode_pressure(buf):
"""decode pressure data"""
data = dict()
if DEBUG_DECODE:
log.debug("PRS BUF[20]=%02x BUF[21]=%02x" % (buf[20], buf[21]))
if buf[21] & 0xf0 == 0xf0:
data['slp_state'] = STATE_INVALID
data['slp'] = None
else:
data['slp_state'] = STATE_OK
data['slp'] = int(buf[21] * 0x100 + buf[20]) * 0.0625
if DEBUG_DECODE:
log.debug("PRS %s %s" % (data['slp'], data['slp_state']))
return data
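# Worked example (illustrative): with buf[20]=0x40 and buf[21]=0x3f the raw
# 16-bit value is 0x3f40 = 16192, and 16192 * 0.0625 = 1012.0 mbar.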
def decode_wind(buf):
"""decode wind speed, gust, and direction"""
data = dict()
if DEBUG_DECODE:
log.debug("WGS BUF[25]=%02x BUF[26]=%02x" % (buf[25], buf[26]))
data['windgust'], data['windgust_state'] = decode_ws(buf[25], buf[26])
if DEBUG_DECODE:
log.debug("WGS %s %s" % (data['windgust'], data['windgust_state']))
if DEBUG_DECODE:
log.debug("WSP BUF[27]=%02x BUF[28]=%02x" % (buf[27], buf[28]))
data['windspeed'], data['windspeed_state'] = decode_ws(buf[27], buf[28])
if DEBUG_DECODE:
log.debug("WSP %s %s" % (data['windspeed'], data['windspeed_state']))
if DEBUG_DECODE:
log.debug("WDR BUF[29]=%02x" % buf[29])
data['winddir_state'] = data['windspeed_state']
data['winddir'] = int(buf[29] & 0x0f)
if DEBUG_DECODE:
log.debug("WDR %s %s" % (data['winddir'], data['winddir_state']))
return data
def decode_ws(byte1, byte2):
"""decode wind speed, result is mph"""
if bcd2int(byte1 & 0xf0) > 90 or bcd2int(byte1 & 0x0f) > 9:
if ((byte1 == 0xee and byte2 == 0x8e) or
(byte1 == 0xff and byte2 == 0xff)):
return None, STATE_NO_LINK
else:
return None, STATE_INVALID
offset = 100 if byte2 & 0x10 == 0x10 else 0
value = bcd2int(byte1) / 10.0 + bcd2int(byte2 & 0x0f) * 10.0 + offset
return value, STATE_OK
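# Worked example (illustrative): decode_ws(0x23, 0x41) yields 2.3 from the BCD
# byte1 plus 1 * 10 from byte2's low nibble, i.e. (12.3, STATE_OK) in mph;
# bit 0x10 of byte2 would add another 100 mph for high wind speeds.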
def decode_rain(buf):
"""rain counter is number of bucket tips, each tip is about 0.03 inches"""
data = dict()
if DEBUG_DECODE:
log.debug("RAIN BUF[30]=%02x BUF[31]=%02x" % (buf[30], buf[31]))
data['rain_state'] = STATE_OK
data['rain'] = int(buf[31] * 0x100 + buf[30])
if DEBUG_DECODE:
log.debug("RAIN %s %s" % (data['rain'], data['rain_state']))
return data
def decode_windchill(buf):
data = dict()
if DEBUG_DECODE:
log.debug("WCL BUF[23]=%02x BUF[24]=%02x" % (buf[23], buf[24]))
if bcd2int(buf[23] & 0xf0) > 90 or bcd2int(buf[23] & 0x0f) > 9:
if ((buf[23] == 0xee and buf[24] == 0x8e) or
(buf[23] == 0xff and buf[24] == 0xff)):
data['windchill_state'] = STATE_NO_LINK
else:
data['windchill_state'] = STATE_INVALID
data['windchill'] = None
elif buf[24] & 0x40 != 0x40:
data['windchill_state'] = STATE_INVALID
data['windchill'] = None
else:
data['windchill_state'] = STATE_OK
data['windchill'] = bcd2int(buf[23]) / 10.0 \
+ bcd2int(buf[24] & 0x0f) * 10.0
if buf[24] & 0x20 == 0x20:
data['windchill'] += 0.05
if buf[24] & 0x80 != 0x80:
data['windchill'] *= -1
if DEBUG_DECODE:
log.debug("WCL %s %s" % (data['windchill'], data['windchill_state']))
return data
def decode_forecast(buf):
data = dict()
if DEBUG_DECODE:
log.debug("STT BUF[22]=%02x" % buf[22])
if buf[22] & 0x0f == 0x0f:
data['storm'] = None
data['forecast'] = None
else:
data['storm'] = 1 if buf[22] & 0x08 == 0x08 else 0
data['forecast'] = int(buf[22] & 0x07)
if DEBUG_DECODE:
log.debug("STT %s %s" % (data['storm'], data['forecast']))
return data
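# The 3-bit forecast value indexes TE923Configurator.forecast_dict
# (0=heavy snow ... 6=sunny); bit 0x08 of buf[22] is the storm warning flag.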
class BadRead(weewx.WeeWxIOError):
"""Bogus data length, CRC, header block, or other read failure"""
class BadWrite(weewx.WeeWxIOError):
"""Bogus data length, header block, or other write failure"""
class BadHeader(weewx.WeeWxIOError):
"""Bad header byte"""
class TE923Station(object):
ENDPOINT_IN = 0x81
READ_LENGTH = 0x8
TIMEOUT = 1200
START_ADDRESS = 0x101
RECORD_SIZE = 0x26
idx_to_interval_sec = {
1: 300, 2: 600, 3: 1200, 4: 1800, 5: 3600, 6: 5400, 7: 7200,
8: 10800, 9: 14400, 10: 21600, 11: 86400}
def __init__(self, vendor_id=0x1130, product_id=0x6801,
max_tries=10, retry_wait=5, read_timeout=5):
self.vendor_id = vendor_id
self.product_id = product_id
self.devh = None
self.max_tries = max_tries
self.retry_wait = retry_wait
self.read_timeout = read_timeout
self._num_rec = None
self._num_blk = None
def __enter__(self):
self.open()
return self
def __exit__(self, type_, value, traceback): # @UnusedVariable
self.close()
def open(self, interface=0):
dev = self._find_dev(self.vendor_id, self.product_id)
if not dev:
log.critical("Cannot find USB device with VendorID=0x%04x ProductID=0x%04x" % (self.vendor_id, self.product_id))
raise weewx.WeeWxIOError('Unable to find station on USB')
self.devh = dev.open()
if not self.devh:
raise weewx.WeeWxIOError('Open USB device failed')
# be sure kernel does not claim the interface
try:
self.devh.detachKernelDriver(interface)
except (AttributeError, usb.USBError):
pass
# attempt to claim the interface
try:
self.devh.claimInterface(interface)
self.devh.setAltInterface(interface)
except usb.USBError as e:
self.close()
log.critical("Unable to claim USB interface %s: %s" % (interface, e))
raise weewx.WeeWxIOError(e)
# figure out which type of memory this station has
self.read_memory_size()
def close(self):
try:
self.devh.releaseInterface()
except (ValueError, usb.USBError) as e:
log.error("release interface failed: %s" % e)
self.devh = None
@staticmethod
def _find_dev(vendor_id, product_id):
"""Find the vendor and product ID on the USB."""
for bus in usb.busses():
for dev in bus.devices:
if dev.idVendor == vendor_id and dev.idProduct == product_id:
log.info('Found device on USB bus=%s device=%s' %
(bus.dirname, dev.filename))
return dev
return None
def _raw_read(self, addr):
reqbuf = [0x05, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
reqbuf[4] = addr // 0x10000
reqbuf[3] = (addr - (reqbuf[4] * 0x10000)) // 0x100
reqbuf[2] = addr - (reqbuf[4] * 0x10000) - (reqbuf[3] * 0x100)
reqbuf[5] = (reqbuf[1] ^ reqbuf[2] ^ reqbuf[3] ^ reqbuf[4])
ret = self.devh.controlMsg(requestType=0x21,
request=usb.REQ_SET_CONFIGURATION,
value=0x0200,
index=0x0000,
buffer=reqbuf,
timeout=self.TIMEOUT)
if ret != 8:
raise BadRead('Unexpected response to data request: %s != 8' % ret)
start_ts = time.time()
rbuf = []
while time.time() - start_ts < self.read_timeout:
try:
buf = self.devh.interruptRead(
self.ENDPOINT_IN, self.READ_LENGTH, self.TIMEOUT)
if buf:
nbytes = buf[0]
if nbytes > 7 or nbytes > len(buf) - 1:
raise BadRead("Bogus length during read: %d" % nbytes)
rbuf.extend(buf[1:1 + nbytes])
if len(rbuf) >= 34:
break
except usb.USBError as e:
errmsg = repr(e)
if not ('No data available' in errmsg or 'No error' in errmsg):
raise
else:
log.debug("timeout while reading: ignoring bytes: %s" % _fmt(rbuf))
raise BadRead("Timeout after %d bytes" % len(rbuf))
# Send acknowledgement whether or not it was a good read
reqbuf = [0x24, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
reqbuf[4] = addr // 0x10000
reqbuf[3] = (addr - (reqbuf[4] * 0x10000)) // 0x100
reqbuf[2] = addr - (reqbuf[4] * 0x10000) - (reqbuf[3] * 0x100)
reqbuf[5] = (reqbuf[1] ^ reqbuf[2] ^ reqbuf[3] ^ reqbuf[4])
ret = self.devh.controlMsg(requestType=0x21,
request=usb.REQ_SET_CONFIGURATION,
value=0x0200,
index=0x0000,
buffer=reqbuf,
timeout=self.TIMEOUT)
# now check what we got
if len(rbuf) < 34:
raise BadRead("Not enough bytes: %d < 34" % len(rbuf))
# there must be a header byte...
if rbuf[0] != 0x5a:
raise BadHeader("Bad header byte: %02x != %02x" % (rbuf[0], 0x5a))
# ...and the last byte must be a valid crc
crc = 0x00
for x in rbuf[:33]:
crc = crc ^ x
if crc != rbuf[33]:
raise BadRead("Bad crc: %02x != %02x" % (crc, rbuf[33]))
# early versions of this driver used to get long reads, but these
# might not happen any more. log it then try to use the data anyway.
if len(rbuf) != 34:
log.info("read: wrong number of bytes: %d != 34" % len(rbuf))
return rbuf
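    # Request framing example (illustrative): for addr 0x020001 the read
    # request is [0x05, 0xAF, 0x01, 0x00, 0x02, 0xAC, 0x00, 0x00] - the
    # address is little-endian in bytes 2..4 and byte 5 is the XOR check
    # 0xAF ^ 0x01 ^ 0x00 ^ 0x02 = 0xAC.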
def _raw_write(self, addr, buf):
wbuf = [0] * 38
wbuf[0] = 0xAE
wbuf[3] = addr // 0x10000
wbuf[2] = (addr - (wbuf[3] * 0x10000)) // 0x100
wbuf[1] = addr - (wbuf[3] * 0x10000) - (wbuf[2] * 0x100)
crc = wbuf[0] ^ wbuf[1] ^ wbuf[2] ^ wbuf[3]
for i in range(32):
wbuf[i + 4] = buf[i]
crc = crc ^ buf[i]
wbuf[36] = crc
for i in range(6):
if i == 5:
reqbuf = [0x2,
wbuf[i * 7], wbuf[1 + i * 7],
0x00, 0x00, 0x00, 0x00, 0x00]
else:
reqbuf = [0x7,
wbuf[i * 7], wbuf[1 + i * 7], wbuf[2 + i * 7],
wbuf[3 + i * 7], wbuf[4 + i * 7], wbuf[5 + i * 7],
wbuf[6 + i * 7]]
if DEBUG_WRITE:
log.debug("write: %s" % _fmt(reqbuf))
ret = self.devh.controlMsg(requestType=0x21,
request=usb.REQ_SET_CONFIGURATION,
value=0x0200,
index=0x0000,
buffer=reqbuf,
timeout=self.TIMEOUT)
if ret != 8:
raise BadWrite('Unexpected response: %s != 8' % ret)
# Wait for acknowledgement
time.sleep(0.1)
start_ts = time.time()
rbuf = []
while time.time() - start_ts < 5:
try:
tmpbuf = self.devh.interruptRead(
self.ENDPOINT_IN, self.READ_LENGTH, self.TIMEOUT)
if tmpbuf:
nbytes = tmpbuf[0]
if nbytes > 7 or nbytes > len(tmpbuf) - 1:
raise BadRead("Bogus length during read: %d" % nbytes)
rbuf.extend(tmpbuf[1:1 + nbytes])
if len(rbuf) >= 1:
break
except usb.USBError as e:
errmsg = repr(e)
if not ('No data available' in errmsg or 'No error' in errmsg):
raise
time.sleep(0.009)
else:
raise BadWrite("Timeout after %d bytes" % len(rbuf))
if len(rbuf) != 1:
log.info("write: ack got wrong number of bytes: %d != 1" % len(rbuf))
if len(rbuf) == 0:
raise BadWrite("Bad ack: zero length response")
elif rbuf[0] != 0x5a:
raise BadHeader("Bad header byte: %02x != %02x" % (rbuf[0], 0x5a))
def _read(self, addr):
"""raw_read returns the entire 34-byte chunk, i.e., one header byte,
32 data bytes, one checksum byte. this function simply returns it."""
# FIXME: strip the header and checksum so that we return only the
# 32 bytes of data. this will require shifting every index
# pretty much everywhere else in this code.
if DEBUG_READ:
log.debug("read: address 0x%06x" % addr)
for cnt in range(self.max_tries):
try:
buf = self._raw_read(addr)
if DEBUG_READ:
log.debug("read: %s" % _fmt(buf))
return buf
except (BadRead, BadHeader, usb.USBError) as e:
log.error("Failed attempt %d of %d to read data: %s" %
(cnt + 1, self.max_tries, e))
log.debug("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
raise weewx.RetriesExceeded("Read failed after %d tries" %
self.max_tries)
def _write(self, addr, buf):
if DEBUG_WRITE:
log.debug("write: address 0x%06x: %s" % (addr, _fmt(buf)))
for cnt in range(self.max_tries):
try:
self._raw_write(addr, buf)
return
except (BadWrite, BadHeader, usb.USBError) as e:
log.error("Failed attempt %d of %d to write data: %s" %
(cnt + 1, self.max_tries, e))
log.debug("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
raise weewx.RetriesExceeded("Write failed after %d tries" %
self.max_tries)
def read_memory_size(self):
buf = self._read(0xfc)
if DEBUG_DECODE:
log.debug("MEM BUF[1]=%s" % buf[1])
if buf[1] == 0:
self._num_rec = 208
self._num_blk = 256
log.debug("detected small memory size")
elif buf[1] == 2:
self._num_rec = 3442
self._num_blk = 4096
log.debug("detected large memory size")
else:
msg = "Unrecognised memory size '%s'" % buf[1]
log.error(msg)
raise weewx.WeeWxIOError(msg)
def get_memory_size(self):
return self._num_rec
def gen_blocks(self, count=None):
"""generator that returns consecutive blocks of station memory"""
if not count:
count = self._num_blk
for x in range(0, count * 32, 32):
buf = self._read(x)
yield x, buf
def dump_memory(self):
for i in range(8):
buf = self._read(i * 32)
for j in range(4):
log.info("%02x : %02x %02x %02x %02x %02x %02x %02x %02x" %
(i * 32 + j * 8, buf[1 + j * 8], buf[2 + j * 8],
buf[3 + j * 8], buf[4 + j * 8], buf[5 + j * 8],
buf[6 + j * 8], buf[7 + j * 8], buf[8 + j * 8]))
def get_config(self):
data = dict()
data.update(self.get_versions())
data.update(self.get_status())
data['latitude'], data['longitude'] = self.get_location()
data['altitude'] = self.get_altitude()
return data
def get_versions(self):
data = dict()
buf = self._read(0x98)
if DEBUG_DECODE:
log.debug("VER BUF[1]=%s BUF[2]=%s BUF[3]=%s BUF[4]=%s BUF[5]=%s" %
(buf[1], buf[2], buf[3], buf[4], buf[5]))
data['version_bar'] = buf[1]
data['version_uv'] = buf[2]
data['version_rcc'] = buf[3]
data['version_wind'] = buf[4]
data['version_sys'] = buf[5]
if DEBUG_DECODE:
log.debug("VER bar=%s uv=%s rcc=%s wind=%s sys=%s" %
(data['version_bar'], data['version_uv'],
data['version_rcc'], data['version_wind'],
data['version_sys']))
return data
def get_status(self):
# map the battery status flags. 0 indicates ok, 1 indicates failure.
# FIXME: i get 1 for uv even when no uv link
# FIXME: i get 0 for th3, th4, th5 even when no link
status = dict()
buf = self._read(0x4c)
if DEBUG_DECODE:
log.debug("BAT BUF[1]=%02x" % buf[1])
status['bat_rain'] = 0 if buf[1] & 0x80 == 0x80 else 1
status['bat_wind'] = 0 if buf[1] & 0x40 == 0x40 else 1
status['bat_uv'] = 0 if buf[1] & 0x20 == 0x20 else 1
status['bat_5'] = 0 if buf[1] & 0x10 == 0x10 else 1
status['bat_4'] = 0 if buf[1] & 0x08 == 0x08 else 1
status['bat_3'] = 0 if buf[1] & 0x04 == 0x04 else 1
status['bat_2'] = 0 if buf[1] & 0x02 == 0x02 else 1
status['bat_1'] = 0 if buf[1] & 0x01 == 0x01 else 1
if DEBUG_DECODE:
log.debug("BAT rain=%s wind=%s uv=%s th5=%s th4=%s th3=%s th2=%s th1=%s" %
(status['bat_rain'], status['bat_wind'], status['bat_uv'],
status['bat_5'], status['bat_4'], status['bat_3'],
status['bat_2'], status['bat_1']))
return status
# FIXME: is this any different than get_alt?
def get_altitude(self):
buf = self._read(0x5a)
if DEBUG_DECODE:
log.debug("ALT BUF[1]=%02x BUF[2]=%02x BUF[3]=%02x" %
(buf[1], buf[2], buf[3]))
altitude = buf[2] * 0x100 + buf[1]
if buf[3] & 0x8 == 0x8:
altitude *= -1
if DEBUG_DECODE:
log.debug("ALT %s" % altitude)
return altitude
# FIXME: is this any different than get_loc?
def get_location(self):
buf = self._read(0x06)
if DEBUG_DECODE:
log.debug("LOC BUF[1]=%02x BUF[2]=%02x BUF[3]=%02x BUF[4]=%02x BUF[5]=%02x BUF[6]=%02x" % (buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]))
latitude = float(rev_bcd2int(buf[1])) + (float(rev_bcd2int(buf[2])) / 60)
if buf[5] & 0x80 == 0x80:
latitude *= -1
longitude = float((buf[6] & 0xf0) // 0x10 * 100) + float(rev_bcd2int(buf[3])) + (float(rev_bcd2int(buf[4])) / 60)
if buf[5] & 0x40 == 0x00:
longitude *= -1
if DEBUG_DECODE:
log.debug("LOC %s %s" % (latitude, longitude))
return latitude, longitude
def get_readings(self):
"""get sensor readings from the station, return as dictionary"""
buf = self._read(0x020001)
data = decode(buf[1:])
data['dateTime'] = int(time.time() + 0.5)
return data
def _get_next_index(self):
"""get the index of the next history record"""
buf = self._read(0xfb)
if DEBUG_DECODE:
log.debug("HIS BUF[3]=%02x BUF[5]=%02x" % (buf[3], buf[5]))
record_index = buf[3] * 0x100 + buf[5]
log.debug("record_index=%s" % record_index)
if record_index > self._num_rec:
msg = "record index of %d exceeds memory size of %d records" % (
record_index, self._num_rec)
log.error(msg)
raise weewx.WeeWxIOError(msg)
return record_index
def _get_starting_addr(self, requested):
"""calculate the oldest and latest addresses"""
count = requested
if count is None:
count = self._num_rec
elif count > self._num_rec:
count = self._num_rec
log.info("too many records requested (%d), using %d instead" %
(requested, count))
idx = self._get_next_index()
if idx < 1:
idx += self._num_rec
latest_addr = self.START_ADDRESS + (idx - 1) * self.RECORD_SIZE
oldest_addr = latest_addr - (count - 1) * self.RECORD_SIZE
log.debug("count=%s oldest_addr=0x%06x latest_addr=0x%06x" %
(count, oldest_addr, latest_addr))
return oldest_addr, count
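    # Ring-buffer example (illustrative, small-memory station): with
    # START_ADDRESS=0x101, RECORD_SIZE=0x26 and a next-record index of 5,
    # latest_addr = 0x101 + 4*0x26; requesting more records than precede it
    # puts oldest_addr below 0x101, and gen_records wraps such addresses back
    # to the top of memory by adding _num_rec * RECORD_SIZE.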
def gen_records(self, since_ts=0, requested=None):
"""return requested records from station from oldest to newest. If
since_ts is specified, then all records since that time. If requested
is specified, then at most that many most recent records. If both
are specified then at most requested records newer than the timestamp.
Each historical record is 38 bytes (0x26) long. Records start at
memory address 0x101 (257). The index of the record after the latest
        is at address 0xfc:0xff (252:255), indicating the offset from the
starting address.
On small memory stations, the last 32 bytes of memory are never used.
On large memory stations, the last 20 bytes of memory are never used.
"""
log.debug("gen_records: since_ts=%s requested=%s" % (since_ts, requested))
# we need the current year and month since station does not track year
start_ts = time.time()
tt = time.localtime(start_ts)
# get the archive interval for use in calculations later
arcint = self.get_interval_seconds()
# if nothing specified, get everything since time began
if since_ts is None:
since_ts = 0
# if no count specified, use interval to estimate number of records
if requested is None:
requested = int((start_ts - since_ts) / arcint)
requested += 1 # safety margin
# get the starting address for what we want to read, plus actual count
oldest_addr, count = self._get_starting_addr(requested)
# inner loop reads records, outer loop catches any added while reading
more_records = True
while more_records:
n = 0
while n < count:
addr = oldest_addr + n * self.RECORD_SIZE
if addr < self.START_ADDRESS:
addr += self._num_rec * self.RECORD_SIZE
record = self.get_record(addr, tt.tm_year, tt.tm_mon)
n += 1
msg = "record %d of %d addr=0x%06x" % (n, count, addr)
if record and record['dateTime'] > since_ts:
msg += " %s" % timestamp_to_string(record['dateTime'])
log.debug("gen_records: yield %s" % msg)
yield record
else:
if record:
msg += " since_ts=%d %s" % (
since_ts, timestamp_to_string(record['dateTime']))
log.debug("gen_records: skip %s" % msg)
            # (a time.sleep here can be used to simulate slow reads when testing)
# see if reading has taken so much time that more records have
# arrived. read whatever records have come in since the read began.
now = time.time()
if now - start_ts > arcint:
newreq = int((now - start_ts) / arcint)
newreq += 1 # safety margin
log.debug("gen_records: reading %d more records" % newreq)
oldest_addr, count = self._get_starting_addr(newreq)
start_ts = now
else:
more_records = False
def get_record(self, addr, now_year, now_month):
"""Return a single record from station."""
log.debug("get_record at address 0x%06x (year=%s month=%s)" %
(addr, now_year, now_month))
buf = self._read(addr)
if DEBUG_DECODE:
log.debug("REC %02x %02x %02x %02x" %
(buf[1], buf[2], buf[3], buf[4]))
if buf[1] == 0xff:
log.debug("get_record: no data at address 0x%06x" % addr)
return None
year = now_year
month = buf[1] & 0x0f
if month > now_month:
year -= 1
day = bcd2int(buf[2])
hour = bcd2int(buf[3])
minute = bcd2int(buf[4])
ts = time.mktime((year, month, day, hour, minute, 0, 0, 0, -1))
if DEBUG_DECODE:
log.debug("REC %d/%02d/%02d %02d:%02d = %d" %
(year, month, day, hour, minute, ts))
tmpbuf = buf[5:16]
buf = self._read(addr + 0x10)
tmpbuf.extend(buf[1:22])
data = decode(tmpbuf)
data['dateTime'] = int(ts)
log.debug("get_record: found record %s" % data)
return data
def _read_minmax(self):
buf = self._read(0x24)
tmpbuf = self._read(0x40)
buf[28:37] = tmpbuf[1:10]
tmpbuf = self._read(0xaa)
buf[37:47] = tmpbuf[1:11]
tmpbuf = self._read(0x60)
buf[47:74] = tmpbuf[1:28]
tmpbuf = self._read(0x7c)
buf[74:101] = tmpbuf[1:28]
return buf
def get_minmax(self):
buf = self._read_minmax()
data = dict()
data['t_in_min'], _ = decode_temp(buf[1], buf[2], 0)
data['t_in_max'], _ = decode_temp(buf[3], buf[4], 0)
data['h_in_min'], _ = decode_humid(buf[5])
data['h_in_max'], _ = decode_humid(buf[6])
for i in range(5):
label = 't_%d_%%s' % (i + 1)
data[label % 'min'], _ = decode_temp(buf[7+i*6], buf[8 +i*6], 1)
data[label % 'max'], _ = decode_temp(buf[9+i*6], buf[10+i*6], 1)
label = 'h_%d_%%s' % (i + 1)
data[label % 'min'], _ = decode_humid(buf[11+i*6])
data[label % 'max'], _ = decode_humid(buf[12+i*6])
data['windspeed_max'], _ = decode_ws(buf[37], buf[38])
data['windgust_max'], _ = decode_ws(buf[39], buf[40])
# not sure if this is the correct units here...
data['rain_yesterday'] = (buf[42] * 0x100 + buf[41]) * 0.705555556
data['rain_week'] = (buf[44] * 0x100 + buf[43]) * 0.705555556
data['rain_month'] = (buf[46] * 0x100 + buf[45]) * 0.705555556
tt = time.localtime()
offset = 1 if tt[3] < 12 else 0
month = bcd2int(buf[47] & 0xf)
day = bcd2int(buf[48])
hour = bcd2int(buf[49])
minute = bcd2int(buf[50])
year = tt.tm_year
if month > tt.tm_mon:
year -= 1
ts = time.mktime((year, month, day - offset, hour, minute, 0, 0, 0, 0))
data['barometer_ts'] = ts
for i in range(25):
data['barometer_%d' % i] = (buf[52+i*2]*0x100 + buf[51+i*2])*0.0625
return data
def _read_date(self):
buf = self._read(0x0)
return buf[1:33]
def _write_date(self, buf):
self._write(0x0, buf)
def get_date(self):
tt = time.localtime()
offset = 1 if tt[3] < 12 else 0
buf = self._read_date()
day = rev_bcd2int(buf[2])
month = (buf[5] & 0xF0) // 0x10
year = rev_bcd2int(buf[4]) + 2000
ts = time.mktime((year, month, day + offset, 0, 0, 0, 0, 0, 0))
return ts
def set_date(self, ts):
tt = time.localtime(ts)
buf = self._read_date()
buf[2] = rev_int2bcd(tt[2])
buf[4] = rev_int2bcd(tt[0] - 2000)
buf[5] = tt[1] * 0x10 + (tt[6] + 1) * 2 + (buf[5] & 1)
buf[15] = self._checksum(buf[0:15])
self._write_date(buf)
def _read_loc(self, loc_type):
addr = 0x0 if loc_type == 0 else 0x16
buf = self._read(addr)
return buf[1:33]
def _write_loc(self, loc_type, buf):
addr = 0x0 if loc_type == 0 else 0x16
self._write(addr, buf)
def get_loc(self, loc_type):
buf = self._read_loc(loc_type)
offset = 6 if loc_type == 0 else 0
data = dict()
data['city_time'] = (buf[6 + offset] & 0xF0) + (buf[7 + offset] & 0xF)
data['lat_deg'] = rev_bcd2int(buf[0 + offset])
data['lat_min'] = rev_bcd2int(buf[1 + offset])
data['lat_dir'] = "S" if buf[4 + offset] & 0x80 == 0x80 else "N"
data['long_deg'] = (buf[5 + offset] & 0xF0) // 0x10 * 100 + rev_bcd2int(buf[2 + offset])
data['long_min'] = rev_bcd2int(buf[3 + offset])
data['long_dir'] = "E" if buf[4 + offset] & 0x40 == 0x40 else "W"
data['tz_hr'] = (buf[7 + offset] & 0xF0) // 0x10
if buf[4 + offset] & 0x8 == 0x8:
data['tz_hr'] *= -1
data['tz_min'] = 30 if buf[4 + offset] & 0x3 == 0x3 else 0
if buf[4 + offset] & 0x10 == 0x10:
data['dst_always_on'] = True
else:
data['dst_always_on'] = False
data['dst'] = buf[5 + offset] & 0xf
return data
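    # Flag byte layout used by get_loc/set_loc (stated here for reference):
    # buf[4+offset] bit 0x80 = latitude S, 0x40 = longitude E, 0x10 = DST
    # always on, 0x08 = negative timezone, low bits 0x03 = 30-minute offset.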
def set_loc(self, loc_type, city_index, dst_on, dst_index, tz_hr, tz_min,
lat_deg, lat_min, lat_dir, long_deg, long_min, long_dir):
buf = self._read_loc(loc_type)
offset = 6 if loc_type == 0 else 0
buf[0 + offset] = rev_int2bcd(lat_deg)
buf[1 + offset] = rev_int2bcd(lat_min)
buf[2 + offset] = rev_int2bcd(long_deg % 100)
buf[3 + offset] = rev_int2bcd(long_min)
        buf[4 + offset] = (lat_dir == "S") * 0x80 + (long_dir == "E") * 0x40 + dst_on * 0x10 + (tz_hr < 0) * 0x8 + (tz_min == 30) * 3
buf[5 + offset] = (long_deg > 99) * 0x10 + dst_index
        buf[6 + offset] = (buf[6 + offset] & 0x0F) + int(city_index / 0x10) * 0x10
buf[7 + offset] = city_index % 0x10 + abs(tz_hr) * 0x10
if loc_type == 0:
buf[15] = self._checksum(buf[0:15])
else:
buf[8] = self._checksum(buf[0:8])
self._write_loc(loc_type, buf)
def _read_alt(self):
buf = self._read(0x5a)
return buf[1:33]
def _write_alt(self, buf):
self._write(0x5a, buf)
def get_alt(self):
buf = self._read_alt()
altitude = buf[1] * 0x100 + buf[0]
        if buf[2] & 0x8 == 0x8:
altitude *= -1
return altitude
def set_alt(self, altitude):
buf = self._read_alt()
buf[0] = abs(altitude) & 0xff
buf[1] = abs(altitude) // 0x100
        buf[2] = (buf[2] & 0x7) + (altitude < 0) * 0x8
buf[3] = self._checksum(buf[0:3])
self._write_alt(buf)
def _read_alarms(self):
buf = self._read(0x10)
tmpbuf = self._read(0x1F)
buf[33:65] = tmpbuf[1:33]
tmpbuf = self._read(0xA0)
buf[65:97] = tmpbuf[1:33]
return buf[1:97]
def _write_alarms(self, buf):
self._write(0x10, buf[0:32])
self._write(0x1F, buf[32:64])
self._write(0xA0, buf[64:96])
def get_alarms(self):
buf = self._read_alarms()
data = dict()
data['weekday_active'] = buf[0] & 0x4 == 0x4
data['single_active'] = buf[0] & 0x8 == 0x8
data['prealarm_active'] = buf[2] & 0x8 == 0x8
data['weekday_hour'] = rev_bcd2int(buf[0] & 0xF1)
data['weekday_min'] = rev_bcd2int(buf[1])
data['single_hour'] = rev_bcd2int(buf[2] & 0xF1)
data['single_min'] = rev_bcd2int(buf[3])
data['prealarm_period'] = (buf[4] & 0xF0) // 0x10
data['snooze'] = buf[4] & 0xF
data['max_temp'], _ = decode_temp(buf[32], buf[33], 0)
data['min_temp'], _ = decode_temp(buf[34], buf[35], 0)
data['rain_active'] = buf[64] & 0x4 == 0x4
data['windspeed_active'] = buf[64] & 0x2 == 0x2
data['windgust_active'] = buf[64] & 0x1 == 0x1
data['rain'] = bcd2int(buf[66]) * 100 + bcd2int(buf[65])
data['windspeed'], _ = decode_ws(buf[68], buf[69])
data['windgust'], _ = decode_ws(buf[71], buf[72])
return data
def set_alarms(self, weekday, single, prealarm, snooze,
maxtemp, mintemp, rain, wind, gust):
buf = self._read_alarms()
if weekday.lower() != 'off':
weekday_list = weekday.split(':')
buf[0] = rev_int2bcd(int(weekday_list[0])) | 0x4
buf[1] = rev_int2bcd(int(weekday_list[1]))
else:
buf[0] &= 0xFB
if single.lower() != 'off':
single_list = single.split(':')
buf[2] = rev_int2bcd(int(single_list[0]))
buf[3] = rev_int2bcd(int(single_list[1]))
buf[0] |= 0x8
else:
buf[0] &= 0xF7
if (prealarm.lower() != 'off' and
(weekday.lower() != 'off' or single.lower() != 'off')):
if int(prealarm) == 15:
buf[4] = 0x10
elif int(prealarm) == 30:
buf[4] = 0x20
elif int(prealarm) == 45:
buf[4] = 0x30
elif int(prealarm) == 60:
buf[4] = 0x40
elif int(prealarm) == 90:
buf[4] = 0x50
buf[2] |= 0x8
else:
buf[2] &= 0xF7
buf[4] = (buf[4] & 0xF0) + int(snooze)
buf[5] = self._checksum(buf[0:5])
buf[32] = int2bcd(int(abs(float(maxtemp)) * 10) % 100)
buf[33] = int2bcd(int(abs(float(maxtemp)) / 10))
if float(maxtemp) >= 0:
buf[33] |= 0x80
if (abs(float(maxtemp)) * 100) % 10 == 5:
buf[33] |= 0x20
buf[34] = int2bcd(int(abs(float(mintemp)) * 10) % 100)
buf[35] = int2bcd(int(abs(float(mintemp)) / 10))
if float(mintemp) >= 0:
buf[35] |= 0x80
if (abs(float(mintemp)) * 100) % 10 == 5:
buf[35] |= 0x20
buf[36] = self._checksum(buf[32:36])
if rain.lower() != 'off':
buf[65] = int2bcd(int(rain) % 100)
buf[66] = int2bcd(int(int(rain) / 100))
buf[64] |= 0x4
else:
buf[64] = buf[64] & 0xFB
if wind.lower() != 'off':
buf[68] = int2bcd(int(float(wind) * 10) % 100)
buf[69] = int2bcd(int(float(wind) / 10))
buf[64] |= 0x2
else:
buf[64] = buf[64] & 0xFD
if gust.lower() != 'off':
buf[71] = int2bcd(int(float(gust) * 10) % 100)
buf[72] = int2bcd(int(float(gust) / 10))
buf[64] |= 0x1
else:
            buf[64] &= 0xFE
buf[73] = self._checksum(buf[64:73])
self._write_alarms(buf)
def get_interval(self):
buf = self._read(0xFE)
return buf[1]
def get_interval_seconds(self):
idx = self.get_interval()
interval = self.idx_to_interval_sec.get(idx)
if interval is None:
msg = "Unrecognized archive interval '%s'" % idx
log.error(msg)
raise weewx.WeeWxIOError(msg)
return interval
def set_interval(self, idx):
buf = self._read(0xFE)
buf = buf[1:33]
buf[0] = idx
self._write(0xFE, buf)
@staticmethod
def _checksum(buf):
crc = 0x100
for i in range(len(buf)):
crc -= buf[i]
if crc < 0:
crc += 0x100
return crc
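    # Checksum example (illustrative): _checksum([0x12, 0x34]) returns 0xba,
    # the value that makes the byte sum 0x12 + 0x34 + 0xba = 0x100 = 0 mod 256.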
if __name__ == '__main__':
import optparse
import weewx
import weeutil.logger
FMT_TE923TOOL = 'te923tool'
FMT_DICT = 'dict'
FMT_TABLE = 'table'
usage = """%prog [options] [--debug] [--help]"""
def main():
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='display driver version')
parser.add_option('--debug', dest='debug', action='store_true',
help='display diagnostic information while running')
parser.add_option('--status', dest='status', action='store_true',
help='display station status')
parser.add_option('--readings', dest='readings', action='store_true',
help='display sensor readings')
parser.add_option("--records", dest="records", type=int, metavar="N",
help="display N station records, oldest to newest")
parser.add_option('--blocks', dest='blocks', type=int, metavar="N",
help='display N 32-byte blocks of station memory')
parser.add_option("--format", dest="format", type=str,metavar="FORMAT",
default=FMT_TE923TOOL,
help="format for output: te923tool, table, or dict")
(options, _) = parser.parse_args()
if options.version:
print("te923 driver version %s" % DRIVER_VERSION)
            exit(0)
if options.debug:
weewx.debug = 1
weeutil.logger.setup('te923', {})
if (options.format.lower() != FMT_TE923TOOL and
options.format.lower() != FMT_TABLE and
options.format.lower() != FMT_DICT):
print("Unknown format '%s'. Known formats include: %s" % (
options.format, ','.join([FMT_TE923TOOL, FMT_TABLE, FMT_DICT])))
exit(1)
with TE923Station() as station:
if options.status:
data = station.get_versions()
data.update(station.get_status())
if options.format.lower() == FMT_TE923TOOL:
print_status(data)
else:
print_data(data, options.format)
if options.readings:
data = station.get_readings()
if options.format.lower() == FMT_TE923TOOL:
print_readings(data)
else:
print_data(data, options.format)
if options.records is not None:
for data in station.gen_records(requested=options.records):
if options.format.lower() == FMT_TE923TOOL:
print_readings(data)
else:
print_data(data, options.format)
if options.blocks is not None:
for ptr, block in station.gen_blocks(count=options.blocks):
print_hex(ptr, block)
def print_data(data, fmt):
if fmt.lower() == FMT_TABLE:
print_table(data)
else:
print(data)
def print_hex(ptr, data):
print("0x%06x %s" % (ptr, _fmt(data)))
def print_table(data):
"""output entire dictionary contents in two columns"""
for key in sorted(data):
print("%s: %s" % (key.rjust(16), data[key]))
def print_status(data):
"""output status fields in te923tool format"""
print("0x%x:0x%x:0x%x:0x%x:0x%x:%d:%d:%d:%d:%d:%d:%d:%d" % (
data['version_sys'], data['version_bar'], data['version_uv'],
data['version_rcc'], data['version_wind'],
data['bat_rain'], data['bat_uv'], data['bat_wind'], data['bat_5'],
data['bat_4'], data['bat_3'], data['bat_2'], data['bat_1']))
def print_readings(data):
"""output sensor readings in te923tool format"""
output = [str(data['dateTime'])]
output.append(getvalue(data, 't_in', '%0.2f'))
output.append(getvalue(data, 'h_in', '%d'))
for i in range(1, 6):
output.append(getvalue(data, 't_%d' % i, '%0.2f'))
output.append(getvalue(data, 'h_%d' % i, '%d'))
output.append(getvalue(data, 'slp', '%0.1f'))
output.append(getvalue(data, 'uv', '%0.1f'))
output.append(getvalue(data, 'forecast', '%d'))
output.append(getvalue(data, 'storm', '%d'))
output.append(getvalue(data, 'winddir', '%d'))
output.append(getvalue(data, 'windspeed', '%0.1f'))
output.append(getvalue(data, 'windgust', '%0.1f'))
output.append(getvalue(data, 'windchill', '%0.1f'))
output.append(getvalue(data, 'rain', '%d'))
print(':'.join(output))
def getvalue(data, label, fmt):
if label + '_state' in data:
if data[label + '_state'] == STATE_OK:
return fmt % data[label]
else:
return data[label + '_state']
else:
if data[label] is None:
return 'x'
else:
return fmt % data[label]
if __name__ == '__main__':
main()
|
import codecs
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
    def next(self):
        return next(self.reader).encode("utf-8")
    __next__ = next  # also satisfy the Python 3 iterator protocol
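# A minimal usage sketch (the file name, its latin-1 encoding, and the
# process() helper are all hypothetical); each yielded line is a UTF-8
# encoded byte string:
#
#     with open("data.csv", "rb") as f:
#         for line in UTF8Recoder(f, "latin-1"):
#             process(line)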
|
"""
in nodetree_name s d='' n=2
in node_name s d='' n=2
in input_name s d='' n=2
out data s
"""
import bpy  # provided by Blender; imported explicitly so the snippet is self-contained
if nodetree_name and node_name and input_name:
    data = bpy.data.node_groups[nodetree_name].nodes[node_name].inputs[input_name].sv_get()
|
from __future__ import absolute_import
from .biobloomtools import MultiqcModule
|
"""
GRIn is a program which computed a set of read's Genome Repeat Index (GRI). The
GRI is defined as being the proportion of k-mers in a set of read's k-mer
spectrum which are repetitive. All of this is explained in much more detail in
the paper accompanying this program (still being written as of this commit).
For anything to do with this program, contact George Hall (gh10@sanger.ac.uk).
"""
from __future__ import print_function, division
import math
import sys
import subprocess as sp
import spectrum_related as spec
import custom_argument_parser
import cutoff_related as cutoffs
try:
import matplotlib.pyplot as plt
except ImportError:
MATPLOTLIB_PRESENT = False
else:
MATPLOTLIB_PRESENT = True
try:
import numpy as np
except ImportError:
NUMPY_PRESENT = False
else:
NUMPY_PRESENT = True
MY_EMAIL = "gh10@sanger.ac.uk"
def check_matplotlib_present():
"""
Print error message and exit if Matplotlib was not successfully imported.
"""
if not MATPLOTLIB_PRESENT:
print("ERROR: Could not find Matplotlib installation. Exiting.",
file=sys.stderr)
sys.exit(1)
else:
return
def calculate_gri(number_repetitive_kmers, total_number_kmers):
"""
Return the GRI (that is, the percentage of repetitive k-mers).
"""
if total_number_kmers == 0:
print("ERROR: It seems that there were zero total k-mers. It's weird ",
"this has happened - it's probably an issue with your cutoffs. ",
"Please email me and let me know about this happening (",
MY_EMAIL, "). Skipping this file...", file=sys.stderr, sep='')
return -1
gri = number_repetitive_kmers / total_number_kmers
if not (0 <= gri <= 1):
print("ERROR: GRI is not between 0 and 1. This should never ",
"happen!!! Please email me and let me know about this (",
MY_EMAIL, "). Skipping this file...", file=sys.stderr, sep='')
return -1
return gri
def run_jellyfish(file_paths, verbosity):
"""Generate histogram using Jellyfish"""
# Options used for Jellyfish. Change them here if you want:
JELLYFISH_BIN = "jellyfish"
K_MER_SIZE = "31"
    HASH_TABLE_SIZE = "100M"  # Can use S.I. units M & G
NUM_THREADS = "25"
if verbosity > 0:
print("Counting k-mers with Jellyfish...")
sp.call([JELLYFISH_BIN, "count", "-m", K_MER_SIZE, "-s", HASH_TABLE_SIZE,
"-t", NUM_THREADS, "-C"] + file_paths)
hist_name = generate_hist_file_name(file_paths)
if verbosity > 0:
print("Storing histogram in file '", hist_name, "'", sep='')
with open(hist_name, 'w') as hist_file:
sp.call([JELLYFISH_BIN, "histo", "mer_counts.jf"], stdout=hist_file)
return
def generate_hist_file_name(file_names):
"""
Return the name of the histogram file, computed by concatenating all input
file names together using underscores
"""
return "_".join(file_names) + ".hist"
def plot_histogram(hist_dict, error_cutoff, repeat_cutoff, upper_cutoff):
"""Plot histogram using hist_dict"""
data = [[], []]
data[0] = list(hist_dict.keys())
data[1] = list(hist_dict.values())
plt.plot(data[0], data[1])
plt.axvline(error_cutoff)
plt.axvline(repeat_cutoff)
plt.axvline(upper_cutoff)
x_vals_array = np.array(data[0])
y_vals_array = np.array(data[1])
boundary_func = lambda lower, upper: np.logical_and(x_vals_array >= lower,
x_vals_array <= upper)
# Shade error curve
plt.fill_between(x_vals_array, y_vals_array,
where=(boundary_func(1, error_cutoff)), interpolate=True,
alpha=0.5, color="red")
# Shade main peak
plt.fill_between(x_vals_array, y_vals_array,
where=(boundary_func(error_cutoff, repeat_cutoff)),
interpolate=True, alpha=0.5, color="blue")
# Shade repetitive k-mers
plt.fill_between(x_vals_array, y_vals_array,
where=(boundary_func(repeat_cutoff, upper_cutoff)),
interpolate=True, alpha=0.5, color="green")
# Shade abundant k-mers
plt.fill_between(x_vals_array, y_vals_array,
where=(x_vals_array > upper_cutoff), interpolate=True,
alpha=0.5, color="black")
plt.xlim(1, upper_cutoff * 1.05)
plt.ylim(1, max(hist_dict.values()) * 1.1)
return
def convert_bp_to_SI(num_bp):
"""
Returns a string with the number of base pairs num_bp expressed in 'SI'
units (i.e. Mbp) with the correct suffix attached.
"""
    if num_bp < 1000:
        # Also covers num_bp == 0, for which math.log would raise ValueError.
        return str(num_bp) + "bp"
    thousands_power = math.log(num_bp, 1000)
    if 1 <= thousands_power < 2:
num_kbp = num_bp / 1000
return "{0:.1f}Kbp".format(num_kbp)
elif 2 <= thousands_power < 3:
num_mbp = num_bp / 1000000
return "{0:.1f}Mbp".format(num_mbp)
else:
num_gbp = num_bp / 1000000000
return "{0:.1f}Gbp".format(num_gbp)
def process_histogram_file(file_name, initial_error_cutoff,
initial_repeat_cutoff, initial_upper_cutoff,
verbosity):
"""
Main function for interacting with an individual histogram file. This
function creates a hist dict for the file, sets the cutoffs for the file,
and computes and prints the GRI for the file.
"""
with open(file_name, 'r') as hist_file:
print("Processing", file_name)
hist_dict = spec.create_hist_dict(hist_file)
error_cutoff = cutoffs.set_error_cutoff(hist_dict,
initial_error_cutoff,
verbosity)
if error_cutoff == -1:
return -1
repeat_cutoff = cutoffs.set_repeat_cutoff(hist_dict,
initial_repeat_cutoff,
error_cutoff,
verbosity)
if repeat_cutoff == -1:
return -1
upper_cutoff = cutoffs.set_upper_cutoff(hist_dict,
initial_upper_cutoff,
verbosity)
if upper_cutoff == -1:
return -1
if cutoffs.check_cutoff_consistency(error_cutoff, repeat_cutoff,
upper_cutoff) == -1:
return -1
total_num_kmers_used = spec.count_num_kmers(hist_dict, error_cutoff,
upper_cutoff)
number_repetitive_kmers = spec.count_num_kmers(hist_dict,
repeat_cutoff,
upper_cutoff)
if verbosity > 0:
print("Total number of k-mers", total_num_kmers_used)
print("Number of repetitive k-mers", number_repetitive_kmers)
# Don't exit if not able to import Scipy and Numpy
if spec.SCIPY_PRESENT and NUMPY_PRESENT:
kmer_depth = spec.find_kmer_depth(hist_dict)
print("K-mer depth =", kmer_depth)
genome_size_est = int(total_num_kmers_used / kmer_depth)
print("Genome size estmination =", genome_size_est,
"(" + convert_bp_to_SI(genome_size_est) + ")")
if verbosity == 2:
plot_histogram(hist_dict, error_cutoff, repeat_cutoff,
upper_cutoff)
plt.title(file_name)
gri = calculate_gri(number_repetitive_kmers, total_num_kmers_used)
if gri != -1:
print("GRI = %0.4f" %(gri))
return
def error_check_user_input(args):
"""
Perform the following checks on the command line options given by the user:
* Check that --full-auto has not been set if any manual cutoff has also
been set
* Check that --verbosity is no greater than 2
"""
if args.full_auto and cutoffs.any_cutoff_set(args):
print("ERROR: Cannot set both --full-auto and any manual cutoff",
file=sys.stderr)
sys.exit(1)
if args.verbosity > 2:
print("ERROR: --verbosity cannot be greater than 2 (either -v or -vv",
"must be used)", file=sys.stdout)
sys.exit(1)
return
def generate_subplot_func(num_subplots):
"""
Returns a function which can be called to generate the next subplot. I've
made it work like this because the same function needs to be called every
time except the value x needs to be incremented by 1.
"""
return lambda x: plt.subplot(num_subplots, num_subplots, x,
xlabel="Number of Occurrences",
ylabel="Distinct k-mers with Occurence",
yscale="log")
def main():
"""
This is the main function for the program. It just calls the argument
parser, calls the cutoff list constructors, and then iterates over the
input files, calculating and printing their GRIs.
"""
args = custom_argument_parser.parser_main()
file_paths = args.file
verbosity = args.verbosity
if verbosity > 0:
print("Command ran:", " ".join(sys.argv))
if verbosity == 2:
check_matplotlib_present()
# num_subplots is the required number of subplots per row and column
        # in the window in order to accommodate all files
num_subplots = math.ceil(math.sqrt(len(args.file)))
# file_counter keeps track of which file we are on
file_counter = 1
subplot_func = generate_subplot_func(num_subplots)
error_check_user_input(args)
if args.full_auto:
# Jellyfish needs to be run first in order to generate histogram file
run_jellyfish(file_paths, verbosity)
file_paths = [generate_hist_file_name(file_paths)]
(error_cutoffs,
repeat_cutoffs,
upper_cutoffs) = cutoffs.construct_all_cutoff_lists(args)
for (file_name, repeat_cutoff, error_cutoff, upper_cutoff) in \
zip(file_paths, repeat_cutoffs, error_cutoffs, upper_cutoffs):
try:
if verbosity == 2:
subplot_func(file_counter)
file_counter += 1
process_histogram_file(file_name, error_cutoff, repeat_cutoff,
upper_cutoff, verbosity)
except IOError:
print("ERROR: Could not open file \"" + file_name + "\".",
"Skipping...", file=sys.stderr)
if verbosity == 2:
plt.gcf().canvas.set_window_title("GRIn Histogram plot")
plt.show()
if __name__ == "__main__":
main()
|
import sys
import math
import cmath
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
def main():
# files having only one received block
files = ["rtl1.dat", "rtl2.dat", "rtl3.dat", "rtl4.dat"]
signals = []
    index = 1
for filename in files:
a = np.fromfile(filename, dtype=np.dtype("u1"))
a = a[int(sys.argv[index]):int(sys.argv[index + 1])] # take short block
a = a.astype(np.float32).view(np.complex64)
a -= (127.4 + 127.4j)
signals.append(a)
index = index + 2
for i in range(3):
sig1 = signals[0]
sig2 = signals[1+i]
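        # Convolving sig1 with the time-reversed complex conjugate of sig2
        # is equivalent to cross-correlating the two signals; the index of
        # the magnitude peak gives the relative sample offset between the
        # receivers.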
corr = signal.fftconvolve(sig1, np.conj(sig2[::-1]))
peakpos = np.argmax(np.abs(corr))
        print(peakpos)
#plt.subplot(3,1,1+i)
#plt.plot(np.abs(corr))
#plt.show()
if __name__ == '__main__':
main()
|
import bpy
from bpy.props import IntProperty, EnumProperty, StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat
'''
Integer range generator with two modes:
- Range: start, stop, step (like Python's range(); stop is exclusive)
- Count: start, step, count
See the class unit tests for the exact behaviours.
'''
def intRange(start=0, step=1, stop=1):
    '''
    slightly different behaviour: "lazy range"
    - step is clamped to a minimum of 1
    - step is negated if stop is less than start
    '''
if start == stop:
return []
step = max(step, 1)
if stop < start:
step *= -1
return list(range(start, stop, step))
def countRange(start=0, step=1, count=10):
count = max(count, 0)
if count == 0:
return []
if step == 0:
return [start] * count
stop = (count*step) + start
return list(range(start, stop, step))
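# Examples:
#   intRange(0, 2, 10)  -> [0, 2, 4, 6, 8]
#   intRange(10, 2, 0)  -> [10, 8, 6, 4, 2]   (step sign flipped)
#   countRange(0, 2, 5) -> [0, 2, 4, 6, 8]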
class GenListRangeIntNode(bpy.types.Node, SverchCustomTreeNode):
''' Generator range list of ints '''
bl_idname = 'GenListRangeIntNode'
bl_label = 'Range Int'
bl_icon = 'IPO_CONSTANT'
start_: IntProperty(
name='start', description='start',
default=0, update=updateNode)
stop_: IntProperty(
name='stop', description='stop',
default=10, update=updateNode)
count_: IntProperty(
name='count', description='num items',
default=10, update=updateNode)
step_: IntProperty(
name='step', description='step',
default=1, update=updateNode)
current_mode: StringProperty(default="LAZYRANGE")
modes = [
("LAZYRANGE", "Range", "Use python Range function", 1),
("COUNTRANGE", "Count", "Create range based on count", 2)
]
def mode_change(self, context):
# just because click doesn't mean we need to change mode
mode = self.mode
if mode == self.current_mode:
return
self.inputs[-1].prop_name = {'LAZYRANGE': 'stop_'}.get(mode, 'count_')
self.current_mode = mode
updateNode(self, context)
mode: EnumProperty(items=modes, default='LAZYRANGE', update=mode_change)
replacement_nodes = [('SvGenNumberRange', dict(Step='Stop', Stop='Step'), None)]
def update_mapping(self):
if self.mode == 'COUNTRANGE':
inputs_mapping = dict(Step='Step', Stop='Stop')
else:
inputs_mapping = dict(Step='Stop', Stop='Step')
GenListRangeIntNode.replacement_nodes = [('SvGenNumberRange', inputs_mapping, None)]
def sv_init(self, context):
self.inputs.new('SvStringsSocket', "Start").prop_name = 'start_'
self.inputs.new('SvStringsSocket', "Step").prop_name = 'step_'
self.inputs.new('SvStringsSocket', "Stop").prop_name = 'stop_'
self.outputs.new('SvStringsSocket', "Range")
def draw_buttons(self, context, layout):
layout.prop(self, "mode", expand=True)
func_dict = {'LAZYRANGE': intRange,
'COUNTRANGE': countRange}
def process(self):
self.update_mapping()
inputs = self.inputs
outputs = self.outputs
if not outputs[0].is_linked:
return
param = [inputs[i].sv_get()[0] for i in range(3)]
f = self.func_dict[self.mode]
out = [f(*args) for args in zip(*match_long_repeat(param))]
outputs['Range'].sv_set(out)
def register():
bpy.utils.register_class(GenListRangeIntNode)
def unregister():
bpy.utils.unregister_class(GenListRangeIntNode)
|
from abc import abstractmethod
from ...plugins import PluginBase
class Painter(PluginBase):
"""
Mount point for plugins which refer to the painting of data, i.e.,
gridding a field to a mesh
Plugins of this type should provide the following attributes:
plugin_name : str
A class attribute that defines the name of the plugin in
the registry
register : classmethod
A class method taking no arguments that updates the
:class:`~nbodykit.utils.config.ConstructorSchema` with
the arguments needed to initialize the class
paint : method
A method that performs the painting of the field.
"""
required_attributes = ['paintbrush']
@abstractmethod
def __init__(self, paintbrush):
self.paintbrush = paintbrush
@abstractmethod
def paint(self, pm, datasource):
"""
Paint the DataSource specified to a mesh
Parameters
----------
pm : :class:`~pmesh.particlemesh.ParticleMesh`
particle mesh object that does the painting
datasource : DataSource
the data source object representing the field to paint onto the mesh
Returns
-------
stats : dict
dictionary of statistics related to painting and reading of the DataSource
"""
pass
def basepaint(self, real, position, paintbrush, weight=None):
"""
The base function for painting that is used by default.
This handles the domain decomposition steps that are necessary to
complete before painting.
Parameters
----------
        real : :class:`~pmesh.pm.RealField`
            the real field that the positions are painted onto
position : array_like
the position data
paintbrush : string
            the paintbrush (mass assignment window) to use; see the
            documentation of ``pmesh.pm.RealField.paint()`` for the
            available options
weight : array_like, optional
the weight value to use when painting
"""
assert real.pm.comm == self.comm # pm must be from the same communicator!
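        # Decompose the positions across MPI ranks so that each rank
        # receives the particles falling inside its local domain of the
        # mesh before painting.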
layout = real.pm.decompose(position)
position = layout.exchange(position)
if weight is not None:
weight = layout.exchange(weight)
real.paint(position, weight, method=paintbrush, hold=True)
else:
real.paint(position, method=paintbrush, hold=True)
def shiftedpaint(self, real1, real2, position, paintbrush, weight=None, shift=0.5):
"""
paint to two real fields for interlacing
"""
assert real1.pm.comm == self.comm # pm must be from the same communicator!
assert real2.pm.comm == self.comm # pm must be from the same communicator!
from pmesh import window
smoothing = window.methods[paintbrush].support * 0.5
# interlacing is shifted, thus we create a bigger buffer region
smoothing = smoothing + shift
shifted = real1.pm.affine.shift(shift)
layout = real1.pm.decompose(position, smoothing=smoothing)
position = layout.exchange(position)
if weight is not None:
weight = layout.exchange(weight)
real1.paint(position, weight, method=paintbrush, hold=True)
real2.paint(position, weight, method=paintbrush, hold=True, transform=shifted)
else:
real1.paint(position, method=paintbrush, hold=True)
real2.paint(position, method=paintbrush, hold=True, transform=shifted)
|
"""
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
"""
import sys
from pathlib import Path
import parso
from parso.python import tree
from jedi._compatibility import cast_path
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.file_io import KnownContentFileIO
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
from jedi.api.helpers import validate_line_column
from jedi.api.completion import Completion, search_in_module
from jedi.api.keywords import KeywordName
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project, Project
from jedi.api.errors import parso_to_jedi_errors
from jedi.api import refactoring
from jedi.api.refactoring.extract import extract_function, extract_variable
from jedi.inference import InferenceState
from jedi.inference import imports
from jedi.inference.references import find_references
from jedi.inference.arguments import try_iter_content
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.sys_path import transform_path_to_dotted
from jedi.inference.syntax_tree import tree_name_to_values
from jedi.inference.value import ModuleValue
from jedi.inference.base_value import ValueSet
from jedi.inference.value.iterable import unpack_tuple_to_dict
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.gradual.utils import load_proper_stub_module
from jedi.inference.utils import to_list
sys.setrecursionlimit(3000)
class Script:
"""
A Script is the base for completions, goto or whatever you want to do with
Jedi. The counter part of this class is :class:`Interpreter`, which works
with actual dictionaries and can work with a REPL. This class
should be used when a user edits code in an editor.
You can either use the ``code`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The Script's ``sys.path`` is very customizable:
- If `project` is provided with a ``sys_path``, that is going to be used.
- If `environment` is provided, its ``sys.path`` will be used
(see :func:`Environment.get_sys_path <jedi.api.environment.Environment.get_sys_path>`);
- Otherwise ``sys.path`` will match that of the default environment of
Jedi, which typically matches the sys path that was used at the time
when Jedi was imported.
Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are
always 1-based and columns are always zero based. To avoid repetition they
are not always documented. You can omit both line and column. Jedi will
then just do whatever action you are calling at the end of the file. If you
    provide only the line, it will complete at the end of that line.
.. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means
that parso reuses modules (i.e. they are not immutable). With this setting
Jedi is **not thread safe** and it is also not safe to use multiple
:class:`.Script` instances and its definitions at the same time.
If you are a normal plugin developer this should not be an issue. It is
an issue for people that do more complex stuff with Jedi.
This is purely a performance optimization and works pretty well for all
typical usages, however consider to turn the setting off if it causes
you problems. See also
`this discussion <https://github.com/davidhalter/jedi/issues/1240>`_.
:param code: The source code of the current file, separated by newlines.
:type code: str
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or pathlib.Path or None
:param Environment environment: Provide a predefined :ref:`Environment <environments>`
to work with a specific Python version or virtualenv.
:param Project project: Provide a :class:`.Project` to make sure finding
references works well, because the right folder is searched. There are
also ways to modify the sys path and other things.
"""
def __init__(self, code=None, *, path=None, environment=None, project=None):
self._orig_path = path
# An empty path (also empty string) should always result in no path.
if isinstance(path, str):
path = Path(path)
self.path = path.absolute() if path else None
if code is None:
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
code = f.read()
if project is None:
# Load the Python grammar of the current interpreter.
project = get_default_project(None if self.path is None else self.path.parent)
self._inference_state = InferenceState(
project, environment=environment, script_path=self.path
)
debug.speed('init')
self._module_node, code = self._inference_state.parse_and_get_code(
code=code,
path=self.path,
use_latest_grammar=path and path.suffix == '.pyi',
cache=False, # No disk cache, because the current script often changes.
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
debug.speed('parsed')
self._code_lines = parso.split_lines(code, keepends=True)
self._code = code
cache.clear_time_caches()
debug.reset_time()
# Cache the module, this is mostly useful for testing, since this shouldn't
# be called multiple times.
@cache.memoize_method
def _get_module(self):
names = None
is_package = False
if self.path is not None:
import_names, is_p = transform_path_to_dotted(
self._inference_state.get_sys_path(add_parent_paths=False),
self.path
)
if import_names is not None:
names = import_names
is_package = is_p
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(cast_path(self.path), self._code)
if self.path is not None and self.path.suffix == '.pyi':
# We are in a stub file. Try to load the stub properly.
stub_module = load_proper_stub_module(
self._inference_state,
self._inference_state.latest_grammar,
file_io,
names,
self._module_node
)
if stub_module is not None:
return stub_module
if names is None:
names = ('__main__',)
module = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=names,
code_lines=self._code_lines,
is_package=is_package,
)
if names[0] not in ('builtins', 'typing'):
# These modules are essential for Jedi, so don't overwrite them.
self._inference_state.module_cache.add(names, ValueSet([module]))
return module
def _get_module_context(self):
return self._get_module().as_context()
def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._inference_state.environment,
)
@validate_line_column
def complete(self, line=None, column=None, *, fuzzy=False):
"""
Completes objects under the cursor.
Those objects contain information about the completions, more than just
names.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:return: Completion objects, sorted by name. Normal names appear
before "private" names that start with ``_`` and those appear
before magic methods and name mangled names that start with ``__``.
:rtype: list of :class:`.Completion`
"""
with debug.increase_indent_cm('complete'):
completion = Completion(
self._inference_state, self._get_module_context(), self._code_lines,
(line, column), self.get_signatures, fuzzy=fuzzy,
)
return completion.complete()
@validate_line_column
def infer(self, line=None, column=None, *, only_stubs=False, prefer_stubs=False):
"""
        Return the definitions of the object under the cursor. It is
        basically a wrapper around Jedi's type inference.
This method follows complicated paths and returns the end, not the
first definition. The big difference between :meth:`goto` and
:meth:`infer` is that :meth:`goto` doesn't
follow imports and statements. Multiple objects may be returned,
because depending on an option you can have two different versions of a
function.
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
pos = line, column
leaf = self._module_node.get_name_of_position(pos)
if leaf is None:
leaf = self._module_node.get_leaf_for_position(pos)
if leaf is None or leaf.type == 'string':
return []
if leaf.end_pos == (line, column) and leaf.type == 'operator':
next_ = leaf.get_next_leaf()
if next_.start_pos == leaf.end_pos \
and next_.type in ('number', 'string', 'keyword'):
leaf = next_
context = self._get_module_context().create_context(leaf)
values = helpers.infer(self._inference_state, context, leaf)
values = convert_values(
values,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, c.name) for c in values]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))
@validate_line_column
def goto(self, line=None, column=None, *, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
"""
Goes to the name that defined the object under the cursor. Optionally
you can follow imports.
        Multiple objects may be returned, because depending on an option you
        can have two different versions of a function.
:param follow_imports: The method will follow imports.
:param follow_builtin_imports: If ``follow_imports`` is True will try
to look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Without a name we really just want to jump to the result e.g.
            # executed by `foo()`, if the cursor is after `)`.
return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs)
name = self._get_module_context().create_name(tree_name)
# Make it possible to goto the super class function/attribute
# definitions, when they are overwritten.
names = []
if name.tree_name.is_definition() and name.parent_context.is_class():
class_node = name.parent_context.tree_node
class_value = self._get_module_context().create_value(class_node)
mro = class_value.py__mro__()
next(mro) # Ignore the first entry, because it's the class itself.
for cls in mro:
names = cls.goto(tree_name.value)
if names:
break
if not names:
names = list(name.goto())
if follow_imports:
names = helpers.filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, d) for d in set(names)]
# Avoid duplicates
return list(set(helpers.sorted_definitions(defs)))
def search(self, string, *, all_scopes=False):
"""
Searches a name in the current file. For a description of how the
search string should look like, please have a look at
:meth:`.Project.search`.
:param bool all_scopes: Default False; searches not only for
            definitions at the top level of a module, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search_func(string, all_scopes=all_scopes)
@to_list
def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
names = self._names(all_scopes=all_scopes)
wanted_type, wanted_names = helpers.split_search_string(string)
return search_in_module(
self._inference_state,
self._get_module_context(),
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
fuzzy=fuzzy,
)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. If you want to
have all possible definitions in a file you can also provide an empty
string.
:param bool all_scopes: Default False; searches not only for
            definitions at the top level of a module, but also in
functions and classes.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@validate_line_column
def help(self, line=None, column=None):
"""
Used to display a help window to users. Uses :meth:`.Script.goto` and
returns additional definitions for keywords and operators.
Typically you will want to display :meth:`.BaseName.docstring` to the
user for all the returned definitions.
The additional definitions are ``Name(...).type == 'keyword'``.
These definitions do not have a lot of value apart from their docstring
attribute, which contains the output of Python's :func:`help` function.
:rtype: list of :class:`.Name`
"""
definitions = self.goto(line, column, follow_imports=True)
if definitions:
return definitions
leaf = self._module_node.get_leaf_for_position((line, column))
if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
def need_pydoc():
if leaf.value in ('(', ')', '[', ']'):
if leaf.parent.type == 'trailer':
return False
if leaf.parent.type == 'atom':
return False
grammar = self._inference_state.grammar
# This parso stuff is not public, but since I control it, this
# is fine :-) ~dave
reserved = grammar._pgen_grammar.reserved_syntax_strings.keys()
return leaf.value in reserved
if need_pydoc():
name = KeywordName(self._inference_state, leaf.value)
return [classes.Name(self._inference_state, name)]
return []
@validate_line_column
def get_references(self, line=None, column=None, **kwargs):
"""
Lists all references of a variable in a project. Since this can be
quite hard to do for Jedi, if it is too complicated, Jedi will stop
searching.
:param include_builtins: Default ``True``. If ``False``, checks if a reference
is a builtin (e.g. ``sys``) and in that case does not return it.
:param scope: Default ``'project'``. If ``'file'``, include references in
the current module only.
:rtype: list of :class:`.Name`
"""
def _references(include_builtins=True, scope='project'):
if scope not in ('project', 'file'):
raise ValueError('Only the scopes "file" and "project" are allowed')
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Must be syntax
return []
names = find_references(self._get_module_context(), tree_name, scope == 'file')
definitions = [classes.Name(self._inference_state, n) for n in names]
if not include_builtins or scope == 'file':
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _references(**kwargs)
@validate_line_column
def get_signatures(self, line=None, column=None):
"""
Return the function object of the call under the cursor.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
        This would return an empty list.
:rtype: list of :class:`.Signature`
"""
pos = line, column
call_details = helpers.get_signature_details(self._module_node, pos)
if call_details is None:
return []
context = self._get_module_context().create_context(call_details.bracket_leaf)
definitions = helpers.cache_signatures(
self._inference_state,
context,
call_details.bracket_leaf,
self._code_lines,
pos
)
debug.speed('func_call followed')
# TODO here we use stubs instead of the actual values. We should use
# the signatures from stubs, but the actual values, probably?!
return [classes.Signature(self._inference_state, signature, call_details)
for signature in definitions.get_signatures()]
@validate_line_column
def get_context(self, line=None, column=None):
"""
Returns the scope context under the cursor. This basically means the
function, class or module where the cursor is at.
:rtype: :class:`.Name`
"""
pos = (line, column)
leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
if leaf.start_pos > pos or leaf.type == 'endmarker':
previous_leaf = leaf.get_previous_leaf()
if previous_leaf is not None:
leaf = previous_leaf
module_context = self._get_module_context()
n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
# This is a bit of a special case. The context of a function/class
# name/param/keyword is always it's parent context, not the
# function itself. Catch all the cases here where we are before the
# suite object, but still in the function.
context = module_context.create_value(n).as_context()
else:
context = module_context.create_context(leaf)
while context.name is None:
context = context.parent_context # comprehensions
definition = classes.Name(self._inference_state, context.name)
while definition.type != 'module':
name = definition._name # TODO private access
tree_name = name.tree_name
if tree_name is not None: # Happens with lambdas.
scope = tree_name.get_definition()
if scope.start_pos[1] < column:
break
definition = definition.parent()
return definition
def _analysis(self):
self._inference_state.is_analysis = True
self._inference_state.analysis_modules = [self._module_node]
module = self._get_module_context()
try:
for node in get_executable_nodes(self._module_node):
context = module.create_context(node)
if node.type in ('funcdef', 'classdef'):
# Resolve the decorators.
tree_name_to_values(self._inference_state, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():
import_names |= set(path[-1] for path in node.get_paths())
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.infer_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
else:
if node.type == 'name':
defs = self._inference_state.infer(context, node)
else:
defs = infer_call_of_leaf(context, node)
try_iter_content(defs)
self._inference_state.reset_recursion_limitations()
ana = [a for a in self._inference_state.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._inference_state.is_analysis = False
def get_names(self, **kwargs):
"""
Returns names defined in the current file.
:param all_scopes: If True lists the names of all scopes instead of
only the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
:rtype: list of :class:`.Name`
"""
names = self._names(**kwargs)
return [classes.Name(self._inference_state, n) for n in names]
def get_syntax_errors(self):
"""
Lists all syntax errors in the current file.
:rtype: list of :class:`.SyntaxError`
"""
return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)
def _names(self, all_scopes=False, definitions=True, references=False):
# Set line/column to a random position, because they don't matter.
module_context = self._get_module_context()
defs = [
module_context.create_name(name)
for name in helpers.get_module_names(
self._module_node,
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
]
return sorted(defs, key=lambda x: x.start_pos)
def rename(self, line=None, column=None, *, new_name):
"""
Renames all references of the variable under the cursor.
:param new_name: The variable under the cursor will be renamed to this
string.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
definitions = self.get_references(line, column, include_builtins=False)
return refactoring.rename(self._inference_state, definitions, new_name)
@validate_line_column
def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None):
"""
        Moves an expression to a new statement.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
foo = 3.1
x = int(foo + 1)
the code above will become::
foo = 3.1
bar = foo + 1
x = int(bar)
:param new_name: The expression under the cursor will be renamed to
this string.
        :param int until_line: The selection range ends at this line; when
            omitted, Jedi will be clever and try to define the range itself.
        :param int until_column: The selection range ends at this column; when
            omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_variable(
self._inference_state, self.path, self._module_node,
new_name, (line, column), until_pos
)
@validate_line_column
def extract_function(self, line, column, *, new_name, until_line=None, until_column=None):
"""
Moves an expression to a new function.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
global_var = 3
def x():
foo = 3.1
x = int(foo + 1 + global_var)
the code above will become::
global_var = 3
def bar(foo):
return int(foo + 1 + global_var)
def x():
foo = 3.1
x = bar(foo)
:param new_name: The expression under the cursor will be replaced with
a function with this name.
        :param int until_line: The selection range ends at this line; when
            omitted, Jedi will be clever and try to define the range itself.
        :param int until_column: The selection range ends at this column; when
            omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_function(
self._inference_state, self.path, self._get_module_context(),
new_name, (line, column), until_pos
)
def inline(self, line=None, column=None):
"""
Inlines a variable under the cursor. This is basically the opposite of
extracting a variable. For example with the cursor on bar::
foo = 3.1
bar = foo + 1
x = int(bar)
the code above will become::
foo = 3.1
x = int(foo + 1)
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
names = [d._name for d in self.get_references(line, column, include_builtins=True)]
return refactoring.inline(self._inference_state, names)
class Interpreter(Script):
"""
Jedi's API for Python REPLs.
Implements all of the methods that are present in :class:`.Script` as well.
In addition to completions that normal REPL completion does like
``str.upper``, Jedi also supports code completion based on static code
analysis. For example Jedi will complete ``str().upper``.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper
All keyword arguments are same as the arguments for :class:`.Script`.
:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the one
returned by :func:`globals` and :func:`locals`.
"""
_allow_descriptor_getattr_default = True
def __init__(self, code, namespaces, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
super().__init__(code, environment=environment,
project=Project(Path.cwd()), **kwds)
self.namespaces = namespaces
self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default
@cache.memoize_method
def _get_module_context(self):
tree_module_value = ModuleValue(
self._inference_state, self._module_node,
file_io=KnownContentFileIO(str(self.path), self._code),
string_names=('__main__',),
code_lines=self._code_lines,
)
return interpreter.MixedModuleContext(
tree_module_value,
self.namespaces,
)
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of lazy parsing
of modules. This can be useful for IDEs, to control which modules to load
on startup.
:param modules: different module names, list of string.
"""
for m in modules:
s = "import %s as x; x." % m
Script(s).complete(1, len(s))
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed
|
import sys
import os
from daemon import Daemon
from server import HydraServer
class HydraDaemon(Daemon):
def run(self):
server = HydraServer()
server.run_server()
if __name__ == '__main__':
HOMEDIR = os.path.expanduser('~')
pidfile = HOMEDIR + '/.hydra/hydra_server.pid'
outfile = HOMEDIR + '/.hydra/hydra.out'
errfile = HOMEDIR + '/.hydra/hydra.err'
daemon = HydraDaemon(pidfile, stdout=outfile, stderr=errfile)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
"""Tests for qutebrowser.misc.msgbox."""
import pytest
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QMessageBox, QWidget
from qutebrowser.misc import msgbox
from qutebrowser.utils import utils
@pytest.fixture(autouse=True)
def patch_args(fake_args):
fake_args.no_err_windows = False
def test_attributes(qtbot):
"""Test basic QMessageBox attributes."""
title = 'title'
text = 'text'
parent = QWidget()
qtbot.add_widget(parent)
icon = QMessageBox.Critical
buttons = QMessageBox.Ok | QMessageBox.Cancel
box = msgbox.msgbox(parent=parent, title=title, text=text, icon=icon,
buttons=buttons)
qtbot.add_widget(box)
if not utils.is_mac:
assert box.windowTitle() == title
assert box.icon() == icon
assert box.standardButtons() == buttons
assert box.text() == text
assert box.parent() is parent
@pytest.mark.parametrize('plain_text, expected', [
(True, Qt.PlainText),
(False, Qt.RichText),
(None, Qt.AutoText),
])
def test_plain_text(qtbot, plain_text, expected):
box = msgbox.msgbox(parent=None, title='foo', text='foo',
icon=QMessageBox.Information, plain_text=plain_text)
qtbot.add_widget(box)
assert box.textFormat() == expected
def test_finished_signal(qtbot):
"""Make sure we can pass a slot to be called when the dialog finished."""
signal_triggered = False
def on_finished():
nonlocal signal_triggered
signal_triggered = True
box = msgbox.msgbox(parent=None, title='foo', text='foo',
icon=QMessageBox.Information, on_finished=on_finished)
qtbot.add_widget(box)
with qtbot.waitSignal(box.finished):
box.accept()
assert signal_triggered
def test_information(qtbot):
box = msgbox.information(parent=None, title='foo', text='bar')
qtbot.add_widget(box)
if not utils.is_mac:
assert box.windowTitle() == 'foo'
assert box.text() == 'bar'
assert box.icon() == QMessageBox.Information
def test_no_err_windows(fake_args, capsys):
fake_args.no_err_windows = True
box = msgbox.information(parent=None, title='foo', text='bar')
box.exec_() # should do nothing
out, err = capsys.readouterr()
assert not out
assert err == 'Message box: foo; bar\n'
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: get_url
short_description: Downloads files from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- By default, if an environment variable C(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see R(setting the environment,playbooks_environment)),
or by using the use_proxy option.
- HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
your proxy environment for both protocols is correct.
- From Ansible 2.4 when run with C(--check), it will do a HEAD request to validate the URL but
will not download the entire file or verify it against hashes.
- For Windows targets, use the M(ansible.windows.win_get_url) module instead.
version_added: '0.6'
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
type: str
required: true
dest:
description:
- Absolute path of where to download the file to.
- If C(dest) is a directory, either the server provided filename or, if
none provided, the base name of the URL on the remote server will be
used. If a directory, C(force) has no effect.
- If C(dest) is a directory, the file will always be downloaded
      (regardless of the C(force) option), but replaced only if the contents changed.
type: path
required: true
tmp_dest:
description:
- Absolute path of where temporary file is downloaded to.
- When run on Ansible 2.5 or greater, path defaults to ansible's remote_tmp setting
- When run on Ansible prior to 2.5, it defaults to C(TMPDIR), C(TEMP) or C(TMP) env variables or a platform specific value.
- U(https://docs.python.org/2/library/tempfile.html#tempfile.tempdir)
type: path
version_added: '2.1'
force:
description:
- If C(yes) and C(dest) is not a directory, will download the file every
time and replace the file if the contents change. If C(no), the file
will only be downloaded if the destination does not exist. Generally
should be C(yes) only for small local files.
- Prior to 0.6, this module behaved as if C(yes) was the default.
- Alias C(thirsty) has been deprecated and will be removed in 2.13.
type: bool
default: no
aliases: [ thirsty ]
version_added: '0.7'
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
version_added: '2.1'
sha256sum:
description:
- If a SHA-256 checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
This option is deprecated and will be removed in version 2.14. Use
option C(checksum) instead.
default: ''
type: str
version_added: "1.3"
checksum:
description:
- 'If a checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
Format: <algorithm>:<checksum|url>, e.g. checksum="sha256:D98291AC[...]B6DC7B97",
checksum="sha256:http://example.com/path/sha256sum.txt"'
- If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions.
- The third party hashlib library can be installed for access to additional algorithms.
- Additionally, if a checksum is passed to this parameter, and the file exist under
the C(dest) location, the I(destination_checksum) would be calculated, and if
checksum equals I(destination_checksum), the file download would be skipped
(unless C(force) is true). If the checksum does not equal I(destination_checksum),
the destination file is deleted.
type: str
default: ''
version_added: "2.0"
use_proxy:
description:
- if C(no), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: yes
timeout:
description:
- Timeout in seconds for URL request.
type: int
default: 10
version_added: '1.8'
headers:
description:
- Add custom HTTP headers to a request in hash/dict format.
- The hash/dict format was added in Ansible 2.6.
- Previous versions used a C("key:value,key:value") string format.
- The C("key:value,key:value") string format is deprecated and has been removed in version 2.10.
type: dict
version_added: '2.0'
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without C(url_password) for sites that allow empty passwords.
- Since version 2.8 you can also use the C(username) alias for this option.
type: str
aliases: ['username']
version_added: '1.6'
url_password:
description:
- The password for use in HTTP basic authentication.
- If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
- Since version 2.8 you can also use the 'password' alias for this option.
type: str
aliases: ['password']
version_added: '1.6'
force_basic_auth:
description:
- Force the sending of the Basic authentication header upon initial request.
    - httplib2, the library used by the uri module, only sends authentication
      information when a webservice responds to an initial request with a 401
      status. Since some basic auth services do not properly send a 401, logins
      will fail.
type: bool
default: no
version_added: '2.0'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, C(client_key) is not required.
type: path
version_added: '2.4'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If C(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
use_gssapi:
description:
- Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
C(KRB5CCNAME) that specified a custom Kerberos credential cache.
- NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
extends_documentation_fragment:
- files
notes:
- For Windows targets, use the M(ansible.windows.win_get_url) module instead.
seealso:
- module: ansible.builtin.uri
- module: ansible.windows.win_get_url
author:
- Jan-Piet Mens (@jpmens)
'''
EXAMPLES = r'''
- name: Download foo.conf
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
mode: '0440'
- name: Download file and force basic auth
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
force_basic_auth: yes
- name: Download file with custom HTTP headers
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
headers:
key1: one
key2: two
- name: Download file with check (sha256)
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
- name: Download file with check (md5)
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
checksum: md5:66dffb5228a211e61d6d7ef4a86f5758
- name: Download file with checksum url (sha256)
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
checksum: sha256:http://example.com/path/sha256sum.txt
- name: Download file from a file path
get_url:
url: file:///tmp/afile.txt
dest: /tmp/afilecopy.txt
- name: < Fetch file that requires authentication.
username/password only available since 2.8, in older versions you need to use url_username/url_password
get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
username: bar
password: '{{ mysecret }}'
'''
RETURN = r'''
backup_file:
description: name of backup file created after download
returned: changed and if backup=yes
type: str
sample: /path/to/file.txt.2015-02-12@22:09~
checksum_dest:
description: sha1 checksum of the file after copy
returned: success
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
checksum_src:
description: sha1 checksum of the file
returned: success
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
dest:
description: destination file/path
returned: success
type: str
sample: /path/to/file.txt
elapsed:
description: The number of seconds that elapsed while performing the download
returned: always
type: int
sample: 23
gid:
description: group id of the file
returned: success
type: int
sample: 100
group:
description: group of the file
returned: success
type: str
sample: "httpd"
md5sum:
description: md5 checksum of the file after download
returned: when supported
type: str
sample: "2a5aeecc61dc98c4d780b14b330e3282"
mode:
description: permissions of the target
returned: success
type: str
sample: "0644"
msg:
description: the HTTP message from the request
returned: always
type: str
sample: OK (unknown bytes)
owner:
description: owner of the file
returned: success
type: str
sample: httpd
secontext:
description: the SELinux security context of the file
returned: success
type: str
sample: unconfined_u:object_r:user_tmp_t:s0
size:
description: size of the target
returned: success
type: int
sample: 1220
src:
description: source file used after download
returned: always
type: str
sample: /tmp/tmpAdFLdV
state:
description: state of the target
returned: success
type: str
sample: file
status_code:
description: the HTTP status code from the request
returned: always
type: int
sample: 200
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
url:
description: the actual URL used for the request
returned: always
type: str
sample: https://www.ansible.com/
'''
import datetime
import os
import re
import shutil
import tempfile
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url, url_argument_spec
def url_filename(url):
fn = os.path.basename(urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest='', method='GET'):
"""
Download data from the url and store in a temporary file.
Return (tempfile, info about the request)
"""
start = datetime.datetime.utcnow()
rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method)
elapsed = (datetime.datetime.utcnow() - start).seconds
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed)
    # Exceptions in fetch_url may result in a status of -1; this check ensures a proper error is reported to the user in all cases
if info['status'] == -1:
module.fail_json(msg=info['msg'], url=url, dest=dest, elapsed=elapsed)
if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest, elapsed=elapsed)
# create a temporary file and copy content to do checksum-based replacement
if tmp_dest:
# tmp_dest should be an existing dir
tmp_dest_is_dir = os.path.isdir(tmp_dest)
if not tmp_dest_is_dir:
if os.path.exists(tmp_dest):
module.fail_json(msg="%s is a file but should be a directory." % tmp_dest, elapsed=elapsed)
else:
module.fail_json(msg="%s directory does not exist." % tmp_dest, elapsed=elapsed)
else:
tmp_dest = module.tmpdir
fd, tempname = tempfile.mkstemp(dir=tmp_dest)
f = os.fdopen(fd, 'wb')
try:
shutil.copyfileobj(rsp, f)
except Exception as e:
os.remove(tempname)
module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed, exception=traceback.format_exc())
f.close()
rsp.close()
return tempname, info
def extract_filename_from_headers(headers):
"""
Extracts a filename from the given dict of HTTP headers.
Looks for the content-disposition header and applies a regex.
Returns the filename if successful, else None."""
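    # e.g. (hypothetical header): 'attachment; filename="report.csv"' -> 'report.csv'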
cont_disp_regex = 'attachment; ?filename="?([^"]+)'
res = None
if 'content-disposition' in headers:
cont_disp = headers['content-disposition']
match = re.match(cont_disp_regex, cont_disp)
if match:
res = match.group(1)
# Try preventing any funny business.
res = os.path.basename(res)
return res
def is_url(checksum):
"""
Returns True if checksum value has supported URL scheme, else False."""
supported_schemes = ('http', 'https', 'ftp', 'file')
return urlsplit(checksum).scheme in supported_schemes
def main():
argument_spec = url_argument_spec()
# setup aliases
argument_spec['url_username']['aliases'] = ['username']
argument_spec['url_password']['aliases'] = ['password']
argument_spec.update(
url=dict(type='str', required=True),
dest=dict(type='path', required=True),
backup=dict(type='bool', default=False),
sha256sum=dict(type='str', default=''),
checksum=dict(type='str', default=''),
timeout=dict(type='int', default=10),
headers=dict(type='dict'),
tmp_dest=dict(type='path'),
)
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
mutually_exclusive=[['checksum', 'sha256sum']],
)
if module.params.get('thirsty'):
module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
version='2.13', collection_name='ansible.builtin')
if module.params.get('sha256sum'):
module.deprecate('The parameter "sha256sum" has been deprecated and will be removed, use "checksum" instead',
version='2.14', collection_name='ansible.builtin')
url = module.params['url']
dest = module.params['dest']
backup = module.params['backup']
force = module.params['force']
sha256sum = module.params['sha256sum']
checksum = module.params['checksum']
use_proxy = module.params['use_proxy']
timeout = module.params['timeout']
headers = module.params['headers']
tmp_dest = module.params['tmp_dest']
result = dict(
changed=False,
checksum_dest=None,
checksum_src=None,
dest=dest,
elapsed=0,
url=url,
)
dest_is_dir = os.path.isdir(dest)
last_mod_time = None
# workaround for usage of deprecated sha256sum parameter
if sha256sum:
checksum = 'sha256:%s' % (sha256sum)
# checksum specified, parse for algorithm and checksum
if checksum:
try:
algorithm, checksum = checksum.split(':', 1)
except ValueError:
module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
if is_url(checksum):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
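            # A checksum file typically contains lines like (sketch):
            #     b5bb9d8014a0...  file.conf    (text mode: space marker)
            #     b5bb9d8014a0... *file.conf    (binary mode: asterisk marker)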
checksum_map = []
for line in lines:
                # Split on the first whitespace only, so the type marker that precedes
                # the filename (' ' for text mode, '*' for binary mode) is preserved
parts = line.split(" ", 1)
if len(parts) == 2:
                    # Remove the leading type char; we expect a space (text) or an asterisk (binary)
if parts[1].startswith((" ", "*",)):
parts[1] = parts[1][1:]
# Append checksum and path without potential leading './'
checksum_map.append((parts[0], parts[1].lstrip("./")))
filename = url_filename(url)
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
for cksum in (s for (s, f) in checksum_map if f == filename):
checksum = cksum
break
else:
checksum = None
if checksum is None:
module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
# Remove any non-alphanumeric characters, including the infamous
# Unicode zero-width space
checksum = re.sub(r'\W+', '', checksum).lower()
# Ensure the checksum portion is a hexdigest
try:
int(checksum, 16)
except ValueError:
module.fail_json(msg='The checksum format is invalid', **result)
if not dest_is_dir and os.path.exists(dest):
checksum_mismatch = False
# If the download is not forced and there is a checksum, allow
# checksum match to skip the download.
if not force and checksum != '':
destination_checksum = module.digest_from_file(dest, algorithm)
if checksum != destination_checksum:
checksum_mismatch = True
# Not forcing redownload, unless checksum does not match
if not force and checksum and not checksum_mismatch:
# allow file attribute changes
file_args = module.load_file_common_arguments(module.params, path=dest)
result['changed'] = module.set_fs_attributes_if_different(file_args, False)
if result['changed']:
module.exit_json(msg="file already exists but file attributes changed", **result)
module.exit_json(msg="file already exists", **result)
# If the file already exists, prepare the last modified time for the
# request.
mtime = os.path.getmtime(dest)
last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
# If the checksum does not match we have to force the download
# because last_mod_time may be newer than on remote
if checksum_mismatch:
force = True
# download to tmpsrc
start = datetime.datetime.utcnow()
method = 'HEAD' if module.check_mode else 'GET'
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method)
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
result['src'] = tmpsrc
# Now the request has completed, we can finally generate the final
# destination file name from the info dict.
if dest_is_dir:
filename = extract_filename_from_headers(info)
if not filename:
# Fall back to extracting the filename from the URL.
# Pluck the URL from the info, since a redirect could have changed
# it.
filename = url_filename(info['url'])
dest = os.path.join(dest, filename)
result['dest'] = dest
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
result['checksum_src'] = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (dest), **result)
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not readable" % (dest), **result)
result['checksum_dest'] = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(dest)):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
if module.check_mode:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
result['changed'] = ('checksum_dest' not in result or
result['checksum_src'] != result['checksum_dest'])
module.exit_json(msg=info.get('msg', ''), **result)
backup_file = None
if result['checksum_src'] != result['checksum_dest']:
try:
if backup:
if os.path.exists(dest):
backup_file = module.backup_local(dest)
module.atomic_move(tmpsrc, dest, unsafe_writes=module.params['unsafe_writes'])
except Exception as e:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
exception=traceback.format_exc(), **result)
result['changed'] = True
else:
result['changed'] = False
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
if checksum != '':
destination_checksum = module.digest_from_file(dest, algorithm)
if checksum != destination_checksum:
os.remove(dest)
module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result)
# allow file attribute changes
file_args = module.load_file_common_arguments(module.params, path=dest)
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
# Backwards compat only. We'll return None on FIPS enabled systems
try:
result['md5sum'] = module.md5(dest)
except ValueError:
result['md5sum'] = None
if backup_file:
result['backup_file'] = backup_file
# Mission complete
module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
if __name__ == '__main__':
main()
|
from bottle import *
from xml.dom import minidom
import xml.etree.cElementTree as ET
import json
import datetime as dt
import os
import sys
import time
_startup_cwd = os.getcwd()
""" Bottle helper functions """
def raise404():
raise HTTPError(404, "Not found: " + repr(request.path))
def get_type(key, type):
if type == 'int':
return get_int(key)
else:
return get_string(key)
def get_int(key):
num = get_string(key) or ''
try:
return int(num)
except ValueError:
return 0
def get_string(key):
return request.query.get(key) or request.forms.get(key)
def format_response(payload, out_format, root_tag, pretty_print=False):
    if out_format.lower() == 'xml':
        root = ET.Element(root_tag)
        _convert_dict_to_xml_recurse(root, payload, {})
        converted = prettify(root)
        response.content_type = 'application/xml'
    else:
        # Crude. Need to loop through properly and rip out keys starting with @
        if isinstance(payload, dict):
            for child in payload.values():
                if isinstance(child, list):
                    for index in range(len(child)):
                        if isinstance(child[index], dict):
                            child[index] = hide_attribute(child[index])
        converted = json.dumps(payload, indent=4 if pretty_print else None)
callback_function = request.query.get('callback')
if callback_function:
converted = callback_function + '(' + converted + ');'
response.content_type = 'application/json'
return converted
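# Usage sketch (hypothetical route handler): format_response({'jobs': []}, 'json', 'jobs',
# pretty_print=True) returns indented JSON and sets the response content type.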
def success_response():
return { 'status': 'success' }
def set_attribute(dictionary):
return dict(("@" + k, v) for k, v in dictionary.items())
def hide_attribute(dictionary):
return dict((k[1:] if k.startswith('@') else k, v) for k, v in dictionary.items())
def prettify(elem):
"""Return a pretty-printed XML string for the Element."""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def _convert_dict_to_xml_recurse(parent, dictitem, listnames):
"""Helper Function for XML conversion."""
# we can't convert bare lists
assert not isinstance(dictitem, list)
if isinstance(dictitem, dict):
for (tag, child) in sorted(dictitem.iteritems()):
if isinstance(child, list):
for listchild in child:
elem = ET.Element(tag)
parent.append(elem)
_convert_dict_to_xml_recurse(elem, listchild, listnames)
else:
if tag.startswith('@'):
parent.attrib[str(tag[1:])] = str(child)
else:
elem = ET.Element(tag)
parent.append(elem)
_convert_dict_to_xml_recurse(elem, child, listnames)
    elif dictitem is not None:
parent.text = unicode(dictitem)
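# Example (sketch): with a root tag of 'response', {'job': [{'@id': 1, 'name': 'a'}]}
# serialises to <response><job id="1"><name>a</name></job></response>.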
def calcNextRunTime(job):
hour = int(job['hour'])
minute = int(job['minute'])
weekdays = job['weekdays']
currentTime = dt.datetime.now().replace(second=59, microsecond=0)
currentWeekday = currentTime.weekday()
def calcRunTime(weekday):
weekdayOffset = weekday - currentWeekday
newDate = currentTime.replace(hour = hour, minute = minute, second = 0) + dt.timedelta(days=weekdayOffset)
if newDate < currentTime:
newDate += dt.timedelta(days=7)
return newDate
# Convert days into 0 based integers
daysToRun = (int(d) - 1 for d in weekdays.split(','))
allDays = [calcRunTime(day) for day in daysToRun]
allDays.sort()
if allDays:
nextRunTime = allDays[0]
nextRunTime = dateTimeToEpoch(nextRunTime)
else:
nextRunTime = None
job['nextRunTime'] = nextRunTime
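# Example (sketch): a job of {'hour': '7', 'minute': '30', 'weekdays': '2,4'} runs on
# Tuesdays and Thursdays at 07:30 (weekday '1' maps to Monday); the nearest future
# occurrence is stored on job['nextRunTime'] as a Unix epoch.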
def dateTimeToEpoch(timeObj):
return int(time.mktime(timeObj.timetuple()))
def shutdown():
return "not implemented"
def restart():
args = sys.argv[:]
args.insert(0, sys.executable)
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
os.execv(sys.executable, args)
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
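# BOARD numbering addresses the physical header pins: pin 11 below is BCM GPIO17
# on the standard Raspberry Pi header.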
statefile = "ventstate.html"
GPIO.setup(11, GPIO.OUT)
GPIO.output(11, False)
time.sleep(1)
GPIO.output(11, True)
FILE = open(statefile,"w")
FILE.writelines("Vent is is <b>CLOSED</b>")
FILE.close()
print('Done\n')
|
""" Mission 5-CORAL SURVEY
Set map border, size and origin
Set quadrants of interest
Do zigzag scouting to respective quadrants (perpetual)
If shape identified or duration timeout
Shape identified->set global parameter for gui
continue to next quadrant
If both shapes identified or total time exceeded, terminate mission
task 5:
-----------------
Created by Reinaldo@ 2016-12-06
Authors: Reinaldo
-----------------
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_zigzag import Zigzag
from move_base_force_cancel import ForceCancel
from move_base_waypoint import MoveTo
from tf.transformations import euler_from_quaternion
def zigzag(quadrant=1, map_length=40, map_width=40, half_period=2, half_amplitude=10, offset=3):
print("zigzag starts")
zigzag=Zigzag(nodename="zigzag", quadrant=quadrant, map_length=map_length, map_width=map_width, half_period=half_period, half_amplitude=half_amplitude, offset=offset)
print("zigzag returns")
class CoralSurvey(object):
    x0, y0, yaw0 = 0, 0, 0
    shape_counter = 0
    shape_found = [0, 0, 0]  # Tri, Cru, Cir
    current_quadrant = 0
def __init__(self, quadrant_list):
print("starting task 5")
rospy.init_node('task_5', anonymous=True)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.marker_callback, queue_size = 50)
self.marker_pub= rospy.Publisher('waypoint_markers', Marker, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
self.zigzag_obj = Zigzag("zigzag", is_newnode=False, quadrant=None, map_length=40, map_width=40, half_period=2, half_amplitude=10, offset=3)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, mode=1, mode_param=1, is_relative=False)
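        # The two controllers above are built once with is_newnode=False, presumably so
        # they piggyback on this node's rospy context; zigzag_obj is simply re-targeted
        # later via respawn() rather than re-created for each quadrant.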
zigzag_half_period=4
zigzag_half_amplitude=9
self.quadrant_visited=list()
self.doing_zigzag=list()
for i in range(len(quadrant_list)):
self.quadrant_visited.append(0) #not visited
self.doing_zigzag.append(0)
while not rospy.is_shutdown():
#main loop
#visit quadrant that is not yet visited
for i in range(len(quadrant_list)):
if self.quadrant_visited[i]==0:
self.current_quadrant=i
break
if self.doing_zigzag[self.current_quadrant]==0:
self.zigzag_obj.respawn(quadrant_list[self.current_quadrant], zigzag_half_period, zigzag_half_amplitude) #zigzag only once
if self.quadrant_visited[self.current_quadrant]==0 and zigzag_half_period>1:
zigzag_half_period-=1
time.sleep(1)
def marker_callback(self, msg):
if len(msg.markers)>0:
#find black totem
#find underwater shapes during zigzag
if msg.markers[0].type == 0 and self.shape_found[0]==0:
#triangle
if self.shape_counter==0:
rospy.set_param("/gui/shape1", "TRI")
self.shape_counter+=1
elif self.shape_counter==1:
rospy.set_param("/gui/shape2", "TRI")
self.shape_found[0]=1
self.quadrant_visited[self.current_quadrant]=1
self.doing_zigzag[self.current_quadrant]=1
print("found Triangle")
elif msg.markers[0].type == 1 and self.shape_found[1]==0:
#cruciform
if self.shape_counter==0:
rospy.set_param("/gui/shape1", "CRU")
self.shape_counter+=1
elif self.shape_counter==1:
rospy.set_param("/gui/shape2", "CRU")
self.shape_found[1]=1
self.quadrant_visited[self.current_quadrant]=1
self.doing_zigzag[self.current_quadrant]=1
print("found Crux")
elif msg.markers[0].type == 2 and self.shape_found[2]==0:
#circle
if self.shape_counter==0:
rospy.set_param("/gui/shape1", "CIR")
self.shape_counter+=1
elif self.shape_counter==1:
rospy.set_param("/gui/shape2", "CIR")
self.shape_found[2]=1
self.quadrant_visited[self.current_quadrant]=1
self.doing_zigzag[self.current_quadrant]=1
print("found Circle")
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
def is_complete(self):
pass
if __name__ == '__main__':
try:
CoralSurvey([1, 3])
# stage 1: gps
except rospy.ROSInterruptException:
rospy.loginfo("Task 5 Finished")
|
from ladybug.config import folders
def test_config_init():
"""Test the initialization of the config module and basic properties."""
assert hasattr(folders, 'default_epw_folder')
assert isinstance(folders.default_epw_folder, str)
|
import contextlib
import fnmatch
import imp
import itertools
import json
import os
import re
import StringIO
import subprocess
import sys
import colorama
import toml
import yaml
from licenseck import MPL, APACHE, COPYRIGHT, licenses_toml, licenses_dep_toml
CONFIG_FILE_PATH = os.path.join(".", "servo-tidy.toml")
config = {
"skip-check-length": False,
"skip-check-licenses": False,
"check-ordered-json-keys": [],
"lint-scripts": [],
"blocked-packages": {},
"ignore": {
"files": [
os.path.join(".", "."), # ignore hidden files
],
"directories": [
os.path.join(".", "."), # ignore hidden directories
],
"packages": [],
},
"check_ext": {}
}
COMMENTS = ["// ", "# ", " *", "/* "]
FILE_PATTERNS_TO_CHECK = ["*.rs", "*.rc", "*.cpp", "*.c",
"*.h", "Cargo.lock", "*.py", "*.sh",
"*.toml", "*.webidl", "*.json", "*.html",
"*.yml"]
FILE_PATTERNS_TO_IGNORE = ["*.#*", "*.pyc", "fake-ld.sh"]
SPEC_BASE_PATH = "components/script/dom/"
WEBIDL_STANDARDS = [
"//www.khronos.org/registry/webgl/extensions",
"//www.khronos.org/registry/webgl/specs",
"//developer.mozilla.org/en-US/docs/Web/API",
"//dev.w3.org/2006/webapi",
"//dev.w3.org/csswg",
"//dev.w3.org/fxtf",
"//dvcs.w3.org/hg",
"//dom.spec.whatwg.org",
"//drafts.csswg.org",
"//drafts.css-houdini.org",
"//drafts.fxtf.org",
"//encoding.spec.whatwg.org",
"//fetch.spec.whatwg.org",
"//html.spec.whatwg.org",
"//url.spec.whatwg.org",
"//xhr.spec.whatwg.org",
"//w3c.github.io",
"//heycam.github.io/webidl",
"//webbluetoothcg.github.io/web-bluetooth/",
"//svgwg.org/svg2-draft",
"//wicg.github.io",
# Not a URL
"// This interface is entirely internal to Servo, and should not be" +
" accessible to\n// web pages."
]
def is_iter_empty(iterator):
try:
obj = iterator.next()
return True, itertools.chain((obj,), iterator)
except StopIteration:
return False, iterator
def normilize_paths(paths):
if isinstance(paths, basestring):
return os.path.join(*paths.split('/'))
else:
return [os.path.join(*path.split('/')) for path in paths]
def progress_wrapper(iterator):
list_of_stuff = list(iterator)
total_files, progress = len(list_of_stuff), 0
for idx, thing in enumerate(list_of_stuff):
progress = int(float(idx + 1) / total_files * 100)
sys.stdout.write('\r Progress: %s%% (%d/%d)' % (progress, idx + 1, total_files))
sys.stdout.flush()
yield thing
class FileList(object):
def __init__(self, directory, only_changed_files=False, exclude_dirs=[], progress=True):
self.directory = directory
self.excluded = exclude_dirs
iterator = self._filter_excluded() if exclude_dirs else self._default_walk()
if only_changed_files:
try:
# Fall back if git doesn't work
newiter = self._git_changed_files()
obj = next(newiter)
iterator = itertools.chain((obj,), newiter)
except subprocess.CalledProcessError:
pass
# Raise `StopIteration` if the iterator is empty
obj = next(iterator)
self.generator = itertools.chain((obj,), iterator)
if progress:
self.generator = progress_wrapper(self.generator)
def _default_walk(self):
for root, _, files in os.walk(self.directory):
for f in files:
yield os.path.join(root, f)
def _git_changed_files(self):
args = ["git", "log", "-n1", "--merges", "--format=%H"]
last_merge = subprocess.check_output(args).strip()
args = ["git", "diff", "--name-only", last_merge, self.directory]
file_list = normilize_paths(subprocess.check_output(args).splitlines())
for f in file_list:
if not any(os.path.join('.', os.path.dirname(f)).startswith(path) for path in self.excluded):
yield os.path.join('.', f)
def _filter_excluded(self):
for root, dirs, files in os.walk(self.directory, topdown=True):
# modify 'dirs' in-place so that we don't do unnecessary traversals in excluded directories
dirs[:] = [d for d in dirs if not any(os.path.join(root, d).startswith(name) for name in self.excluded)]
for rel_path in files:
yield os.path.join(root, rel_path)
def __iter__(self):
return self
def next(self):
return next(self.generator)
def filter_file(file_name):
if any(file_name.startswith(ignored_file) for ignored_file in config["ignore"]["files"]):
return False
base_name = os.path.basename(file_name)
if any(fnmatch.fnmatch(base_name, pattern) for pattern in FILE_PATTERNS_TO_IGNORE):
return False
return True
def filter_files(start_dir, only_changed_files, progress):
file_iter = FileList(start_dir, only_changed_files=only_changed_files,
exclude_dirs=config["ignore"]["directories"], progress=progress)
for file_name in file_iter:
base_name = os.path.basename(file_name)
if not any(fnmatch.fnmatch(base_name, pattern) for pattern in FILE_PATTERNS_TO_CHECK):
continue
if not filter_file(file_name):
continue
yield file_name
def uncomment(line):
for c in COMMENTS:
if line.startswith(c):
if line.endswith("*/"):
return line[len(c):(len(line) - 3)].strip()
return line[len(c):].strip()
def is_apache_licensed(header):
if APACHE in header:
return any(c in header for c in COPYRIGHT)
def check_license(file_name, lines):
if any(file_name.endswith(ext) for ext in (".yml", ".toml", ".lock", ".json", ".html")) or \
config["skip-check-licenses"]:
raise StopIteration
if lines[0].startswith("#!") and lines[1].strip():
yield (1, "missing blank line after shebang")
blank_lines = 0
max_blank_lines = 2 if lines[0].startswith("#!") else 1
license_block = []
for l in lines:
l = l.rstrip('\n')
if not l.strip():
blank_lines += 1
if blank_lines >= max_blank_lines:
break
continue
line = uncomment(l)
if line is not None:
license_block.append(line)
header = " ".join(license_block)
valid_license = MPL in header or is_apache_licensed(header)
acknowledged_bad_license = "xfail-license" in header
if not (valid_license or acknowledged_bad_license):
yield (1, "incorrect license")
def check_modeline(file_name, lines):
for idx, line in enumerate(lines[:5]):
if re.search('^.*[ \t](vi:|vim:|ex:)[ \t]', line):
yield (idx + 1, "vi modeline present")
elif re.search('-\*-.*-\*-', line, re.IGNORECASE):
yield (idx + 1, "emacs file variables present")
def check_length(file_name, idx, line):
if any(file_name.endswith(ext) for ext in (".yml", ".lock", ".json", ".html", ".toml")) or \
config["skip-check-length"]:
raise StopIteration
# Prefer shorter lines when shell scripting.
max_length = 80 if file_name.endswith(".sh") else 120
if len(line.rstrip('\n')) > max_length:
yield (idx + 1, "Line is longer than %d characters" % max_length)
def check_whatwg_specific_url(idx, line):
match = re.search(r"https://html\.spec\.whatwg\.org/multipage/[\w-]+\.html#([\w\:-]+)", line)
if match is not None:
preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1))
yield (idx + 1, "link to WHATWG may break in the future, use this format instead: {}".format(preferred_link))
def check_whatwg_single_page_url(idx, line):
match = re.search(r"https://html\.spec\.whatwg\.org/#([\w\:-]+)", line)
if match is not None:
preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1))
yield (idx + 1, "links to WHATWG single-page url, change to multi page: {}".format(preferred_link))
def check_whitespace(idx, line):
if line[-1] == "\n":
line = line[:-1]
else:
yield (idx + 1, "no newline at EOF")
if line.endswith(" "):
yield (idx + 1, "trailing whitespace")
if "\t" in line:
yield (idx + 1, "tab on line")
if "\r" in line:
yield (idx + 1, "CR on line")
def check_by_line(file_name, lines):
for idx, line in enumerate(lines):
errors = itertools.chain(
check_length(file_name, idx, line),
check_whitespace(idx, line),
check_whatwg_specific_url(idx, line),
check_whatwg_single_page_url(idx, line),
)
for error in errors:
yield error
def check_flake8(file_name, contents):
from flake8.main import check_code
if not file_name.endswith(".py"):
raise StopIteration
@contextlib.contextmanager
def stdout_redirect(where):
sys.stdout = where
try:
yield where
finally:
sys.stdout = sys.__stdout__
ignore = {
"W291", # trailing whitespace; the standard tidy process will enforce no trailing whitespace
"E501", # 80 character line length; the standard tidy process will enforce line length
}
output = StringIO.StringIO()
with stdout_redirect(output):
check_code(contents, ignore=ignore)
for error in output.getvalue().splitlines():
_, line_num, _, message = error.split(":", 3)
yield line_num, message.strip()
def check_lock(file_name, contents):
def find_reverse_dependencies(name, content):
for package in itertools.chain([content["root"]], content["package"]):
for dependency in package.get("dependencies", []):
if dependency.startswith("{} ".format(name)):
yield package["name"], dependency
if not file_name.endswith(".lock"):
raise StopIteration
# Package names to be neglected (as named by cargo)
exceptions = config["ignore"]["packages"]
content = toml.loads(contents)
packages_by_name = {}
for package in content.get("package", []):
if "replace" in package:
continue
source = package.get("source", "")
if source == r"registry+https://github.com/rust-lang/crates.io-index":
source = "crates.io"
packages_by_name.setdefault(package["name"], []).append((package["version"], source))
for (name, packages) in packages_by_name.iteritems():
if name in exceptions or len(packages) <= 1:
continue
message = "duplicate versions for package `{}`".format(name)
packages.sort()
packages_dependencies = list(find_reverse_dependencies(name, content))
for version, source in packages:
short_source = source.split("#")[0].replace("git+", "")
message += "\n\t\033[93mThe following packages depend on version {} from '{}':\033[0m" \
.format(version, short_source)
for name, dependency in packages_dependencies:
if version in dependency and short_source in dependency:
message += "\n\t\t" + name
yield (1, message)
# Check to see if we are transitively using any blocked packages
for package in content.get("package", []):
package_name = package.get("name")
package_version = package.get("version")
for dependency in package.get("dependencies", []):
dependency = dependency.split()
dependency_name = dependency[0]
whitelist = config['blocked-packages'].get(dependency_name)
if whitelist is not None:
if package_name not in whitelist:
fmt = "Package {} {} depends on blocked package {}."
message = fmt.format(package_name, package_version, dependency_name)
yield (1, message)
def check_toml(file_name, lines):
if not file_name.endswith("Cargo.toml"):
raise StopIteration
ok_licensed = False
for idx, line in enumerate(lines):
if idx == 0 and "[workspace]" in line:
raise StopIteration
if line.find("*") != -1:
yield (idx + 1, "found asterisk instead of minimum version number")
for license_line in licenses_toml:
ok_licensed |= (license_line in line)
if not ok_licensed:
yield (0, ".toml file should contain a valid license.")
def check_shell(file_name, lines):
if not file_name.endswith(".sh"):
raise StopIteration
shebang = "#!/usr/bin/env bash"
required_options = {"set -o errexit", "set -o nounset", "set -o pipefail"}
did_shebang_check = False
if not lines:
yield (0, 'script is an empty file')
return
if lines[0].rstrip() != shebang:
yield (1, 'script does not have shebang "{}"'.format(shebang))
for idx in range(1, len(lines)):
stripped = lines[idx].rstrip()
# Comments or blank lines are ignored. (Trailing whitespace is caught with a separate linter.)
if lines[idx].startswith("#") or stripped == "":
continue
if not did_shebang_check:
if stripped in required_options:
required_options.remove(stripped)
else:
# The first non-comment, non-whitespace, non-option line is the first "real" line of the script.
# The shebang, options, etc. must come before this.
if required_options:
formatted = ['"{}"'.format(opt) for opt in required_options]
yield (idx + 1, "script is missing options {}".format(", ".join(formatted)))
did_shebang_check = True
if "`" in stripped:
yield (idx + 1, "script should not use backticks for command substitution")
if " [ " in stripped or stripped.startswith("[ "):
yield (idx + 1, "script should use `[[` instead of `[` for conditional testing")
for dollar in re.finditer('\$', stripped):
next_idx = dollar.end()
if next_idx < len(stripped):
next_char = stripped[next_idx]
if not (next_char == '{' or next_char == '('):
yield(idx + 1, "variable substitutions should use the full \"${VAR}\" form")
def check_rust(file_name, lines):
if not file_name.endswith(".rs") or \
file_name.endswith(".mako.rs") or \
file_name.endswith(os.path.join("style", "build.rs")) or \
file_name.endswith(os.path.join("geckolib", "build.rs")) or \
file_name.endswith(os.path.join("unit", "style", "stylesheets.rs")):
raise StopIteration
comment_depth = 0
merged_lines = ''
import_block = False
whitespace = False
is_lib_rs_file = file_name.endswith("lib.rs")
prev_use = None
prev_open_brace = False
multi_line_string = False
current_indent = 0
prev_crate = {}
prev_mod = {}
prev_feature_name = ""
indent = 0
prev_indent = 0
decl_message = "{} is not in alphabetical order"
decl_expected = "\n\t\033[93mexpected: {}\033[0m"
decl_found = "\n\t\033[91mfound: {}\033[0m"
for idx, original_line in enumerate(lines):
# simplify the analysis
line = original_line.strip()
prev_indent = indent
indent = len(original_line) - len(line)
is_attribute = re.search(r"#\[.*\]", line)
is_comment = re.search(r"^//|^/\*|^\*", line)
# Simple heuristic to avoid common case of no comments.
if '/' in line:
comment_depth += line.count('/*')
comment_depth -= line.count('*/')
if line.endswith('\\'):
merged_lines += line[:-1]
continue
if comment_depth:
merged_lines += line
continue
if merged_lines:
line = merged_lines + line
merged_lines = ''
if multi_line_string:
line, count = re.subn(
r'^(\\.|[^"\\])*?"', '', line, count=1)
if count == 1:
multi_line_string = False
else:
continue
# Ignore attributes, comments, and imports
# Keep track of whitespace to enable checking for a merged import block
if import_block:
if not (is_comment or is_attribute or line.startswith("use ")):
whitespace = line == ""
if not whitespace:
import_block = False
# get rid of strings and chars because cases like regex expression, keep attributes
if not is_attribute and not is_comment:
line = re.sub(r'"(\\.|[^\\"])*?"', '""', line)
line = re.sub(
r"'(\\.|[^\\']|(\\x[0-9a-fA-F]{2})|(\\u{[0-9a-fA-F]{1,6}}))'",
"''", line)
# If, after parsing all single-line strings, we still have
# an odd number of double quotes, this line starts a
# multiline string
if line.count('"') % 2 == 1:
line = re.sub(r'"(\\.|[^\\"])*?$', '""', line)
multi_line_string = True
# get rid of comments
line = re.sub('//.*?$|/\*.*?$|^\*.*?$', '//', line)
# get rid of attributes that do not contain =
line = re.sub('^#[A-Za-z0-9\(\)\[\]_]*?$', '#[]', line)
# flag this line if it matches one of the following regular expressions
# tuple format: (pattern, format_message, filter_function(match, line))
no_filter = lambda match, line: True
regex_rules = [
(r",[^\s]", "missing space after ,",
lambda match, line: '$' not in line and not is_attribute),
(r"([A-Za-z0-9_]+) (\()", "extra space after {0}",
lambda match, line: not (
is_attribute or
re.match(r"\bmacro_rules!\s+", line[:match.start()]) or
re.search(r"[^']'[A-Za-z0-9_]+ \($", line[:match.end()]) or
match.group(1) in ['const', 'fn', 'for', 'if', 'in',
'let', 'match', 'mut', 'return'])),
(r"[A-Za-z0-9\"]=", "missing space before =",
lambda match, line: is_attribute),
(r"=[A-Za-z0-9\"]", "missing space after =",
lambda match, line: is_attribute),
(r"^=\s", "no = in the beginning of line",
lambda match, line: not is_comment),
# ignore scientific notation patterns like 1e-6
(r"[A-DF-Za-df-z0-9]-", "missing space before -",
lambda match, line: not is_attribute),
(r"[A-Za-z0-9]([\+/\*%=])", "missing space before {0}",
lambda match, line: (not is_attribute and
not is_associated_type(match, line))),
# * not included because of dereferencing and casting
# - not included because of unary negation
(r'([\+/\%=])[A-Za-z0-9"]', "missing space after {0}",
lambda match, line: (not is_attribute and
not is_associated_type(match, line))),
(r"\)->", "missing space before ->", no_filter),
(r"->[A-Za-z]", "missing space after ->", no_filter),
(r"[^ ]=>", "missing space before =>", lambda match, line: match.start() != 0),
(r"=>[^ ]", "missing space after =>", lambda match, line: match.end() != len(line)),
(r"=> ", "extra space after =>", no_filter),
# ignore " ::crate::mod" and "trait Foo : Bar"
(r" :[^:]", "extra space before :",
lambda match, line: 'trait ' not in line[:match.start()]),
# ignore "crate::mod" and ignore flagging macros like "$t1:expr"
(r"[^:]:[A-Za-z0-9\"]", "missing space after :",
lambda match, line: '$' not in line[:match.end()]),
(r"[A-Za-z0-9\)]{", "missing space before {{", no_filter),
# ignore cases like "{}", "}`", "}}" and "use::std::{Foo, Bar}"
(r"[^\s{}]}[^`]", "missing space before }}",
lambda match, line: not re.match(r'^(pub )?use', line)),
# ignore cases like "{}", "`{", "{{" and "use::std::{Foo, Bar}"
(r"[^`]{[^\s{}]", "missing space after {{",
lambda match, line: not re.match(r'^(pub )?use', line)),
# There should not be any extra pointer dereferencing
(r": &Vec<", "use &[T] instead of &Vec<T>", no_filter),
# No benefit over using &str
(r": &String", "use &str instead of &String", no_filter),
            # There should not be any use of banned types:
# Cell<JSVal>, Cell<Dom<T>>, DomRefCell<Dom<T>>, DomRefCell<HEAP<T>>
(r"(\s|:)+Cell<JSVal>", "Banned type Cell<JSVal> detected. Use MutDom<JSVal> instead", no_filter),
(r"(\s|:)+Cell<Dom<.+>>", "Banned type Cell<Dom<T>> detected. Use MutDom<T> instead", no_filter),
(r"DomRefCell<Dom<.+>>", "Banned type DomRefCell<Dom<T>> detected. Use MutDom<T> instead", no_filter),
(r"DomRefCell<Heap<.+>>", "Banned type DomRefCell<Heap<T>> detected. Use MutDom<T> instead", no_filter),
# No benefit to using &Root<T>
(r": &Root<", "use &T instead of &Root<T>", no_filter),
(r"^&&", "operators should go at the end of the first line", no_filter),
(r"\{[A-Za-z0-9_]+\};", "use statement contains braces for single import",
lambda match, line: line.startswith('use ')),
(r"^\s*else {", "else braces should be on the same line", no_filter),
(r"[^$ ]\([ \t]", "extra space after (", no_filter),
# This particular pattern is not reentrant-safe in script_thread.rs
(r"match self.documents.borrow", "use a separate variable for the match expression",
lambda match, line: file_name.endswith('script_thread.rs')),
# -> () is unnecessary
(r"-> \(\)", "encountered function signature with -> ()", no_filter),
]
keywords = ["if", "let", "mut", "extern", "as", "impl", "fn", "struct", "enum", "pub", "mod",
"use", "in", "ref", "type", "where", "trait"]
extra_space_after = lambda key: (r"(?<![A-Za-z0-9\-_]){key} ".format(key=key),
"extra space after {key}".format(key=key),
lambda match, line: not is_attribute)
regex_rules.extend(map(extra_space_after, keywords))
for pattern, message, filter_func in regex_rules:
for match in re.finditer(pattern, line):
if filter_func(match, line):
yield (idx + 1, message.format(*match.groups(), **match.groupdict()))
if prev_open_brace and not line:
yield (idx + 1, "found an empty line following a {")
prev_open_brace = line.endswith("{")
# ensure a line starting with { or } has a number of leading spaces that is a multiple of 4
if line.startswith(("{", "}")):
match = re.match(r"(?: {4})* {1,3}([{}])", original_line)
if match:
if indent != prev_indent - 4:
yield (idx + 1, "space before {} is not a multiple of 4".format(match.group(1)))
# check alphabetical order of extern crates
if line.startswith("extern crate "):
# strip "extern crate " from the begin and ";" from the end
crate_name = line[13:-1]
if indent not in prev_crate:
prev_crate[indent] = ""
if prev_crate[indent] > crate_name:
yield(idx + 1, decl_message.format("extern crate declaration")
+ decl_expected.format(prev_crate[indent])
+ decl_found.format(crate_name))
prev_crate[indent] = crate_name
if line == "}":
for i in [i for i in prev_crate.keys() if i > indent]:
del prev_crate[i]
# check alphabetical order of feature attributes in lib.rs files
if is_lib_rs_file:
match = re.search(r"#!\[feature\((.*)\)\]", line)
if match:
features = map(lambda w: w.strip(), match.group(1).split(','))
sorted_features = sorted(features)
if sorted_features != features:
yield(idx + 1, decl_message.format("feature attribute")
+ decl_expected.format(tuple(sorted_features))
+ decl_found.format(tuple(features)))
if prev_feature_name > sorted_features[0]:
yield(idx + 1, decl_message.format("feature attribute")
+ decl_expected.format(prev_feature_name + " after " + sorted_features[0])
+ decl_found.format(prev_feature_name + " before " + sorted_features[0]))
prev_feature_name = sorted_features[0]
else:
# not a feature attribute line, so empty previous name
prev_feature_name = ""
# imports must be in the same line, alphabetically sorted, and merged
# into a single import block
if line.startswith("use "):
import_block = True
if not line.endswith(";") and '{' in line:
yield (idx + 1, "use statement spans multiple lines")
if '{ ' in line:
yield (idx + 1, "extra space after {")
if ' }' in line:
yield (idx + 1, "extra space before }")
# strip "use" from the begin and ";" from the end
current_use = line[4:-1]
if prev_use:
current_use_cut = current_use.replace("{self,", ".").replace("{", ".")
prev_use_cut = prev_use.replace("{self,", ".").replace("{", ".")
if indent == current_indent and current_use_cut < prev_use_cut:
yield(idx + 1, decl_message.format("use statement")
+ decl_expected.format(prev_use)
+ decl_found.format(current_use))
prev_use = current_use
current_indent = indent
if whitespace or not import_block:
current_indent = 0
# do not allow blank lines in an import block
if import_block and whitespace and line.startswith("use "):
whitespace = False
yield(idx, "encountered whitespace following a use statement")
# modules must be in the same line and alphabetically sorted
if line.startswith("mod ") or line.startswith("pub mod "):
# strip /(pub )?mod/ from the left and ";" from the right
mod = line[4:-1] if line.startswith("mod ") else line[8:-1]
if (idx - 1) < 0 or "#[macro_use]" not in lines[idx - 1]:
match = line.find(" {")
if indent not in prev_mod:
prev_mod[indent] = ""
if match == -1 and not line.endswith(";"):
yield (idx + 1, "mod declaration spans multiple lines")
if prev_mod[indent] and mod < prev_mod[indent]:
yield(idx + 1, decl_message.format("mod declaration")
+ decl_expected.format(prev_mod[indent])
+ decl_found.format(mod))
prev_mod[indent] = mod
else:
# we now erase previous entries
prev_mod = {}
# derivable traits should be alphabetically ordered
if is_attribute:
# match the derivable traits filtering out macro expansions
match = re.search(r"#\[derive\(([a-zA-Z, ]*)", line)
if match:
derives = map(lambda w: w.strip(), match.group(1).split(','))
# sort, compare and report
sorted_derives = sorted(derives)
if sorted_derives != derives:
yield(idx + 1, decl_message.format("derivable traits list")
+ decl_expected.format(", ".join(sorted_derives))
+ decl_found.format(", ".join(derives)))
def is_associated_type(match, line):
if match.group(1) != '=':
return False
open_angle = line[0:match.end()].rfind('<')
close_angle = line[open_angle:].find('>') if open_angle != -1 else -1
generic_open = open_angle != -1 and open_angle < match.start()
generic_close = close_angle != -1 and close_angle + open_angle >= match.end()
return generic_open and generic_close
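# e.g. the '=' in "Iterator<Item=u32>" sits between '<' and '>', so it is treated as
# an associated-type binding and exempted from the spacing rules in check_rust.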
def check_webidl_spec(file_name, contents):
# Sorted by this function (in pseudo-Rust). The idea is to group the same
# organization together.
# fn sort_standards(a: &Url, b: &Url) -> Ordering {
# let a_domain = a.domain().split(".");
# a_domain.pop();
# a_domain.reverse();
# let b_domain = b.domain().split(".");
# b_domain.pop();
# b_domain.reverse();
# for i in a_domain.into_iter().zip(b_domain.into_iter()) {
# match i.0.cmp(b.0) {
# Less => return Less,
# Greater => return Greater,
# _ => (),
# }
# }
# a_domain.path().cmp(b_domain.path())
# }
if not file_name.endswith(".webidl"):
raise StopIteration
for i in WEBIDL_STANDARDS:
if contents.find(i) != -1:
raise StopIteration
yield (0, "No specification link found.")
def duplicate_key_yaml_constructor(loader, node, deep=False):
mapping = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
if key in mapping:
raise KeyError(key)
value = loader.construct_object(value_node, deep=deep)
mapping[key] = value
return loader.construct_mapping(node, deep)
def lint_buildbot_steps_yaml(mapping):
# Check for well-formedness of contents
# A well-formed buildbot_steps.yml should be a map to list of strings
for k in mapping.keys():
if not isinstance(mapping[k], list):
raise ValueError("Key '{}' maps to type '{}', but list expected".format(k, type(mapping[k]).__name__))
# check if value is a list of strings
for item in itertools.ifilter(lambda i: not isinstance(i, str), mapping[k]):
raise ValueError("List mapped to '{}' contains non-string element".format(k))
class SafeYamlLoader(yaml.SafeLoader):
"""Subclass of yaml.SafeLoader to avoid mutating the global SafeLoader."""
pass
def check_yaml(file_name, contents):
if not file_name.endswith("buildbot_steps.yml"):
raise StopIteration
    # PyYAML itself does not reject duplicate keys, and they shouldn't be
    # allowed in buildbot_steps.yml as they could lead to confusion
SafeYamlLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
duplicate_key_yaml_constructor
)
try:
contents = yaml.load(contents, Loader=SafeYamlLoader)
lint_buildbot_steps_yaml(contents)
except yaml.YAMLError as e:
line = e.problem_mark.line + 1 if hasattr(e, 'problem_mark') else None
yield (line, e)
except KeyError as e:
yield (None, "Duplicated Key ({})".format(e.message))
except ValueError as e:
yield (None, e.message)
def check_for_possible_duplicate_json_keys(key_value_pairs):
keys = [x[0] for x in key_value_pairs]
seen_keys = set()
for key in keys:
if key in seen_keys:
raise KeyError("Duplicated Key (%s)" % key)
seen_keys.add(key)
def check_for_alphabetical_sorted_json_keys(key_value_pairs):
for a, b in zip(key_value_pairs[:-1], key_value_pairs[1:]):
if a[0] > b[0]:
raise KeyError("Unordered key (found %s before %s)" % (a[0], b[0]))
def check_json_requirements(filename):
def check_fn(key_value_pairs):
check_for_possible_duplicate_json_keys(key_value_pairs)
if filename in normilize_paths(config["check-ordered-json-keys"]):
check_for_alphabetical_sorted_json_keys(key_value_pairs)
return check_fn
def check_json(filename, contents):
if not filename.endswith(".json"):
raise StopIteration
try:
json.loads(contents, object_pairs_hook=check_json_requirements(filename))
except ValueError as e:
match = re.search(r"line (\d+) ", e.message)
line_no = match and match.group(1)
yield (line_no, e.message)
except KeyError as e:
yield (None, e.message)
def check_spec(file_name, lines):
if SPEC_BASE_PATH not in file_name:
raise StopIteration
file_name = os.path.relpath(os.path.splitext(file_name)[0], SPEC_BASE_PATH)
patt = re.compile("^\s*\/\/.+")
# Pattern representing a line with a macro
macro_patt = re.compile("^\s*\S+!(.*)$")
# Pattern representing a line with comment containing a spec link
link_patt = re.compile("^\s*///? (<https://.+>|https://.+)$")
# Pattern representing a line with comment or attribute
comment_patt = re.compile("^\s*(///?.+|#\[.+\])$")
brace_count = 0
in_impl = False
pattern = "impl {}Methods for {} {{".format(file_name, file_name)
for idx, line in enumerate(lines):
if "// check-tidy: no specs after this line" in line:
break
if not patt.match(line):
if pattern.lower() in line.lower():
in_impl = True
if ("fn " in line or macro_patt.match(line)) and brace_count == 1:
for up_idx in range(1, idx + 1):
up_line = lines[idx - up_idx]
if link_patt.match(up_line):
# Comment with spec link exists
break
if not comment_patt.match(up_line):
# No more comments exist above, yield warning
yield (idx + 1, "method declared in webidl is missing a comment with a specification link")
break
if in_impl:
brace_count += line.count('{')
brace_count -= line.count('}')
if brace_count < 1:
break
def check_config_file(config_file, print_text=True):
# Check if config file exists
if not os.path.exists(config_file):
print("%s config file is required but was not found" % config_file)
sys.exit(1)
# Load configs from servo-tidy.toml
with open(config_file) as content:
conf_file = content.read()
lines = conf_file.splitlines(True)
if print_text:
print '\rChecking the config file...'
config_content = toml.loads(conf_file)
exclude = config_content.get("ignore", {})
# Check for invalid listed ignored directories
exclude_dirs = exclude.get("directories", [])
skip_dirs = ["./target", "./tests"]
invalid_dirs = [d for d in exclude_dirs if not os.path.isdir(d) and not any(s in d for s in skip_dirs)]
# Check for invalid listed ignored files
invalid_files = [f for f in exclude.get("files", []) if not os.path.exists(f)]
current_table = ""
for idx, line in enumerate(lines):
# Ignore comment lines
if line.strip().startswith("#"):
continue
# Check for invalid tables
if re.match("\[(.*?)\]", line.strip()):
table_name = re.findall(r"\[(.*?)\]", line)[0].strip()
if table_name not in ("configs", "blocked-packages", "ignore", "check_ext"):
yield config_file, idx + 1, "invalid config table [%s]" % table_name
current_table = table_name
continue
# Print invalid listed ignored directories
if current_table == "ignore" and invalid_dirs:
for d in invalid_dirs:
if line.strip().strip('\'",') == d:
yield config_file, idx + 1, "ignored directory '%s' doesn't exist" % d
invalid_dirs.remove(d)
break
# Print invalid listed ignored files
if current_table == "ignore" and invalid_files:
for f in invalid_files:
if line.strip().strip('\'",') == f:
yield config_file, idx + 1, "ignored file '%s' doesn't exist" % f
invalid_files.remove(f)
break
# Skip if there is no equal sign in line, assuming it's not a key
if "=" not in line:
continue
key = line.split("=")[0].strip()
# Check for invalid keys inside [configs] and [ignore] table
if (current_table == "configs" and key not in config or
current_table == "ignore" and key not in config["ignore"] or
# Any key outside of tables
current_table == ""):
yield config_file, idx + 1, "invalid config key '%s'" % key
# Parse config file
parse_config(config_content)
def parse_config(config_file):
exclude = config_file.get("ignore", {})
# Add list of ignored directories to config
config["ignore"]["directories"] += normilize_paths(exclude.get("directories", []))
# Add list of ignored files to config
config["ignore"]["files"] += normilize_paths(exclude.get("files", []))
# Add list of ignored packages to config
config["ignore"]["packages"] = exclude.get("packages", [])
# Add dict of dir, list of expected ext to config
dirs_to_check = config_file.get("check_ext", {})
# Fix the paths (OS-dependent)
for path, exts in dirs_to_check.items():
config['check_ext'][normilize_paths(path)] = exts
# Add list of blocked packages
config["blocked-packages"] = config_file.get("blocked-packages", {})
# Override default configs
user_configs = config_file.get("configs", [])
for pref in user_configs:
if pref in config:
config[pref] = user_configs[pref]
def check_directory_files(directories, print_text=True):
if print_text:
print '\rChecking directories for correct file extensions...'
for directory, file_extensions in directories.items():
files = sorted(os.listdir(directory))
for filename in files:
if not any(filename.endswith(ext) for ext in file_extensions):
details = {
"name": os.path.basename(filename),
"ext": ", ".join(file_extensions),
"dir_name": directory
}
message = '''Unexpected extension found for {name}. \
We only expect files with {ext} extensions in {dir_name}'''.format(**details)
yield (filename, 1, message)
def collect_errors_for_files(files_to_check, checking_functions, line_checking_functions, print_text=True):
(has_element, files_to_check) = is_iter_empty(files_to_check)
if not has_element:
raise StopIteration
if print_text:
print '\rChecking files for tidiness...'
for filename in files_to_check:
if not os.path.exists(filename):
continue
with open(filename, "r") as f:
contents = f.read()
if not contents.strip():
yield filename, 0, "file is empty"
continue
for check in checking_functions:
for error in check(filename, contents):
# the result will be: `(filename, line, message)`
yield (filename,) + error
lines = contents.splitlines(True)
for check in line_checking_functions:
for error in check(filename, lines):
yield (filename,) + error
def get_dep_toml_files(only_changed_files=False):
if not only_changed_files:
print '\nRunning the dependency licensing lint...'
for root, directories, filenames in os.walk(".cargo"):
for filename in filenames:
if filename == "Cargo.toml":
yield os.path.join(root, filename)
def check_dep_license_errors(filenames, progress=True):
filenames = progress_wrapper(filenames) if progress else filenames
for filename in filenames:
with open(filename, "r") as f:
ok_licensed = False
lines = f.readlines()
for idx, line in enumerate(lines):
for license_line in licenses_dep_toml:
ok_licensed |= (license_line in line)
if not ok_licensed:
yield (filename, 0, "dependency should contain a valid license.")
class LintRunner(object):
def __init__(self, lint_path=None, only_changed_files=True, exclude_dirs=[], progress=True, stylo=False):
self.only_changed_files = only_changed_files
self.exclude_dirs = exclude_dirs
self.progress = progress
self.path = lint_path
self.stylo = stylo
def check(self):
if not os.path.exists(self.path):
yield (self.path, 0, "file does not exist")
return
if not self.path.endswith('.py'):
yield (self.path, 0, "lint should be a python script")
return
dir_name, filename = os.path.split(self.path)
sys.path.append(dir_name)
module = imp.load_source(filename[:-3], self.path)
sys.path.remove(dir_name)
if not hasattr(module, 'Lint'):
yield (self.path, 1, "script should contain a class named 'Lint'")
return
if not issubclass(module.Lint, LintRunner):
yield (self.path, 1, "class 'Lint' should inherit from 'LintRunner'")
return
lint = module.Lint(self.path, self.only_changed_files,
self.exclude_dirs, self.progress, stylo=self.stylo)
for error in lint.run():
            if not isinstance(error, tuple) or len(error) != 3:
yield (self.path, 1, "errors should be a tuple of (path, line, reason)")
return
yield error
def get_files(self, path, **kwargs):
args = ['only_changed_files', 'exclude_dirs', 'progress']
kwargs = {k: kwargs.get(k, getattr(self, k)) for k in args}
return FileList(path, **kwargs)
def run(self):
yield (self.path, 0, "class 'Lint' should implement 'run' method")
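# A minimal lint script accepted by this runner might look like (sketch; path and
# message are hypothetical):
#     class Lint(LintRunner):
#         def run(self):
#             for filename in self.get_files("./components"):
#                 yield (filename, 0, "example finding")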
def run_lint_scripts(only_changed_files=False, progress=True, stylo=False):
runner = LintRunner(only_changed_files=only_changed_files, progress=progress, stylo=stylo)
for path in config['lint-scripts']:
runner.path = path
for error in runner.check():
yield error
def scan(only_changed_files=False, progress=True, stylo=False):
# check config file for errors
config_errors = check_config_file(CONFIG_FILE_PATH)
# check directories contain expected files
directory_errors = check_directory_files(config['check_ext'])
# standard checks
files_to_check = filter_files('.', only_changed_files and not stylo, progress)
checking_functions = (check_flake8, check_lock, check_webidl_spec, check_json, check_yaml)
line_checking_functions = (check_license, check_by_line, check_toml, check_shell,
check_rust, check_spec, check_modeline)
file_errors = collect_errors_for_files(files_to_check, checking_functions, line_checking_functions)
    # check dependency licenses
dep_license_errors = check_dep_license_errors(get_dep_toml_files(only_changed_files), progress)
# other lint checks
lint_errors = run_lint_scripts(only_changed_files, progress, stylo=stylo)
# chain all the iterators
errors = itertools.chain(config_errors, directory_errors, lint_errors,
file_errors, dep_license_errors)
error = None
for error in errors:
colorama.init()
print "\r\033[94m{}\033[0m:\033[93m{}\033[0m: \033[91m{}\033[0m".format(*error)
print
if error is None:
colorama.init()
print "\033[92mtidy reported no errors.\033[0m"
return int(error is not None)
|
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import c_uint64, c_char_p
import sys
from .utils import CxxPointer, _call_with_growing_buffer
from .frame import Frame, Topology
from .misc import ChemfilesError
if sys.hexversion >= 0x03000000:
unicode_string = str
bytes_string = bytes
else:
unicode_string = unicode # noqa
bytes_string = str
class BaseTrajectory(CxxPointer):
def __init__(self, ptr):
self.__closed = False
super(BaseTrajectory, self).__init__(ptr, is_const=False)
def __check_opened(self):
if self.__closed:
raise ChemfilesError("Can not use a closed Trajectory")
def __del__(self):
if not self.__closed:
self.close()
def __enter__(self):
self.__check_opened()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
self.__check_opened()
for step in range(self.nsteps):
yield self.read_step(step)
def read(self):
"""
Read the next step of this :py:class:`Trajectory` and return the
corresponding :py:class:`Frame`.
"""
self.__check_opened()
frame = Frame()
self.ffi.chfl_trajectory_read(self.mut_ptr, frame.mut_ptr)
return frame
def read_step(self, step):
"""
Read a specific ``step`` in this :py:class:`Trajectory` and return the
corresponding :py:class:`Frame`.
"""
self.__check_opened()
frame = Frame()
self.ffi.chfl_trajectory_read_step(self.mut_ptr, c_uint64(step), frame.mut_ptr)
return frame
def write(self, frame):
"""Write a :py:class:`Frame` to this :py:class:`Trajectory`."""
self.__check_opened()
self.ffi.chfl_trajectory_write(self.mut_ptr, frame.ptr)
def set_topology(self, topology, format=""):
"""
Set the :py:class:`Topology` associated with this :py:class:`Trajectory`.
The new topology will be used when reading and writing the files,
replacing any topology in the frames or files.
If the ``topology`` parameter is a :py:class:`Topology` instance, it is
used directly. If the ``topology`` parameter is a string, the first
:py:class:`Frame` of the corresponding file is read, and the topology of
this frame is used.
When reading from a file, if ``format`` is not the empty string, it is
used as the file format instead of guessing it from the file extension.
"""
self.__check_opened()
if isinstance(topology, Topology):
self.ffi.chfl_trajectory_set_topology(self.mut_ptr, topology.ptr)
else:
self.ffi.chfl_trajectory_topology_file(
self.mut_ptr, topology.encode("utf8"), format.encode("utf8")
)
def set_cell(self, cell):
"""
Set the :py:class:`UnitCell` associated with this :py:class:`Trajectory`
to a copy of ``cell``.
This :py:class:`UnitCell` will be used when reading and writing the
files, replacing any unit cell in the frames or files.
"""
self.__check_opened()
self.ffi.chfl_trajectory_set_cell(self.mut_ptr, cell.ptr)
@property
def nsteps(self):
"""Get the current number of steps in this :py:class:`Trajectory`."""
self.__check_opened()
nsteps = c_uint64()
self.ffi.chfl_trajectory_nsteps(self.mut_ptr, nsteps)
return nsteps.value
@property
def path(self):
"""Get the path used to open this :py:class:`Trajectory`."""
self.__check_opened()
return _call_with_growing_buffer(
lambda buffer, size: self.ffi.chfl_trajectory_path(self.ptr, buffer, size),
initial=256,
)
def close(self):
"""
Close this :py:class:`Trajectory` and write any buffered content to the
file.
"""
self.__check_opened()
self.__closed = True
self.ffi.chfl_trajectory_close(self.ptr)
class Trajectory(BaseTrajectory):
"""
    A :py:class:`Trajectory` represents a physical file from which we can read
    :py:class:`Frame` objects.
"""
def __init__(self, path, mode="r", format=""):
"""
Open the file at the given ``path`` using the given ``mode`` and
optional file ``format``.
Valid modes are ``'r'`` for read, ``'w'`` for write and ``'a'`` for
append.
The ``format`` parameter is needed when the file format does not match
        the extension, or when there is no standard extension for this format.
        If ``format`` is an empty string, the format will be guessed from the
file extension.
"""
ptr = self.ffi.chfl_trajectory_with_format(
path.encode("utf8"), mode.encode("utf8"), format.encode("utf8")
)
# Store mode and format for __repr__
self.__mode = mode
self.__format = format
super(Trajectory, self).__init__(ptr)
def __repr__(self):
return "Trajectory('{}', '{}', '{}')".format(
self.path, self.__mode, self.__format
)
class MemoryTrajectory(BaseTrajectory):
"""
    A :py:class:`MemoryTrajectory` allows reading and writing in-memory data as
    though it were a formatted file.
"""
def __init__(self, data="", mode="r", format=""):
"""
The ``format`` parameter is always required.
When reading (``mode`` is ``'r'``), the ``data`` parameter will be used
as the formatted file.
When writing (``mode`` is ``'w'``), the ``data`` parameter is ignored.
To get the memory buffer containing everything already written, use the
:py:func:`buffer` function.
"""
if not format:
raise ChemfilesError(
"'format' is required when creating a MemoryTrajectory"
)
if mode == "r":
if isinstance(data, unicode_string):
data = data.encode("utf8")
elif not isinstance(data, bytes_string):
raise ChemfilesError("the 'data' parameter must be a string")
ptr = self.ffi.chfl_trajectory_memory_reader(
data, len(data), format.encode("utf8")
)
elif mode == "w":
ptr = self.ffi.chfl_trajectory_memory_writer(format.encode("utf8"))
else:
raise ChemfilesError(
"invalid mode '{}' passed to MemoryTrajectory".format(mode)
)
        # Store mode and format for __repr__
        self.__mode = mode
        self.__format = format
        super(MemoryTrajectory, self).__init__(ptr)
def __repr__(self):
return "MemoryTrajectory({}', '{}')".format(self.__mode, self.__format)
def buffer(self):
"""
Get the data written to this in-memory trajectory. This is not valid to
call when reading in-memory data.
"""
buffer = c_char_p()
size = c_uint64()
self.ffi.chfl_trajectory_memory_buffer(self.ptr, buffer, size)
return buffer.value
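# A minimal usage sketch for the classes above; the file name, format, and
# ``frame`` variable are hypothetical:
#
#     with Trajectory("water.xyz") as trajectory:
#         for frame in trajectory:
#             pass  # work with each Frame
#
#     memory = MemoryTrajectory(mode="w", format="XYZ")
#     memory.write(frame)
#     data = memory.buffer()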
|
from errno import ENOENT as NO_SUCH_FILE_OR_DIRECTORY
from glob import glob
import shutil
import gzip
import itertools
import locale
import os
from os import path
import platform
import re
import contextlib
import subprocess
from subprocess import PIPE
import sys
import tarfile
import zipfile
from xml.etree.ElementTree import XML
from servo.util import download_file
import urllib2
from bootstrap import check_gstreamer_lib
from mach.decorators import CommandArgument
from mach.registrar import Registrar
import toml
from servo.packages import WINDOWS_MSVC as msvc_deps
from servo.util import host_triple
BIN_SUFFIX = ".exe" if sys.platform == "win32" else ""
NIGHTLY_REPOSITORY_URL = "https://servo-builds.s3.amazonaws.com/"
@contextlib.contextmanager
def cd(new_path):
"""Context manager for changing the current working directory"""
previous_path = os.getcwd()
try:
os.chdir(new_path)
yield
finally:
os.chdir(previous_path)
@contextlib.contextmanager
def setlocale(name):
"""Context manager for changing the current locale"""
saved_locale = locale.setlocale(locale.LC_ALL)
try:
yield locale.setlocale(locale.LC_ALL, name)
finally:
locale.setlocale(locale.LC_ALL, saved_locale)
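# A minimal usage sketch of the two context managers above; the path is
# hypothetical:
#
#     with cd("/tmp"):
#         with setlocale('C'):
#             entries = sorted(os.listdir('.'), key=locale.strxfrm)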
def find_dep_path_newest(package, bin_path):
deps_path = path.join(path.split(bin_path)[0], "build")
candidates = []
with cd(deps_path):
for c in glob(package + '-*'):
candidate_path = path.join(deps_path, c)
if path.exists(path.join(candidate_path, "output")):
candidates.append(candidate_path)
if candidates:
return max(candidates, key=lambda c: path.getmtime(path.join(c, "output")))
return None
def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):
"""Create a .tar.gz archive in a deterministic (reproducible) manner.
See https://reproducible-builds.org/docs/archives/ for more details."""
def reset(tarinfo):
"""Helper to reset owner/group and modification time for tar entries"""
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = "root"
tarinfo.mtime = 0
return tarinfo
dest_archive = os.path.abspath(dest_archive)
with cd(dir_to_archive):
current_dir = "."
file_list = []
for root, dirs, files in os.walk(current_dir):
if dest_archive.endswith(".zip"):
for f in files:
file_list.append(os.path.join(root, f))
else:
for name in itertools.chain(dirs, files):
file_list.append(os.path.join(root, name))
# Sort file entries with the fixed locale
with setlocale('C'):
file_list.sort(cmp=locale.strcoll)
# Use a temporary file and atomic rename to avoid partially-formed
# packaging (in case of exceptional situations like running out of disk space).
# TODO do this in a temporary folder after #11983 is fixed
temp_file = '{}.temp~'.format(dest_archive)
with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0644), 'w') as out_file:
if dest_archive.endswith('.zip'):
with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for entry in file_list:
arcname = entry
if prepend_path is not None:
arcname = os.path.normpath(os.path.join(prepend_path, arcname))
zip_file.write(entry, arcname=arcname)
else:
                with gzip.GzipFile(mode='wb', fileobj=out_file, mtime=0) as gzip_file:
with tarfile.open(fileobj=gzip_file, mode='w:') as tar_file:
for entry in file_list:
arcname = entry
if prepend_path is not None:
arcname = os.path.normpath(os.path.join(prepend_path, arcname))
tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)
os.rename(temp_file, dest_archive)
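# A minimal usage sketch of archive_deterministically; the paths are
# hypothetical:
#
#     archive_deterministically("target/release/pkg", "servo-nightly.tar.gz",
#                               prepend_path="servo/")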
def normalize_env(env):
# There is a bug in subprocess where it doesn't like unicode types in
# environment variables. Here, ensure all unicode are converted to
# binary. utf-8 is our globally assumed default. If the caller doesn't
# want UTF-8, they shouldn't pass in a unicode instance.
normalized_env = {}
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('utf-8', 'strict')
if isinstance(v, unicode):
v = v.encode('utf-8', 'strict')
normalized_env[k] = v
return normalized_env
def call(*args, **kwargs):
"""Wrap `subprocess.call`, printing the command if verbose=True."""
verbose = kwargs.pop('verbose', False)
if verbose:
print(' '.join(args[0]))
if 'env' in kwargs:
kwargs['env'] = normalize_env(kwargs['env'])
# we have to use shell=True in order to get PATH handling
# when looking for the binary on Windows
return subprocess.call(*args, shell=sys.platform == 'win32', **kwargs)
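# A minimal usage sketch of the wrapper above; the command is hypothetical:
#
#     call(["cargo", "--version"], env=os.environ.copy(), verbose=True)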
def check_output(*args, **kwargs):
"""Wrap `subprocess.call`, printing the command if verbose=True."""
verbose = kwargs.pop('verbose', False)
if verbose:
print(' '.join(args[0]))
if 'env' in kwargs:
kwargs['env'] = normalize_env(kwargs['env'])
# we have to use shell=True in order to get PATH handling
# when looking for the binary on Windows
return subprocess.check_output(*args, shell=sys.platform == 'win32', **kwargs)
def check_call(*args, **kwargs):
"""Wrap `subprocess.check_call`, printing the command if verbose=True.
Also fix any unicode-containing `env`, for subprocess """
verbose = kwargs.pop('verbose', False)
if 'env' in kwargs:
kwargs['env'] = normalize_env(kwargs['env'])
if verbose:
print(' '.join(args[0]))
# we have to use shell=True in order to get PATH handling
# when looking for the binary on Windows
proc = subprocess.Popen(*args, shell=sys.platform == 'win32', **kwargs)
status = None
# Leave it to the subprocess to handle Ctrl+C. If it terminates as
# a result of Ctrl+C, proc.wait() will return a status code, and,
# we get out of the loop. If it doesn't, like e.g. gdb, we continue
# waiting.
while status is None:
try:
status = proc.wait()
except KeyboardInterrupt:
pass
if status:
        raise subprocess.CalledProcessError(status, ' '.join(args[0]))
def is_windows():
return sys.platform == 'win32'
def is_macosx():
return sys.platform == 'darwin'
def is_linux():
return sys.platform.startswith('linux')
def append_to_path_env(string, env, name):
variable = ""
if name in env:
variable = env[name]
if len(variable) > 0:
variable += os.pathsep
variable += string
env[name] = variable
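# A minimal usage sketch of append_to_path_env; the paths are hypothetical:
#
#     env = {"PATH": "/usr/bin"}
#     append_to_path_env("/opt/gstreamer/bin", env, "PATH")
#     # env["PATH"] is now "/usr/bin" + os.pathsep + "/opt/gstreamer/bin"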
def set_osmesa_env(bin_path, env):
"""Set proper LD_LIBRARY_PATH and DRIVE for software rendering on Linux and OSX"""
if is_linux():
dep_path = find_dep_path_newest('osmesa-src', bin_path)
if not dep_path:
return None
osmesa_path = path.join(dep_path, "out", "lib", "gallium")
append_to_path_env(osmesa_path, env, "LD_LIBRARY_PATH")
env["GALLIUM_DRIVER"] = "softpipe"
elif is_macosx():
osmesa_dep_path = find_dep_path_newest('osmesa-src', bin_path)
if not osmesa_dep_path:
return None
osmesa_path = path.join(osmesa_dep_path,
"out", "src", "gallium", "targets", "osmesa", ".libs")
glapi_path = path.join(osmesa_dep_path,
"out", "src", "mapi", "shared-glapi", ".libs")
append_to_path_env(osmesa_path + ":" + glapi_path, env, "DYLD_LIBRARY_PATH")
env["GALLIUM_DRIVER"] = "softpipe"
return env
def gstreamer_root(target, env, topdir=None):
if is_windows():
arch = {
"x86_64": "X86_64",
"x86": "X86",
"aarch64": "ARM64",
}
gst_x64 = arch[target.split('-')[0]]
gst_default_path = path.join("C:\\gstreamer\\1.0", gst_x64)
gst_env = "GSTREAMER_1_0_ROOT_" + gst_x64
if env.get(gst_env) is not None:
return env.get(gst_env)
elif os.path.exists(path.join(gst_default_path, "bin", "ffi-7.dll")):
return gst_default_path
elif sys.platform == "linux2":
return path.join(topdir, "support", "linux", "gstreamer", "gst")
return None
class BuildNotFound(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class CommandBase(object):
"""Base class for mach command providers.
This mostly handles configuration management, such as .servobuild."""
def __init__(self, context):
self.context = context
def get_env_bool(var, default):
# Contents of env vars are strings by default. This returns the
# boolean value of the specified environment variable, or the
            # specified default if the var doesn't contain 'True' or 'False'
return {'True': True, 'False': False}.get(os.environ.get(var), default)
def resolverelative(category, key):
# Allow ~
self.config[category][key] = path.expanduser(self.config[category][key])
# Resolve relative paths
self.config[category][key] = path.join(context.topdir,
self.config[category][key])
if not hasattr(self.context, "bootstrapped"):
self.context.bootstrapped = False
config_path = path.join(context.topdir, ".servobuild")
if path.exists(config_path):
with open(config_path) as f:
self.config = toml.loads(f.read())
else:
self.config = {}
# Handle missing/default items
self.config.setdefault("tools", {})
default_cache_dir = os.environ.get("SERVO_CACHE_DIR",
path.join(context.topdir, ".servo"))
self.config["tools"].setdefault("cache-dir", default_cache_dir)
resolverelative("tools", "cache-dir")
default_cargo_home = os.environ.get("CARGO_HOME",
path.join(context.topdir, ".cargo"))
self.config["tools"].setdefault("cargo-home-dir", default_cargo_home)
resolverelative("tools", "cargo-home-dir")
context.sharedir = self.config["tools"]["cache-dir"]
self.config["tools"].setdefault("use-rustup", True)
self.config["tools"].setdefault("rustc-with-gold", get_env_bool("SERVO_RUSTC_WITH_GOLD", True))
self.config.setdefault("build", {})
self.config["build"].setdefault("android", False)
self.config["build"].setdefault("mode", "")
self.config["build"].setdefault("debug-mozjs", False)
self.config["build"].setdefault("ccache", "")
self.config["build"].setdefault("rustflags", "")
self.config["build"].setdefault("incremental", None)
self.config["build"].setdefault("thinlto", False)
self.config["build"].setdefault("webgl-backtrace", False)
self.config["build"].setdefault("dom-backtrace", False)
self.config.setdefault("android", {})
self.config["android"].setdefault("sdk", "")
self.config["android"].setdefault("ndk", "")
self.config["android"].setdefault("toolchain", "")
# Set default android target
self.handle_android_target("armv7-linux-androideabi")
_default_toolchain = None
def toolchain(self):
return self.default_toolchain()
def default_toolchain(self):
if self._default_toolchain is None:
filename = path.join(self.context.topdir, "rust-toolchain")
with open(filename) as f:
self._default_toolchain = f.read().strip()
return self._default_toolchain
def call_rustup_run(self, args, **kwargs):
if self.config["tools"]["use-rustup"]:
try:
version_line = subprocess.check_output(["rustup" + BIN_SUFFIX, "--version"])
except OSError as e:
if e.errno == NO_SUCH_FILE_OR_DIRECTORY:
print "It looks like rustup is not installed. See instructions at " \
"https://github.com/servo/servo/#setting-up-your-environment"
print
return 1
raise
            version = tuple(map(int, re.match(r"rustup (\d+)\.(\d+)\.(\d+)", version_line).groups()))
if version < (1, 11, 0):
print "rustup is at version %s.%s.%s, Servo requires 1.11.0 or more recent." % version
print "Try running 'rustup self update'."
return 1
toolchain = self.toolchain()
if platform.system() == "Windows":
toolchain += "-x86_64-pc-windows-msvc"
args = ["rustup" + BIN_SUFFIX, "run", "--install", toolchain] + args
else:
args[0] += BIN_SUFFIX
return call(args, **kwargs)
def get_top_dir(self):
return self.context.topdir
def get_target_dir(self):
if "CARGO_TARGET_DIR" in os.environ:
return os.environ["CARGO_TARGET_DIR"]
else:
return path.join(self.context.topdir, "target")
def get_apk_path(self, release):
base_path = self.get_target_dir()
base_path = path.join(base_path, "android", self.config["android"]["target"])
apk_name = "servoapp.apk"
build_type = "release" if release else "debug"
return path.join(base_path, build_type, apk_name)
def get_binary_path(self, release, dev, target=None, android=False, magicleap=False, simpleservo=False):
# TODO(autrilla): this function could still use work - it shouldn't
# handle quitting, or printing. It should return the path, or an error.
base_path = self.get_target_dir()
binary_name = "servo" + BIN_SUFFIX
if magicleap:
base_path = path.join(base_path, "magicleap", "aarch64-linux-android")
binary_name = "libmlservo.a"
elif android:
base_path = path.join(base_path, "android", self.config["android"]["target"])
simpleservo = True
elif target:
base_path = path.join(base_path, target)
if simpleservo:
binary_name = "simpleservo.dll" if sys.platform == "win32" else "libsimpleservo.so"
release_path = path.join(base_path, "release", binary_name)
dev_path = path.join(base_path, "debug", binary_name)
# Prefer release if both given
if release and dev:
dev = False
release_exists = path.exists(release_path)
dev_exists = path.exists(dev_path)
if not release_exists and not dev_exists:
raise BuildNotFound('No Servo binary found.'
' Perhaps you forgot to run `./mach build`?')
if release and release_exists:
return release_path
if dev and dev_exists:
return dev_path
if not dev and not release and release_exists and dev_exists:
print("You have multiple profiles built. Please specify which "
"one to run with '--release' or '--dev'.")
sys.exit()
if not dev and not release:
if release_exists:
return release_path
else:
return dev_path
print("The %s profile is not built. Please run './mach build%s' "
"and try again." % ("release" if release else "dev",
" --release" if release else ""))
sys.exit()
def detach_volume(self, mounted_volume):
print("Detaching volume {}".format(mounted_volume))
try:
subprocess.check_call(['hdiutil', 'detach', mounted_volume])
except subprocess.CalledProcessError as e:
print("Could not detach volume {} : {}".format(mounted_volume, e.returncode))
sys.exit(1)
def detach_volume_if_attached(self, mounted_volume):
if os.path.exists(mounted_volume):
self.detach_volume(mounted_volume)
def mount_dmg(self, dmg_path):
print("Mounting dmg {}".format(dmg_path))
try:
subprocess.check_call(['hdiutil', 'attach', dmg_path])
except subprocess.CalledProcessError as e:
print("Could not mount Servo dmg : {}".format(e.returncode))
sys.exit(1)
def extract_nightly(self, nightlies_folder, destination_folder, destination_file):
print("Extracting to {} ...".format(destination_folder))
if is_macosx():
mounted_volume = path.join(path.sep, "Volumes", "Servo")
self.detach_volume_if_attached(mounted_volume)
self.mount_dmg(destination_file)
# Servo folder is always this one
servo_directory = path.join(path.sep, "Volumes", "Servo", "Servo.app", "Contents", "MacOS")
print("Copying files from {} to {}".format(servo_directory, destination_folder))
shutil.copytree(servo_directory, destination_folder)
self.detach_volume(mounted_volume)
else:
if is_windows():
command = 'msiexec /a {} /qn TARGETDIR={}'.format(
os.path.join(nightlies_folder, destination_file), destination_folder)
if subprocess.call(command, stdout=PIPE, stderr=PIPE) != 0:
print("Could not extract the nightly executable from the msi package.")
sys.exit(1)
else:
with tarfile.open(os.path.join(nightlies_folder, destination_file), "r") as tar:
tar.extractall(destination_folder)
def get_executable(self, destination_folder):
if is_windows():
return path.join(destination_folder, "PFiles", "Mozilla research", "Servo Tech Demo")
        if is_linux():
return path.join(destination_folder, "servo", "servo")
return path.join(destination_folder, "servo")
def get_nightly_binary_path(self, nightly_date):
if nightly_date is None:
return
if not nightly_date:
print(
"No nightly date has been provided although the --nightly or -n flag has been passed.")
sys.exit(1)
        # Will allow us to fetch the relevant builds from the nightly repository
os_prefix = "linux"
if is_windows():
os_prefix = "windows-msvc"
if is_macosx():
os_prefix = "mac"
nightly_date = nightly_date.strip()
# Fetch the filename to download from the build list
repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
req = urllib2.Request(
"{}/{}/{}".format(repository_index, os_prefix, nightly_date))
try:
response = urllib2.urlopen(req).read()
tree = XML(response)
namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
file_to_download = tree.find('ns:Contents', namespaces).find(
'ns:Key', namespaces).text
except urllib2.URLError as e:
print("Could not fetch the available nightly versions from the repository : {}".format(
e.reason))
sys.exit(1)
except AttributeError as e:
print("Could not fetch a nightly version for date {} and platform {}".format(
nightly_date, os_prefix))
sys.exit(1)
nightly_target_directory = path.join(self.context.topdir, "target")
# ':' is not an authorized character for a file name on Windows
# make sure the OS specific separator is used
target_file_path = file_to_download.replace(':', '-').split('/')
destination_file = os.path.join(
nightly_target_directory, os.path.join(*target_file_path))
# Once extracted, the nightly folder name is the tar name without the extension
# (eg /foo/bar/baz.tar.gz extracts to /foo/bar/baz)
destination_folder = os.path.splitext(destination_file)[0]
nightlies_folder = path.join(
nightly_target_directory, 'nightly', os_prefix)
# Make sure the target directory exists
if not os.path.isdir(nightlies_folder):
print("The nightly folder for the target does not exist yet. Creating {}".format(
nightlies_folder))
os.makedirs(nightlies_folder)
# Download the nightly version
if os.path.isfile(path.join(nightlies_folder, destination_file)):
print("The nightly file {} has already been downloaded.".format(
destination_file))
else:
print("The nightly {} does not exist yet, downloading it.".format(
destination_file))
download_file(destination_file, NIGHTLY_REPOSITORY_URL +
file_to_download, destination_file)
# Extract the downloaded nightly version
if os.path.isdir(destination_folder):
print("The nightly folder {} has already been extracted.".format(
destination_folder))
else:
self.extract_nightly(nightlies_folder, destination_folder, destination_file)
return self.get_executable(destination_folder)
def needs_gstreamer_env(self, target, env):
try:
if check_gstreamer_lib():
return False
        except Exception:
# Some systems don't have pkg-config; we can't probe in this case
# and must hope for the best
return False
effective_target = target or host_triple()
if "x86_64" not in effective_target or "android" in effective_target:
# We don't build gstreamer for non-x86_64 / android yet
return False
if sys.platform == "linux2" or is_windows():
if path.isdir(gstreamer_root(effective_target, env, self.get_top_dir())):
return True
else:
raise Exception("Your system's gstreamer libraries are out of date \
(we need at least 1.12). Please run ./mach bootstrap-gstreamer")
else:
raise Exception("Your system's gstreamer libraries are out of date \
(we need at least 1.12). If you're unable to \
install them, let us know by filing a bug!")
return False
def set_run_env(self, android=False):
"""Some commands, like test-wpt, don't use a full build env,
but may still need dynamic search paths. This command sets that up"""
if not android and self.needs_gstreamer_env(None, os.environ):
gstpath = gstreamer_root(host_triple(), os.environ, self.get_top_dir())
if gstpath is None:
return
os.environ["LD_LIBRARY_PATH"] = path.join(gstpath, "lib")
os.environ["GST_PLUGIN_SYSTEM_PATH"] = path.join(gstpath, "lib", "gstreamer-1.0")
os.environ["PKG_CONFIG_PATH"] = path.join(gstpath, "lib", "pkgconfig")
os.environ["GST_PLUGIN_SCANNER"] = path.join(gstpath, "libexec", "gstreamer-1.0", "gst-plugin-scanner")
def msvc_package_dir(self, package):
return path.join(self.context.sharedir, "msvc-dependencies", package, msvc_deps[package])
def build_env(self, hosts_file_path=None, target=None, is_build=False, test_unit=False):
"""Return an extended environment dictionary."""
env = os.environ.copy()
if sys.platform == "win32" and type(env['PATH']) == unicode:
# On win32, the virtualenv's activate_this.py script sometimes ends up
# turning os.environ['PATH'] into a unicode string. This doesn't work
# for passing env vars in to a process, so we force it back to ascii.
# We don't use UTF8 since that won't be correct anyway; if you actually
# have unicode stuff in your path, all this PATH munging would have broken
# it in any case.
env['PATH'] = env['PATH'].encode('ascii', 'ignore')
extra_path = []
extra_lib = []
if "msvc" in (target or host_triple()):
extra_path += [path.join(self.msvc_package_dir("cmake"), "bin")]
extra_path += [path.join(self.msvc_package_dir("llvm"), "bin")]
extra_path += [path.join(self.msvc_package_dir("ninja"), "bin")]
extra_path += [self.msvc_package_dir("nuget")]
arch = (target or host_triple()).split('-')[0]
vcpkg_arch = {
"x86_64": "x64-windows",
"i686": "x86-windows",
"aarch64": "arm64-windows",
}
openssl_base_dir = path.join(self.msvc_package_dir("openssl"), vcpkg_arch[arch])
# Link openssl
env["OPENSSL_INCLUDE_DIR"] = path.join(openssl_base_dir, "include")
env["OPENSSL_LIB_DIR"] = path.join(openssl_base_dir, "lib")
env["OPENSSL_LIBS"] = "libssl:libcrypto"
# Link moztools, used for building SpiderMonkey
env["MOZTOOLS_PATH"] = os.pathsep.join([
path.join(self.msvc_package_dir("moztools"), "bin"),
path.join(self.msvc_package_dir("moztools"), "msys", "bin"),
])
# Link autoconf 2.13, used for building SpiderMonkey
env["AUTOCONF"] = path.join(self.msvc_package_dir("moztools"), "msys", "local", "bin", "autoconf-2.13")
# Link LLVM
env["LIBCLANG_PATH"] = path.join(self.msvc_package_dir("llvm"), "lib")
if not os.environ.get("NATIVE_WIN32_PYTHON"):
env["NATIVE_WIN32_PYTHON"] = sys.executable
# Always build harfbuzz from source
env["HARFBUZZ_SYS_NO_PKG_CONFIG"] = "true"
if self.needs_gstreamer_env(target or host_triple(), env):
gstpath = gstreamer_root(target or host_triple(), env, self.get_top_dir())
extra_path += [path.join(gstpath, "bin")]
libpath = path.join(gstpath, "lib")
# we append in the reverse order so that system gstreamer libraries
# do not get precedence
extra_path = [libpath] + extra_path
            extra_lib = [libpath] + extra_lib
append_to_path_env(path.join(libpath, "pkgconfig"), env, "PKG_CONFIG_PATH")
if sys.platform == "linux2":
distro, version, _ = platform.linux_distribution()
if distro == "Ubuntu" and (version == "16.04" or version == "14.04"):
env["HARFBUZZ_SYS_NO_PKG_CONFIG"] = "true"
if extra_path:
append_to_path_env(os.pathsep.join(extra_path), env, "PATH")
if self.config["build"]["incremental"]:
env["CARGO_INCREMENTAL"] = "1"
elif self.config["build"]["incremental"] is not None:
env["CARGO_INCREMENTAL"] = "0"
if extra_lib:
path_var = "DYLD_LIBRARY_PATH" if sys.platform == "darwin" else "LD_LIBRARY_PATH"
append_to_path_env(os.pathsep.join(extra_lib), env, path_var)
# Paths to Android build tools:
if self.config["android"]["sdk"]:
env["ANDROID_SDK"] = self.config["android"]["sdk"]
if self.config["android"]["ndk"]:
env["ANDROID_NDK"] = self.config["android"]["ndk"]
if self.config["android"]["toolchain"]:
env["ANDROID_TOOLCHAIN"] = self.config["android"]["toolchain"]
if self.config["android"]["platform"]:
env["ANDROID_PLATFORM"] = self.config["android"]["platform"]
toolchains = path.join(self.context.topdir, "android-toolchains")
for kind in ["sdk", "ndk"]:
default = os.path.join(toolchains, kind)
if os.path.isdir(default):
env.setdefault("ANDROID_" + kind.upper(), default)
tools = os.path.join(toolchains, "sdk", "platform-tools")
if os.path.isdir(tools):
env["PATH"] = "%s%s%s" % (tools, os.pathsep, env["PATH"])
# These are set because they are the variable names that build-apk
# expects. However, other submodules have makefiles that reference
# the env var names above. Once glutin is enabled and set as the
# default, we could modify the subproject makefiles to use the names
# below and remove the vars above, to avoid duplication.
if "ANDROID_SDK" in env:
env["ANDROID_HOME"] = env["ANDROID_SDK"]
if "ANDROID_NDK" in env:
env["NDK_HOME"] = env["ANDROID_NDK"]
if "ANDROID_TOOLCHAIN" in env:
env["NDK_STANDALONE"] = env["ANDROID_TOOLCHAIN"]
if hosts_file_path:
env['HOST_FILE'] = hosts_file_path
if not test_unit:
# This wrapper script is in bash and doesn't work on Windows
# where we want to run doctests as part of `./mach test-unit`
env['RUSTDOC'] = path.join(self.context.topdir, 'etc', 'rustdoc-with-private')
if self.config["build"]["rustflags"]:
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " " + self.config["build"]["rustflags"]
# Don't run the gold linker if on Windows https://github.com/servo/servo/issues/9499
if self.config["tools"]["rustc-with-gold"] and sys.platform != "win32":
if subprocess.call(['which', 'ld.gold'], stdout=PIPE, stderr=PIPE) == 0:
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C link-args=-fuse-ld=gold"
if not (self.config["build"]["ccache"] == ""):
env['CCACHE'] = self.config["build"]["ccache"]
# Ensure Rust uses hard floats and SIMD on ARM devices
if target:
if target.startswith('arm') or target.startswith('aarch64'):
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C target-feature=+neon"
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -W unused-extern-crates"
git_info = []
if os.path.isdir('.git') and is_build:
git_sha = subprocess.check_output([
'git', 'rev-parse', '--short', 'HEAD'
]).strip()
git_is_dirty = bool(subprocess.check_output([
'git', 'status', '--porcelain'
]).strip())
git_info.append('')
git_info.append(git_sha)
if git_is_dirty:
git_info.append('dirty')
env['GIT_INFO'] = '-'.join(git_info)
if self.config["build"]["thinlto"]:
            env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -Z thinlto"
return env
@staticmethod
def build_like_command_arguments(decorated_function):
decorators = [
CommandArgument(
'--target', '-t',
default=None,
help='Cross compile for given target platform',
),
CommandArgument(
'--android',
default=None,
action='store_true',
help='Build for Android',
),
CommandArgument(
'--magicleap',
default=None,
action='store_true',
help='Build for Magic Leap',
),
CommandArgument(
'--libsimpleservo',
default=None,
action='store_true',
help='Build the libsimpleservo library instead of the servo executable',
),
CommandArgument(
'--features',
default=None,
help='Space-separated list of features to also build',
nargs='+',
),
CommandArgument(
'--debug-mozjs',
default=None,
action='store_true',
help='Enable debug assertions in mozjs',
),
CommandArgument(
'--with-debug-assertions',
default=None,
action='store_true',
help='Enable debug assertions in release',
),
CommandArgument(
'--with-frame-pointer',
default=None,
action='store_true',
help='Build with frame pointer enabled, used by the background hang monitor.',
),
CommandArgument(
'--uwp',
default=None,
action='store_true',
help='Build for HoloLens (x64)'),
CommandArgument('--with-raqote', default=None, action='store_true'),
CommandArgument('--with-layout-2020', default=None, action='store_true'),
CommandArgument('--without-wgl', default=None, action='store_true'),
]
for decorator in decorators:
decorated_function = decorator(decorated_function)
return decorated_function
def pick_target_triple(self, target, android, magicleap):
if android is None:
android = self.config["build"]["android"]
if target and android:
assert self.handle_android_target(target)
if android and not target:
target = self.config["android"]["target"]
if magicleap and not target:
target = "aarch64-linux-android"
if target and not android and not magicleap:
android = self.handle_android_target(target)
return target, android
def run_cargo_build_like_command(
self, command, cargo_args,
env=None, verbose=False,
target=None, android=False, magicleap=False, libsimpleservo=False,
features=None, debug_mozjs=False, with_debug_assertions=False,
with_frame_pointer=False, with_raqote=False, with_layout_2020=False, without_wgl=False,
uwp=False,
):
env = env or self.build_env()
target, android = self.pick_target_triple(target, android, magicleap)
args = []
if "--manifest-path" not in args:
if libsimpleservo or android:
if android:
api = "jniapi"
else:
api = "capi"
port = path.join("libsimpleservo", api)
else:
port = "glutin"
args += [
"--manifest-path",
path.join(self.context.topdir, "ports", port, "Cargo.toml"),
]
if target:
args += ["--target", target]
if features is None: # If we're passed a list, mutate it even if it's empty
features = []
if self.config["build"]["debug-mozjs"] or debug_mozjs:
features.append("debugmozjs")
if not magicleap:
features.append("native-bluetooth")
if uwp:
features.append("canvas2d-raqote")
features.append("no_wgl")
features.append("uwp")
else:
# Non-UWP builds provide their own libEGL via mozangle.
features.append("egl")
if with_raqote and "canvas2d-azure" not in features:
features.append("canvas2d-raqote")
elif "canvas2d-raqote" not in features:
features.append("canvas2d-azure")
if with_layout_2020 and "layout-2013" not in features:
features.append("layout-2020")
elif "layout-2020" not in features:
features.append("layout-2013")
if with_frame_pointer:
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C force-frame-pointers=yes"
features.append("profilemozjs")
if without_wgl:
features.append("no_wgl")
if self.config["build"]["webgl-backtrace"]:
features.append("webgl-backtrace")
if self.config["build"]["dom-backtrace"]:
features.append("dom-backtrace")
if with_debug_assertions:
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C debug_assertions"
assert "--features" not in cargo_args
args += ["--features", " ".join(features)]
return self.call_rustup_run(["cargo", command] + args + cargo_args, env=env, verbose=verbose)
def android_support_dir(self):
return path.join(self.context.topdir, "support", "android")
def android_aar_dir(self):
return path.join(self.context.topdir, "target", "android", "aar")
def android_adb_path(self, env):
if "ANDROID_SDK" in env:
sdk_adb = path.join(env["ANDROID_SDK"], "platform-tools", "adb")
if path.exists(sdk_adb):
return sdk_adb
return "adb"
def android_emulator_path(self, env):
if "ANDROID_SDK" in env:
sdk_adb = path.join(env["ANDROID_SDK"], "emulator", "emulator")
if path.exists(sdk_adb):
return sdk_adb
return "emulator"
def handle_android_target(self, target):
if target == "armv7-linux-androideabi":
self.config["android"]["platform"] = "android-21"
self.config["android"]["target"] = target
self.config["android"]["toolchain_prefix"] = "arm-linux-androideabi"
self.config["android"]["arch"] = "arm"
self.config["android"]["lib"] = "armeabi-v7a"
self.config["android"]["toolchain_name"] = "arm-linux-androideabi"
return True
elif target == "aarch64-linux-android":
self.config["android"]["platform"] = "android-21"
self.config["android"]["target"] = target
self.config["android"]["toolchain_prefix"] = target
self.config["android"]["arch"] = "arm64"
self.config["android"]["lib"] = "arm64-v8a"
self.config["android"]["toolchain_name"] = target
return True
elif target == "i686-linux-android":
# https://github.com/jemalloc/jemalloc/issues/1279
self.config["android"]["platform"] = "android-21"
self.config["android"]["target"] = target
self.config["android"]["toolchain_prefix"] = "x86"
self.config["android"]["arch"] = "x86"
self.config["android"]["lib"] = "x86"
self.config["android"]["toolchain_name"] = target
return True
return False
def ensure_bootstrapped(self, target=None):
if self.context.bootstrapped:
return
target_platform = target or host_triple()
# Always check if all needed MSVC dependencies are installed
if "msvc" in target_platform:
Registrar.dispatch("bootstrap", context=self.context)
self.context.bootstrapped = True
def ensure_clobbered(self, target_dir=None):
if target_dir is None:
target_dir = self.get_target_dir()
        auto = bool(os.environ.get('AUTOCLOBBER', False))
src_clobber = os.path.join(self.context.topdir, 'CLOBBER')
target_clobber = os.path.join(target_dir, 'CLOBBER')
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.exists(target_clobber):
# Simply touch the file.
with open(target_clobber, 'a'):
pass
if auto:
if os.path.getmtime(src_clobber) > os.path.getmtime(target_clobber):
print('Automatically clobbering target directory: {}'.format(target_dir))
try:
Registrar.dispatch("clean", context=self.context, verbose=True)
print('Successfully completed auto clobber.')
except subprocess.CalledProcessError as error:
sys.exit(error)
else:
print("Clobber not needed.")
|
from __future__ import unicode_literals
import requests
from django.db import models
from django.contrib.postgres import fields
from django.utils import timezone
from django.dispatch import receiver
from guardian.models import GroupObjectPermissionBase
from guardian.models import UserObjectPermissionBase
from .api_client import ApiClient
import json
from django.db.models import signals
from datetime import datetime, timedelta
from .exceptions import ProcessingError, ProcessingTimeout
import simplejson
def api(func):
"""
Catches JSON decoding errors that might happen when the server
answers unexpectedly
"""
def wrapper(*args,**kwargs):
try:
return func(*args, **kwargs)
except (json.decoder.JSONDecodeError, simplejson.JSONDecodeError) as e:
raise ProcessingError(str(e))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
raise ProcessingTimeout(str(e))
return wrapper
OFFLINE_MINUTES = 5 # Number of minutes a node hasn't been seen before it should be considered offline
class ProcessingNode(models.Model):
hostname = models.CharField(max_length=255, help_text="Hostname or IP address where the node is located (can be an internal hostname as well). If you are using Docker, this is never 127.0.0.1 or localhost. Find the IP address of your host machine by running ifconfig on Linux or by checking your network settings.")
port = models.PositiveIntegerField(help_text="Port that connects to the node's API")
api_version = models.CharField(max_length=32, null=True, help_text="API version used by the node")
last_refreshed = models.DateTimeField(null=True, help_text="When was the information about this node last retrieved?")
queue_count = models.PositiveIntegerField(default=0, help_text="Number of tasks currently being processed by this node (as reported by the node itself)")
    available_options = fields.JSONField(default=dict, help_text="Description of the options that can be used for processing")
def __str__(self):
return '{}:{}'.format(self.hostname, self.port)
@staticmethod
def find_best_available_node():
"""
Attempts to find an available node (seen in the last 5 minutes, and with lowest queue count)
:return: ProcessingNode | None
"""
return ProcessingNode.objects.filter(last_refreshed__gte=timezone.now() - timedelta(minutes=OFFLINE_MINUTES)) \
.order_by('queue_count').first()
def is_online(self):
return self.last_refreshed is not None and \
self.last_refreshed >= timezone.now() - timedelta(minutes=OFFLINE_MINUTES)
@api
def update_node_info(self):
"""
Retrieves information and options from the node API
and saves it into the database.
:returns: True if information could be updated, False otherwise
"""
api_client = self.api_client()
try:
info = api_client.info()
self.api_version = info['version']
self.queue_count = info['taskQueueCount']
options = api_client.options()
self.available_options = options
self.last_refreshed = timezone.now()
self.save()
return True
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, json.decoder.JSONDecodeError, simplejson.JSONDecodeError):
return False
def api_client(self):
return ApiClient(self.hostname, self.port)
def get_available_options_json(self, pretty=False):
"""
:returns available options in JSON string format
"""
kwargs = dict(indent=4, separators=(',', ": ")) if pretty else dict()
return json.dumps(self.available_options, **kwargs)
@api
    def process_new_task(self, images, name=None, options=None):
"""
Sends a set of images (and optional GCP file) via the API
to start processing.
:param images: list of path images
:param name: name of the task
:param options: options to be used for processing ([{'name': optionName, 'value': optionValue}, ...])
:returns UUID of the newly created task
"""
        if options is None:
            options = []
        if len(images) < 2:
            raise ProcessingError("Need at least 2 images")
api_client = self.api_client()
try:
result = api_client.new_task(images, name, options)
except requests.exceptions.ConnectionError as e:
raise ProcessingError(e)
if isinstance(result, dict) and 'uuid' in result:
return result['uuid']
elif isinstance(result, dict) and 'error' in result:
raise ProcessingError(result['error'])
else:
raise ProcessingError("Unexpected answer from server: {}".format(result))
@api
def get_task_info(self, uuid):
"""
Gets information about this task, such as name, creation date,
processing time, status, command line options and number of
images being processed.
"""
api_client = self.api_client()
result = api_client.task_info(uuid)
if isinstance(result, dict) and 'uuid' in result:
return result
elif isinstance(result, dict) and 'error' in result:
raise ProcessingError(result['error'])
else:
raise ProcessingError("Unknown result from task info: {}".format(result))
@api
def get_task_console_output(self, uuid, line):
"""
        Retrieves the console output of the OpenDroneMap process.
Useful for monitoring execution and to provide updates to the user.
"""
api_client = self.api_client()
result = api_client.task_output(uuid, line)
if isinstance(result, dict) and 'error' in result:
raise ProcessingError(result['error'])
elif isinstance(result, list):
return "".join(result)
else:
raise ProcessingError("Unknown response for console output: {}".format(result))
@api
def cancel_task(self, uuid):
"""
Cancels a task (stops its execution, or prevents it from being executed)
"""
api_client = self.api_client()
return self.handle_generic_post_response(api_client.task_cancel(uuid))
@api
def remove_task(self, uuid):
"""
Removes a task and deletes all of its assets
"""
api_client = self.api_client()
return self.handle_generic_post_response(api_client.task_remove(uuid))
@api
def download_task_asset(self, uuid, asset):
"""
Downloads a task asset
"""
api_client = self.api_client()
res = api_client.task_download(uuid, asset)
if isinstance(res, dict) and 'error' in res:
raise ProcessingError(res['error'])
else:
return res
@api
def restart_task(self, uuid):
"""
Restarts a task that was previously canceled or that had failed to process
"""
api_client = self.api_client()
return self.handle_generic_post_response(api_client.task_restart(uuid))
@staticmethod
def handle_generic_post_response(result):
"""
Handles a POST response that has either a "success" flag, or an error message.
This is a common response in node-OpenDroneMap POST calls.
:param result: result of API call
        :return: True on success, raises ProcessingError otherwise
"""
if isinstance(result, dict) and 'error' in result:
raise ProcessingError(result['error'])
elif isinstance(result, dict) and 'success' in result:
return True
else:
raise ProcessingError("Unknown response: {}".format(result))
class Meta:
permissions = (
('view_processingnode', 'Can view processing node'),
)
@receiver(signals.post_save, sender=ProcessingNode, dispatch_uid="update_processing_node_info")
def auto_update_node_info(sender, instance, created, **kwargs):
if created:
try:
instance.update_node_info()
except ProcessingError:
pass
class ProcessingNodeUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(ProcessingNode)
class ProcessingNodeGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(ProcessingNode)
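# A minimal usage sketch of the ProcessingNode model above; the image paths and
# task name are hypothetical:
#
#     node = ProcessingNode.find_best_available_node()
#     if node is not None and node.update_node_info():
#         uuid = node.process_new_task(["img1.jpg", "img2.jpg"], name="survey")
#         info = node.get_task_info(uuid)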
|
from unittest import mock
from django.test import TestCase, override_settings
from osis_common.queue.queue_listener import SynchronousConsumerThread
@override_settings(
QUEUES={
'QUEUES_NAME': {
'QUEUE': 'NAME'
}
}
)
class WSGITestCase(TestCase):
@mock.patch.object(SynchronousConsumerThread, 'start', return_value=None)
def test_listen_to_queue_with_callback(self, mock_queue):
from backoffice.wsgi import _listen_to_queue_with_callback
_listen_to_queue_with_callback(
callback=lambda: None,
queue_name='QUEUE'
)
self.assertTrue(mock_queue.called)
|
import math
from openerp.osv import osv, fields
import openerp.addons.product.product
class res_users(osv.osv):
_inherit = 'res.users'
_columns = {
'ean13' : fields.char('EAN13', size=13, help="BarCode"),
'pos_config' : fields.many2one('pos.config', 'Default Point of Sale', domain=[('state', '=', 'active')]),
'udiscount' : fields.char('Discount assigned', help="Discount assigned"),
}
def _check_ean(self, cr, uid, ids, context=None):
return all(
            openerp.addons.product.product.check_ean(user.ean13)
for user in self.browse(cr, uid, ids, context=context)
)
_constraints = [
(_check_ean, "Error: Invalid ean code", ['ean13'],),
]
class password(osv.osv):
_name = 'pos.password'
_columns = {
'password' : fields.char('Password', help="the password"),
'udiscount' : fields.char('Discount assigned', help="Discount assigned"),
}
|
import cPickle as pkl
import numpy
import os
from classification_network import configureNetwork
from fluent.encoders.cio_encoder import CioEncoder
from fluent.models.classification_model import ClassificationModel
from fluent.utils.network_data_generator import NetworkDataGenerator
from nupic.data.file_record_stream import FileRecordStream
class ClassificationModelHTM(ClassificationModel):
"""
Class to run the survey response classification task with nupic network
"""
def __init__(self,
networkConfig,
inputFilePath,
verbosity=1,
numLabels=3,
modelDir="ClassificationModelHTM",
prepData=True):
"""
@param networkConfig (str) Path to JSON of network configuration,
with region parameters.
@param inputFilePath (str) Path to data file.
See ClassificationModel for remaining parameters.
"""
super(ClassificationModelHTM, self).__init__(
verbosity=verbosity, numLabels=numLabels, modelDir=modelDir)
self.networkConfig = networkConfig
if prepData:
self.networkDataPath, self.networkDataGen = self.prepData(inputFilePath)
else:
self.networkDataPath = inputFilePath
self.networkDataGen = None
self.network = self.initModel()
self.learningRegions = self._getLearningRegions()
def prepData(self, dataPath, ordered=False, **kwargs):
"""
Generate the data in network API format.
@param dataPath (str) Path to input data file; format as expected
by NetworkDataGenerator.
    @return networkDataPath (str) Path to data formatted for network API.
@return ndg (NetworkDataGenerator)
"""
ndg = NetworkDataGenerator()
networkDataPath = ndg.setupData(dataPath, self.numLabels, ordered, **kwargs)
return networkDataPath, ndg
def initModel(self):
"""
    Initialize the network; self.networkDataPath must already be set.
"""
recordStream = FileRecordStream(streamID=self.networkDataPath)
encoder = CioEncoder(cacheDir="./experiments/cache")
return configureNetwork(recordStream, self.networkConfig, encoder)
def _getLearningRegions(self):
"""Return tuple of the network's region objects that learn."""
learningRegions = []
for region in self.network.regions.values():
try:
_ = region.getParameter("learningMode")
learningRegions.append(region)
      except Exception:
continue
return learningRegions
# TODO: is this still needed?
def encodeSample(self, sample):
"""
Put each token in its own dictionary with its bitmap
@param sample (list) Tokenized sample, where each item is a
string token.
@return (list) The sample text, sparsity, and bitmap
for each token. Since the network will
do the actual encoding, the bitmap and
sparsity will be None
Example return list:
[{
"text": "Example text",
"sparsity": 0.0,
"bitmap": None
}]
"""
return [{"text": t,
"sparsity": None,
"bitmap": None} for t in sample]
def resetModel(self):
"""
Reset the model by creating a new network since the network API does not
support resets.
"""
# TODO: test this works as expected
self.network = self.initModel()
def saveModel(self):
# TODO: test this works
try:
if not os.path.exists(self.modelDir):
os.makedirs(self.modelDir)
networkPath = os.path.join(self.modelDir, "network.nta")
# self.network = networkPath
with open(networkPath, "wb") as f:
pkl.dump(self, f)
if self.verbosity > 0:
print "Model saved to \'{}\'.".format(networkPath)
except IOError as e:
print "Could not save model to \'{}\'.".format(networkPath)
raise e
def trainModel(self, iterations=1):
"""
Run the network with all regions learning.
Note self.sampleReference doesn't get populated b/c in a network model
there's a 1-to-1 mapping of training samples.
"""
for region in self.learningRegions:
region.setParameter("learningMode", True)
classifierRegion = self.network.regions[
self.networkConfig["classifierRegionConfig"].get("regionName")]
self.network.run(iterations)
def testModel(self, numLabels=3):
"""
Test the classifier region on the input sample. Call this method for each
word of a sequence.
@param numLabels (int) Number of classification predictions.
@return (numpy array) numLabels most-frequent classifications
for the data samples; int or empty.
"""
sensorRegion = self.network.regions[
self.networkConfig["sensorRegionConfig"].get("regionName")]
classifierRegion = self.network.regions[
self.networkConfig["classifierRegionConfig"].get("regionName")]
for region in self.learningRegions:
region.setParameter("learningMode", False)
classifierRegion.setParameter("inferenceMode", True)
self.network.run(1)
return self._getClassifierInference(classifierRegion)
def _getClassifierInference(self, classifierRegion):
"""Return output categories from the classifier region."""
relevantCats = classifierRegion.getParameter("categoryCount")
if classifierRegion.type == "py.KNNClassifierRegion":
# max number of inferences = k
inferenceValues = classifierRegion.getOutputData("categoriesOut")[:relevantCats]
return self.getWinningLabels(inferenceValues, numLabels=3)
elif classifierRegion.type == "py.CLAClassifierRegion":
# TODO: test this
return classifierRegion.getOutputData("categoriesOut")[:relevantCats]
|
from django.db import migrations
from oscar.core.loading import get_model
from ecommerce.core.constants import COUPON_PRODUCT_CLASS_NAME
ProductAttribute = get_model("catalogue", "ProductAttribute")
ProductClass = get_model("catalogue", "ProductClass")
def create_inactive_attribute(apps, schema_editor):
"""Create coupon inactive attribute."""
ProductAttribute.skip_history_when_saving = True
coupon = ProductClass.objects.get(name=COUPON_PRODUCT_CLASS_NAME)
product_attribute = ProductAttribute(
product_class=coupon,
name='Inactive',
code='inactive',
type=ProductAttribute.BOOLEAN,
required=False
)
product_attribute.save()
def remove_inactive_attribute(apps, schema_editor):
"""Remove coupon inactive attribute."""
coupon = ProductClass.objects.get(name=COUPON_PRODUCT_CLASS_NAME)
ProductAttribute.skip_history_when_saving = True
ProductAttribute.objects.get(product_class=coupon, code='inactive').delete()
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0045_add_edx_employee_coupon_category')
]
operations = [
migrations.RunPython(create_inactive_attribute, remove_inactive_attribute)
]
|
{
'name': 'Capture employee picture with webcam',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
HR WebCam
=========
Capture employee pictures with an attached web cam.
""",
'author': 'Michael Telahun Makonnen <mmakonnen@gmail.com>',
'website': 'http://miketelahun.wordpress.com',
'depends': [
'hr',
'web',
],
'js': [
'static/src/js/jquery.webcam.js',
'static/src/js/hr_webcam.js',
],
'css': [
'static/src/css/hr_webcam.css',
],
'qweb': [
'static/src/xml/hr_webcam.xml',
],
'init_xml': [
],
'update_xml': [
'hr_webcam_data.xml',
'hr_webcam_view.xml',
],
'test': [
],
'demo_xml': [
],
'installable': False,
'active': False,
}
|
"""
This module bundles commonly used utility methods or helper classes that are used in multiple places within
OctoPrint's source code.
"""
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import traceback
import sys
import re
import tempfile
import logging
import shutil
import threading
from functools import wraps
import warnings
import contextlib
logger = logging.getLogger(__name__)
def warning_decorator_factory(warning_type):
def specific_warning(message, stacklevel=1, since=None, includedoc=None, extenddoc=False):
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
# we need to increment the stacklevel by one because otherwise we'll get the location of our
# func_wrapper in the log, instead of our caller (which is the real caller of the wrapped function)
warnings.warn(message, warning_type, stacklevel=stacklevel + 1)
return func(*args, **kwargs)
if includedoc is not None and since is not None:
docstring = "\n.. deprecated:: {since}\n {message}\n\n".format(since=since, message=includedoc)
if extenddoc and hasattr(func_wrapper, "__doc__") and func_wrapper.__doc__ is not None:
docstring = func_wrapper.__doc__ + "\n" + docstring
func_wrapper.__doc__ = docstring
return func_wrapper
return decorator
return specific_warning
deprecated = warning_decorator_factory(DeprecationWarning)
"""
A decorator for deprecated methods. Logs a deprecation warning via Python's :mod:`warnings` module including the
supplied ``message``. The call stack level used (for adding the source location of the offending call to the
warning) can be overridden using the optional ``stacklevel`` parameter. If both ``since`` and ``includedoc`` are
provided, a deprecation warning will also be added to the function's docstring by providing or extending its ``__doc__``
property.
Arguments:
message (string): The message to include in the deprecation warning.
stacklevel (int): Stack level for including the caller of the offending method in the logged warning. Defaults to 1,
meaning the direct caller of the method. It might make sense to increase this in case of the function call
happening dynamically from a fixed position to not shadow the real caller (e.g. in case of overridden
``getattr`` methods).
includedoc (string): Message about the deprecation to include in the wrapped function's docstring.
extenddoc (boolean): If True the original docstring of the wrapped function will be extended by the deprecation
message, if False (default) it will be replaced with the deprecation message.
since (string): Version since when the function was deprecated, must be present for the docstring to get extended.
Returns:
function: The wrapped function with the deprecation warnings in place.
"""
pending_deprecation = warning_decorator_factory(PendingDeprecationWarning)
"""
A decorator for methods pending deprecation. Logs a pending deprecation warning via Python's :mod:`warnings` module
including the supplied ``message``. The call stack level used (for adding the source location of the offending call to
the warning) can be overridden using the optional ``stacklevel`` parameter. If both ``since`` and ``includedoc`` are
provided, a deprecation warning will also be added to the function's docstring by providing or extending its ``__doc__``
property.
Arguments:
message (string): The message to include in the deprecation warning.
stacklevel (int): Stack level for including the caller of the offending method in the logged warning. Defaults to 1,
meaning the direct caller of the method. It might make sense to increase this in case of the function call
happening dynamically from a fixed position to not shadow the real caller (e.g. in case of overridden
``getattr`` methods).
extenddoc (boolean): If True the original docstring of the wrapped function will be extended by the deprecation
message, if False (default) it will be replaced with the deprecation message.
includedoc (string): Message about the deprecation to include in the wrapped function's docstring.
since (string): Version since when the function was deprecated, must be present for the docstring to get extended.
Returns:
function: The wrapped function with the deprecation warnings in place.
"""
def get_formatted_size(num):
"""
    Formats the given byte count as a human readable rounded size expressed in the most fitting unit among B(ytes),
K(ilo)B(ytes), M(ega)B(ytes), G(iga)B(ytes) and T(era)B(ytes), with one decimal place.
Based on http://stackoverflow.com/a/1094933/2028598
Arguments:
num (int): The byte count to format
Returns:
string: The formatted byte count.
"""
for x in ["B","KB","MB","GB"]:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
def is_allowed_file(filename, extensions):
"""
Determines if the provided ``filename`` has one of the supplied ``extensions``. The check is done case-insensitive.
Arguments:
filename (string): The file name to check against the extensions.
extensions (list): The extensions to check against, a list of strings
Return:
boolean: True if the file name's extension matches one of the allowed extensions, False otherwise.
"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in map(str.lower, extensions)
def get_formatted_timedelta(d):
"""
Formats a timedelta instance as "HH:MM:ss" and returns the resulting string.
Arguments:
d (datetime.timedelta): The timedelta instance to format
Returns:
string: The timedelta formatted as "HH:MM:ss"
"""
if d is None:
return None
hours = d.days * 24 + d.seconds // 3600
minutes = (d.seconds % 3600) // 60
seconds = d.seconds % 60
return "%02d:%02d:%02d" % (hours, minutes, seconds)
def get_formatted_datetime(d):
"""
Formats a datetime instance as "YYYY-mm-dd HH:MM" and returns the resulting string.
Arguments:
d (datetime.datetime): The datetime instance to format
Returns:
string: The datetime formatted as "YYYY-mm-dd HH:MM"
"""
if d is None:
return None
return d.strftime("%Y-%m-%d %H:%M")
def get_class(name):
"""
Retrieves the class object for a given fully qualified class name.
Taken from http://stackoverflow.com/a/452981/2028598.
Arguments:
name (string): The fully qualified class name, including all modules separated by ``.``
Returns:
type: The class if it could be found.
Raises:
AttributeError: The class could not be found.
"""
parts = name.split(".")
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
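# Illustrative: get_class("logging.handlers.RotatingFileHandler") imports
# logging.handlers and returns the RotatingFileHandler class object.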
def get_exception_string():
"""
Retrieves the exception info of the last raised exception and returns it as a string formatted as
``<exception type>: <exception message> @ <source file>:<function name>:<line number>``.
Returns:
string: The formatted exception information.
"""
locationInfo = traceback.extract_tb(sys.exc_info()[2])[0]
return "%s: '%s' @ %s:%s:%d" % (str(sys.exc_info()[0].__name__), str(sys.exc_info()[1]), os.path.basename(locationInfo[0]), locationInfo[2], locationInfo[1])
@deprecated("get_free_bytes has been deprecated and will be removed in the future",
includedoc="Replaced by `psutil.disk_usage <http://pythonhosted.org/psutil/#psutil.disk_usage>`_.",
since="1.2.5")
def get_free_bytes(path):
import psutil
return psutil.disk_usage(path).free
def get_dos_filename(origin, existing_filenames=None, extension=None, **kwargs):
"""
Converts the provided input filename to an 8.3 DOS compatible filename. If ``existing_filenames`` is provided, the
conversion result is guaranteed not to collide with any of the filenames contained therein.
Uses :func:`find_collision_free_name` internally.
Arguments:
origin (string): The original filename incl. extension to convert to the 8.3 format.
existing_filenames (list): A list of existing filenames with which the generated 8.3 name must not collide.
Optional.
extension (string): The .3 file extension to use for the generated filename. If not provided, the extension of
the provided ``origin`` will simply be truncated to 3 characters.
kwargs (dict): Additional keyword arguments to provide to :func:`find_collision_free_name`.
Returns:
string: An 8.3 compatible translation of the original filename, not colliding with the optionally provided
``existing_filenames`` and with the provided ``extension`` or the original extension shortened to
a maximum of 3 characters.
Raises:
ValueError: No 8.3 compatible name could be found that doesn't collide with the provided ``existing_filenames``.
"""
if origin is None:
return None
if existing_filenames is None:
existing_filenames = []
filename, ext = os.path.splitext(origin)
if extension is None:
extension = ext
return find_collision_free_name(filename, extension, existing_filenames, **kwargs)
def find_collision_free_name(filename, extension, existing_filenames, max_power=2):
"""
Tries to find a collision free translation of "<filename>.<extension>" to the 8.3 DOS compatible format,
preventing collisions with any of the ``existing_filenames``.
First strips all of ``."/\\[]:;=,`` from the filename and extension, replaces whitespace with ``_``, converts both
to lower case and truncates the ``extension`` to a maximum length of 3 characters.
If the filename is 8 characters or less in length after that procedure and "<filename>.<extension>" is not
contained in ``existing_filenames``, that concatenation will be returned as the result.
If not, the following algorithm will be applied to try to find a collision free name::
set counter := power := 1
while counter < 10^max_power:
set truncated := substr(filename, 0, 6 - power + 1) + "~" + counter
set result := "<truncated>.<extension>"
if result is collision free:
return result
counter++
if counter >= 10 ** power:
power++
raise ValueError
This will basically -- for a given original filename of ``some_filename`` and an extension of ``gco`` -- iterate
through names of the format ``some_f~1.gco``, ``some_f~2.gco``, ..., ``some_~10.gco``, ``some_~11.gco``, ...,
``<prefix>~<n>.gco`` for ``n`` less than 10 ^ ``max_power``, returning as soon as one is found that is not colliding.
Arguments:
filename (string): The filename without the extension to convert to 8.3.
extension (string): The extension to convert to 8.3 -- will be truncated to 3 characters if it's longer than
that.
existing_filenames (list): A list of existing filenames to prevent name collisions with.
max_power (int): Limits the possible attempts of generating a collision free name to 10 ^ ``max_power``
variations. Defaults to 2, so the name generation will maximally reach ``<name>~99.<ext>`` before
aborting and raising an exception.
Returns:
string: An 8.3 representation of the provided original filename, ensured not to collide with the provided
``existing_filenames``
Raises:
ValueError: No collision free name could be found.
"""
# TODO unit test!
if not isinstance(filename, unicode):
filename = unicode(filename)
if not isinstance(extension, unicode):
extension = unicode(extension)
def make_valid(text):
return re.sub(r"\s+", "_", text.translate({ord(i):None for i in ".\"/\\[]:;=,"})).lower()
filename = make_valid(filename)
extension = make_valid(extension)
extension = extension[:3] if len(extension) > 3 else extension
if len(filename) <= 8 and filename + "." + extension not in existing_filenames:
# early exit
return filename + "." + extension
counter = 1
power = 1
while counter < (10 ** max_power):
result = filename[:(6 - power + 1)] + "~" + str(counter) + "." + extension
if result not in existing_filenames:
return result
counter += 1
if counter >= 10 ** power:
power += 1
raise ValueError("Can't create a collision free filename")
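# Illustrative behaviour of the two helpers above (empty ``existing_filenames``
# unless noted):
#   get_dos_filename("test.gcode")              -> "test.gco"
#   get_dos_filename("some_filename.gcode")     -> "some_f~1.gco"
#   find_collision_free_name("some_filename", "gco",
#                            ["some_f~1.gco"])  -> "some_f~2.gco"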
def silent_remove(file):
"""
Silently removes a file. Does not raise an error if the file doesn't exist.
Arguments:
file (string): The path of the file to be removed
"""
try:
os.remove(file)
except OSError:
pass
def sanitize_ascii(line):
"""
Sanitizes ``line`` to a pure ASCII unicode string, replacing all non ASCII characters and stripping trailing
whitespace. Raises a ValueError if ``line`` is neither str nor unicode.
"""
if not isinstance(line, basestring):
raise ValueError("Expected either str or unicode but got {} instead".format(line.__class__.__name__ if line is not None else None))
return to_unicode(line, encoding="ascii", errors="replace").rstrip()
def filter_non_ascii(line):
"""
Filter predicate to test if a line contains non ASCII characters.
Arguments:
line (string): The line to test
Returns:
boolean: True if the line contains non ASCII characters, False otherwise.
"""
try:
to_str(to_unicode(line, encoding="ascii"), encoding="ascii")
return False
except ValueError:
return True
def to_str(s_or_u, encoding="utf-8", errors="strict"):
"""Make sure ``s_or_u`` is a str."""
if isinstance(s_or_u, unicode):
return s_or_u.encode(encoding, errors=errors)
else:
return s_or_u
def to_unicode(s_or_u, encoding="utf-8", errors="strict"):
"""Make sure ``s_or_u`` is a unicode string."""
if isinstance(s_or_u, str):
return s_or_u.decode(encoding, errors=errors)
else:
return s_or_u
def dict_merge(a, b):
"""
Recursively deep-merges two dictionaries.
Taken from https://www.xormedia.com/recursively-merge-dictionaries-in-python/
Example::
>>> a = dict(foo="foo", bar="bar", fnord=dict(a=1))
>>> b = dict(foo="other foo", fnord=dict(b=2, l=["some", "list"]))
>>> expected = dict(foo="other foo", bar="bar", fnord=dict(a=1, b=2, l=["some", "list"]))
>>> dict_merge(a, b) == expected
True
Arguments:
a (dict): The dictionary to merge ``b`` into
b (dict): The dictionary to merge into ``a``
Returns:
dict: ``b`` deep-merged into ``a``
"""
from copy import deepcopy
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
def dict_sanitize(a, b):
"""
Recursively deep-sanitizes ``a`` based on ``b``, removing all keys (and
associated values) from ``a`` that do not appear in ``b``.
Example::
>>> a = dict(foo="foo", bar="bar", fnord=dict(a=1, b=2, l=["some", "list"]))
>>> b = dict(foo=None, fnord=dict(a=None, b=None))
>>> expected = dict(foo="foo", fnord=dict(a=1, b=2))
>>> dict_sanitize(a, b) == expected
True
>>> dict_clean(a, b) == expected
True
Arguments:
a (dict): The dictionary to clean against ``b``.
b (dict): The dictionary containing the key structure to clean from ``a``.
Returns:
dict: A new dict based on ``a`` with all keys (and corresponding values) not contained in ``b`` removed.
"""
from copy import deepcopy
if not isinstance(b, dict):
return a
result = deepcopy(a)
for k, v in a.iteritems():
if k not in b:
del result[k]
elif isinstance(v, dict):
result[k] = dict_sanitize(v, b[k])
else:
result[k] = deepcopy(v)
return result
dict_clean = deprecated("dict_clean has been renamed to dict_sanitize",
includedoc="Replaced by :func:`dict_sanitize`")(dict_sanitize)
def dict_minimal_mergediff(source, target):
"""
Recursively calculates the minimal dict that would need to be deep merged with ``source`` in order to produce
the same result as deep merging ``source`` and ``target``.
Example::
>>> a = dict(foo=dict(a=1, b=2), bar=dict(c=3, d=4))
>>> b = dict(bar=dict(c=3, d=5), fnord=None)
>>> c = dict_minimal_mergediff(a, b)
>>> c == dict(bar=dict(d=5), fnord=None)
True
>>> dict_merge(a, c) == dict_merge(a, b)
True
Arguments:
source (dict): Source dictionary
target (dict): Dictionary to compare to source dictionary and derive diff for
Returns:
dict: The minimal dictionary to deep merge on ``source`` to get the same result
as deep merging ``target`` on ``source``.
"""
if not isinstance(source, dict) or not isinstance(target, dict):
raise ValueError("source and target must be dictionaries")
if source == target:
# shortcut: if both are equal, we return an empty dict as result
return dict()
from copy import deepcopy
all_keys = set(source.keys() + target.keys())
result = dict()
for k in all_keys:
if k not in target:
# key not contained in target => not contained in result
continue
if k in source:
# key is present in both dicts, we have to take a look at the value
value_source = source[k]
value_target = target[k]
if value_source != value_target:
# we only need to look further if the values are not equal
if isinstance(value_source, dict) and isinstance(value_target, dict):
# both are dicts => deeper down it goes into the rabbit hole
result[k] = dict_minimal_mergediff(value_source, value_target)
else:
# new b wins over old a
result[k] = deepcopy(value_target)
else:
# key is new, add it
result[k] = deepcopy(target[k])
return result
def dict_contains_keys(keys, dictionary):
"""
Recursively deep-checks if ``dictionary`` contains all keys found in ``keys``.
Example::
>>> positive = dict(foo="some_other_bar", fnord=dict(b=100))
>>> negative = dict(foo="some_other_bar", fnord=dict(b=100, d=20))
>>> dictionary = dict(foo="bar", fnord=dict(a=1, b=2, c=3))
>>> dict_contains_keys(positive, dictionary)
True
>>> dict_contains_keys(negative, dictionary)
False
Arguments:
keys (dict): The dictionary whose key structure to look for in ``dictionary``.
dictionary (dict): The dictionary to check for the keys from ``keys``.
Returns:
boolean: True if all keys found in ``keys`` are also present in ``dictionary``, False otherwise.
"""
if not isinstance(keys, dict) or not isinstance(dictionary, dict):
return False
for k, v in keys.iteritems():
if k not in dictionary:
return False
elif isinstance(v, dict):
if not dict_contains_keys(v, dictionary[k]):
return False
return True
class Object(object):
pass
def interface_addresses(family=None):
"""
Retrieves all of the host's network interface addresses.
"""
import netifaces
if not family:
family = netifaces.AF_INET
for interface in netifaces.interfaces():
try:
ifaddresses = netifaces.ifaddresses(interface)
except Exception:
continue
if family in ifaddresses:
for ifaddress in ifaddresses[family]:
if not ifaddress["addr"].startswith("169.254."):
yield ifaddress["addr"]
def address_for_client(host, port):
"""
Determines the address of the network interface on this host needed to connect to the indicated client host and port.
"""
import socket
for address in interface_addresses():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((address, 0))
sock.connect((host, port))
return address
except Exception:
continue
@contextlib.contextmanager
def atomic_write(filename, mode="w+b", prefix="tmp", suffix=""):
"""
Context manager that yields a named temporary file to write to and moves it over ``filename`` once the
context is left, so the target file is replaced in one rename instead of being written to in place.
"""
temp_config = tempfile.NamedTemporaryFile(mode=mode, prefix=prefix, suffix=suffix, delete=False)
try:
yield temp_config
finally:
temp_config.close()
shutil.move(temp_config.name, filename)
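# Usage sketch (target path illustrative):
#
#   with atomic_write("/path/to/settings.yaml", mode="wb") as f:
#       f.write(rendered_config)
#
# Note that the move happens in the finally block, so the target is replaced
# even if the with block raises.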
def bom_aware_open(filename, encoding="ascii", mode="r", **kwargs):
"""
Opens ``filename`` via :func:`codecs.open`, transparently skipping a byte order mark (BOM) if the file
starts with one matching the given ``encoding``.
"""
import codecs
codec = codecs.lookup(encoding)
encoding = codec.name
potential_bom_attribute = "BOM_" + codec.name.replace("utf-", "utf").upper()
if "r" in mode and hasattr(codecs, potential_bom_attribute):
# these encodings might have a BOM, so let's see if there is one
bom = getattr(codecs, potential_bom_attribute)
with open(filename, "rb") as f:
header = f.read(4)
if header.startswith(bom):
encoding += "-sig"
return codecs.open(filename, encoding=encoding, mode=mode, **kwargs)
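# Usage sketch (path illustrative): reads a UTF-8 encoded file, transparently
# skipping a BOM if one is present:
#
#   with bom_aware_open("settings.yaml", encoding="utf-8") as f:
#       content = f.read()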
def is_hidden_path(path):
"""
Checks whether ``path`` refers to a hidden file or folder, either via a leading ``.`` in its name or
(on Windows) via the hidden file attribute. A ``None`` path is considered not hidden.
"""
if path is None:
# we define a None path as not hidden here
return False
filename = os.path.basename(path)
if filename.startswith("."):
# filenames starting with a . are hidden
return True
if sys.platform == "win32":
# if we are running on windows we also try to read the hidden file
# attribute via the windows api
try:
import ctypes
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(path))
assert attrs != -1 # INVALID_FILE_ATTRIBUTES == -1
return bool(attrs & 2) # FILE_ATTRIBUTE_HIDDEN == 2
except (AttributeError, AssertionError):
pass
# if we reach that point, the path is not hidden
return False
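# Illustrative: is_hidden_path(".octoprint") -> True, is_hidden_path("uploads") -> False
# (on Windows additionally True for paths carrying the hidden file attribute)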
class RepeatedTimer(threading.Thread):
"""
This class represents an action that should be run repeatedly in an interval. It is similar to python's
own :class:`threading.Timer` class, but instead of only running once the ``function`` will be run again and again,
sleeping the stated ``interval`` in between.
RepeatedTimers are started, as with threads, by calling their ``start()`` method. The timer can be stopped (in
between runs) by calling the :func:`cancel` method. Note that the interval actually waited before each execution
may not exactly match the interval specified by the user.
For example:
.. code-block:: python
def hello():
print("Hello World!")
t = RepeatedTimer(1.0, hello)
t.start() # prints "Hello World!" every second
Another example with dynamic interval and loop condition:
.. code-block:: python
count = 0
maximum = 5
factor = 1
def interval():
global count
global factor
return count * factor
def condition():
global count
global maximum
return count < maximum
def hello():
print("Hello World!")
global count
count += 1
t = RepeatedTimer(interval, hello, run_first=True, condition=condition)
t.start() # prints "Hello World!" 5 times, printing the first one
# directly, then waiting 1, 2, 3, 4s in between (adaptive interval)
Arguments:
interval (float or callable): The interval between each ``function`` call, in seconds. Can also be a callable
returning the interval to use, in case the interval is not static.
function (callable): The function to call.
args (list or tuple): The arguments for the ``function`` call. Defaults to an empty list.
kwargs (dict): The keyword arguments for the ``function`` call. Defaults to an empty dict.
run_first (boolean): If set to True, the function will be run for the first time *before* the first wait period.
If set to False (the default), the function will be run for the first time *after* the first wait period.
condition (callable): Condition that needs to be True for loop to continue. Defaults to ``lambda: True``.
on_condition_false (callable): Callback to call when the timer finishes due to condition becoming false. Will
be called before the ``on_finish`` callback.
on_cancelled (callable): Callback to call when the timer finishes due to being cancelled. Will be called
before the ``on_finish`` callback.
on_finish (callable): Callback to call when the timer finishes, either due to being cancelled or since
the condition became false.
daemon (bool): daemon flag to set on underlying thread.
"""
def __init__(self, interval, function, args=None, kwargs=None,
run_first=False, condition=None, on_condition_false=None,
on_cancelled=None, on_finish=None, daemon=True):
threading.Thread.__init__(self)
if args is None:
args = []
if kwargs is None:
kwargs = dict()
if condition is None:
condition = lambda: True
if not callable(interval):
self.interval = lambda: interval
else:
self.interval = interval
self.function = function
self.finished = threading.Event()
self.args = args
self.kwargs = kwargs
self.run_first = run_first
self.condition = condition
self.on_condition_false = on_condition_false
self.on_cancelled = on_cancelled
self.on_finish = on_finish
self.daemon = daemon
def cancel(self):
self._finish(self.on_cancelled)
def run(self):
while self.condition():
if self.run_first:
# if we are to run the function BEFORE waiting for the first time
self.function(*self.args, **self.kwargs)
# make sure our condition is still met before running into the downtime
if not self.condition():
break
# wait, but break if we are cancelled
self.finished.wait(self.interval())
if self.finished.is_set():
return
if not self.run_first:
# if we are to run the function AFTER waiting for the first time
self.function(*self.args, **self.kwargs)
# we'll only get here if the condition was false
self._finish(self.on_condition_false)
def _finish(self, *callbacks):
self.finished.set()
for callback in callbacks:
if not callable(callback):
continue
callback()
if callable(self.on_finish):
self.on_finish()
class CountedEvent(object):
def __init__(self, value=0, max=None, name=None):
logger_name = __name__ + ".CountedEvent" + (".{name}".format(name=name) if name is not None else "")
self._logger = logging.getLogger(logger_name)
self._counter = 0
self._max = max
self._mutex = threading.Lock()
self._event = threading.Event()
self._internal_set(value)
def set(self):
with self._mutex:
self._internal_set(self._counter + 1)
def clear(self, completely=False):
with self._mutex:
if completely:
self._internal_set(0)
else:
self._internal_set(self._counter - 1)
def wait(self, timeout=None):
self._event.wait(timeout)
def blocked(self):
with self._mutex:
return self._counter == 0
def _internal_set(self, value):
self._logger.debug("New counter value: {value}".format(value=value))
self._counter = value
if self._counter <= 0:
self._counter = 0
self._event.clear()
self._logger.debug("Cleared event")
else:
if self._max is not None and self._counter > self._max:
self._counter = self._max
self._event.set()
self._logger.debug("Set event")
class InvariantContainer(object):
def __init__(self, initial_data=None, guarantee_invariant=None):
from collections import Iterable
from threading import RLock
if guarantee_invariant is None:
guarantee_invariant = lambda data: data
self._data = []
self._mutex = RLock()
self._invariant = guarantee_invariant
if initial_data is not None and isinstance(initial_data, Iterable):
for item in initial_data:
self.append(item)
def append(self, item):
with self._mutex:
self._data.append(item)
self._data = self._invariant(self._data)
def remove(self, item):
with self._mutex:
self._data.remove(item)
self._data = self._invariant(self._data)
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.__iter__()
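# InvariantContainer usage sketch (illustrative): the supplied invariant is
# re-applied after every mutation, e.g. to keep the contained data sorted:
#
#   container = InvariantContainer(initial_data=[3, 1, 2], guarantee_invariant=sorted)
#   container.append(0)
#   list(container)  # -> [0, 1, 2, 3]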
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SyncNode.date_last_seen'
db.add_column(u'core_syncnode', 'date_last_seen',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SyncNode.date_last_seen'
db.delete_column(u'core_syncnode', 'date_last_seen')
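# Hypothetical shell usage: with South installed, this schema migration is
# applied with `python manage.py migrate core` and reverted by migrating the
# app back to the previous migration number.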
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '885fd954d1414be1bdff116e9d424ce4'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.SimpleTag']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.SimpleTag']"}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.corepermissions': {
'Meta': {'object_name': 'CorePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'unique_together': "(('user', 'hostname', 'username'),)", 'object_name': 'MailAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2007, 1, 1, 0, 0)'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.CharField', [], {'default': "u'markread'", 'max_length': '10'}),
'match_action': ('django.db.models.fields.CharField', [], {'default': "u'scrape'", 'max_length': '10'}),
'rules_operation': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'header_field': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.CharField', [], {'default': "u'contains'", 'max_length': '10'}),
'match_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1024'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'40c8d56c219a418ebeb335f223f8b705'", 'max_length': '32', 'blank': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'feedparser': ('django.db.models.fields.TextField', [], {}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'raw_email': ('django.db.models.fields.TextField', [], {}),
'raw_email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.SimpleTag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'allow_all_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.SimpleTag']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_token': ('django.db.models.fields.CharField', [], {'default': "'709ef143cdee4a3eafb5b0970dfd75f1'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'7709f8a8dae04fafb9c532be837afffa'", 'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['core']
|
{
"name": "Libro de IVA",
"version": "8.0.1.0.1",
"author": "PRAXYA, "
"Odoo Community Association (OCA)",
"website": "http://www.praxya.com",
"license": "AGPL-3",
"category": "Accounting",
"depends": [
'account',
'base_vat',
'l10n_es',
'l10n_es_aeat',
'account_refund_original',
'account_invoice_currency',
],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'data/map_taxes_vat_book.xml',
'views/l10n_es_vat_book.xml',
'views/l10n_es_vat_book_issued_lines.xml',
'views/l10n_es_vat_book_received_lines.xml',
'views/l10n_es_vat_book_received_tax_summary.xml',
'views/l10n_es_vat_book_issued_tax_summary.xml',
'views/l10n_es_vat_book_invoice_tax_lines.xml',
'views/l10n_es_vat_book_rectification_issued_lines.xml',
'views/l10n_es_vat_book_rectification_received_lines.xml',
'views/l10n_es_vat_book_rectification_received_tax_summary.xml',
'views/l10n_es_vat_book_rectification_issued_tax_summary.xml',
'report/report_paper_format.xml',
'report/report_views.xml',
'report/vat_book_invoices_issued.xml',
'report/vat_book_invoices_received.xml',
'report/vat_book_rectification_issued_invoices.xml',
'report/vat_book_rectification_received_invoices.xml',
],
"qweb": [
],
"installable": True,
}
|
import sys
sys.path.append('../../lib/python')
import signal
import unittest
import datetime
import decimal
import socket
import threading
import struct
import subprocess
import time
import array
from voltdbclient import *
SERVER_NAME = "EchoServer"
decimal.getcontext().prec = 19
def signalHandler(server, signum, frame):
server.shutdown()
server.join()
    raise Exception("Interrupted by SIGINT.")
class EchoServer(threading.Thread):
def __init__(self, cmd, lock):
threading.Thread.__init__(self)
self.__server_cmd = cmd
self.__lock = threading.Event()
self.__start = lock
def run(self):
server = subprocess.Popen(self.__server_cmd, shell = True)
time.sleep(1)
self.__start.set()
self.__lock.wait()
# Get the server pid
jps = subprocess.Popen("jps", stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = jps.communicate()
pid = None
lines = stdout.split("\n")
for l in lines:
if SERVER_NAME in l:
pid = l.split()[0]
        if pid is None:
return
# Should kill the server now
killer = subprocess.Popen("kill -9 %s" % (pid), shell = True)
killer.communicate()
if killer.returncode != 0:
print >> sys.stderr, \
"Failed to kill the server process %d" % (server.pid)
return
server.communicate()
def shutdown(self):
self.__lock.set()
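# The tests below assume the external echo server (command given in
# sys.argv[1]) reads each length-prefixed message off the wire and writes it
# straight back, so every write/prependLength/flush round-trip can be read
# back via bufferForRead and compared field by field.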
class TestFastSerializer(unittest.TestCase):
byteArray = [None, 1, -21, 127]
int16Array = [None, 128, -256, 32767]
int32Array = [None, 0, -32768, 2147483647]
int64Array = [None, -52423, 2147483647, -9223372036854775807]
floatArray = [None, float("-inf"), float("nan"), -0.009999999776482582]
stringArray = [None, u"hello world", u"ça"]
binArray = [None, array.array('c', ['c', 'f', 'q'])]
dateArray = [None, datetime.datetime.now(),
datetime.datetime.utcfromtimestamp(0),
datetime.datetime.utcnow()]
decimalArray = [None,
decimal.Decimal("-837461"),
decimal.Decimal("8571391.193847158139"),
decimal.Decimal("-1348392.109386749180")]
ARRAY_BEGIN = 126
ARRAY_END = 127
def setUp(self):
self.fs = FastSerializer('localhost', 21212, None, None)
def tearDown(self):
self.fs.socket.close()
def sendAndCompare(self, type, value):
self.fs.writeWireType(type, value)
self.fs.prependLength()
self.fs.flush()
self.fs.bufferForRead()
t = self.fs.readByte()
self.assertEqual(t, type)
v = self.fs.read(type)
self.assertEqual(v, value)
    def sendArrayAndCompare(self, type, value):
        self.fs.writeWireTypeArray(type, value)
        self.fs.prependLength()
        self.fs.flush()
        self.fs.bufferForRead()
        self.assertEqual(self.fs.readByte(), type)
        self.assertEqual(list(self.fs.readArray(type)), value)
def testByte(self):
for i in self.byteArray:
self.sendAndCompare(self.fs.VOLTTYPE_TINYINT, i)
def testShort(self):
for i in self.int16Array:
self.sendAndCompare(self.fs.VOLTTYPE_SMALLINT, i)
def testInt(self):
for i in self.int32Array:
self.sendAndCompare(self.fs.VOLTTYPE_INTEGER, i)
def testLong(self):
for i in self.int64Array:
self.sendAndCompare(self.fs.VOLTTYPE_BIGINT, i)
def testFloat(self):
type = self.fs.VOLTTYPE_FLOAT
for i in self.floatArray:
self.fs.writeWireType(type, i)
self.fs.prependLength()
self.fs.flush()
self.fs.bufferForRead()
self.assertEqual(self.fs.readByte(), type)
result = self.fs.readFloat64()
if isNaN(i):
self.assertTrue(isNaN(result))
else:
self.assertEqual(result, i)
def testString(self):
for i in self.stringArray:
self.sendAndCompare(self.fs.VOLTTYPE_STRING, i)
def testDate(self):
for i in self.dateArray:
self.sendAndCompare(self.fs.VOLTTYPE_TIMESTAMP, i)
def testDecimal(self):
for i in self.decimalArray:
self.sendAndCompare(self.fs.VOLTTYPE_DECIMAL, i)
def testArray(self):
self.fs.writeByte(self.ARRAY_BEGIN)
self.fs.prependLength()
self.fs.flush()
self.sendArrayAndCompare(self.fs.VOLTTYPE_TINYINT, self.byteArray)
self.sendArrayAndCompare(self.fs.VOLTTYPE_SMALLINT, self.int16Array)
self.sendArrayAndCompare(self.fs.VOLTTYPE_INTEGER, self.int32Array)
self.sendArrayAndCompare(self.fs.VOLTTYPE_BIGINT, self.int64Array)
self.sendArrayAndCompare(self.fs.VOLTTYPE_STRING, self.stringArray)
self.sendArrayAndCompare(self.fs.VOLTTYPE_TIMESTAMP, self.dateArray)
self.sendArrayAndCompare(self.fs.VOLTTYPE_DECIMAL, self.decimalArray)
self.fs.writeByte(self.ARRAY_END)
self.fs.prependLength()
self.fs.flush()
def testTable(self):
type = FastSerializer.VOLTTYPE_VOLTTABLE
table = VoltTable(self.fs)
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_TINYINT,
name = "id"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_BIGINT,
name = "bigint"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_STRING,
name = "name"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_VARBINARY,
name = "bin"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_TIMESTAMP,
name = "date"))
table.columns.append(VoltColumn(type = FastSerializer.VOLTTYPE_DECIMAL,
name = "money"))
table.tuples.append([self.byteArray[1], self.int64Array[2],
self.stringArray[0], self.binArray[0], self.dateArray[2],
self.decimalArray[0]])
table.tuples.append([self.byteArray[2], self.int64Array[1],
self.stringArray[2], self.binArray[1], self.dateArray[1],
self.decimalArray[1]])
#table.tuples.append([self.byteArray[0], self.int64Array[0],
# self.stringArray[1], self.binArray[1], self.dateArray[0],
# self.decimalArray[2]])
self.fs.writeByte(type)
table.writeToSerializer()
self.fs.prependLength()
self.fs.flush()
self.fs.bufferForRead()
self.assertEqual(self.fs.readByte(), type)
result = VoltTable(self.fs)
result.readFromSerializer()
self.assertEqual(result, table)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print >> sys.stderr, "Usage: %s server_command [unittest args]" % (sys.argv[0],)
        sys.exit(-1)
lock = threading.Event()
echo = EchoServer(sys.argv[1], lock)
handler = lambda x, y: signalHandler(echo, x, y)
signal.signal(signal.SIGINT, handler)
echo.start()
lock.wait()
del sys.argv[1]
try:
unittest.main()
except SystemExit:
echo.shutdown()
echo.join()
raise
|
import imp
import sys
def create_modules(module_path):
    """Import or create every package level of a dotted module path and
    return the deepest module."""
    path = ""
    module = None
    for element in module_path.split('.'):
        path += element
        try:
            module = __import__(path)
        except ImportError:
            # This level does not exist yet: create a placeholder module,
            # attach it to its parent and register it in sys.modules so
            # subsequent imports can resolve it.
            new = imp.new_module(path)
            if module is not None:
                setattr(module, element, new)
            module = new
            sys.modules[path] = module
            __import__(path)
        path += "."
    return module
def stub(module_path, class_name, base_class, meta_class=type):
module = create_modules(module_path)
cls = meta_class(class_name, (base_class, ), {})
setattr(module, class_name, cls)
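if __name__ == '__main__':
    # Minimal usage sketch with hypothetical names: fabricate "fake.pkg.Widget"
    # so code under test can import it without the real package installed.
    # Note: relies on the deprecated `imp` module (removed in Python 3.12).
    stub('fake.pkg', 'Widget', object)
    from fake.pkg import Widget
    assert issubclass(Widget, object)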
|
import argparse
import json
import re
import sys
parser = argparse.ArgumentParser(
    description="processes the prediction json and var file, updates the json with other "
    "mutations to display in the prediction report, and includes a "
    "new category of lineage mutations, plus a set of drugs and "
    "gene names"
)
parser.add_argument("var_file", help="var file")
parser.add_argument(
"lineage_snps_file",
help="file with snps that are lineage specific and not resistance causing from WALKER paper",
)
parser.add_argument(
"json_file",
help="file that contains R prediction and list of important and other mutations",
)
parser.add_argument(
"-o", default=None,
help="Output filename for the new json (overwrites if not specified)",
)
args = parser.parse_args()
name = args.var_file
drugs = set()
drug_mapping = {
"AMK": "AMIKACIN",
"CAP": "CAPREOMYCIN",
"CIP": "CIPROFLOXACIN",
"EMB": "ETHAMBUTOL",
"ETH": "ETHIONAMIDE",
"INH": "ISONIAZID",
"KAN": "KANAMYCIN",
"LEVO": "LEVOFLOXACIN",
"OFLX": "OFLOXACIN",
"PAS": "PARA-AMINOSALICYLIC_ACID",
"PZA": "PYRAZINAMIDE",
"RIF": "RIFAMPICIN",
"STR": "STREPTOMYCIN",
}
drugs_to_genes = {
"AMK": set(["rrs"]),
"CAP": set(["rrs", "tlyA"]),
"CIP": set(["gyrA", "gyrB"]),
"EMB": set(["embB", "embC", "embA", "Rv3806c", "promoter-embA-embB"]),
"ETH": set(["ethA", "promoter-fabG1-inhA", "inhA"]),
"INH": set(
[
"katG",
"promoter-fabG1-inhA",
"inhA",
"kasA",
"promoter-ahpC",
"promoter-embA-embB",
]
),
"KAN": set(["rrs", "rrl", "eis", "inter-eis-Rv2417c"]),
"LEVO": set(["gyrA", "gyrB"]),
"OFLX": set(["gyrA", "gyrB"]),
"PAS": set(["thyA", "inter-thyX-hsdS.1", "folC"]),
"PZA": set(["pncA", "promoter-pncA", "rpsA"]),
"RIF": set(["rpoB"]),
"STR": set(["rpsL", "gid", "rrs", "inter-rrs-rrl"]),
}
class Variant(object):
    def __init__(self, gene_name, codon_location, AA_change,
                 name=None, test_name=None, drug=None):
        self.gene_name = gene_name
        self.codon_location = codon_location
        self.AA_change = AA_change
        self.name = name
        self.test_name = test_name
        self.drug = drug
    def compare_variant(self, variant):
        # Match on gene name and codon location only; the AA change is
        # compared separately by the callers (coding SNPs and indel/
        # intergenic/promoter variants are matched differently).
        return (self.gene_name == variant.gene_name) and (
            self.codon_location == variant.codon_location
        )
def __str__(self):
if self.AA_change:
return_string = self.AA_change[0] + self.codon_location + self.AA_change[1]
else:
return_string = self.codon_location
return "{}_{}".format(self.gene_name, return_string)
"""
Example piece of data in json file, major discrepancy with current var are promoter and intergenic regions and indels:
SNP_CN_2155168_C944G_S315T_katG
SNP_P_1673425_C15T_promoter_fabG1.inhA
DEL_I_1476813_d86GGGAG_inter_rrl_rrf
for testing
imp['INH'] = ['SNP_CN_2155168_C944G_S315T_katG']
imp['EMB'] = ['SNP_CN_4247431_G918C_M306I_embB']
imp['PAS'] = ['SNP_CN_3073868_T604C_T202A_thyA','INS_P_3074519_i48G_promoter_thyA']
imp['STR'] = ['SNP_CZ_4407731_G472A_R158._gid','INS_I_1471827_i19AGA_inter_murA_rrs']
imp['ETH'] = ['DEL_CD_4326366_d1108TGTAGGCCATCG_370_ethA']
imp['PZA'] = ['SNP_P_2289253_A12C_promoter_pncA']
imp['RIF'] = ['INS_CI_761103_i1296TTC_433_rpoB']
imp['LEVO'] = ['SNP_I_7268_C34T_inter_gyrB_gyrA','SNP_P_5075_C48T_promoter_gyrB.gyrA','INS_P_5079_i44G_promoter_gyrB.gyrA', 'INS_CF_9373_i2071T_691_gyrA']
"""
with open(args.json_file, "r") as f:
datastore = json.load(f)
drs = [
"INH",
"RIF",
"PZA",
"EMB",
"STR",
"ETH",
"KAN",
"CAP",
"AMK",
"CIP",
"LEVO",
"OFLX",
"PAS",
]
imp = {}
oth = {}
j = 0
for d in drs:
imp[d] = []
oth[d] = []
for i in range(0, 5):
        imp[d].append(list(datastore[1].values())[0][i][j])
        oth[d].append(list(datastore[2].values())[0][i][j])
j = j + 1
imp_variants_identified = []
oth_variants_identified = []
"""
can use the code below to process all randomforest variants variant_name_list.csv if needed
"""
for d in drs:
for mut in imp[d]:
if mut:
#sys.stderr.write(mut)
#sys.stderr.write('\n')
type_change_info = mut.split("_")
if type_change_info[0] == "SNP":
if type_change_info[1] == "CN":
gene_name, codonAA = type_change_info[5], type_change_info[4]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "CZ":
gene_name, codonAA = (
type_change_info[5],
type_change_info[4].replace(".", "*"),
)
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "P":
gene_name, codonAA = type_change_info[5], type_change_info[3]
if "." in gene_name:
gene_name = "promoter-" + gene_name.replace(".", "-")
else:
gene_name = "promoter-" + gene_name
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "N":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "I":
gene_name = (
"inter-" + type_change_info[5] + "-" + type_change_info[6]
)
codonAA = type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
if type_change_info[5] == "gyrB":
gene_name = "promoter-gyrB-gyrA"
elif type_change_info[0] in ["INS", "DEL"] and type_change_info[1] == "CF":
gene_name, codon_position = type_change_info[5], type_change_info[4]
codonAA = type_change_info[3]
m = re.search(r"[ACGT]+", codonAA)
if m:
indel_seq = m.group()
type_change = (
codonAA[0] + type_change_info[1][1] + indel_seq.replace("\n", "")
) # e.g dFG or iFGA
elif type_change_info[0] in ["INS", "DEL"] and type_change_info[1] in [
"CD",
"CI",
]:
gene_name, codon_position = type_change_info[5], type_change_info[4]
codonAA = type_change_info[3]
m = re.search(r"[ACGT]+", codonAA)
if m:
indel_seq = m.group()
type_change = (
codonAA[0] + type_change_info[1][1] + indel_seq.replace("\n", "")
) # e.g dIG or iDGA
elif type_change_info[0] in [
"INS",
"DEL",
]: # must be indel in promoter or intergenic region
if type_change_info[1] == "P":
gene_name, codonAA = type_change_info[5], type_change_info[3]
if "." in gene_name:
gene_name = "promoter-" + gene_name.replace(".", "-")
else:
gene_name = "promoter-" + gene_name
elif type_change_info[1] == "I":
gene_name = (
"inter-" + type_change_info[5] + "-" + type_change_info[6]
)
codonAA = type_change_info[3]
m = re.search(r"(\d+)([ACGT]+)", codonAA)
if m:
codon_position = m.group(1)
indel_seq = m.group(2)
type_change = codonAA[0] + indel_seq.replace("\n", "")
test = Variant(gene_name, codon_position, type_change)
imp_variants_identified.append(test)
drugs_to_genes[d].add(gene_name)
for d in drs:
for mut in oth[d]:
if mut:
#sys.stderr.write(mut)
#sys.stderr.write('\n')
type_change_info = mut.split("_")
if type_change_info[0] == "SNP":
if type_change_info[1] == "CN":
gene_name, codonAA = type_change_info[5], type_change_info[4]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "CZ":
gene_name, codonAA = (
type_change_info[5],
type_change_info[4].replace(".", "*"),
)
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "P":
gene_name, codonAA = type_change_info[5], type_change_info[3]
if "." in gene_name:
gene_name = "promoter-" + gene_name.replace(".", "-")
else:
gene_name = "promoter-" + gene_name
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "N":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "I":
gene_name = (
"inter-" + type_change_info[5] + "-" + type_change_info[6]
)
codonAA = type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
if type_change_info[5] == "gyrB":
gene_name = "promoter-gyrB-gyrA"
elif type_change_info[0] in ["INS", "DEL"] and type_change_info[1] == "CF":
gene_name, codon_position = type_change_info[5], type_change_info[4]
codonAA = type_change_info[3]
m = re.search(r"[ACGT]+", codonAA)
if m:
indel_seq = m.group()
type_change = (
codonAA[0] + type_change_info[1][1] + indel_seq.replace("\n", "")
) # e.g dFG or iFGA
elif type_change_info[0] in ["INS", "DEL"] and type_change_info[1] in [
"CD",
"CI",
]:
gene_name, codon_position = type_change_info[5], type_change_info[4]
codonAA = type_change_info[3]
m = re.search(r"[ACGT]+", codonAA)
if m:
indel_seq = m.group()
type_change = (
codonAA[0] + type_change_info[1][1] + indel_seq.replace("\n", "")
) # e.g dIG or iDGA
elif type_change_info[0] in [
"INS",
"DEL",
]: # must be indel in promoter or intergenic region
if type_change_info[1] == "P":
gene_name, codonAA = type_change_info[5], type_change_info[3]
if "." in gene_name:
gene_name = "promoter-" + gene_name.replace(".", "-")
else:
gene_name = "promoter-" + gene_name
elif type_change_info[1] == "I":
gene_name = (
"inter-" + type_change_info[5] + "-" + type_change_info[6]
)
codonAA = type_change_info[3]
m = re.search(r"(\d+)([ACGT]+)", codonAA)
if m:
codon_position = m.group(1)
indel_seq = m.group(2)
type_change = codonAA[0] + indel_seq.replace("\n", "")
test = Variant(gene_name, codon_position, type_change)
oth_variants_identified.append(test)
drugs_to_genes[d].add(gene_name)
"""
Now break down lineage specific snps, in same format as var, but genomic coords are placeholder
"""
lineage_snps = []
for line in open(args.lineage_snps_file, "r").readlines()[1:]:
line = line.replace("\n", "")
type_change_info = line.split("_")
# sys.stderr.write(line+"\n")
if type_change_info[1] == "CN": # not currently used or tested
gene_name, codonAA = type_change_info[5], type_change_info[4]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "P":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "N":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[1] == "I": # not currently used or tested
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif (
type_change_info[0] == "INS" and type_change_info == "F"
): # not currently used or tested
    gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1].replace("\n", ""),
)
elif (
type_change_info[0] == "DEL" and type_change_info[1] == "F"
): # not currently used or tested
    gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1].replace("\n", ""),
)
test = Variant(gene_name, codon_position, type_change)
lineage_snps.append(test)
"""
SNP_CN_7585_G284C_S95T_gyrA
SNP_P_781395_T165C_promoter-rpsL
SNP_N_1474571_G914A_rrl
INS_CF_2289050_i192A_65I_pncA
"""
all_vars = []  # renamed from `all` to avoid shadowing the built-in
with open(args.var_file, "r") as f:
for line in f:
# sys.stderr.write(line)
result = line.rstrip().split("\t")
        all_vars.append(result[5])
# sys.stderr.write(result[5]+"\n")
new_variants_identified = {}
lineage_variants_identified = {}
moth={}
for d in drs:
new_variants_identified[d] = []
lineage_variants_identified[d] = []
moth[d] = []
# sys.stderr.write(d+':\t')
for i in drugs_to_genes[d]:
# sys.stderr.write(i+'\t')
pattern = i
pattern = r"\s?([\w_\.]+%s[\w_\.]*)\s?" % pattern
        muts = tuple(re.finditer(pattern, ",".join(all_vars)))  # deliberately case-sensitive
if len(muts) >= 1:
#sys.stderr.write("with pattern "+i+" found "+str(len(muts))+" variant(s) for drug "+d+"\n")
for j in range(0, len(muts)):
var = muts[j].group()
#sys.stderr.write(str(var)+"\n")
type_change_info = var.split("_")
sys.stderr.write("variant is "+type_change_info[0]+"\n")
if type_change_info[1] != "CS":
if type_change_info[0] == "SNP" and type_change_info[1] in [
"CN",
"CZ",
]:
gene_name, codonAA = type_change_info[5], type_change_info[4]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[0] == "SNP" and type_change_info[1] == "P":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[0] == "SNP" and type_change_info[1] == "N":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[0] == "SNP" and type_change_info[1] == "I":
gene_name, codonAA = type_change_info[4], type_change_info[3]
type_change, codon_position = (
codonAA[0] + codonAA[len(codonAA) - 1],
codonAA[1 : len(codonAA) - 1],
)
elif type_change_info[0] in ["INS", "DEL"] and type_change_info[
1
] in ["CF", "CD", "CI"]:
gene_name, codon_position = (
type_change_info[5],
type_change_info[4],
)
if type_change_info[0] == "INS":
m = re.search(r"\d+", codon_position)
if m:
codon_position = m.group()
codonAA = type_change_info[3]
m = re.search(r"[ACGT]+", codonAA)
if m:
indel_seq = m.group()
type_change = (
codonAA[0]
+ type_change_info[1][1]
+ indel_seq.replace("\n", "")
) # e.g dFG or iFGA
elif type_change_info[0] in [
"INS",
"DEL",
]: # must be indel in promoter or intergenic region
gene_name, codonAA = type_change_info[4], type_change_info[3]
codonAA = type_change_info[3]
m = re.search(r"(\d+)([ACGT]+)", codonAA)
if m:
codon_position = m.group(1)
indel_seq = m.group(2)
type_change = codonAA[0] + indel_seq.replace("\n", "")
test = Variant(
gene_name, codon_position, type_change, name=str(var)
)
lineage_test = 0
oth_test=0
imp_test=0
for lin in lineage_snps:
if lin.compare_variant(test): #codon_location and gene_names are the same
if lin.AA_change == test.AA_change:
lineage_test = 1
                for imp_var in imp_variants_identified:
                    if imp_var.compare_variant(test):
                        if imp_var.AA_change == test.AA_change:
                            imp_test = 1
                        elif imp_var.AA_change[0:2] in ["dF", "iF"]:
                            imp_test = 1
                for oth_var in oth_variants_identified:
                    if oth_var.compare_variant(test):
                        if oth_var.AA_change == test.AA_change:
                            oth_test = 1
                        elif oth_var.AA_change[0:2] in ["dF", "iF"]:
                            oth_test = 1
if oth_test == 1 and lineage_test == 1:
lineage_variants_identified[d].append(var)
if oth_test == 1 and lineage_test == 0:
#sys.stderr.write("other variant found and is "+ var+'\n')
moth[d].append(str(var))
if imp_test == 0 and oth_test == 0 and lineage_test == 0:
new_variants_identified[d].append(var)
results1 = {}
results2 = {}
other=datastore[2]
v1 = []
v2 = []
for d in drs:
v1.append(len(new_variants_identified[d]))
v2.append(len(lineage_variants_identified[d]))
l1 = max(v1)
l2 = max(v2)
if l1 == 0:
results1[0] = ["Null"] * len(drs)
else:
for i in range(0, l1):
results1[i] = []
results1[i] = ["Null"] * len(drs)
j = -1
# sys.stderr.write(d+' out\n')
for d in range(0, len(drs)):
j = j + 1
if v1[d] > i:
if len(new_variants_identified[drs[d]]) > 0:
# sys.stderr.write(d+' in!\n')
results1[i][j] = new_variants_identified[drs[d]][i]
if l2 == 0:
results2[0] = ["Null"] * len(drs)
else:
for i in range(0, l2):
results2[i] = []
results2[i] = ["Null"] * len(drs)
j = -1
# sys.stderr.write(d+' out\n')
for d in range(0, len(drs)):
j = j + 1
if v2[d] > i:
if len(lineage_variants_identified[drs[d]]) > 0:
# sys.stderr.write(d+' in!\n')
results2[i][j] = lineage_variants_identified[drs[d]][i]
other_table = next(iter(other.values()))  # the single entry in the dict
for i in range(0, 5):
    j = -1
    for d in range(0, len(drs)):
        j = j + 1
        if len(moth[drs[d]][i:]) > 0:
            other_table[i][j] = moth[drs[d]][i]
        else:
            other_table[i][j] = None
def append_results(in_name, out_name, *results):
"""Write the resulting structures to the output filename"""
with open(in_name, "r") as infile:
structure = json.loads(infile.read())
structure.pop()
structure.extend(results)
with open((out_name or in_name), "w") as outfile:
outfile.write(json.dumps(structure, indent=2))
if __name__ == '__main__':
    append_results(args.json_file, args.o, other, results1, results2)
|
from datetime import date as real_date, datetime as real_datetime, timedelta
import time
import re
try:
    import dateutil.parser
    from dateutil import tz
except ImportError:
    raise ImportError('Please install python-dateutil')
__all__ = ['local2utc', 'utc2local', 'LinearDateGuesser', 'date', 'datetime', 'new_date', 'new_datetime']
def local2utc(dateobj):
dateobj = dateobj.replace(tzinfo=tz.tzlocal())
dateobj = dateobj.astimezone(tz.tzutc())
return dateobj
def utc2local(dateobj):
dateobj = dateobj.replace(tzinfo=tz.tzutc())
dateobj = dateobj.astimezone(tz.tzlocal())
return dateobj
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
    def combine(self, date, time):
        return datetime(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
def new_date(d):
""" Generate a safe date from a datetime.date object """
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while True:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j+1
return sites
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = _findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return s
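# Illustrative behaviour of the workaround above: time.strftime() rejects
# years before 1900 on some platforms, so the date is shifted into the
# 28-year Gregorian repeat cycle around the year 2000, formatted there, and
# the real year spliced back in, e.g.
#   date(1852, 3, 1).strftime('%d %B %Y')  ->  '01 March 1852'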
class LinearDateGuesser(object):
"""
The aim of this class is to guess the exact date object from
a day and a month, but not a year.
It works with a start date (default is today), and all dates must be
sorted from recent to older.
"""
def __init__(self, current_date=None, date_max_bump=timedelta(7)):
self.date_max_bump = date_max_bump
if current_date is None:
current_date = date.today()
self.current_date = current_date
def try_assigning_year(self, day, month, start_year, max_year):
"""
Tries to create a date object with day, month and start_year and returns
it.
If it fails due to the year not matching the day+month combination
(i.e. due to a ValueError -- TypeError and OverflowError are not
handled), the previous or next years are tried until max_year is
reached.
In case initialization still fails with max_year, this function raises
a ValueError.
"""
while True:
try:
return date(start_year, month, day)
except ValueError as e:
if start_year == max_year:
raise e
                start_year += 1 if max_year > start_year else -1  # step toward max_year (cmp() is Python 2 only)
def set_current_date(self, current_date):
self.current_date = current_date
def guess_date(self, day, month, change_current_date=True):
""" Returns a date object built from a given day/month pair. """
today = self.current_date
# The website only provides dates using the 'DD/MM' string, so we have to
# determine the most possible year by ourselves. This implies tracking
# the current date.
# However, we may also encounter "bumps" in the dates, e.g. "12/11,
# 10/11, 10/11, 12/11, 09/11", so we have to be, well, quite tolerant,
# by accepting dates in the near future (say, 7 days) of the current
# date. (Please, kill me...)
# We first try to keep the current year
naively_parsed_date = self.try_assigning_year(day, month, today.year, today.year - 5)
if (naively_parsed_date.year != today.year):
# we most likely hit a 29/02 leading to a change of year
if change_current_date:
self.set_current_date(naively_parsed_date)
return naively_parsed_date
if (naively_parsed_date > today + self.date_max_bump):
# if the date ends up too far in the future, consider it actually
# belongs to the previous year
parsed_date = date(today.year - 1, month, day)
if change_current_date:
self.set_current_date(parsed_date)
elif (naively_parsed_date > today and naively_parsed_date <= today + self.date_max_bump):
# if the date is in the near future, consider it is a bump
parsed_date = naively_parsed_date
# do not keep it as current date though
else:
# if the date is in the past, as expected, simply keep it
parsed_date = naively_parsed_date
# and make it the new current date
if change_current_date:
self.set_current_date(parsed_date)
return parsed_date
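# Usage sketch with hypothetical values, dates arriving newest-first:
#   guesser = LinearDateGuesser(current_date=date(2012, 1, 3))
#   guesser.guess_date(28, 12)  # -> date(2011, 12, 28): 28/12 would be too
#                               #    far in the future for 2012
#   guesser.guess_date(30, 12)  # -> date(2011, 12, 30): small forward "bump",
#                               #    tolerated without moving the current date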
class ChaoticDateGuesser(LinearDateGuesser):
"""
    This class aims to guess the date when you know the day and month
    and the minimum year.
"""
def __init__(self, min_date, current_date=None, date_max_bump=timedelta(7)):
if min_date is None:
raise ValueError("min_date is not set")
self.min_date = min_date
super(ChaoticDateGuesser, self).__init__(current_date, date_max_bump)
def guess_date(self, day, month):
"""Returns a possible date between min_date and current_date"""
parsed_date = super(ChaoticDateGuesser, self).guess_date(day, month, False)
if parsed_date >= self.min_date:
return parsed_date
else:
raise ValueError("%s is inferior to min_date %s" % (parsed_date, self.min_date))
DATE_TRANSLATE_FR = [(re.compile(u'janvier', re.I), u'january'),
(re.compile(u'février', re.I), u'february'),
(re.compile(u'mars', re.I), u'march'),
(re.compile(u'avril', re.I), u'april'),
(re.compile(u'mai', re.I), u'may'),
(re.compile(u'juin', re.I), u'june'),
(re.compile(u'juillet', re.I), u'july'),
(re.compile(u'août', re.I), u'august'),
(re.compile(u'septembre', re.I), u'september'),
(re.compile(u'octobre', re.I), u'october'),
(re.compile(u'novembre', re.I), u'november'),
(re.compile(u'décembre', re.I), u'december'),
(re.compile(u'lundi', re.I), u'monday'),
(re.compile(u'mardi', re.I), u'tuesday'),
(re.compile(u'mercredi', re.I), u'wednesday'),
(re.compile(u'jeudi', re.I), u'thursday'),
(re.compile(u'vendredi', re.I), u'friday'),
(re.compile(u'samedi', re.I), u'saturday'),
(re.compile(u'dimanche', re.I), u'sunday')]
def parse_french_date(date):
for fr, en in DATE_TRANSLATE_FR:
date = fr.sub(en, date)
return dateutil.parser.parse(date)
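# e.g. parse_french_date(u'8 août 2013') rewrites the French tokens to
# '8 august 2013' before handing off to dateutil, yielding
# datetime.datetime(2013, 8, 8, 0, 0).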
WEEK = {'MONDAY': 0,
'TUESDAY': 1,
'WEDNESDAY': 2,
'THURSDAY': 3,
'FRIDAY': 4,
'SATURDAY': 5,
'SUNDAY': 6,
'LUNDI': 0,
'MARDI': 1,
'MERCREDI': 2,
'JEUDI': 3,
'VENDREDI': 4,
'SAMEDI': 5,
'DIMANCHE': 6,
}
def get_date_from_day(day):
today = date.today()
today_day_number = today.weekday()
requested_day_number = WEEK[day.upper()]
if today_day_number < requested_day_number:
day_to_go = requested_day_number - today_day_number
else:
day_to_go = 7 - today_day_number + requested_day_number
requested_date = today + timedelta(day_to_go)
return date(requested_date.year, requested_date.month, requested_date.day)
def parse_date(string):
    matches = re.search(r'\s*([012]?[0-9]|3[01])\s*/\s*(0?[1-9]|1[012])\s*/?(\d{2}|\d{4})?$', string)
if matches:
year = matches.group(3)
if not year:
year = date.today().year
elif len(year) == 2:
year = 2000 + int(year)
return date(int(year), int(matches.group(2)), int(matches.group(1)))
elif string.upper() in WEEK.keys():
return get_date_from_day(string)
elif string.upper() == "TODAY":
return date.today()
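# Examples of inputs accepted by parse_date (hypothetical dates):
#   parse_date('31/12/15') -> date(2015, 12, 31)  (two-digit years map to 2000+)
#   parse_date('05/07')    -> July 5 of the current year
#   parse_date('lundi')    -> the date of the coming Monday
#   parse_date('TODAY')    -> date.today()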
|
import setuptools
setuptools.setup()
|
from spack.package import *  # Spack package DSL: AutotoolsPackage, version, depends_on, conflicts
class Libksba(AutotoolsPackage):
"""Libksba is a library to make the tasks of working with X.509
certificates, CMS data and related objects easier.
"""
homepage = "https://gnupg.org/software/libksba/index.html"
url = "https://gnupg.org/ftp/gcrypt/libksba/libksba-1.3.5.tar.bz2"
maintainers = ['alalazo']
version('1.6.0', sha256='dad683e6f2d915d880aa4bed5cea9a115690b8935b78a1bbe01669189307a48b')
version('1.5.1', sha256='b0f4c65e4e447d9a2349f6b8c0e77a28be9531e4548ba02c545d1f46dc7bf921')
version('1.5.0', sha256='ae4af129216b2d7fdea0b5bf2a788cd458a79c983bb09a43f4d525cc87aba0ba')
version('1.4.0', sha256='bfe6a8e91ff0f54d8a329514db406667000cb207238eded49b599761bfca41b6')
version('1.3.5', sha256='41444fd7a6ff73a79ad9728f985e71c9ba8cd3e5e53358e70d5f066d35c1a340')
depends_on('libgpg-error@1.8:')
conflicts('%apple-clang@12:', when='@:1.3')
def configure_args(self):
return [
'--enable-static',
'--enable-shared',
'--with-libgpg-error-prefix=' + self.spec['libgpg-error'].prefix
]
|
from itertools import count
import logging
import inspect
import copy
import re
from .settings import config
from .errors import DataJointError
from .fetch import Fetch, Fetch1
from .preview import preview, repr_html
from .condition import AndList, Not, \
make_condition, assert_join_compatibility, extract_column_names, PromiscuousOperand
from .declare import CONSTANT_LITERALS
logger = logging.getLogger(__name__)
class QueryExpression:
"""
QueryExpression implements query operators to derive new entity set from its input.
A QueryExpression object generates a SELECT statement in SQL.
QueryExpression operators are restrict, join, proj, aggr, and union.
    A QueryExpression object has a support, a restriction (an AndList), and a heading.
Property `heading` (type dj.Heading) contains information about the attributes.
It is loaded from the database and updated by proj.
Property `support` is the list of table names or other QueryExpressions to be joined.
The restriction is applied first without having access to the attributes generated by the projection.
    Then the projection is applied by selecting and modifying the heading attributes.
Application of operators does not always lead to the creation of a subquery.
A subquery is generated when:
1. A restriction is applied on any computed or renamed attributes
    2. A projection remaps attributes that were already renamed or computed
3. Subclasses: Join, Aggregation, and Union have additional specific rules.
"""
_restriction = None
_restriction_attributes = None
_left = [] # list of booleans True for left joins, False for inner joins
_original_heading = None # heading before projections
# subclasses or instantiators must provide values
_connection = None
_heading = None
_support = None
# If the query will be using distinct
_distinct = False
@property
def connection(self):
""" a dj.Connection object """
assert self._connection is not None
return self._connection
@property
def support(self):
""" A list of table names or subqueries to from the FROM clause """
assert self._support is not None
return self._support
@property
def heading(self):
""" a dj.Heading object, reflects the effects of the projection operator .proj """
return self._heading
@property
def original_heading(self):
""" a dj.Heading object reflecting the attributes before projection """
return self._original_heading or self.heading
@property
def restriction(self):
""" a AndList object of restrictions applied to input to produce the result """
if self._restriction is None:
self._restriction = AndList()
return self._restriction
@property
def restriction_attributes(self):
""" the set of attribute names invoked in the WHERE clause """
if self._restriction_attributes is None:
self._restriction_attributes = set()
return self._restriction_attributes
@property
def primary_key(self):
return self.heading.primary_key
_subquery_alias_count = count() # count for alias names used in the FROM clause
def from_clause(self):
support = ('(' + src.make_sql() + ') as `$%x`' % next(
self._subquery_alias_count) if isinstance(src, QueryExpression)
else src for src in self.support)
clause = next(support)
for s, left in zip(support, self._left):
clause += ' NATURAL{left} JOIN {clause}'.format(
left=" LEFT" if left else "",
clause=s)
return clause
def where_clause(self):
return '' if not self.restriction else ' WHERE (%s)' % ')AND('.join(
str(s) for s in self.restriction)
def make_sql(self, fields=None):
"""
Make the SQL SELECT statement.
:param fields: used to explicitly set the select attributes
"""
return 'SELECT {distinct}{fields} FROM {from_}{where}'.format(
distinct="DISTINCT " if self._distinct else "",
fields=self.heading.as_sql(fields or self.heading.names),
from_=self.from_clause(), where=self.where_clause())
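    # For a restricted table this typically renders SQL of the form
    # (illustrative): SELECT `a`,`b` FROM `schema`.`table` WHERE (cond1)AND(cond2)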
# --------- query operators -----------
def make_subquery(self):
""" create a new SELECT statement where self is the FROM clause """
result = QueryExpression()
result._connection = self.connection
result._support = [self]
result._heading = self.heading.make_subquery_heading()
return result
def restrict(self, restriction):
"""
Produces a new expression with the new restriction applied.
rel.restrict(restriction) is equivalent to rel & restriction.
rel.restrict(Not(restriction)) is equivalent to rel - restriction
The primary key of the result is unaffected.
Successive restrictions are combined as logical AND: r & a & b is equivalent to r & AndList((a, b))
        Any QueryExpression, collection, or sequence other than an AndList is treated as an OrList
(logical disjunction of conditions)
Inverse restriction is accomplished by either using the subtraction operator or the Not class.
        The expressions in each row below are equivalent:
rel & True rel
rel & False the empty entity set
rel & 'TRUE' rel
rel & 'FALSE' the empty entity set
rel - cond rel & Not(cond)
rel - 'TRUE' rel & False
rel - 'FALSE' rel
rel & AndList((cond1,cond2)) rel & cond1 & cond2
rel & AndList() rel
rel & [cond1, cond2] rel & OrList((cond1, cond2))
rel & [] rel & False
rel & None rel & False
rel & any_empty_entity_set rel & False
rel - AndList((cond1,cond2)) rel & [Not(cond1), Not(cond2)]
rel - [cond1, cond2] rel & Not(cond1) & Not(cond2)
rel - AndList() rel & False
rel - [] rel
rel - None rel
rel - any_empty_entity_set rel
When arg is another QueryExpression, the restriction rel & arg restricts rel to elements that match at least
one element in arg (hence arg is treated as an OrList).
Conversely, rel - arg restricts rel to elements that do not match any elements in arg.
Two elements match when their common attributes have equal values or when they have no common attributes.
All shared attributes must be in the primary key of either rel or arg or both or an error will be raised.
QueryExpression.restrict is the only access point that modifies restrictions. All other operators must
ultimately call restrict()
:param restriction: a sequence or an array (treated as OR list), another QueryExpression, an SQL condition
string, or an AndList.
"""
attributes = set()
new_condition = make_condition(self, restriction, attributes)
if new_condition is True:
return self # restriction has no effect, return the same object
# check that all attributes in condition are present in the query
try:
raise DataJointError("Attribute `%s` is not found in query." % next(
attr for attr in attributes if attr not in self.heading.names))
except StopIteration:
pass # all ok
# If the new condition uses any new attributes, a subquery is required.
# However, Aggregation's HAVING statement works fine with aliased attributes.
need_subquery = isinstance(self, Union) or (
not isinstance(self, Aggregation) and self.heading.new_attributes)
if need_subquery:
result = self.make_subquery()
else:
result = copy.copy(self)
result._restriction = AndList(self.restriction) # copy to preserve the original
result.restriction.append(new_condition)
result.restriction_attributes.update(attributes)
return result
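    # Usage sketch with hypothetical tables/conditions:
    #   session & 'subject_id = 5'              # SQL condition string
    #   session & dict(subject_id=5)            # mapping of attribute values
    #   session - (BadSession & 'score < 0.5')  # inverted restriction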
def restrict_in_place(self, restriction):
self.__dict__.update(self.restrict(restriction).__dict__)
def __and__(self, restriction):
"""
Restriction operator e.g. ``q1 & q2``.
:return: a restricted copy of the input argument
See QueryExpression.restrict for more detail.
"""
return self.restrict(restriction)
def __xor__(self, restriction):
"""
Permissive restriction operator ignoring compatibility check e.g. ``q1 ^ q2``.
"""
if inspect.isclass(restriction) and issubclass(restriction, QueryExpression):
restriction = restriction()
if isinstance(restriction, Not):
return self.restrict(Not(PromiscuousOperand(restriction.restriction)))
return self.restrict(PromiscuousOperand(restriction))
def __sub__(self, restriction):
"""
Inverted restriction e.g. ``q1 - q2``.
:return: a restricted copy of the input argument
See QueryExpression.restrict for more detail.
"""
return self.restrict(Not(restriction))
def __neg__(self):
"""
Convert between restriction and inverted restriction e.g. ``-q1``.
:return: target restriction
See QueryExpression.restrict for more detail.
"""
if isinstance(self, Not):
return self.restriction
return Not(self)
def __mul__(self, other):
"""
join of query expressions `self` and `other` e.g. ``q1 * q2``.
"""
return self.join(other)
def __matmul__(self, other):
"""
Permissive join of query expressions `self` and `other` ignoring compatibility check
e.g. ``q1 @ q2``.
"""
if inspect.isclass(other) and issubclass(other, QueryExpression):
other = other() # instantiate
return self.join(other, semantic_check=False)
def join(self, other, semantic_check=True, left=False):
"""
create the joined QueryExpression.
a * b is short for A.join(B)
a @ b is short for A.join(B, semantic_check=False)
Additionally, left=True will retain the rows of self, effectively performing a left join.
"""
# trigger subqueries if joining on renamed attributes
if isinstance(other, U):
return other * self
if inspect.isclass(other) and issubclass(other, QueryExpression):
other = other() # instantiate
if not isinstance(other, QueryExpression):
raise DataJointError("The argument of join must be a QueryExpression")
if semantic_check:
assert_join_compatibility(self, other)
join_attributes = set(n for n in self.heading.names if n in other.heading.names)
# needs subquery if self's FROM clause has common attributes with other's FROM clause
need_subquery1 = need_subquery2 = bool(
(set(self.original_heading.names) & set(other.original_heading.names))
- join_attributes)
# need subquery if any of the join attributes are derived
need_subquery1 = (need_subquery1 or isinstance(self, Aggregation) or
any(n in self.heading.new_attributes for n in join_attributes)
or isinstance(self, Union))
need_subquery2 = (need_subquery2 or isinstance(other, Aggregation) or
any(n in other.heading.new_attributes for n in join_attributes)
                          or isinstance(other, Union))
if need_subquery1:
self = self.make_subquery()
if need_subquery2:
other = other.make_subquery()
result = QueryExpression()
result._connection = self.connection
result._support = self.support + other.support
result._left = self._left + [left] + other._left
result._heading = self.heading.join(other.heading)
result._restriction = AndList(self.restriction)
result._restriction.append(other.restriction)
result._original_heading = self.original_heading.join(other.original_heading)
assert len(result.support) == len(result._left) + 1
return result
def __add__(self, other):
"""union e.g. ``q1 + q2``."""
return Union.create(self, other)
def proj(self, *attributes, **named_attributes):
"""
Projection operator.
:param attributes: attributes to be included in the result. (The primary key is already included).
:param named_attributes: new attributes computed or renamed from existing attributes.
:return: the projected expression.
Primary key attributes cannot be excluded but may be renamed.
If the attribute list contains an Ellipsis ..., then all secondary attributes are included too
Prefixing an attribute name with a dash '-attr' removes the attribute from the list if present.
        Keyword arguments can be used to rename attributes as in name='attr', duplicate them as in name='(attr)', or compute new ones, as shown below:
self.proj(...) or self.proj(Ellipsis) -- include all attributes (return self)
self.proj() -- include only primary key
self.proj('attr1', 'attr2') -- include primary key and attributes attr1 and attr2
self.proj(..., '-attr1', '-attr2') -- include all attributes except attr1 and attr2
self.proj(name1='attr1') -- include primary key and 'attr1' renamed as name1
self.proj('attr1', dup='(attr1)') -- include primary key and attribute attr1 twice, with the duplicate 'dup'
self.proj(k='abs(attr1)') adds the new attribute k with the value computed as an expression (SQL syntax)
from other attributes available before the projection.
Each attribute name can only be used once.
"""
# new attributes in parentheses are included again with the new name without removing original
duplication_pattern = re.compile(fr'^\s*\(\s*(?!{"|".join(CONSTANT_LITERALS)})(?P<name>[a-zA-Z_]\w*)\s*\)\s*$')
# attributes without parentheses renamed
rename_pattern = re.compile(fr'^\s*(?!{"|".join(CONSTANT_LITERALS)})(?P<name>[a-zA-Z_]\w*)\s*$')
replicate_map = {k: m.group('name')
for k, m in ((k, duplication_pattern.match(v)) for k, v in named_attributes.items()) if m}
rename_map = {k: m.group('name')
for k, m in ((k, rename_pattern.match(v)) for k, v in named_attributes.items()) if m}
compute_map = {k: v for k, v in named_attributes.items()
if not duplication_pattern.match(v) and not rename_pattern.match(v)}
attributes = set(attributes)
# include primary key
attributes.update((k for k in self.primary_key if k not in rename_map.values()))
# include all secondary attributes with Ellipsis
if Ellipsis in attributes:
attributes.discard(Ellipsis)
attributes.update((a for a in self.heading.secondary_attributes
if a not in attributes and a not in rename_map.values()))
try:
raise DataJointError("%s is not a valid data type for an attribute in .proj" % next(
a for a in attributes if not isinstance(a, str)))
except StopIteration:
pass # normal case
# remove excluded attributes, specified as `-attr'
excluded = set(a for a in attributes if a.strip().startswith('-'))
attributes.difference_update(excluded)
excluded = set(a.lstrip('-').strip() for a in excluded)
attributes.difference_update(excluded)
try:
raise DataJointError("Cannot exclude primary key attribute %s", next(
a for a in excluded if a in self.primary_key))
except StopIteration:
pass # all ok
# check that all attributes exist in heading
try:
raise DataJointError(
'Attribute `%s` not found.' % next(a for a in attributes if a not in self.heading.names))
except StopIteration:
pass # all ok
# check that all mentioned names are present in heading
mentions = attributes.union(replicate_map.values()).union(rename_map.values())
try:
raise DataJointError("Attribute '%s' not found." % next(a for a in mentions if not self.heading.names))
except StopIteration:
pass # all ok
# check that newly created attributes do not clash with any other selected attributes
try:
raise DataJointError("Attribute `%s` already exists" % next(
a for a in rename_map if a in attributes.union(compute_map).union(replicate_map)))
except StopIteration:
pass # all ok
try:
raise DataJointError("Attribute `%s` already exists" % next(
a for a in compute_map if a in attributes.union(rename_map).union(replicate_map)))
except StopIteration:
pass # all ok
try:
raise DataJointError("Attribute `%s` already exists" % next(
a for a in replicate_map if a in attributes.union(rename_map).union(compute_map)))
except StopIteration:
pass # all ok
# need a subquery if the projection remaps any remapped attributes
used = set(q for v in compute_map.values() for q in extract_column_names(v))
used.update(rename_map.values())
used.update(replicate_map.values())
used.intersection_update(self.heading.names)
need_subquery = isinstance(self, Union) or any(
self.heading[name].attribute_expression is not None for name in used)
if not need_subquery and self.restriction:
# need a subquery if the restriction applies to attributes that have been renamed
need_subquery = any(name in self.restriction_attributes for name in self.heading.new_attributes)
result = self.make_subquery() if need_subquery else copy.copy(self)
result._original_heading = result.original_heading
result._heading = result.heading.select(
attributes, rename_map=dict(**rename_map, **replicate_map), compute_map=compute_map)
return result
def aggr(self, group, *attributes, keep_all_rows=False, **named_attributes):
"""
Aggregation of the type U('attr1','attr2').aggr(group, computation="QueryExpression")
has the primary key ('attr1','attr2') and performs aggregation computations for all matching elements of `group`.
:param group: The query expression to be aggregated.
:param keep_all_rows: True=keep all the rows from self. False=keep only rows that match entries in group.
:param named_attributes: computations of the form new_attribute="sql expression on attributes of group"
:return: The derived query expression
"""
if Ellipsis in attributes:
# expand ellipsis to include only attributes from the left table
attributes = set(attributes)
attributes.discard(Ellipsis)
attributes.update(self.heading.secondary_attributes)
return Aggregation.create(
self, group=group, keep_all_rows=keep_all_rows).proj(*attributes, **named_attributes)
aggregate = aggr # alias for aggr
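    # e.g. with hypothetical tables, subject.aggr(Session, n='count(*)')
    # yields one row per subject with the number of matching sessions as `n`.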
# ---------- Fetch operators --------------------
@property
def fetch1(self):
return Fetch1(self)
@property
def fetch(self):
return Fetch(self)
def head(self, limit=25, **fetch_kwargs):
"""
shortcut to fetch the first few entries from query expression.
Equivalent to fetch(order_by="KEY", limit=25)
:param limit: number of entries
:param fetch_kwargs: kwargs for fetch
:return: query result
"""
return self.fetch(order_by="KEY", limit=limit, **fetch_kwargs)
def tail(self, limit=25, **fetch_kwargs):
"""
shortcut to fetch the last few entries from query expression.
Equivalent to fetch(order_by="KEY DESC", limit=25)[::-1]
:param limit: number of entries
:param fetch_kwargs: kwargs for fetch
:return: query result
"""
return self.fetch(order_by="KEY DESC", limit=limit, **fetch_kwargs)[::-1]
def __len__(self):
""":return: number of elements in the result set e.g. ``len(q1)``."""
return self.connection.query(
'SELECT {select_} FROM {from_}{where}'.format(
select_=('count(*)' if any(self._left)
else 'count(DISTINCT {fields})'.format(fields=self.heading.as_sql(
self.primary_key, include_aliases=False))),
from_=self.from_clause(),
where=self.where_clause())).fetchone()[0]
def __bool__(self):
"""
:return: True if the result is not empty. Equivalent to len(self) > 0 but often
faster e.g. ``bool(q1)``.
"""
return bool(self.connection.query(
'SELECT EXISTS(SELECT 1 FROM {from_}{where})'.format(
from_=self.from_clause(),
where=self.where_clause())).fetchone()[0])
def __contains__(self, item):
"""
returns True if the restriction in item matches any entries in self
e.g. ``restriction in q1``.
:param item: any restriction
(item in query_expression) is equivalent to bool(query_expression & item) but may be
executed more efficiently.
"""
return bool(self & item) # May be optimized e.g. using an EXISTS query
def __iter__(self):
"""
returns an iterator-compatible QueryExpression object e.g. ``iter(q1)``.
:param self: iterator-compatible QueryExpression object
"""
self._iter_only_key = all(v.in_key for v in self.heading.attributes.values())
self._iter_keys = self.fetch('KEY')
return self
def __next__(self):
"""
returns the next record on an iterator-compatible QueryExpression object
e.g. ``next(q1)``.
:param self: A query expression
:type self: :class:`QueryExpression`
:rtype: dict
"""
try:
key = self._iter_keys.pop(0)
except AttributeError:
# self._iter_keys is missing because __iter__ has not been called.
raise TypeError("A QueryExpression object is not an iterator. "
"Use iter(obj) to create an iterator.")
except IndexError:
raise StopIteration
else:
if self._iter_only_key:
return key
else:
try:
return (self & key).fetch1()
except DataJointError:
# The data may have been deleted since the moment the keys were fetched
# -- move on to next entry.
return next(self)
def cursor(self, offset=0, limit=None, order_by=None, as_dict=False):
"""
See expression.fetch() for input description.
:return: query cursor
"""
if offset and limit is None:
raise DataJointError('limit is required when offset is set')
sql = self.make_sql()
if order_by is not None:
sql += ' ORDER BY ' + ', '.join(order_by)
if limit is not None:
sql += ' LIMIT %d' % limit + (' OFFSET %d' % offset if offset else "")
logger.debug(sql)
return self.connection.query(sql, as_dict=as_dict)
def __repr__(self):
"""
returns the string representation of a QueryExpression object e.g. ``str(q1)``.
:param self: A query expression
:type self: :class:`QueryExpression`
:rtype: str
"""
return super().__repr__() if config['loglevel'].lower() == 'debug' else self.preview()
def preview(self, limit=None, width=None):
""" :return: a string of preview of the contents of the query. """
return preview(self, limit, width)
def _repr_html_(self):
""" :return: HTML to display table in Jupyter notebook. """
return repr_html(self)
class Aggregation(QueryExpression):
"""
Aggregation.create(arg, group, comp1='calc1', ..., compn='calcn') yields an entity set
with primary key from arg.
The computed arguments comp1, ..., compn use aggregation calculations on the attributes of
group or simple projections and calculations on the attributes of arg.
    Aggregation is used by QueryExpression.aggr and U.aggr.
Aggregation is a private class in DataJoint, not exposed to users.
"""
_left_restrict = None # the pre-GROUP BY conditions for the WHERE clause
_subquery_alias_count = count()
@classmethod
def create(cls, arg, group, keep_all_rows=False):
if inspect.isclass(group) and issubclass(group, QueryExpression):
group = group() # instantiate if a class
assert isinstance(group, QueryExpression)
        if (keep_all_rows and len(group.support) > 1) or group.heading.new_attributes:
group = group.make_subquery() # subquery if left joining a join
join = arg.join(group, left=keep_all_rows) # reuse the join logic
result = cls()
result._connection = join.connection
result._heading = join.heading.set_primary_key(arg.primary_key) # use left operand's primary key
result._support = join.support
result._left = join._left
result._left_restrict = join.restriction # WHERE clause applied before GROUP BY
result._grouping_attributes = result.primary_key
return result
def where_clause(self):
return '' if not self._left_restrict else ' WHERE (%s)' % ')AND('.join(
str(s) for s in self._left_restrict)
def make_sql(self, fields=None):
fields = self.heading.as_sql(fields or self.heading.names)
assert self._grouping_attributes or not self.restriction
distinct = set(self.heading.names) == set(self.primary_key)
return 'SELECT {distinct}{fields} FROM {from_}{where}{group_by}'.format(
distinct="DISTINCT " if distinct else "",
fields=fields,
from_=self.from_clause(),
where=self.where_clause(),
group_by="" if not self.primary_key else (
" GROUP BY `%s`" % '`,`'.join(self._grouping_attributes) +
("" if not self.restriction else ' HAVING (%s)' % ')AND('.join(self.restriction))))
def __len__(self):
return self.connection.query(
'SELECT count(1) FROM ({subquery}) `${alias:x}`'.format(
subquery=self.make_sql(),
alias=next(self._subquery_alias_count))).fetchone()[0]
def __bool__(self):
return bool(self.connection.query(
'SELECT EXISTS({sql})'.format(sql=self.make_sql())))
class Union(QueryExpression):
"""
Union is the private DataJoint class that implements the union operator.
"""
__count = count()
@classmethod
def create(cls, arg1, arg2):
if inspect.isclass(arg2) and issubclass(arg2, QueryExpression):
arg2 = arg2() # instantiate if a class
if not isinstance(arg2, QueryExpression):
raise DataJointError(
"A QueryExpression can only be unioned with another QueryExpression")
if arg1.connection != arg2.connection:
raise DataJointError(
"Cannot operate on QueryExpressions originating from different connections.")
if set(arg1.primary_key) != set(arg2.primary_key):
raise DataJointError("The operands of a union must share the same primary key.")
if set(arg1.heading.secondary_attributes) & set(arg2.heading.secondary_attributes):
raise DataJointError(
"The operands of a union must not share any secondary attributes.")
result = cls()
result._connection = arg1.connection
result._heading = arg1.heading.join(arg2.heading)
result._support = [arg1, arg2]
return result
def make_sql(self):
arg1, arg2 = self._support
if not arg1.heading.secondary_attributes and not arg2.heading.secondary_attributes:
# no secondary attributes: use UNION DISTINCT
fields = arg1.primary_key
return ("SELECT * FROM (({sql1}) UNION ({sql2})) as `_u{alias}`".format(
sql1=arg1.make_sql() if isinstance(arg1, Union) else arg1.make_sql(fields),
sql2=arg2.make_sql() if isinstance(arg2, Union) else arg2.make_sql(fields),
alias=next(self.__count)
))
# with secondary attributes, use union of left join with antijoin
fields = self.heading.names
sql1 = arg1.join(arg2, left=True).make_sql(fields)
sql2 = (arg2 - arg1).proj(
..., **{k: 'NULL' for k in arg1.heading.secondary_attributes}).make_sql(fields)
return "({sql1}) UNION ({sql2})".format(sql1=sql1, sql2=sql2)
def from_clause(self):
""" The union does not use a FROM clause """
assert False
def where_clause(self):
""" The union does not use a WHERE clause """
assert False
def __len__(self):
return self.connection.query(
'SELECT count(1) FROM ({subquery}) `${alias:x}`'.format(
subquery=self.make_sql(),
alias=next(QueryExpression._subquery_alias_count))).fetchone()[0]
def __bool__(self):
return bool(self.connection.query(
'SELECT EXISTS({sql})'.format(sql=self.make_sql())))
class U:
"""
dj.U objects are the universal sets representing all possible values of their attributes.
dj.U objects cannot be queried on their own but are useful for forming some queries.
dj.U('attr1', ..., 'attrn') represents the universal set with the primary key attributes attr1 ... attrn.
The universal set is the set of all possible combinations of values of the attributes.
Without any attributes, dj.U() represents the set with one element that has no attributes.
Restriction:
dj.U can be used to enumerate unique combinations of values of attributes from other expressions.
The following expression yields all unique combinations of contrast and brightness found in the `stimulus` set:
>>> dj.U('contrast', 'brightness') & stimulus
Aggregation:
In aggregation, dj.U is used for summary calculation over an entire set:
The following expression yields one element with one attribute `s` containing the total number of elements in
query expression `expr`:
>>> dj.U().aggr(expr, n='count(*)')
    The following expressions both yield one element containing the number `n` of distinct values of attribute `attr` in
    query expression `expr`:
    >>> dj.U().aggr(expr, n='count(distinct attr)')
    >>> dj.U().aggr(dj.U('attr').aggr(expr), n='count(*)')
    The following expression yields one element with one attribute `s` containing the sum of values of attribute `attr`
    over the entire result set of expression `expr`:
    >>> dj.U().aggr(expr, s='sum(attr)')
    The following expression yields the set of all unique combinations of attributes `attr1`, `attr2` and the number of
    their occurrences in the result set of query expression `expr`:
    >>> dj.U('attr1', 'attr2').aggr(expr, n='count(*)')
Joins:
If expression `expr` has attributes 'attr1' and 'attr2', then expr * dj.U('attr1','attr2') yields the same result
    as `expr` but `attr1` and `attr2` are promoted to the primary key. This is useful for producing a join on
non-primary key attributes.
For example, if `attr` is in both expr1 and expr2 but not in their primary keys, then expr1 * expr2 will throw
an error because in most cases, it does not make sense to join on non-primary key attributes and users must first
rename `attr` in one of the operands. The expression dj.U('attr') * rel1 * rel2 overrides this constraint.
"""
def __init__(self, *primary_key):
self._primary_key = primary_key
@property
def primary_key(self):
return self._primary_key
def __and__(self, other):
if inspect.isclass(other) and issubclass(other, QueryExpression):
other = other() # instantiate if a class
if not isinstance(other, QueryExpression):
raise DataJointError('Set U can only be restricted with a QueryExpression.')
result = copy.copy(other)
result._distinct = True
result._heading = result.heading.set_primary_key(self.primary_key)
result = result.proj()
return result
def join(self, other, left=False):
"""
Joining U with a query expression has the effect of promoting the attributes of U to
the primary key of the other query expression.
:param other: the other query expression to join with.
:param left: ignored. dj.U always acts as if left=False
:return: a copy of the other query expression with the primary key extended.
"""
if inspect.isclass(other) and issubclass(other, QueryExpression):
other = other() # instantiate if a class
if not isinstance(other, QueryExpression):
raise DataJointError('Set U can only be joined with a QueryExpression.')
try:
raise DataJointError(
'Attribute `%s` not found' % next(k for k in self.primary_key
if k not in other.heading.names))
except StopIteration:
pass # all ok
result = copy.copy(other)
result._heading = result.heading.set_primary_key(
other.primary_key + [k for k in self.primary_key
if k not in other.primary_key])
return result
def __mul__(self, other):
""" shorthand for join """
return self.join(other)
def aggr(self, group, **named_attributes):
"""
Aggregation of the type U('attr1','attr2').aggr(group, computation="QueryExpression")
has the primary key ('attr1','attr2') and performs aggregation computations for all matching elements of `group`.
:param group: The query expression to be aggregated.
:param named_attributes: computations of the form new_attribute="sql expression on attributes of group"
:return: The derived query expression
"""
if named_attributes.get('keep_all_rows', False):
raise DataJointError(
'Cannot set keep_all_rows=True when aggregating on a universal set.')
return Aggregation.create(self, group=group, keep_all_rows=False).proj(**named_attributes)
aggregate = aggr # alias for aggr
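# Editor's sketch (hypothetical table `Session`, assuming a configured
# DataJoint connection; illustrative only): how the expression helpers
# above compose.
#
#   import datajoint as dj
#   session = Session()                      # some user-defined table
#   first10 = session.head(10)               # first 10 entries, ordered by KEY
#   last10 = session.tail(10)                # last 10 entries, in forward order
#   subjects = dj.U('subject_id') & session  # unique subject_id values
#   counts = dj.U('subject_id').aggr(session, n='count(*)')  # rows per subject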
|
"""SCons.Tool.lex
Tool-specific initialization for lex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Tool/lex.py 0.96.1.D001 2004/08/23 09:55:29 knight"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for lex to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
c_file.add_action('.l', SCons.Defaults.LexAction)
cxx_file.add_action('.ll', SCons.Defaults.LexAction)
env['LEX'] = env.Detect('flex') or 'lex'
env['LEXFLAGS'] = SCons.Util.CLVar('')
env['LEXCOM'] = '$LEX $LEXFLAGS -t $SOURCES > $TARGET'
def exists(env):
return env.Detect(['flex', 'lex'])
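# Editor's sketch (hypothetical SConstruct usage, assuming a scanner.l source
# file exists): once this tool is loaded, .l/.ll sources are routed through
# $LEXCOM to produce C/C++ files.
#
#   env = Environment(tools=['default', 'lex'])
#   env.CFile(target='scanner.c', source='scanner.l')
#   env.Program('lexer', ['scanner.c', 'main.c'])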
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('gronda_juggernaut')
mobileTemplate.setLevel(50)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(500)
mobileTemplate.setHideType("Leathery Hide")
mobileTemplate.setHideAmount(400)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(375)
mobileTemplate.setSocialGroup("gronda")
mobileTemplate.setAssistRange(10)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_gronda.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_3')
attacks.add('bm_charge_3')
attacks.add('bm_dampen_pain_3')
attacks.add('bm_shaken_1')
attacks.add('bm_stomp_3')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('gronda_juggernaut', mobileTemplate)
return
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('insane_kitonak')
mobileTemplate.setLevel(1)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("insane kitonak")
mobileTemplate.setAssistRange(4)
mobileTemplate.setStalker(False)
templates = Vector()
templates.add('object/mobile/shared_kitonak_male.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('insane_kitonak', mobileTemplate)
return
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from opendai_bcn_web.models import Pollution
from opendai_bcn_web import views
from opendai_client.geocoding_logic import GeoCodingLogic
from opendai_client.api_client import ApiClient
import json
class BCNAPITest(TestCase):
#===========================================================================
# def test_traffic(self):
# c = ApiClient()
# result = c.get_bcn_traffic_current()
#
# self.assertIsNotNone(result, 'Not Result!')
#===========================================================================
def test_weather(self):
result = views.weather_all(None)
        self.assertIsNotNone(result, 'No result!')
o_result = json.loads(result.content)
self.assertTrue(o_result['max'].isdigit(), 'no max parsed')
        self.assertTrue(o_result['min'].isdigit(), 'no min parsed')
pass
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('lesser_desert_womprat')
mobileTemplate.setLevel(4)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Wild Meat")
mobileTemplate.setMeatAmount(2)
mobileTemplate.setHideType("Leathery Hide")
mobileTemplate.setBoneAmount(2)
mobileTemplate.setBoneType("Animal Bone")
mobileTemplate.setHideAmount(1)
mobileTemplate.setSocialGroup("womprat")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_lesser_desert_womp_rat.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_1')
attacks.add('bm_bolster_armor_1')
attacks.add('bm_enfeeble_1')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
	core.spawnService.addMobileTemplate('lesser_desert_womprat', mobileTemplate)
return
|
from tethyscluster.logger import log
from completers import VolumeCompleter
class CmdRemoveVolume(VolumeCompleter):
"""
removevolume [options] <volume_id>
Delete one or more EBS volumes
WARNING: This command will *PERMANENTLY* remove an EBS volume.
Please use caution!
Example:
$ tethyscluster removevolume vol-999999
"""
names = ['removevolume', 'rv']
def addopts(self, parser):
parser.add_option("-c", "--confirm", dest="confirm",
action="store_true", default=False,
help="do not prompt for confirmation, just "
"remove the volume")
def execute(self, args):
if not args:
self.parser.error("no volumes specified. exiting...")
for arg in args:
volid = arg
vol = self.ec2.get_volume(volid)
if vol.status in ['attaching', 'in-use']:
log.error("volume is currently in use. aborting...")
return
if vol.status == 'detaching':
log.error("volume is currently detaching. "
"please wait a few moments and try again...")
return
if not self.opts.confirm:
resp = raw_input("**PERMANENTLY** delete %s (y/n)? " % volid)
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
if vol.delete():
log.info("Volume %s deleted successfully" % (vol.id,))
else:
log.error("Error deleting volume %s" % (vol.id,))
|
percol.view.PROMPT = ur"<blue>Input:</blue> %q"
percol.view.RPROMPT = ur"(%F) [%i/%I]"
percol.import_keymap({
"C-h" : lambda percol: percol.command.delete_backward_char(),
"C-d" : lambda percol: percol.command.delete_forward_char(),
"C-k" : lambda percol: percol.command.kill_end_of_line(),
"C-y" : lambda percol: percol.command.yank(),
"C-t" : lambda percol: percol.command.transpose_chars(),
"C-a" : lambda percol: percol.command.beginning_of_line(),
"C-e" : lambda percol: percol.command.end_of_line(),
"C-b" : lambda percol: percol.command.backward_char(),
"C-f" : lambda percol: percol.command.forward_char(),
"M-f" : lambda percol: percol.command.forward_word(),
"M-b" : lambda percol: percol.command.backward_word(),
"M-d" : lambda percol: percol.command.delete_forward_word(),
"M-h" : lambda percol: percol.command.delete_backward_word(),
"C-n" : lambda percol: percol.command.select_next(),
"C-p" : lambda percol: percol.command.select_previous(),
"C-v" : lambda percol: percol.command.select_next_page(),
"M-v" : lambda percol: percol.command.select_previous_page(),
"M-<" : lambda percol: percol.command.select_top(),
"M->" : lambda percol: percol.command.select_bottom(),
"C-m" : lambda percol: percol.finish(),
"C-j" : lambda percol: percol.finish(),
"C-g" : lambda percol: percol.cancel(),
})
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from url if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
        Calculated from the format_id, width, height,
        and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A url pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
    webpage_url: The url to the video webpage; if given to youtube-dl it
    should allow getting the same result again. (It will be set
    by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
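    # Editor's note (illustrative): a Content-Type of
    # "text/html; charset=iso-8859-1" yields 'iso-8859-1'; otherwise a
    # <meta charset=...> tag in the first 1024 bytes is consulted, then a
    # UTF-16 BOM check, with 'utf-8' as the final fallback.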
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a url that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return None
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
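    # Editor's note (illustrative markup): the two templates match either
    # attribute order, e.g.
    #   <meta property="og:title" content="Example">
    #   <meta content="Example" property="og:title">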
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
@staticmethod
def _hidden_inputs(html):
return dict([
(input.group('name'), input.group('value')) for input in re.finditer(
r'''(?x)
<input\s+
type=(?P<q_hidden>["\'])hidden(?P=q_hidden)\s+
name=(?P<q_name>["\'])(?P<name>.+?)(?P=q_name)\s+
(?:id=(?P<q_id>["\']).+?(?P=q_id)\s+)?
value=(?P<q_value>["\'])(?P<value>.*?)(?P=q_value)
''', html)
])
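    # Editor's note (illustrative, hypothetical markup): given
    #   <input type="hidden" name="csrf_token" value="abc123">
    # _hidden_inputs returns {'csrf_token': 'abc123'}.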
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip()):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source)
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
                # If media_url is itself an f4m manifest, extract recursively,
                # since bitrates in the parent manifest (this one) and in the
                # media_url manifest may differ, making it impossible to resolve
                # the format by the requested bitrate in the f4m downloader.
if determine_ext(manifest_url) == 'f4m':
formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
'url': manifest_url,
'ext': 'flv',
'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
'preference': preference,
})
self._sort_formats(formats)
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True):
formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if m3u8_doc is False:
return m3u8_doc
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
                    # TODO: it looks like the video codec does not always go first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
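    # Editor's note (illustrative playlist excerpt): a variant entry such as
    #   #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
    #   http://example.com/720p.m3u8
    # yields a format with tbr=1280, width=1280, height=720, vcodec='avc1'
    # and acodec='mp4a'.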
# TODO: improve extraction
def _extract_smil_formats(self, smil_url, video_id, fatal=True):
smil = self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
if smil is False:
assert not fatal
return []
base = smil.find('./head/meta').get('base')
formats = []
rtmp_count = 0
if smil.findall('./body/seq/video'):
video = smil.findall('./body/seq/video')[0]
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
else:
for video in smil.findall('./body/switch/video'):
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
self._sort_formats(formats)
return formats
def _parse_smil_video(self, video, video_id, base, rtmp_count):
src = video.get('src')
if not src:
return ([], rtmp_count)
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
return (self._extract_m3u8_formats(src, video_id, ext), rtmp_count)
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
return ([{
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)
        elif proto and proto.startswith('http'):
            return ([{
                'url': base + src,
                'ext': ext or 'flv',
                'tbr': bitrate,
                'width': width,
                'height': height,
            }], rtmp_count)
        # Unknown or missing protocol: contribute no formats instead of
        # returning None, which would break tuple unpacking in the callers.
        return ([], rtmp_count)
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
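# Editor's sketch (hypothetical subclass, not part of this module): wiring a
# concrete extractor into the paged-search protocol described above.
class ExampleSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'exsearch'  # handles "exsearch:q", "exsearch5:q", "exsearchall:q"
    _MAX_RESULTS = 50

    def _get_n_results(self, query, n):
        # A real extractor would page through the site's search API here;
        # these URLs are placeholders.
        entries = [self.url_result('https://example.com/watch/%d' % i)
                   for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)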
|
controlsList=[
('Select From List', '//select'), # Get SELECTS
('Select Radio Button', '//input[@type="radio"]'), # Get RADIOS
('Select checkbox', '//input[@type="checkbox"]'), # Get CHECKBOXES
('Click button', '//input[@type="submit"]'), # Get BUTTONS
# NOTE : Get remaining //inputs last to avoid creating bogus textfield definitions
('Input text', '//input[@type="text"]'), # Get TEXT INPUT FIELDS
('Input text', '//input[@type="textarea"]'), # Get TEXT INPUT FIELDS
('Click button', '//input[@type="button"]'), # Get INPUT BUTTONS
('Click link', '//a'), # Get LINKS
]
print 'controlsList', controlsList
|
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor as cond
from sahara import context
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import provisioning as common_configs
from sahara.utils import cluster as c_u
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
conductor = cond.API
ntp_opts = [
cfg.StrOpt('default_ntp_server',
default="pool.ntp.org",
help="Default ntp server for time sync")
]
CONF.register_opts(ntp_opts)
def _sudo(remote, cmd):
remote.execute_command(cmd, run_as_root=True)
def _restart_ntp(remote):
distrib = remote.get_os_distrib()
cmd = "service %s restart"
if distrib == 'ubuntu':
cmd = cmd % "ntp"
else:
cmd = cmd % "ntpd"
_sudo(remote, cmd)
def _verify_installation(remote):
distrib = remote.get_os_distrib()
if distrib == 'ubuntu':
return remote.execute_command("dpkg -s ntp")
else:
return remote.execute_command("rpm -q ntp")
def _check_ntp_installed(remote):
try:
exit_code, stdout = _verify_installation(remote)
if exit_code != 0:
return False
return True
except Exception:
return False
def _configure_ntp_on_instance(instance, url):
with context.set_current_instance_id(instance.instance_id):
LOG.debug("Configuring ntp server")
with instance.remote() as r:
if not _check_ntp_installed(r):
# missing ntp service
LOG.warning(_LW("Unable to configure NTP service"))
return
r.append_to_file(
"/etc/ntp.conf", "server {url}".format(url=url),
run_as_root=True)
_restart_ntp(r)
try:
_sudo(r, "ntpdate -u {url}".format(url=url))
except Exception as e:
LOG.debug("Update time on VM failed with error: %s", e)
LOG.info(_LI("NTP successfully configured"))
def is_ntp_enabled(cluster):
target = common_configs.NTP_ENABLED.applicable_target
name = common_configs.NTP_ENABLED.name
cl_configs = cluster.cluster_configs
if target not in cl_configs or name not in cl_configs[target]:
return common_configs.NTP_ENABLED.default_value
return cl_configs[target][name]
def retrieve_ntp_server_url(cluster):
target = common_configs.NTP_URL.applicable_target
name = common_configs.NTP_URL.name
cl_configs = cluster.cluster_configs
if target not in cl_configs or name not in cl_configs[target]:
return CONF.default_ntp_server
return cl_configs[target][name]
def configure_ntp(cluster_id):
cluster = conductor.cluster_get(context.ctx(), cluster_id)
if not is_ntp_enabled(cluster):
        LOG.debug("NTP is disabled for this cluster; skipping configuration")
return
instances = c_u.get_instances(cluster)
url = retrieve_ntp_server_url(cluster)
with context.ThreadGroup() as tg:
for instance in instances:
tg.spawn("configure-ntp-%s" % instance.instance_name,
_configure_ntp_on_instance, instance, url)
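# Usage sketch (assuming this module is importable as
# sahara.service.ntp_service and a request context is already set up):
#
#   from sahara.service import ntp_service
#   ntp_service.configure_ntp(cluster_id)  # spawns one thread per instance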
|
"""
An EBS implementation of the ``IBlockDeviceAPI``.
"""
from subprocess import check_output
import threading
import time
import logging
from uuid import UUID
from bitmath import Byte, GiB
from pyrsistent import PRecord, field, pset
from zope.interface import implementer
from boto import ec2
from boto import config
from boto.ec2.connection import EC2Connection
from boto.utils import get_instance_metadata
from boto.exception import EC2ResponseError
from twisted.python.filepath import FilePath
from eliot import Message
from .blockdevice import (
IBlockDeviceAPI, BlockDeviceVolume, UnknownVolume, AlreadyAttachedVolume,
UnattachedVolume,
)
from ._logging import (
AWS_ACTION, BOTO_EC2RESPONSE_ERROR, NO_AVAILABLE_DEVICE,
NO_NEW_DEVICE_IN_OS, WAITING_FOR_VOLUME_STATUS_CHANGE,
BOTO_LOG_HEADER, IN_USE_DEVICES,
)
DATASET_ID_LABEL = u'flocker-dataset-id'
METADATA_VERSION_LABEL = u'flocker-metadata-version'
CLUSTER_ID_LABEL = u'flocker-cluster-id'
ATTACHED_DEVICE_LABEL = u'attached-device-name'
BOTO_NUM_RETRIES = u'20'
VOLUME_STATE_CHANGE_TIMEOUT = 300
MAX_ATTACH_RETRIES = 3
class EliotLogHandler(logging.Handler):
_to_log = {"Method", "Path", "Params"}
def emit(self, record):
fields = vars(record)
# Only log certain things. The log is massively too verbose
# otherwise.
if fields.get("msg", ":").split(":")[0] in self._to_log:
Message.new(
message_type=BOTO_LOG_HEADER, **fields
).write()
def _enable_boto_logging():
"""
Make boto log activity using Eliot.
"""
logger = logging.getLogger("boto")
logger.addHandler(EliotLogHandler())
# It seems as though basically all boto log messages are at the same
# level. Either we can see all of them or we can see none of them.
# We'll do some extra filtering in the handler.
logger.setLevel(logging.DEBUG)
_enable_boto_logging()
def ec2_client(region, zone, access_key_id, secret_access_key):
"""
Establish connection to EC2 client.
:param str region: The name of the EC2 region to connect to.
:param str zone: The zone for the EC2 region to connect to.
:param str access_key_id: "aws_access_key_id" credential for EC2.
:param str secret_access_key: "aws_secret_access_key" EC2 credential.
:return: An ``_EC2`` giving information about EC2 client connection
and EC2 instance zone.
"""
# Set 2 retry knobs in Boto to BOTO_NUM_RETRIES:
# 1. ``num_retries``:
# Request automatic exponential backoff and retry
# attempts by Boto if an EC2 API call fails with
# ``RequestLimitExceeded`` due to system load.
# 2. ``metadata_service_num_attempts``:
# Request for retry attempts by Boto to
# retrieve data from Metadata Service used to retrieve
# credentials for IAM roles on EC2 instances.
if not config.has_section('Boto'):
config.add_section('Boto')
config.set('Boto', 'num_retries', BOTO_NUM_RETRIES)
config.set('Boto', 'metadata_service_num_attempts', BOTO_NUM_RETRIES)
# Get Boto EC2 connection with ``EC2ResponseError`` logged by Eliot.
connection = ec2.connect_to_region(region,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
return _EC2(zone=zone,
connection=_LoggedBotoConnection(connection=connection))
def _boto_logged_method(method_name, original_name):
"""
Run a boto.ec2.connection.EC2Connection method and
log additional information about any exceptions that are raised.
:param str method_name: The name of the method of the wrapped object to
call.
:param str original_name: The name of the attribute of self where the
wrapped object can be found.
:return: A function which will call the method of the wrapped object and do
the extra exception logging.
"""
def _run_with_logging(self, *args, **kwargs):
"""
Run given boto.ec2.connection.EC2Connection method with exception
logging for ``EC2ResponseError``.
"""
original = getattr(self, original_name)
method = getattr(original, method_name)
# Trace IBlockDeviceAPI ``method`` as Eliot Action.
# See https://clusterhq.atlassian.net/browse/FLOC-2054
# for ensuring all method arguments are serializable.
with AWS_ACTION(operation=[method_name, args, kwargs]):
try:
return method(*args, **kwargs)
except EC2ResponseError as e:
BOTO_EC2RESPONSE_ERROR(
aws_code=e.code,
aws_message=e.message,
aws_request_id=e.request_id,
).write()
raise
return _run_with_logging
def boto_logger(*args, **kwargs):
"""
Decorator to log all callable boto.ec2.connection.EC2Connection
methods.
:return: A function that will decorate all methods of the given
class with Boto exception logging.
"""
def _class_decorator(cls):
for attr in EC2Connection.__dict__:
# Log wrap all callable methods except `__init__`.
if attr != '__init__':
attribute = getattr(EC2Connection, attr)
if callable(attribute):
setattr(cls, attr,
_boto_logged_method(attr, *args, **kwargs))
return cls
return _class_decorator
@boto_logger("connection")
class _LoggedBotoConnection(PRecord):
"""
Wrapper ``PRecord`` around ``boto.ec2.connection.EC2Connection``
to facilitate logging of exceptions from Boto APIs.
:ivar boto.ec2.connection.EC2Connection connection: Object
representing connection to an EC2 instance with logged
``EC2ConnectionError``.
"""
connection = field(mandatory=True)
class _EC2(PRecord):
"""
:ivar str zone: The name of the zone for the connection.
:ivar boto.ec2.connection.EC2Connection connection: Object
representing connection to an EC2 instance.
"""
zone = field(mandatory=True)
connection = field(mandatory=True)
def _blockdevicevolume_from_ebs_volume(ebs_volume):
"""
Helper function to convert Volume information from
EBS format to Flocker block device format.
:param boto.ec2.volume ebs_volume: Volume in EC2 format.
:return: Input volume in BlockDeviceVolume format.
"""
return BlockDeviceVolume(
blockdevice_id=unicode(ebs_volume.id),
size=int(GiB(ebs_volume.size).to_Byte().value),
attached_to=ebs_volume.attach_data.instance_id,
dataset_id=UUID(ebs_volume.tags[DATASET_ID_LABEL])
)
def _wait_for_volume(volume,
start_status,
transient_status,
end_status):
"""
Helper function to wait for a given volume to change state
from ``start_status`` via ``transient_status`` to ``end_status``.
:param boto.ec2.volume volume: Volume to check
status for.
:param unicode start_status: Volume status at starting point.
:param unicode transient_status: Allowed transient state for
volume to be in, on the way to ``end_status``.
:param unicode end_status: Expected destination status for
the input volume.
:raises Exception: When input volume failed to reach
expected destination status.
"""
# It typically takes a few seconds for anything to happen, so start
# out sleeping a little before doing initial check to reduce
# unnecessary polling of the API:
time.sleep(5.0)
# Wait ``VOLUME_STATE_CHANGE_TIMEOUT`` seconds for
# volume status to transition from
# start_status -> transient_status -> end_status.
start_time = time.time()
while time.time() - start_time < VOLUME_STATE_CHANGE_TIMEOUT:
try:
volume.update()
except EC2ResponseError as e:
# If AWS cannot find the volume, raise ``UnknownVolume``.
# (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
# for error details).
if e.code == u'InvalidVolume.NotFound':
raise UnknownVolume(volume.id)
if volume.status == end_status:
return
elif volume.status not in [start_status, transient_status]:
break
time.sleep(1.0)
WAITING_FOR_VOLUME_STATUS_CHANGE(volume_id=volume.id,
status=volume.status,
target_status=end_status,
wait_time=(time.time() - start_time))
# We either:
# 1) Timed out waiting to reach ``end_status``, or,
# 2) Reached an unexpected status (state change did not
# start, or failed).
# Raise an ``Exception`` in both cases.
raise Exception(
'Volume state transition failed. '
'Volume: {!r}, '
'Start Status: {!r}, '
'Transient Status: {!r}, '
'Expected End Status: {!r}, '
'Discovered End Status: {!r},'
'Wait time: {!r},'
'Time limit: {!r}.'.format(
volume, start_status, transient_status, end_status,
volume.status, time.time() - start_time,
VOLUME_STATE_CHANGE_TIMEOUT
)
)
def _get_device_size(device):
"""
Helper function to fetch the size of given block device.
:param unicode device: Name of the block device to fetch size for.
:returns: Size, in SI metric bytes, of device we are interested in.
:rtype: int
"""
device_name = b"/dev/" + device.encode("ascii")
# Retrieve size of device as OS sees it using `lsblk`.
# Requires util-linux-ng package on CentOS, and
# util-linux on Ubuntu.
# Required package is installed by default
# on Ubuntu 14.04 and CentOS 7.
command = [b"/bin/lsblk", b"--noheadings", b"--bytes",
b"--output", b"SIZE", device_name]
# Get the base device size, which is the first line in
# `lsblk` output. Ignore partition sizes.
# XXX: Handle error cases during `check_output()` run
# (https://clusterhq.atlassian.net/browse/FLOC-1886).
command_output = check_output(command).split(b'\n')[0]
device_size = int(command_output.strip().decode("ascii"))
return device_size
def _wait_for_new_device(base, size, time_limit=60):
"""
Helper function to wait for up to 60s for new
EBS block device (`/dev/sd*` or `/dev/xvd*`) to
manifest in the OS.
:param list base: List of baseline block devices
that existed before execution of operation that expects
to create a new block device.
:param int size: Size of the block device we are expected
to manifest in the OS.
:param int time_limit: Time, in seconds, to wait for
new device to manifest. Defaults to 60s.
:returns: formatted string name of the new block device.
:rtype: unicode
"""
start_time = time.time()
elapsed_time = time.time() - start_time
while elapsed_time < time_limit:
for device in list(set(FilePath(b"/sys/block").children()) -
set(base)):
device_name = FilePath.basename(device)
if (device_name.startswith((b"sd", b"xvd")) and
_get_device_size(device_name) == size):
new_device = u'/dev/' + device_name.decode("ascii")
return new_device
time.sleep(0.1)
elapsed_time = time.time() - start_time
# If we failed to find a new device of expected size,
# log sizes of all new devices on this compute instance,
# for debuggability.
new_devices = list(set(FilePath(b"/sys/block").children()) - set(base))
new_devices_size = [_get_device_size(device) for device in new_devices]
NO_NEW_DEVICE_IN_OS(new_devices=new_devices,
new_devices_size=new_devices_size,
expected_size=size,
time_limit=time_limit).write()
return None
def _is_cluster_volume(cluster_id, ebs_volume):
"""
Helper function to check if given volume belongs to
given cluster.
:param UUID cluster_id: UUID of Flocker cluster to check for
membership.
:param boto.ec2.volume ebs_volume: EBS volume to check for
input cluster membership.
:return bool: True if input volume belongs to input
Flocker cluster. False otherwise.
"""
actual_cluster_id = ebs_volume.tags.get(CLUSTER_ID_LABEL)
if actual_cluster_id is not None:
actual_cluster_id = UUID(actual_cluster_id)
if actual_cluster_id == cluster_id:
return True
return False
@implementer(IBlockDeviceAPI)
class EBSBlockDeviceAPI(object):
"""
An EBS implementation of ``IBlockDeviceAPI`` which creates
block devices in an EC2 cluster using Boto APIs.
"""
def __init__(self, ec2_client, cluster_id):
"""
Initialize EBS block device API instance.
:param _EC2 ec2_client: A record of EC2 connection and zone.
:param UUID cluster_id: UUID of cluster for this
API instance.
"""
self.connection = ec2_client.connection
self.zone = ec2_client.zone
self.cluster_id = cluster_id
self.lock = threading.Lock()
def allocation_unit(self):
"""
Return a fixed allocation_unit for now; one which we observe
to work on AWS.
"""
return int(GiB(1).to_Byte().value)
def compute_instance_id(self):
"""
Look up the EC2 instance ID for this node.
"""
return get_instance_metadata()['instance-id'].decode("ascii")
def _get_ebs_volume(self, blockdevice_id):
"""
Lookup EBS Volume information for a given blockdevice_id.
:param unicode blockdevice_id: ID of a blockdevice that needs lookup.
:returns: boto.ec2.volume.Volume for the input id.
:raise UnknownVolume: If no volume with a matching identifier can be
found.
"""
try:
all_volumes = self.connection.get_all_volumes(
volume_ids=[blockdevice_id])
except EC2ResponseError as e:
# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html#CommonErrors
if e.error_code == "InvalidVolume.NotFound":
raise UnknownVolume(blockdevice_id)
else:
raise
for volume in all_volumes:
if volume.id == blockdevice_id:
return volume
raise UnknownVolume(blockdevice_id)
def _next_device(self, instance_id, volumes, devices_in_use):
"""
Get the next available EBS device name for a given EC2 instance.
Algorithm:
1. Get all ``Block devices`` currently in use by given instance:
a) List all volumes visible to this instance.
b) Gather device IDs of all devices attached to (a).
2. Devices available for EBS volume usage are ``/dev/sd[f-p]``.
Find the first device from this set that is currently not
in use.
XXX: Handle lack of free devices in ``/dev/sd[f-p]`` range
(see https://clusterhq.atlassian.net/browse/FLOC-1887).
:param unicode instance_id: EC2 instance ID.
:param volumes: Collection of currently known
``BlockDeviceVolume`` instances.
:param set devices_in_use: Unicode names of devices that are
probably in use based on observed behavior.
:returns unicode file_name: available device name for attaching
EBS volume.
        :returns: ``None`` if all suitable EBS device names on this EC2
            instance are currently occupied.
"""
devices = pset({v.attach_data.device for v in volumes
if v.attach_data.instance_id == instance_id})
devices = devices | devices_in_use
IN_USE_DEVICES(devices=devices).write()
        for suffix in b"fghijklmnop":
file_name = u'/dev/sd' + suffix
if file_name not in devices:
return file_name
# Could not find any suitable device that is available
# for attachment. Log to Eliot before giving up.
NO_AVAILABLE_DEVICE(devices=devices).write()
return None
def create_volume(self, dataset_id, size):
"""
Create a volume on EBS. Store Flocker-specific
{metadata version, cluster id, dataset id} for the volume
as volume tag data.
Open issues: https://clusterhq.atlassian.net/browse/FLOC-1792
"""
requested_volume = self.connection.create_volume(
size=int(Byte(size).to_GiB().value), zone=self.zone)
# Stamp created volume with Flocker-specific tags.
metadata = {
METADATA_VERSION_LABEL: '1',
CLUSTER_ID_LABEL: unicode(self.cluster_id),
DATASET_ID_LABEL: unicode(dataset_id),
}
self.connection.create_tags([requested_volume.id],
metadata)
# Wait for created volume to reach 'available' state.
_wait_for_volume(requested_volume,
start_status=u'',
transient_status=u'creating',
end_status=u'available')
# Return created volume in BlockDeviceVolume format.
return _blockdevicevolume_from_ebs_volume(requested_volume)
def list_volumes(self):
"""
Return all volumes that belong to this Flocker cluster.
"""
volumes = []
for ebs_volume in self.connection.get_all_volumes():
if _is_cluster_volume(self.cluster_id, ebs_volume):
volumes.append(
_blockdevicevolume_from_ebs_volume(ebs_volume)
)
return volumes
def attach_volume(self, blockdevice_id, attach_to):
"""
Attach an EBS volume to given compute instance.
:param unicode blockdevice_id: EBS UUID for volume to be attached.
:param unicode attach_to: Instance id of AWS Compute instance to
            attach the blockdevice to.
:raises UnknownVolume: If there does not exist a BlockDeviceVolume
corresponding to the input blockdevice_id.
:raises AlreadyAttachedVolume: If the input volume is already attached
to a device.
"""
ebs_volume = self._get_ebs_volume(blockdevice_id)
volume = _blockdevicevolume_from_ebs_volume(ebs_volume)
if (volume.attached_to is not None or
ebs_volume.status != 'available'):
raise AlreadyAttachedVolume(blockdevice_id)
ignore_devices = pset([])
attach_attempts = 0
while True:
with self.lock:
# begin lock scope
blockdevices = FilePath(b"/sys/block").children()
volumes = self.connection.get_all_volumes()
device = self._next_device(attach_to, volumes, ignore_devices)
if device is None:
# XXX: Handle lack of free devices in ``/dev/sd[f-p]``.
# (https://clusterhq.atlassian.net/browse/FLOC-1887).
# No point in attempting an ``attach_volume``, return.
return
try:
self.connection.attach_volume(blockdevice_id,
attach_to,
device)
except EC2ResponseError as e:
# If attach failed that is often because of eventual
# consistency in AWS, so let's ignore this one if it
# fails:
if e.code == u'InvalidParameterValue':
attach_attempts += 1
if attach_attempts == MAX_ATTACH_RETRIES:
raise
ignore_devices = ignore_devices.add(device)
else:
raise
else:
# Wait for new device to manifest in the OS. Since there
# is currently no standardized protocol across Linux guests
# in EC2 for mapping `device` to the name device driver
# picked (http://docs.aws.amazon.com/AWSEC2/latest/
# UserGuide/device_naming.html), wait for new block device
# to be available to the OS, and interpret it as ours.
# Wait under lock scope to reduce false positives.
new_device = _wait_for_new_device(blockdevices,
volume.size)
break
# end lock scope
# Stamp EBS volume with attached device name tag.
# If OS fails to see new block device in 60 seconds,
# `new_device` is `None`, indicating the volume failed
# to attach to the compute instance.
metadata = {
ATTACHED_DEVICE_LABEL: unicode(new_device),
}
if new_device is not None:
self.connection.create_tags([ebs_volume.id], metadata)
_wait_for_volume(ebs_volume,
start_status=u'available',
transient_status=u'attaching',
end_status=u'in-use')
attached_volume = volume.set('attached_to', attach_to)
return attached_volume
def detach_volume(self, blockdevice_id):
"""
Detach EBS volume identified by blockdevice_id.
:param unicode blockdevice_id: EBS UUID for volume to be detached.
:raises UnknownVolume: If there does not exist a BlockDeviceVolume
corresponding to the input blockdevice_id.
:raises UnattachedVolume: If the BlockDeviceVolume for the
blockdevice_id is not currently 'in-use'.
"""
ebs_volume = self._get_ebs_volume(blockdevice_id)
volume = _blockdevicevolume_from_ebs_volume(ebs_volume)
if (volume.attached_to is None or
ebs_volume.status != 'in-use'):
raise UnattachedVolume(blockdevice_id)
self.connection.detach_volume(blockdevice_id)
_wait_for_volume(ebs_volume,
start_status=u'in-use',
transient_status=u'detaching',
end_status=u'available')
# Delete attached device metadata from EBS Volume
self.connection.delete_tags([ebs_volume.id], [ATTACHED_DEVICE_LABEL])
def destroy_volume(self, blockdevice_id):
"""
Destroy EBS volume identified by blockdevice_id.
        :param unicode blockdevice_id: EBS UUID for volume to be destroyed.
:raises UnknownVolume: If there does not exist a Flocker cluster
volume identified by input blockdevice_id.
:raises Exception: If we failed to destroy Flocker cluster volume
corresponding to input blockdevice_id.
"""
ebs_volume = self._get_ebs_volume(blockdevice_id)
destroy_result = self.connection.delete_volume(blockdevice_id)
if destroy_result:
try:
_wait_for_volume(ebs_volume,
start_status=u'available',
transient_status=u'deleting',
                                 end_status=u'')
except UnknownVolume:
return
else:
raise Exception(
'Failed to delete volume: {!r}'.format(blockdevice_id)
)
def get_device_path(self, blockdevice_id):
"""
Get device path for the EBS volume corresponding to the given
block device.
:param unicode blockdevice_id: EBS UUID for the volume to look up.
:returns: A ``FilePath`` for the device.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to a host.
"""
ebs_volume = self._get_ebs_volume(blockdevice_id)
volume = _blockdevicevolume_from_ebs_volume(ebs_volume)
if volume.attached_to is None:
raise UnattachedVolume(blockdevice_id)
try:
device = ebs_volume.tags[ATTACHED_DEVICE_LABEL]
except KeyError:
raise UnattachedVolume(blockdevice_id)
if device is None:
raise UnattachedVolume(blockdevice_id)
return FilePath(device)
def aws_from_configuration(region, zone, access_key_id, secret_access_key,
cluster_id):
"""
Build an ``EBSBlockDeviceAPI`` instance using configuration and
credentials.
:param str region: The EC2 region slug. Volumes will be manipulated in
this region.
:param str zone: The EC2 availability zone. Volumes will be manipulated in
this zone.
:param str access_key_id: The EC2 API key identifier to use to make AWS API
calls.
:param str secret_access_key: The EC2 API key to use to make AWS API calls.
:param UUID cluster_id: The unique identifier of the cluster with which to
associate the resulting object. It will only manipulate volumes
belonging to this cluster.
    :return: An ``EBSBlockDeviceAPI`` instance using the given parameters.
"""
return EBSBlockDeviceAPI(
ec2_client=ec2_client(
region=region,
zone=zone,
access_key_id=access_key_id,
secret_access_key=secret_access_key,
),
cluster_id=cluster_id,
)
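# Minimal construction sketch (placeholder region/zone/credentials; this
# talks to AWS if actually run):
#
#   from uuid import uuid4
#   api = aws_from_configuration(
#       region='us-east-1', zone='us-east-1a',
#       access_key_id='AKIA...', secret_access_key='...',
#       cluster_id=uuid4(),
#   )
#   volume = api.create_volume(dataset_id=uuid4(), size=api.allocation_unit())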
|
''' HTTP API extensions of COSC (versus ODL).
@author: Ken Jarrad (kjarrad@cisco.com)
'''
from __future__ import print_function as _print_function
from requests import post
from basics import odl_http
single_url_encode = odl_http.url_encode
double_url_encode = lambda val: single_url_encode(single_url_encode(val))
odl_http.url_encode = double_url_encode
from logging import log, INFO as LOG_LEVEL
def cosc_authentication_token(hostname='localhost', port=8181, username='admin', password='admin'):
""" Obtain authentication token from COSC.
"""
global _cosc_authentication_token
    if '_cosc_authentication_token' not in globals():
        url = "https://%s:%s/controller-auth" % (hostname, port)
log(LOG_LEVEL,'cosc authentication url: %s', url)
form_data = {'grant_type': 'password', 'username': username, 'password':password, 'scope':'sdn'}
log(LOG_LEVEL, 'cosc authentication parameters:')
        for (k, v) in form_data.items():
            log(LOG_LEVEL, ' %s = %s', k, v)
try:
response = post(url, data=form_data, verify=False)
log(LOG_LEVEL, 'cosc authentication status code: %s', response.status_code)
expected_status_code = 201
if response.status_code == expected_status_code:
_cosc_authentication_token = response.json()['access_token']
else:
msg = 'Expected HTTP status code %s, got %d' % (expected_status_code, response.status_code)
if response.text:
raise ValueError(msg, response.text)
else:
raise ValueError(msg)
except Exception as e:
raise ValueError('Unable to obtain COSC authentication token.', url, e)
return _cosc_authentication_token
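# Usage sketch (hypothetical controller address; requires a reachable COSC):
#
#   token = cosc_authentication_token(hostname='cosc.example.net',
#                                     username='admin', password='admin')
#   # the token is cached in this module's globals for subsequent calls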
|
import os
import argparse
def generate_metricset(base_path, metricbeat_path, module, metricset):
generate_module(base_path, metricbeat_path, module, metricset)
metricset_path = base_path + "/module/" + module + "/" + metricset
meta_path = metricset_path + "/_meta"
if os.path.isdir(metricset_path):
print("Metricset already exists. Skipping creating metricset {}"
.format(metricset))
return
os.makedirs(meta_path)
templates = metricbeat_path + "/scripts/module/metricset/"
content = load_file(templates + "metricset.go.tmpl", module, metricset)
with open(metricset_path + "/" + metricset + ".go", "w") as f:
f.write(content)
content = load_file(templates + "fields.yml", module, metricset)
with open(meta_path + "/fields.yml", "w") as f:
f.write(content)
content = load_file(templates + "docs.asciidoc", module, metricset)
with open(meta_path + "/docs.asciidoc", "w") as f:
f.write(content)
content = load_file(templates + "data.json", module, metricset)
with open(meta_path + "/data.json", "w") as f:
f.write(content)
print("Metricset {} created.".format(metricset))
def generate_module(base_path, metricbeat_path, module, metricset):
module_path = base_path + "/module/" + module
meta_path = module_path + "/_meta"
if os.path.isdir(module_path):
print("Module already exists. Skipping creating module {}"
.format(module))
return
os.makedirs(meta_path)
templates = metricbeat_path + "/scripts/module/"
content = load_file(templates + "fields.yml", module, "")
with open(meta_path + "/fields.yml", "w") as f:
f.write(content)
content = load_file(templates + "docs.asciidoc", module, "")
with open(meta_path + "/docs.asciidoc", "w") as f:
f.write(content)
content = load_file(templates + "config.yml", module, metricset)
with open(meta_path + "/config.yml", "w") as f:
f.write(content)
content = load_file(templates + "doc.go.tmpl", module, "")
with open(module_path + "/doc.go", "w") as f:
f.write(content)
print("Module {} created.".format(module))
def load_file(file, module, metricset):
content = ""
with open(file) as f:
content = f.read()
return content.replace("{module}", module).replace("{metricset}",
metricset)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Creates a metricset")
parser.add_argument("--module", help="Module name")
parser.add_argument("--metricset", help="Metricset name")
parser.add_argument("--path", help="Beat path")
parser.add_argument("--es_beats",
help="The path to the general beats folder")
args = parser.parse_args()
if args.path is None:
args.path = './'
        print("Using default beat path: " + args.path)
if args.es_beats is None:
args.es_beats = '../'
        print("Using default es_beats path: " + args.es_beats)
if args.module is None or args.module == '':
args.module = raw_input("Module name: ")
if args.metricset is None or args.metricset == '':
args.metricset = raw_input("Metricset name: ")
path = os.path.abspath(args.path)
metricbeat_path = os.path.abspath(args.es_beats + "/metricbeat")
generate_metricset(path, metricbeat_path, args.module.lower(),
args.metricset.lower())
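# Example invocation (the script name and paths are illustrative; run from
# the beat's root directory):
#
#   python scripts/create_metricset.py --path . --es_beats ../beats \
#       --module mymodule --metricset mymetricset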
|
"""Worker utilities for representing MapTasks.
Each MapTask represents a sequence of ParallelInstruction(s): read from a
source, write to a sink, parallel do, etc.
"""
import collections
from apache_beam import coders
from apache_beam.runners import common
def build_worker_instruction(*args):
"""Create an object representing a ParallelInstruction protobuf.
This will be a collections.namedtuple with a custom __str__ method.
Alas, this wrapper is not known to pylint, which thinks it creates
constants. You may have to put a disable=invalid-name pylint
annotation on any use of this, depending on your names.
Args:
*args: first argument is the name of the type to create. Should
      start with "Worker". The second argument is a list of the
attributes of this object.
Returns:
A new class, a subclass of tuple, that represents the protobuf.
"""
tuple_class = collections.namedtuple(*args)
tuple_class.__str__ = worker_object_to_string
tuple_class.__repr__ = worker_object_to_string
return tuple_class
def worker_printable_fields(workerproto):
"""Returns the interesting fields of a Worker* object."""
return ['%s=%s' % (name, value)
# _asdict is the only way and cannot subclass this generated class
# pylint: disable=protected-access
for name, value in workerproto._asdict().iteritems()
# want to output value 0 but not None nor []
if (value or value == 0)
and name not in
('coder', 'coders', 'output_coders',
'elements',
'combine_fn', 'serialized_fn', 'window_fn',
'append_trailing_newlines', 'strip_trailing_newlines',
'compression_type', 'context',
'start_shuffle_position', 'end_shuffle_position',
'shuffle_reader_config', 'shuffle_writer_config')]
def worker_object_to_string(worker_object):
"""Returns a string compactly representing a Worker* object."""
return '%s(%s)' % (worker_object.__class__.__name__,
', '.join(worker_printable_fields(worker_object)))
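# Illustration of the factory above ('WorkerExample' is hypothetical; the
# real instruction types are defined below):
#
#   _WorkerExample = build_worker_instruction('WorkerExample', ['input'])
#   str(_WorkerExample(input=(0, 0)))  # -> "WorkerExample(input=(0, 0))"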
WorkerRead = build_worker_instruction(
'WorkerRead', ['source', 'output_coders'])
"""Worker details needed to read from a source.
Attributes:
source: a source object.
output_coders: 1-tuple of the coder for the output.
"""
WorkerSideInputSource = build_worker_instruction(
'WorkerSideInputSource', ['source', 'tag'])
"""Worker details needed to read from a side input source.
Attributes:
source: a source object.
tag: string tag for this side input.
"""
WorkerGroupingShuffleRead = build_worker_instruction(
'WorkerGroupingShuffleRead',
['start_shuffle_position', 'end_shuffle_position',
'shuffle_reader_config', 'coder', 'output_coders'])
"""Worker details needed to read from a grouping shuffle source.
Attributes:
start_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to start reading.
end_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to stop reading.
shuffle_reader_config: An opaque string used to initialize the shuffle
reader. Contains things like connection endpoints for the shuffle
server appliance and various options.
coder: The KV coder used to decode shuffle entries.
output_coders: 1-tuple of the coder for the output.
"""
WorkerUngroupedShuffleRead = build_worker_instruction(
'WorkerUngroupedShuffleRead',
['start_shuffle_position', 'end_shuffle_position',
'shuffle_reader_config', 'coder', 'output_coders'])
"""Worker details needed to read from an ungrouped shuffle source.
Attributes:
start_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to start reading.
end_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to stop reading.
shuffle_reader_config: An opaque string used to initialize the shuffle
reader. Contains things like connection endpoints for the shuffle
server appliance and various options.
coder: The value coder used to decode shuffle entries.
"""
WorkerWrite = build_worker_instruction(
'WorkerWrite', ['sink', 'input', 'output_coders'])
"""Worker details needed to write to a sink.
Attributes:
sink: a sink object.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple, coder to use to estimate bytes written.
"""
WorkerInMemoryWrite = build_worker_instruction(
'WorkerInMemoryWrite',
['output_buffer', 'write_windowed_values', 'input', 'output_coders'])
"""Worker details needed to write to a in-memory sink.
Used only for unit testing. It makes worker tests less cluttered with code like
"write to a file and then check file contents".
Attributes:
output_buffer: list to which output elements will be appended
write_windowed_values: whether to record the entire WindowedValue outputs,
or just the raw (unwindowed) value
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple, coder to use to estimate bytes written.
"""
WorkerShuffleWrite = build_worker_instruction(
'WorkerShuffleWrite',
['shuffle_kind', 'shuffle_writer_config', 'input', 'output_coders'])
"""Worker details needed to write to a shuffle sink.
Attributes:
shuffle_kind: A string describing the shuffle kind. This can control the
way the worker interacts with the shuffle sink. The possible values are:
'ungrouped', 'group_keys', and 'group_keys_and_sort_values'.
shuffle_writer_config: An opaque string used to initialize the shuffle
write. Contains things like connection endpoints for the shuffle
server appliance and various options.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple of the coder for input elements. If the
shuffle_kind is grouping, this is expected to be a KV coder.
"""
WorkerDoFn = build_worker_instruction(
'WorkerDoFn',
['serialized_fn', 'output_tags', 'input', 'side_inputs', 'output_coders'])
"""Worker details needed to run a DoFn.
Attributes:
serialized_fn: A serialized DoFn object to be run for each input element.
output_tags: The string tags used to identify the outputs of a ParDo
operation. The tag is present even if the ParDo has just one output
    (e.g., ['out']).
output_coders: array of coders, one for each output.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
side_inputs: A list of Worker...Read instances describing sources to be
used for getting values. The types supported right now are
WorkerInMemoryRead and WorkerTextRead.
"""
WorkerReifyTimestampAndWindows = build_worker_instruction(
'WorkerReifyTimestampAndWindows',
['output_tags', 'input', 'output_coders'])
"""Worker details needed to run a WindowInto.
Attributes:
output_tags: The string tags used to identify the outputs of a ParDo
operation. The tag is present even if the ParDo has just one output
    (e.g., ['out']).
output_coders: array of coders, one for each output.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
"""
WorkerMergeWindows = build_worker_instruction(
'WorkerMergeWindows',
['window_fn', 'combine_fn', 'phase', 'output_tags', 'input', 'coders',
'context', 'output_coders'])
"""Worker details needed to run a MergeWindows (aka. GroupAlsoByWindows).
Attributes:
window_fn: A serialized Windowing object representing the windowing strategy.
combine_fn: A serialized CombineFn object to be used after executing the
GroupAlsoByWindows operation. May be None if not a combining operation.
phase: Possible values are 'all', 'add', 'merge', and 'extract'.
A runner optimizer may split the user combiner in 3 separate
phases (ADD, MERGE, and EXTRACT), on separate VMs, as it sees
fit. The phase attribute dictates which DoFn is actually running in
the worker. May be None if not a combining operation.
output_tags: The string tags used to identify the outputs of a ParDo
operation. The tag is present even if the ParDo has just one output
    (e.g., ['out']).
output_coders: array of coders, one for each output.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
coders: A 2-tuple of coders (key, value) to encode shuffle entries.
context: The ExecutionContext object for the current work item.
"""
WorkerCombineFn = build_worker_instruction(
'WorkerCombineFn',
['serialized_fn', 'phase', 'input', 'output_coders'])
"""Worker details needed to run a CombineFn.
Attributes:
serialized_fn: A serialized CombineFn object to be used.
phase: Possible values are 'all', 'add', 'merge', and 'extract'.
A runner optimizer may split the user combiner in 3 separate
phases (ADD, MERGE, and EXTRACT), on separate VMs, as it sees
fit. The phase attribute dictates which DoFn is actually running in
the worker.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple of the coder for the output.
"""
WorkerPartialGroupByKey = build_worker_instruction(
'WorkerPartialGroupByKey',
['combine_fn', 'input', 'output_coders'])
"""Worker details needed to run a partial group-by-key.
Attributes:
combine_fn: A serialized CombineFn object to be used.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple of the coder for the output.
"""
WorkerFlatten = build_worker_instruction(
'WorkerFlatten',
['inputs', 'output_coders'])
"""Worker details needed to run a Flatten.
Attributes:
inputs: A list of tuples, each (producer index, output index), representing
the ParallelInstruction operations whose output feeds into this operation.
The output index is 0 unless the input is from a multi-output
operation (such as ParDo).
output_coders: 1-tuple of the coder for the output.
"""
def get_coder_from_spec(coder_spec):
"""Return a coder instance from a coder spec.
Args:
    coder_spec: A dict where the value of the '@type' key is a pickled
      Coder instance or one of the well-known 'kind:*' coder identifiers.
Returns:
A coder instance (has encode/decode methods).
"""
assert coder_spec is not None
# Ignore the wrappers in these encodings.
ignored_wrappers = (
'com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder')
if coder_spec['@type'] in ignored_wrappers:
assert len(coder_spec['component_encodings']) == 1
coder_spec = coder_spec['component_encodings'][0]
return get_coder_from_spec(coder_spec)
# Handle a few well known types of coders.
if coder_spec['@type'] == 'kind:pair':
assert len(coder_spec['component_encodings']) == 2
component_coders = [
get_coder_from_spec(c) for c in coder_spec['component_encodings']]
return coders.TupleCoder(component_coders)
elif coder_spec['@type'] == 'kind:stream':
assert len(coder_spec['component_encodings']) == 1
return coders.IterableCoder(
get_coder_from_spec(coder_spec['component_encodings'][0]))
elif coder_spec['@type'] == 'kind:windowed_value':
assert len(coder_spec['component_encodings']) == 2
value_coder, window_coder = [
get_coder_from_spec(c) for c in coder_spec['component_encodings']]
return coders.coders.WindowedValueCoder(
value_coder, window_coder=window_coder)
elif coder_spec['@type'] == 'kind:interval_window':
assert ('component_encodings' not in coder_spec
or not coder_spec['component_encodings'])
return coders.coders.IntervalWindowCoder()
elif coder_spec['@type'] == 'kind:global_window':
assert ('component_encodings' not in coder_spec
or not coder_spec['component_encodings'])
return coders.coders.GlobalWindowCoder()
elif coder_spec['@type'] == 'kind:length_prefix':
assert len(coder_spec['component_encodings']) == 1
return coders.coders.LengthPrefixCoder(
get_coder_from_spec(coder_spec['component_encodings'][0]))
elif coder_spec['@type'] == 'kind:bytes':
assert ('component_encodings' not in coder_spec
            or not coder_spec['component_encodings'])
return coders.BytesCoder()
# We pass coders in the form "<coder_name>$<pickled_data>" to make the job
# description JSON more readable.
return coders.coders.deserialize_coder(coder_spec['@type'])
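# Example spec (illustrative): a 'kind:pair' of two 'kind:bytes' components
# decodes, via the branches above, to TupleCoder([BytesCoder(), BytesCoder()]).
#
#   get_coder_from_spec({
#       '@type': 'kind:pair',
#       'component_encodings': [{'@type': 'kind:bytes'},
#                               {'@type': 'kind:bytes'}],
#   })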
class MapTask(object):
"""A map task decoded into operations and ready to be executed.
Attributes:
operations: A list of Worker* object created by parsing the instructions
within the map task.
stage_name: The name of this map task execution stage.
system_names: The system names of the step corresponding to each map task
operation in the execution graph.
step_names: The user-given names of the step corresponding to each map task
operation (e.g. Foo/Bar/ParDo).
original_names: The internal name of a step in the original workflow graph.
name_contexts: A common.NameContext object containing name information
about a step.
"""
def __init__(self, operations, stage_name,
system_names=None,
step_names=None,
original_names=None,
name_contexts=None):
self.operations = operations
self.stage_name = stage_name
# TODO(BEAM-4028): Remove arguments other than name_contexts.
self.name_contexts = name_contexts or self._make_name_contexts(
original_names, step_names, system_names)
@staticmethod
def _make_name_contexts(original_names, user_names, system_names):
# TODO(BEAM-4028): Remove method once map task relies on name contexts.
return [common.DataflowNameContext(step_name, user_name, system_name)
for step_name, user_name, system_name in zip(original_names,
user_names,
system_names)]
@property
def system_names(self):
"""Returns a list containing the system names of steps.
A System name is the name of a step in the optimized Dataflow graph.
"""
return [nc.system_name for nc in self.name_contexts]
@property
def original_names(self):
"""Returns a list containing the original names of steps.
An original name is the internal name of a step in the Dataflow graph
(e.g. 's2').
"""
return [nc.step_name for nc in self.name_contexts]
@property
def step_names(self):
"""Returns a list containing the user names of steps.
    In this context, a step name is the user-given name of a step in the
    Dataflow graph (e.g. 'Foo/Bar/ParDo').
"""
return [nc.user_name for nc in self.name_contexts]
def __str__(self):
return '<%s %s steps=%s>' % (self.__class__.__name__, self.stage_name,
'+'.join(self.step_names))
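# Construction sketch (operations elided; 's2'/'F8' are illustrative names,
# and DataflowNameContext comes from the apache_beam.runners.common import
# above):
#
#   ctxs = [common.DataflowNameContext('s2', 'Foo/Bar/ParDo', 'F8')]
#   task = MapTask(operations=[], stage_name='S01', name_contexts=ctxs)
#   task.step_names      # -> ['Foo/Bar/ParDo']
#   task.original_names  # -> ['s2']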
|
from synapse.util.caches import register_cache
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
class ExpiringCache(object):
def __init__(self, cache_name, clock, max_len=0, expiry_ms=0,
reset_expiry_on_get=False, iterable=False):
"""
Args:
cache_name (str): Name of this cache, used for logging.
clock (Clock)
max_len (int): Max size of dict. If the dict grows larger than this
then the oldest items get automatically evicted. Default is 0,
which indicates there is no max limit.
expiry_ms (int): How long before an item is evicted from the cache
in milliseconds. Default is 0, indicating items never get
evicted based on time.
reset_expiry_on_get (bool): If true, will reset the expiry time for
an item on access. Defaults to False.
iterable (bool): If true, the size is calculated by summing the
sizes of all entries, rather than the number of entries.
"""
self._cache_name = cache_name
self._clock = clock
self._max_len = max_len
self._expiry_ms = expiry_ms
self._reset_expiry_on_get = reset_expiry_on_get
self._cache = OrderedDict()
self.metrics = register_cache(cache_name, self)
self.iterable = iterable
self._size_estimate = 0
def start(self):
if not self._expiry_ms:
# Don't bother starting the loop if things never expire
return
def f():
self._prune_cache()
self._clock.looping_call(f, self._expiry_ms / 2)
def __setitem__(self, key, value):
now = self._clock.time_msec()
self._cache[key] = _CacheEntry(now, value)
if self.iterable:
self._size_estimate += len(value)
# Evict if there are now too many items
while self._max_len and len(self) > self._max_len:
_key, value = self._cache.popitem(last=False)
if self.iterable:
self._size_estimate -= len(value.value)
def __getitem__(self, key):
try:
entry = self._cache[key]
self.metrics.inc_hits()
except KeyError:
self.metrics.inc_misses()
raise
if self._reset_expiry_on_get:
entry.time = self._clock.time_msec()
return entry.value
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def _prune_cache(self):
if not self._expiry_ms:
# zero expiry time means don't expire. This should never get called
# since we have this check in start too.
return
begin_length = len(self)
now = self._clock.time_msec()
keys_to_delete = set()
for key, cache_entry in self._cache.items():
if now - cache_entry.time > self._expiry_ms:
keys_to_delete.add(key)
for k in keys_to_delete:
value = self._cache.pop(k)
if self.iterable:
self._size_estimate -= len(value.value)
logger.debug(
"[%s] _prune_cache before: %d, after len: %d",
self._cache_name, begin_length, len(self)
)
def __len__(self):
if self.iterable:
return self._size_estimate
else:
return len(self._cache)
class _CacheEntry(object):
def __init__(self, time, value):
self.time = time
self.value = value
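# Usage sketch (the clock argument is synapse's Clock, which provides
# time_msec() and looping_call(); hs.get_clock() below is the usual way to
# obtain one):
#
#   cache = ExpiringCache("example_cache", hs.get_clock(),
#                         max_len=100, expiry_ms=30 * 60 * 1000)
#   cache.start()           # schedules periodic pruning
#   cache["key"] = "value"  # oldest entries evicted once max_len is exceeded
#   cache.get("key")        # -> "value" (or the default once expired)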
|
import datetime
import iso8601
import netaddr
from oslo_utils import timeutils
import six
from nova.network import model as network_model
from nova.objects import base as obj_base
from nova.objects import fields
from nova import test
class FakeFieldType(fields.FieldType):
def coerce(self, obj, attr, value):
return '*%s*' % value
def to_primitive(self, obj, attr, value):
return '!%s!' % value
def from_primitive(self, obj, attr, value):
return value[1:-1]
class FakeEnum(fields.Enum):
FROG = "frog"
PLATYPUS = "platypus"
ALLIGATOR = "alligator"
ALL = (FROG, PLATYPUS, ALLIGATOR)
def __init__(self, **kwargs):
super(FakeEnum, self).__init__(valid_values=FakeEnum.ALL,
**kwargs)
class FakeEnumAlt(fields.Enum):
FROG = "frog"
PLATYPUS = "platypus"
AARDVARK = "aardvark"
ALL = (FROG, PLATYPUS, AARDVARK)
def __init__(self, **kwargs):
super(FakeEnumAlt, self).__init__(valid_values=FakeEnumAlt.ALL,
**kwargs)
class FakeEnumField(fields.BaseEnumField):
AUTO_TYPE = FakeEnum()
class FakeEnumAltField(fields.BaseEnumField):
AUTO_TYPE = FakeEnumAlt()
class TestField(test.NoDBTestCase):
def setUp(self):
super(TestField, self).setUp()
self.field = fields.Field(FakeFieldType())
self.coerce_good_values = [('foo', '*foo*')]
self.coerce_bad_values = []
self.to_primitive_values = [('foo', '!foo!')]
self.from_primitive_values = [('!foo!', 'foo')]
def test_coerce_good_values(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val))
def test_coerce_bad_values(self):
for in_val in self.coerce_bad_values:
self.assertRaises((TypeError, ValueError),
self.field.coerce, 'obj', 'attr', in_val)
def test_to_primitive(self):
for in_val, prim_val in self.to_primitive_values:
self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr',
in_val))
def test_from_primitive(self):
class ObjectLikeThing(object):
_context = 'context'
for prim_val, out_val in self.from_primitive_values:
self.assertEqual(out_val, self.field.from_primitive(
ObjectLikeThing, 'attr', prim_val))
def test_stringify(self):
self.assertEqual('123', self.field.stringify(123))
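# Pattern note: the concrete Test* classes below are data-driven; each one
# only overrides setUp() with (input, expected) pairs and inherits the
# assertions above. A new field test would follow the same shape
# (hypothetical example):
#
#   class TestMyString(TestField):
#       def setUp(self):
#           super(TestMyString, self).setUp()
#           self.field = fields.StringField()
#           self.coerce_good_values = [('foo', 'foo')]
#           self.coerce_bad_values = [None]
#           self.to_primitive_values = self.coerce_good_values[0:1]
#           self.from_primitive_values = self.coerce_good_values[0:1]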
class TestString(TestField):
def setUp(self):
super(TestString, self).setUp()
self.field = fields.StringField()
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((long(1), '1'))
self.coerce_bad_values = [None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'123'", self.field.stringify(123))
class TestBaseEnum(TestField):
def setUp(self):
super(TestBaseEnum, self).setUp()
self.field = FakeEnumField()
self.coerce_good_values = [('frog', 'frog'),
('platypus', 'platypus'),
('alligator', 'alligator')]
self.coerce_bad_values = ['aardvark', 'wookie']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'platypus'", self.field.stringify('platypus'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'aardvark')
def test_fingerprint(self):
        # NOTE(yjiang5): make sure changing valid_values will be detected
# in test_objects.test_versions
field1 = FakeEnumField()
field2 = FakeEnumAltField()
self.assertNotEqual(str(field1), str(field2))
class TestEnum(TestField):
def setUp(self):
super(TestEnum, self).setUp()
self.field = fields.EnumField(
valid_values=['foo', 'bar', 1, 1, True])
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((long(1), '1'))
self.coerce_bad_values = ['boo', 2, False]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'foo'", self.field.stringify('foo'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, '123')
def test_fingerprint(self):
        # NOTE(yjiang5): make sure changing valid_values will be detected
# in test_objects.test_versions
field1 = fields.EnumField(valid_values=['foo', 'bar'])
field2 = fields.EnumField(valid_values=['foo', 'bar1'])
self.assertNotEqual(str(field1), str(field2))
def test_without_valid_values(self):
self.assertRaises(ValueError, fields.EnumField, 1)
def test_with_empty_values(self):
self.assertRaises(ValueError, fields.EnumField, [])
class TestArchitecture(TestField):
def setUp(self):
super(TestArchitecture, self).setUp()
self.field = fields.ArchitectureField()
self.coerce_good_values = [('x86_64', 'x86_64'),
('amd64', 'x86_64'),
('I686', 'i686'),
('i386', 'i686')]
self.coerce_bad_values = ['x86_99']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'aarch64'", self.field.stringify('aarch64'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'ppc42')
class TestCPUMode(TestField):
def setUp(self):
super(TestCPUMode, self).setUp()
self.field = fields.CPUModeField()
self.coerce_good_values = [('host-model', 'host-model'),
('host-passthrough', 'host-passthrough'),
('custom', 'custom')]
self.coerce_bad_values = ['magic']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'custom'", self.field.stringify('custom'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'magic')
class TestCPUMatch(TestField):
def setUp(self):
super(TestCPUMatch, self).setUp()
self.field = fields.CPUMatchField()
self.coerce_good_values = [('exact', 'exact'),
('strict', 'strict'),
('minimum', 'minimum')]
self.coerce_bad_values = ['best']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'exact'", self.field.stringify('exact'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'best')
class TestCPUFeaturePolicy(TestField):
def setUp(self):
super(TestCPUFeaturePolicy, self).setUp()
self.field = fields.CPUFeaturePolicyField()
self.coerce_good_values = [('force', 'force'),
('require', 'require'),
('optional', 'optional'),
('disable', 'disable'),
('forbid', 'forbid')]
self.coerce_bad_values = ['disallow']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'forbid'", self.field.stringify('forbid'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'disallow')
class TestDiskBus(TestField):
def setUp(self):
super(TestDiskBus, self).setUp()
self.field = fields.DiskBusField()
self.coerce_good_values = [('fdc', 'fdc'),
('ide', 'ide'),
('sata', 'sata'),
('scsi', 'scsi'),
('usb', 'usb'),
('virtio', 'virtio'),
('xen', 'xen')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'ide'", self.field.stringify('ide'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestHVType(TestField):
def setUp(self):
super(TestHVType, self).setUp()
self.field = fields.HVTypeField()
self.coerce_good_values = [('baremetal', 'baremetal'),
('bhyve', 'bhyve'),
('fake', 'fake'),
('kvm', 'kvm'),
('xapi', 'xen'),
('powervm', 'phyp')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'xen'", self.field.stringify('xen'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestOSType(TestField):
def setUp(self):
super(TestOSType, self).setUp()
self.field = fields.OSTypeField()
self.coerce_good_values = [('linux', 'linux'),
('windows', 'windows'),
('WINDOWS', 'windows')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'linux'", self.field.stringify('linux'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestRNGModel(TestField):
def setUp(self):
super(TestRNGModel, self).setUp()
self.field = fields.RNGModelField()
self.coerce_good_values = [('virtio', 'virtio'), ]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'virtio'", self.field.stringify('virtio'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestSCSIModel(TestField):
def setUp(self):
super(TestSCSIModel, self).setUp()
self.field = fields.SCSIModelField()
self.coerce_good_values = [('buslogic', 'buslogic'),
('ibmvscsi', 'ibmvscsi'),
('lsilogic', 'lsilogic'),
('lsisas1068', 'lsisas1068'),
('lsisas1078', 'lsisas1078'),
('virtio-scsi', 'virtio-scsi'),
('vmpvscsi', 'vmpvscsi'),
('lsilogicsas', 'lsisas1068'),
('paravirtual', 'vmpvscsi')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'vmpvscsi'", self.field.stringify('vmpvscsi'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestVideoModel(TestField):
def setUp(self):
super(TestVideoModel, self).setUp()
self.field = fields.VideoModelField()
self.coerce_good_values = [('cirrus', 'cirrus'),
('qxl', 'qxl'),
('vga', 'vga'),
('vmvga', 'vmvga'),
('xen', 'xen')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'cirrus'", self.field.stringify('cirrus'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestVIFModel(TestField):
def setUp(self):
super(TestVIFModel, self).setUp()
self.field = fields.VIFModelField()
self.coerce_good_values = [('virtio', 'virtio'),
('ne2k_pci', 'ne2k_pci'),
('pcnet', 'pcnet'),
('rtl8139', 'rtl8139'),
('e1000', 'e1000'),
('e1000e', 'e1000e'),
('netfront', 'netfront'),
('spapr-vlan', 'spapr-vlan'),
('VirtualE1000', 'e1000'),
('VirtualE1000e', 'e1000e'),
('VirtualPCNet32', 'pcnet'),
('VirtualSriovEthernetCard', 'sriov'),
('VirtualVmxnet', 'vmxnet'),
('VirtualVmxnet3', 'vmxnet3'),
]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'e1000'", self.field.stringify('e1000'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestVMMode(TestField):
def setUp(self):
super(TestVMMode, self).setUp()
self.field = fields.VMModeField()
self.coerce_good_values = [('hvm', 'hvm'),
('xen', 'xen'),
('uml', 'uml'),
('exe', 'exe'),
('pv', 'xen'),
('hv', 'hvm'),
('baremetal', 'hvm')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'hvm'", self.field.stringify('hvm'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestWatchdogAction(TestField):
def setUp(self):
super(TestWatchdogAction, self).setUp()
self.field = fields.WatchdogActionField()
self.coerce_good_values = [('none', 'none'),
('pause', 'pause'),
('poweroff', 'poweroff'),
('reset', 'reset')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'reset'", self.field.stringify('reset'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestInteger(TestField):
def setUp(self):
super(TestInteger, self).setUp()
self.field = fields.IntegerField()
self.coerce_good_values = [(1, 1), ('1', 1)]
self.coerce_bad_values = ['foo', None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestFloat(TestField):
def setUp(self):
super(TestFloat, self).setUp()
self.field = fields.FloatField()
self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)]
self.coerce_bad_values = ['foo', None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestBoolean(TestField):
def setUp(self):
super(TestBoolean, self).setUp()
self.field = fields.BooleanField()
self.coerce_good_values = [(True, True), (False, False), (1, True),
('foo', True), (0, False), ('', False)]
self.coerce_bad_values = []
self.to_primitive_values = self.coerce_good_values[0:2]
self.from_primitive_values = self.coerce_good_values[0:2]
class TestFlexibleBoolean(TestField):
def setUp(self):
super(TestFlexibleBoolean, self).setUp()
self.field = fields.FlexibleBooleanField()
self.coerce_good_values = [(True, True), (False, False),
("true", True), ("false", False),
("t", True), ("f", False),
("yes", True), ("no", False),
("y", True), ("n", False),
("on", True), ("off", False),
(1, True), (0, False),
('frog', False), ('', False)]
self.coerce_bad_values = []
self.to_primitive_values = self.coerce_good_values[0:2]
self.from_primitive_values = self.coerce_good_values[0:2]
class TestDateTime(TestField):
def setUp(self):
super(TestDateTime, self).setUp()
self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.Utc())
self.field = fields.DateTimeField()
self.coerce_good_values = [(self.dt, self.dt),
(timeutils.isotime(self.dt), self.dt)]
self.coerce_bad_values = [1, 'foo']
self.to_primitive_values = [(self.dt, timeutils.isotime(self.dt))]
self.from_primitive_values = [(timeutils.isotime(self.dt), self.dt)]
def test_stringify(self):
self.assertEqual(
'1955-11-05T18:00:00Z',
self.field.stringify(
datetime.datetime(1955, 11, 5, 18, 0, 0,
tzinfo=iso8601.iso8601.Utc())))
class TestIPAddress(TestField):
def setUp(self):
super(TestIPAddress, self).setUp()
self.field = fields.IPAddressField()
self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')),
('::1', netaddr.IPAddress('::1')),
(netaddr.IPAddress('::1'),
netaddr.IPAddress('::1'))]
self.coerce_bad_values = ['1-2', 'foo']
self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4'),
(netaddr.IPAddress('::1'), '::1')]
self.from_primitive_values = [('1.2.3.4',
netaddr.IPAddress('1.2.3.4')),
('::1',
netaddr.IPAddress('::1'))]
class TestIPAddressV4(TestField):
def setUp(self):
super(TestIPAddressV4, self).setUp()
self.field = fields.IPV4AddressField()
self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')),
(netaddr.IPAddress('1.2.3.4'),
netaddr.IPAddress('1.2.3.4'))]
self.coerce_bad_values = ['1-2', 'foo', '::1']
self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4')]
self.from_primitive_values = [('1.2.3.4',
netaddr.IPAddress('1.2.3.4'))]
class TestIPAddressV6(TestField):
def setUp(self):
super(TestIPAddressV6, self).setUp()
self.field = fields.IPV6AddressField()
self.coerce_good_values = [('::1', netaddr.IPAddress('::1')),
(netaddr.IPAddress('::1'),
netaddr.IPAddress('::1'))]
self.coerce_bad_values = ['1.2', 'foo', '1.2.3.4']
self.to_primitive_values = [(netaddr.IPAddress('::1'), '::1')]
self.from_primitive_values = [('::1',
netaddr.IPAddress('::1'))]
class TestDict(TestField):
def setUp(self):
super(TestDict, self).setUp()
self.field = fields.Field(fields.Dict(FakeFieldType()))
self.coerce_good_values = [({'foo': 'bar'}, {'foo': '*bar*'}),
({'foo': 1}, {'foo': '*1*'})]
self.coerce_bad_values = [{1: 'bar'}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': '!bar!'})]
self.from_primitive_values = [({'foo': '!bar!'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{key=val}", self.field.stringify({'key': 'val'}))
class TestDictOfStrings(TestField):
def setUp(self):
super(TestDictOfStrings, self).setUp()
self.field = fields.DictOfStringsField()
self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
({'foo': 1}, {'foo': '1'})]
self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{key='val'}", self.field.stringify({'key': 'val'}))
class TestDictOfIntegers(TestField):
def setUp(self):
super(TestDictOfIntegers, self).setUp()
self.field = fields.DictOfIntegersField()
self.coerce_good_values = [({'foo': '42'}, {'foo': 42}),
({'foo': 4.2}, {'foo': 4})]
self.coerce_bad_values = [{1: 'bar'}, {'foo': 'boo'},
'foo', {'foo': None}]
self.to_primitive_values = [({'foo': 42}, {'foo': 42})]
self.from_primitive_values = [({'foo': 42}, {'foo': 42})]
def test_stringify(self):
self.assertEqual("{key=42}", self.field.stringify({'key': 42}))
class TestDictOfStringsNone(TestField):
def setUp(self):
super(TestDictOfStringsNone, self).setUp()
self.field = fields.DictOfNullableStringsField()
self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
({'foo': 1}, {'foo': '1'}),
({'foo': None}, {'foo': None})]
self.coerce_bad_values = [{1: 'bar'}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{k2=None,key='val'}",
self.field.stringify({'k2': None,
'key': 'val'}))
class TestListOfDictOfNullableStringsField(TestField):
def setUp(self):
super(TestListOfDictOfNullableStringsField, self).setUp()
self.field = fields.ListOfDictOfNullableStringsField()
self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}],
[{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]),
([{'f': 1}, {'f1': 'b1'}],
[{'f': '1'}, {'f1': 'b1'}]),
([{'foo': None}], [{'foo': None}])]
self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']]
self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}],
[{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])]
self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'},
{'f2': None}],
[{'f': 'b'}, {'f1': 'b1'},
{'f2': None}])]
def test_stringify(self):
self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]",
self.field.stringify(
[{'f': None, 'f1': 'b1'}, {'f2': 'b2'}]))
class TestList(TestField):
def setUp(self):
super(TestList, self).setUp()
self.field = fields.Field(fields.List(FakeFieldType()))
self.coerce_good_values = [(['foo', 'bar'], ['*foo*', '*bar*'])]
self.coerce_bad_values = ['foo']
self.to_primitive_values = [(['foo'], ['!foo!'])]
self.from_primitive_values = [(['!foo!'], ['foo'])]
def test_stringify(self):
self.assertEqual('[123]', self.field.stringify([123]))
class TestListOfStrings(TestField):
def setUp(self):
super(TestListOfStrings, self).setUp()
self.field = fields.ListOfStringsField()
self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])]
self.coerce_bad_values = ['foo']
self.to_primitive_values = [(['foo'], ['foo'])]
self.from_primitive_values = [(['foo'], ['foo'])]
def test_stringify(self):
self.assertEqual("['abc']", self.field.stringify(['abc']))
class TestSet(TestField):
def setUp(self):
super(TestSet, self).setUp()
self.field = fields.Field(fields.Set(FakeFieldType()))
self.coerce_good_values = [(set(['foo', 'bar']),
set(['*foo*', '*bar*']))]
self.coerce_bad_values = [['foo'], {'foo': 'bar'}]
self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))]
self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))]
def test_stringify(self):
self.assertEqual('set([123])', self.field.stringify(set([123])))
class TestSetOfIntegers(TestField):
def setUp(self):
super(TestSetOfIntegers, self).setUp()
self.field = fields.SetOfIntegersField()
self.coerce_good_values = [(set(['1', 2]),
set([1, 2]))]
self.coerce_bad_values = [set(['foo'])]
self.to_primitive_values = [(set([1]), tuple([1]))]
self.from_primitive_values = [(tuple([1]), set([1]))]
def test_stringify(self):
self.assertEqual('set([1,2])', self.field.stringify(set([1, 2])))
class TestListOfSetsOfIntegers(TestField):
def setUp(self):
super(TestListOfSetsOfIntegers, self).setUp()
self.field = fields.ListOfSetsOfIntegersField()
self.coerce_good_values = [([set(['1', 2]), set([3, '4'])],
[set([1, 2]), set([3, 4])])]
self.coerce_bad_values = [[set(['foo'])]]
self.to_primitive_values = [([set([1])], [tuple([1])])]
self.from_primitive_values = [([tuple([1])], [set([1])])]
def test_stringify(self):
self.assertEqual('[set([1,2])]', self.field.stringify([set([1, 2])]))
class TestObject(TestField):
def setUp(self):
super(TestObject, self).setUp()
class TestableObject(obj_base.NovaObject):
fields = {
'uuid': fields.StringField(),
}
def __eq__(self, value):
# NOTE(danms): Be rather lax about this equality thing to
# satisfy the assertEqual() in test_from_primitive(). We
# just want to make sure the right type of object is re-created
return value.__class__.__name__ == TestableObject.__name__
class OtherTestableObject(obj_base.NovaObject):
pass
test_inst = TestableObject()
self._test_cls = TestableObject
self.field = fields.Field(fields.Object('TestableObject'))
self.coerce_good_values = [(test_inst, test_inst)]
self.coerce_bad_values = [OtherTestableObject(), 1, 'foo']
self.to_primitive_values = [(test_inst, test_inst.obj_to_primitive())]
self.from_primitive_values = [(test_inst.obj_to_primitive(),
test_inst),
(test_inst, test_inst)]
def test_stringify(self):
obj = self._test_cls(uuid='fake-uuid')
self.assertEqual('TestableObject(fake-uuid)',
self.field.stringify(obj))
class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
self.field = fields.Field(fields.NetworkModel())
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
self.from_primitive_values = [(model.json(), model)]
def test_stringify(self):
networkinfo = network_model.NetworkInfo()
networkinfo.append(network_model.VIF(id=123))
networkinfo.append(network_model.VIF(id=456))
self.assertEqual('NetworkModel(123,456)',
self.field.stringify(networkinfo))
class TestIPNetwork(TestField):
def setUp(self):
super(TestIPNetwork, self).setUp()
self.field = fields.Field(fields.IPNetwork())
good = ['192.168.1.0/24', '0.0.0.0/0', '::1/128', '::1/64', '::1/0']
self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
'::1/129', '192.168.0.0/-1']
self.to_primitive_values = [(netaddr.IPNetwork(x), x)
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
class TestIPV4Network(TestField):
def setUp(self):
super(TestIPV4Network, self).setUp()
self.field = fields.Field(fields.IPV4Network())
good = ['192.168.1.0/24', '0.0.0.0/0']
self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
'::1/129', '192.168.0.0/-1']
self.to_primitive_values = [(netaddr.IPNetwork(x), x)
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
class TestIPV6Network(TestField):
def setUp(self):
super(TestIPV6Network, self).setUp()
self.field = fields.Field(fields.IPV6Network())
good = ['::1/128', '::1/64', '::1/0']
self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
'::1/129', '192.168.0.0/-1']
self.to_primitive_values = [(netaddr.IPNetwork(x), x)
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
|
import mock
from trove.common.db import models
from trove.tests.unittests import trove_testtools
class DatastoreSchemaTest(trove_testtools.TestCase):
def setUp(self):
super(DatastoreSchemaTest, self).setUp()
self.dbname = 'testdb'
self.serial_db = {'_name': self.dbname,
'_character_set': None,
'_collate': None}
def tearDown(self):
super(DatastoreSchemaTest, self).tearDown()
def _empty_schema(self):
return models.DatastoreSchema(deserializing=True)
def test_init_name(self):
database = models.DatastoreSchema(self.dbname)
self.assertEqual(self.dbname, database.name)
database2 = models.DatastoreSchema(name=self.dbname)
self.assertEqual(self.dbname, database2.name)
def test_init_no_name(self):
self.assertRaises(RuntimeError, models.DatastoreSchema)
@mock.patch.object(models.DatastoreSchema, 'verify_dict')
def test_init_deserializing(self, mock_verify):
database = models.DatastoreSchema.deserialize(self.serial_db)
mock_verify.assert_any_call()
self.assertEqual(self.dbname, database.name)
def test_serialize(self):
database = models.DatastoreSchema(self.dbname)
self.assertEqual(self.serial_db, database.serialize())
def test_name_property(self):
test_name = "Anna"
database = self._empty_schema()
database.name = test_name
self.assertEqual(test_name, database.name)
def _do_validate_bad_schema_name(self, name):
database = self._empty_schema()
self.assertRaises(ValueError, database._validate_schema_name, name)
def test_validate_name_empty(self):
self._do_validate_bad_schema_name(None)
@mock.patch.object(models.DatastoreSchema, '_max_schema_name_length',
new_callable=mock.PropertyMock)
def test_validate_name_long(self, mock_max_len):
mock_max_len.return_value = 5
self._do_validate_bad_schema_name('toolong')
@mock.patch.object(models.DatastoreSchema, '_is_valid_schema_name')
def test_validate_name_invalid(self, mock_is_valid):
mock_is_valid.return_value = False
self._do_validate_bad_schema_name('notvalid')
def test_verify_dict(self):
database = models.DatastoreSchema(self.dbname)
        # use mock.patch as a context manager because the property setter
        # needs to work normally during __init__ for this test
with mock.patch.object(
models.DatastoreSchema, 'name',
new_callable=mock.PropertyMock) as mock_name_property:
database.verify_dict()
mock_name_property.assert_called_with(self.dbname)
def test_checks_pass(self):
database = models.DatastoreSchema(self.dbname)
database.check_reserved()
database.check_create()
database.check_delete()
@mock.patch.object(models.DatastoreSchema, 'ignored_dbs',
new_callable=mock.PropertyMock)
def test_checks_fail(self, mock_ignored_dbs):
mock_ignored_dbs.return_value = [self.dbname]
database = models.DatastoreSchema(self.dbname)
self.assertRaises(ValueError, database.check_reserved)
self.assertRaises(ValueError, database.check_create)
self.assertRaises(ValueError, database.check_delete)
class DatastoreUserTest(trove_testtools.TestCase):
def setUp(self):
super(DatastoreUserTest, self).setUp()
self.username = 'testuser'
self.password = 'password'
self.host = '192.168.0.1'
self.dbname = 'testdb'
self.serial_db = {'_name': self.dbname,
'_character_set': None,
'_collate': None}
self.databases = [self.serial_db]
self.host_wildcard = '%'
self.serial_user_basic = {
'_name': self.username, '_password': None,
'_host': self.host_wildcard, '_databases': [],
'_is_root': False
}
self.serial_user_full = {
'_name': self.username, '_password': self.password,
'_host': self.host, '_databases': self.databases,
'_is_root': False
}
def tearDown(self):
super(DatastoreUserTest, self).tearDown()
def _empty_user(self):
return models.DatastoreUser(deserializing=True)
def _test_user_basic(self, user):
self.assertEqual(self.username, user.name)
self.assertIsNone(user.password)
self.assertEqual(self.host_wildcard, user.host)
self.assertEqual([], user.databases)
def _test_user_full(self, user):
self.assertEqual(self.username, user.name)
self.assertEqual(self.password, user.password)
self.assertEqual(self.host, user.host)
self.assertEqual(self.databases, user.databases)
def test_init_name(self):
user1 = models.DatastoreUser(self.username)
self._test_user_basic(user1)
user2 = models.DatastoreUser(name=self.username)
self._test_user_basic(user2)
def test_init_no_name(self):
self.assertRaises(ValueError, models.DatastoreUser)
def test_init_options(self):
user1 = models.DatastoreUser(self.username)
self._test_user_basic(user1)
user2 = models.DatastoreUser(self.username, self.password,
self.host, self.dbname)
self._test_user_full(user2)
user3 = models.DatastoreUser(name=self.username,
password=self.password,
host=self.host,
databases=self.dbname)
self._test_user_full(user3)
@mock.patch.object(models.DatastoreUser, 'verify_dict')
def test_init_deserializing(self, mock_verify):
user1 = models.DatastoreUser.deserialize(self.serial_user_basic)
self._test_user_basic(user1)
user2 = models.DatastoreUser.deserialize(self.serial_user_full)
self._test_user_full(user2)
self.assertEqual(2, mock_verify.call_count)
def test_serialize(self):
user1 = models.DatastoreUser(self.username)
self.assertEqual(self.serial_user_basic, user1.serialize())
user2 = models.DatastoreUser(self.username, self.password,
self.host, self.dbname)
self.assertEqual(self.serial_user_full, user2.serialize())
@mock.patch.object(models.DatastoreUser, '_validate_user_name')
def test_name_property(self, mock_validate):
test_name = "Anna"
user = self._empty_user()
user.name = test_name
self.assertEqual(test_name, user.name)
mock_validate.assert_called_with(test_name)
def _do_validate_bad_user_name(self, name):
user = self._empty_user()
self.assertRaises(ValueError, user._validate_user_name, name)
def test_validate_name_empty(self):
self._do_validate_bad_user_name(None)
@mock.patch.object(models.DatastoreUser, '_max_user_name_length',
new_callable=mock.PropertyMock)
def test_validate_name_long(self, mock_max_len):
mock_max_len.return_value = 5
self._do_validate_bad_user_name('toolong')
@mock.patch.object(models.DatastoreUser, '_is_valid_user_name')
def test_validate_name_invalid(self, mock_is_valid):
mock_is_valid.return_value = False
self._do_validate_bad_user_name('notvalid')
@mock.patch.object(models.DatastoreUser, '_is_valid_password')
def test_password_property(self, mock_validate):
test_password = "NewPassword"
user = self._empty_user()
user.password = test_password
mock_validate.assert_called_with(test_password)
self.assertEqual(test_password, user.password)
@mock.patch.object(models.DatastoreUser, '_is_valid_password')
def test_password_property_error(self, mock_validate):
mock_validate.return_value = False
test_password = "NewPassword"
user = self._empty_user()
def test():
user.password = test_password
self.assertRaises(ValueError, test)
@mock.patch.object(models.DatastoreUser, '_is_valid_host_name')
def test_host_property(self, mock_validate):
test_host = "192.168.0.2"
user = self._empty_user()
user.host = test_host
mock_validate.assert_called_with(test_host)
self.assertEqual(test_host, user.host)
@mock.patch.object(models.DatastoreUser, '_is_valid_host_name')
def test_host_property_error(self, mock_validate):
mock_validate.return_value = False
test_host = "192.168.0.2"
user = self._empty_user()
def test():
user.host = test_host
self.assertRaises(ValueError, test)
@mock.patch.object(models.DatastoreUser, '_add_database')
def test_databases_property(self, mock_add_database):
test_dbname1 = 'otherdb'
test_dbname2 = 'lastdb'
user = self._empty_user()
def test(value):
user._databases.append({'_name': value,
'_character_set': None,
'_collate': None})
mock_add_database.side_effect = test
user.databases = self.dbname
user.databases = [test_dbname1, test_dbname2]
mock_add_database.assert_any_call(self.dbname)
mock_add_database.assert_any_call(test_dbname1)
mock_add_database.assert_any_call(test_dbname2)
self.assertIn(self.serial_db, user.databases)
self.assertIn({'_name': test_dbname1,
'_character_set': None,
'_collate': None}, user.databases)
self.assertIn({'_name': test_dbname2,
'_character_set': None,
'_collate': None}, user.databases)
def test_build_database_schema(self):
user = self._empty_user()
schema = user._build_database_schema(self.dbname)
self.assertEqual(self.serial_db, schema.serialize())
def test_add_database(self):
user = self._empty_user()
user._add_database(self.dbname)
self.assertEqual([self.serial_db], user.databases)
        # check that adding an existing db does nothing
user._add_database(self.dbname)
self.assertEqual([self.serial_db], user.databases)
@mock.patch.object(models, 'DatastoreSchema')
def test_deserialize_schema(self, mock_ds_schema):
mock_ds_schema.deserialize = mock.Mock()
user = self._empty_user()
user.deserialize_schema(self.serial_db)
mock_ds_schema.deserialize.assert_called_with(self.serial_db)
@mock.patch.object(models.DatastoreUser, 'deserialize_schema')
@mock.patch.object(models.DatastoreUser, 'host',
new_callable=mock.PropertyMock)
@mock.patch.object(models.DatastoreUser, 'password',
new_callable=mock.PropertyMock)
@mock.patch.object(models.DatastoreUser, 'name',
new_callable=mock.PropertyMock)
def _test_verify_dict_with_mocks(self, user,
mock_name_property,
mock_password_property,
mock_host_property,
mock_deserialize_schema):
user.verify_dict()
mock_name_property.assert_called_with(self.username)
mock_password_property.assert_called_with(self.password)
mock_host_property.assert_called_with(self.host)
mock_deserialize_schema.assert_called_with(self.serial_db)
def test_verify_dict(self):
user = models.DatastoreUser(self.username, self.password,
self.host, self.dbname)
self._test_verify_dict_with_mocks(user)
def test_validate_dict_defaults(self):
user = models.DatastoreUser(self.username)
user.verify_dict()
self.assertIsNone(user.password)
self.assertEqual(self.host_wildcard, user.host)
self.assertEqual([], user.databases)
def test_is_root(self):
user = models.DatastoreUser(self.username)
self.assertFalse(user._is_root)
user.make_root()
self.assertTrue(user._is_root)
def test_checks_pass(self):
user = models.DatastoreUser(self.username)
user.check_reserved()
user.check_create()
user.check_delete()
@mock.patch.object(models.DatastoreUser, 'ignored_users',
new_callable=mock.PropertyMock)
def test_checks_fail(self, mock_ignored_users):
mock_ignored_users.return_value = [self.username]
user = models.DatastoreUser(self.username)
self.assertRaises(ValueError, user.check_reserved)
self.assertRaises(ValueError, user.check_create)
self.assertRaises(ValueError, user.check_delete)
|
from __future__ import absolute_import
import mock
import time
import pytest
import opentracing
from opentracing import Format
class APICompatibilityCheckMixin(object):
"""
    A mixin class for validating that a given tracer implementation
    satisfies the requirements of the OpenTracing API.
"""
def tracer(self):
raise NotImplementedError('Subclass must implement tracer()')
def check_baggage_values(self):
"""If true, the test will validate Baggage items by storing and
retrieving them from the trace context. If false, it will only attempt
to store and retrieve the Baggage items to check the API compliance,
but not actually validate stored values. The latter mode is only
useful for no-op tracer.
"""
return True
def check_scope_manager(self):
"""If true, the test suite will validate the `ScopeManager` propagation
to ensure correct parenting. If false, it will only use the API without
asserting. The latter mode is only useful for no-op tracer.
"""
return True
def is_parent(self, parent, span):
"""Utility method that must be defined by Tracer implementers to define
how the test suite can check when a `Span` is a parent of another one.
It depends by the underlying implementation that is not part of the
OpenTracing API.
"""
return False
def test_active_span(self):
tracer = self.tracer()
span = tracer.start_span('Fry')
if self.check_scope_manager():
assert tracer.active_span is None
assert tracer.scope_manager.active is None
with tracer.scope_manager.activate(span, True):
assert tracer.active_span is span
assert tracer.scope_manager.active.span is span
def test_start_active_span(self):
# the first usage returns a `Scope` that wraps a root `Span`
tracer = self.tracer()
with tracer.start_active_span('Fry') as scope:
assert scope.span is not None
if self.check_scope_manager():
assert self.is_parent(None, scope.span)
def test_start_active_span_parent(self):
# ensure the `ScopeManager` provides the right parenting
tracer = self.tracer()
with tracer.start_active_span('Fry') as parent:
with tracer.start_active_span('Farnsworth') as child:
if self.check_scope_manager():
assert self.is_parent(parent.span, child.span)
def test_start_active_span_ignore_active_span(self):
# ensure the `ScopeManager` ignores the active `Scope`
# if the flag is set
tracer = self.tracer()
with tracer.start_active_span('Fry') as parent:
with tracer.start_active_span('Farnsworth',
ignore_active_span=True) as child:
if self.check_scope_manager():
assert not self.is_parent(parent.span, child.span)
def test_start_active_span_not_finish_on_close(self):
        # ensure a `Span` is not finished when the `Scope` closes,
        # since finish_on_close is False
tracer = self.tracer()
scope = tracer.start_active_span('Fry', finish_on_close=False)
with mock.patch.object(scope.span, 'finish') as finish:
scope.close()
assert finish.call_count == 0
def test_start_active_span_finish_on_close(self):
        # a `Span` is finished when the `Scope` closes,
        # since finish_on_close is True
tracer = self.tracer()
scope = tracer.start_active_span('Fry', finish_on_close=True)
with mock.patch.object(scope.span, 'finish') as finish:
scope.close()
if self.check_scope_manager():
assert finish.call_count == 1
def test_start_active_span_default_finish_on_close(self):
        # by default, a `Span` is finished when its `Scope` closes
tracer = self.tracer()
scope = tracer.start_active_span('Fry')
with mock.patch.object(scope.span, 'finish') as finish:
scope.close()
if self.check_scope_manager():
assert finish.call_count == 1
def test_start_span(self):
tracer = self.tracer()
span = tracer.start_span(operation_name='Fry')
span.finish()
with tracer.start_span(operation_name='Fry',
tags={'birthday': 'August 14 1974'}) as span:
span.log_event('birthplace',
payload={'hospital': 'Brooklyn Pre-Med Hospital',
'city': 'Old New York'})
def test_start_span_propagation(self):
# `start_span` must inherit the current active `Scope` span
tracer = self.tracer()
with tracer.start_active_span('Fry') as parent:
with tracer.start_span(operation_name='Farnsworth') as child:
if self.check_scope_manager():
assert self.is_parent(parent.span, child)
def test_start_span_propagation_ignore_active_span(self):
# `start_span` doesn't inherit the current active `Scope` span
# if the flag is set
tracer = self.tracer()
with tracer.start_active_span('Fry') as parent:
with tracer.start_span(operation_name='Farnsworth',
ignore_active_span=True) as child:
if self.check_scope_manager():
assert not self.is_parent(parent.span, child)
def test_start_span_with_parent(self):
tracer = self.tracer()
parent_span = tracer.start_span(operation_name='parent')
assert parent_span is not None
span = tracer.start_span(
operation_name='Leela',
child_of=parent_span)
span.finish()
span = tracer.start_span(
operation_name='Leela',
references=[opentracing.follows_from(parent_span.context)],
tags={'birthplace': 'sewers'})
span.finish()
parent_span.finish()
def test_start_child_span(self):
tracer = self.tracer()
parent_span = tracer.start_span(operation_name='parent')
assert parent_span is not None
child_span = opentracing.start_child_span(
parent_span, operation_name='Leela')
child_span.finish()
parent_span.finish()
def test_set_operation_name(self):
span = self.tracer().start_span().set_operation_name('Farnsworth')
span.finish()
def test_span_as_context_manager(self):
tracer = self.tracer()
finish = {'called': False}
def mock_finish(*_):
finish['called'] = True
with tracer.start_span(operation_name='antiquing') as span:
setattr(span, 'finish', mock_finish)
assert finish['called'] is True
# now try with exception
finish['called'] = False
try:
with tracer.start_span(operation_name='antiquing') as span:
setattr(span, 'finish', mock_finish)
raise ValueError()
except ValueError:
assert finish['called'] is True
else:
raise AssertionError('Expected ValueError') # pragma: no cover
def test_span_tag_value_types(self):
with self.tracer().start_span(operation_name='ManyTypes') as span:
span. \
set_tag('an_int', 9). \
set_tag('a_bool', True). \
set_tag('a_string', 'aoeuidhtns')
def test_span_tags_with_chaining(self):
span = self.tracer().start_span(operation_name='Farnsworth')
span. \
set_tag('birthday', '9 April, 2841'). \
set_tag('loves', 'different lengths of wires')
span. \
set_tag('unicode_val', u'non-ascii: \u200b'). \
set_tag(u'unicode_key_\u200b', 'ascii val')
span.finish()
def test_span_logs(self):
span = self.tracer().start_span(operation_name='Fry')
# Newer API
span.log_kv(
{'frozen.year': 1999, 'frozen.place': 'Cryogenics Labs'})
span.log_kv(
{'defrosted.year': 2999, 'defrosted.place': 'Cryogenics Labs'},
time.time())
# Older API
span.\
log_event('frozen', {'year': 1999, 'place': 'Cryogenics Labs'}). \
log_event('defrosted', {'year': 2999}). \
log_event('became his own grandfather', 1947)
span.\
log(event='frozen'). \
log(payload={'year': 1999}). \
log(timestamp=time.time(),
event='frozen',
payload={'year': 1999}). \
log(timestamp=time.time(),
event='unfrozen',
payload={'year': 2999})
def test_span_baggage(self):
with self.tracer().start_span(operation_name='Fry') as span:
assert span.context.baggage == {}
span_ref = span.set_baggage_item('Kiff-loves', 'Amy')
assert span_ref is span
val = span.get_baggage_item('Kiff-loves')
if self.check_baggage_values():
assert 'Amy' == val
def test_context_baggage(self):
with self.tracer().start_span(operation_name='Fry') as span:
assert span.context.baggage == {}
span.set_baggage_item('Kiff-loves', 'Amy')
if self.check_baggage_values():
assert span.context.baggage == {'Kiff-loves': 'Amy'}
def test_text_propagation(self):
with self.tracer().start_span(operation_name='Bender') as span:
text_carrier = {}
self.tracer().inject(
span_context=span.context,
format=opentracing.Format.TEXT_MAP,
carrier=text_carrier)
extracted_ctx = self.tracer().extract(
format=opentracing.Format.TEXT_MAP,
carrier=text_carrier)
assert extracted_ctx.baggage == {}
def test_binary_propagation(self):
with self.tracer().start_span(operation_name='Bender') as span:
bin_carrier = bytearray()
self.tracer().inject(
span_context=span.context,
format=opentracing.Format.BINARY,
carrier=bin_carrier)
extracted_ctx = self.tracer().extract(
format=opentracing.Format.BINARY,
carrier=bin_carrier)
assert extracted_ctx.baggage == {}
def test_mandatory_formats(self):
formats = [
(Format.TEXT_MAP, {}),
(Format.HTTP_HEADERS, {}),
(Format.BINARY, bytearray()),
]
with self.tracer().start_span(operation_name='Bender') as span:
for fmt, carrier in formats:
# expecting no exceptions
span.tracer.inject(span.context, fmt, carrier)
span.tracer.extract(fmt, carrier)
def test_unknown_format(self):
custom_format = 'kiss my shiny metal ...'
with self.tracer().start_span(operation_name='Bender') as span:
with pytest.raises(opentracing.UnsupportedFormatException):
span.tracer.inject(span.context, custom_format, {})
with pytest.raises(opentracing.UnsupportedFormatException):
span.tracer.extract(custom_format, {})
def test_tracer_start_active_span_scope(self):
# the Tracer ScopeManager should store the active Scope
tracer = self.tracer()
scope = tracer.start_active_span('Fry')
if self.check_scope_manager():
assert tracer.scope_manager.active == scope
scope.close()
def test_tracer_start_span_scope(self):
# the Tracer ScopeManager should not store the new Span
tracer = self.tracer()
span = tracer.start_span(operation_name='Fry')
if self.check_scope_manager():
assert tracer.scope_manager.active is None
span.finish()
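# Hedged usage sketch (illustrative, not part of the upstream suite): the
# mixin can be exercised against the reference no-op opentracing.Tracer by
# combining it with unittest.TestCase. The baggage and parenting checks are
# disabled, as the check_* docstrings above recommend for a no-op tracer.
import unittest
class NoopTracerAPITest(APICompatibilityCheckMixin, unittest.TestCase):
    def tracer(self):
        # the no-op tracer shipped with the opentracing package
        return opentracing.Tracer()
    def check_baggage_values(self):
        return False  # the no-op tracer does not persist baggage values
    def check_scope_manager(self):
        return False  # the no-op tracer does not track span parenting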
|
"""Implementation of Unix-like du command for cloud storage providers."""
from __future__ import absolute_import
import sys
from gslib.boto_translation import S3_DELETE_MARKER_GUID
from gslib.bucket_listing_ref import BucketListingObject
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.ls_helper import LsHelper
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.util import MakeHumanReadable
from gslib.util import NO_MAX
from gslib.util import UTF8
_SYNOPSIS = """
gsutil du url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The du command displays the amount of space (in bytes) being used by the
objects in the file or object hierarchy under a given URL. The syntax emulates
the Linux du command (which stands for disk usage). For example, the command:
gsutil du -s gs://your-bucket/dir
will report the total space used by all objects under gs://your-bucket/dir and
any sub-directories.
<B>OPTIONS</B>
-0 Ends each output line with a 0 byte rather than a newline. This
can be useful to make the output more easily machine-readable.
-a Includes non-current object versions / generations in the listing
(only useful with a versioning-enabled bucket). Also prints
generation and metageneration for each listed object.
-c Includes a grand total at the end of the output.
-e A pattern to exclude from reporting. Example: -e "*.o" would
exclude any object that ends in ".o". Can be specified multiple
times.
-h Prints object sizes in human-readable format (e.g., 1 KiB,
              234 MiB, 2 GiB, etc.)
-s Displays only the grand total for each argument.
-X Similar to -e, but excludes patterns from the given file. The
patterns to exclude should be one per line.
<B>EXAMPLES</B>
To list the size of all objects in a bucket:
gsutil du gs://bucketname
To list the size of all objects underneath a prefix:
gsutil du gs://bucketname/prefix/*
To print the total number of bytes in a bucket, in human-readable form:
gsutil du -ch gs://bucketname
To see a summary of the total bytes in the two given buckets:
gsutil du -s gs://bucket1 gs://bucket2
To list the size of all objects in a versioned bucket, including objects that
are not the latest:
gsutil du -a gs://bucketname
To list all objects in a bucket, except objects that end in ".bak",
with each object printed ending in a null byte:
gsutil du -e "*.bak" -0 gs://bucketname
To get a total of all buckets in a project with a grand total for an entire
project:
gsutil -o GSUtil:default_project_id=project-name du -shc
""")
class DuCommand(Command):
"""Implementation of gsutil du command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'du',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='0ace:hsX:',
file_url_ok=False,
provider_url_ok=True,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='du',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Display object size usage',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def _PrintSummaryLine(self, num_bytes, name):
size_string = (MakeHumanReadable(num_bytes)
if self.human_readable else str(num_bytes))
sys.stdout.write('%(size)-10s %(name)s%(ending)s' % {
'size': size_string, 'name': name, 'ending': self.line_ending})
def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):
"""Print listing info for given bucket_listing_ref.
Args:
      bucket_listing_ref: BucketListingRef being listed.
    Returns:
      Tuple of (number of objects, number of bytes).
    Raises:
      Exception: if a bug in the calling code is encountered.
"""
obj = bucket_listing_ref.root_object
url_str = bucket_listing_ref.url_string
if (obj.metadata and S3_DELETE_MARKER_GUID in
obj.metadata.additionalProperties):
size_string = '0'
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
size_string = (MakeHumanReadable(obj.size)
if self.human_readable else str(obj.size))
num_bytes = obj.size
num_objs = 1
if not self.summary_only:
sys.stdout.write('%(size)-10s %(url)s%(ending)s' % {
'size': size_string,
'url': url_str.encode(UTF8),
'ending': self.line_ending})
return (num_objs, num_bytes)
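  # Editorial example (format derived from the write above, sizes are
  # hypothetical): a listed object prints as a left-justified 10-character
  # size column followed by its URL, e.g.
  #     1048576    gs://your-bucket/dir/obj.bin
  # with '<DeleteMarker>' appended to the URL for S3 delete markers.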
def RunCommand(self):
"""Command entry point for the du command."""
self.line_ending = '\n'
self.all_versions = False
self.produce_total = False
self.human_readable = False
self.summary_only = False
self.exclude_patterns = []
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-0':
self.line_ending = '\0'
elif o == '-a':
self.all_versions = True
elif o == '-c':
self.produce_total = True
elif o == '-e':
self.exclude_patterns.append(a)
elif o == '-h':
self.human_readable = True
elif o == '-s':
self.summary_only = True
elif o == '-X':
if a == '-':
f = sys.stdin
else:
f = open(a, 'r')
try:
for line in f:
line = line.strip().decode(UTF8)
if line:
self.exclude_patterns.append(line)
finally:
f.close()
if not self.args:
# Default to listing all gs buckets.
self.args = ['gs://']
total_bytes = 0
got_nomatch_errors = False
def _PrintObjectLong(blr):
return self._PrintInfoAboutBucketListingRef(blr)
def _PrintNothing(unused_blr=None):
pass
def _PrintDirectory(num_bytes, blr):
if not self.summary_only:
self._PrintSummaryLine(num_bytes, blr.url_string.encode(UTF8))
for url_arg in self.args:
top_level_storage_url = StorageUrlFromString(url_arg)
if top_level_storage_url.IsFileUrl():
raise CommandException('Only cloud URLs are supported for %s'
% self.command_name)
bucket_listing_fields = ['size']
ls_helper = LsHelper(
self.WildcardIterator, self.logger,
print_object_func=_PrintObjectLong, print_dir_func=_PrintNothing,
print_dir_header_func=_PrintNothing,
print_dir_summary_func=_PrintDirectory,
print_newline_func=_PrintNothing, all_versions=self.all_versions,
should_recurse=True, exclude_patterns=self.exclude_patterns,
fields=bucket_listing_fields)
# ls_helper expands to objects and prefixes, so perform a top-level
# expansion first.
if top_level_storage_url.IsProvider():
# Provider URL: use bucket wildcard to iterate over all buckets.
top_level_iter = self.WildcardIterator(
'%s://*' % top_level_storage_url.scheme).IterBuckets(
bucket_fields=['id'])
elif top_level_storage_url.IsBucket():
top_level_iter = self.WildcardIterator(
'%s://%s' % (top_level_storage_url.scheme,
top_level_storage_url.bucket_name)).IterBuckets(
bucket_fields=['id'])
else:
top_level_iter = [BucketListingObject(top_level_storage_url)]
for blr in top_level_iter:
storage_url = blr.storage_url
if storage_url.IsBucket() and self.summary_only:
storage_url = StorageUrlFromString(
storage_url.CreatePrefixUrl(wildcard_suffix='**'))
_, exp_objs, exp_bytes = ls_helper.ExpandUrlAndPrint(storage_url)
if (storage_url.IsObject() and exp_objs == 0 and
ContainsWildcard(url_arg) and not self.exclude_patterns):
got_nomatch_errors = True
total_bytes += exp_bytes
if self.summary_only:
self._PrintSummaryLine(exp_bytes,
blr.url_string.rstrip('/').encode(UTF8))
if self.produce_total:
self._PrintSummaryLine(total_bytes, 'total')
if got_nomatch_errors:
raise CommandException('One or more URLs matched no objects.')
return 0
|
"""Editor Plugin"""
import os
import os.path as osp
import re
import time
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant, getopenfilenames, to_qvariant
from qtpy.QtCore import QByteArray, Qt, Signal, Slot
from qtpy.QtGui import QKeySequence
from qtpy.QtPrintSupport import QAbstractPrintDialog, QPrintDialog, QPrinter
from qtpy.QtWidgets import (QAction, QActionGroup, QApplication, QDialog,
QFileDialog, QGridLayout, QGroupBox, QHBoxLayout,
QInputDialog, QLabel, QMenu, QSplitter, QTabWidget,
QToolBar, QVBoxLayout, QWidget)
from spyder import dependencies
from spyder.config.base import _, get_conf_path, running_under_pytest
from spyder.config.main import (CONF, RUN_CELL_SHORTCUT,
RUN_CELL_AND_ADVANCE_SHORTCUT)
from spyder.config.utils import (get_edit_filetypes, get_edit_filters,
get_filter)
from spyder.py3compat import PY2, qbytearray_to_str, to_text_string
from spyder.utils import codeanalysis, encoding, programs, sourcecode
from spyder.utils import icon_manager as ima
from spyder.utils.introspection.manager import IntrospectionManager
from spyder.utils.qthelpers import create_action, add_actions, MENU_SEPARATOR
from spyder.utils.misc import getcwd_or_home
from spyder.widgets.findreplace import FindReplace
from spyder.widgets.editor import (EditorMainWindow, EditorSplitter,
EditorStack, Printer)
from spyder.widgets.sourcecode.codeeditor import CodeEditor
from spyder.widgets.status import (CursorPositionStatus, EncodingStatus,
EOLStatus, ReadWriteStatus)
from spyder.plugins import SpyderPluginWidget
from spyder.plugins.configdialog import PluginConfigPage
from spyder.plugins.runconfig import (ALWAYS_OPEN_FIRST_RUN_OPTION,
get_run_configuration,
RunConfigDialog, RunConfigOneDialog)
NBCONVERT_REQVER = ">=4.0"
dependencies.add("nbconvert", _("Manipulate Jupyter notebooks on the Editor"),
required_version=NBCONVERT_REQVER)
def _load_all_breakpoints():
bp_dict = CONF.get('run', 'breakpoints', {})
for filename in list(bp_dict.keys()):
if not osp.isfile(filename):
bp_dict.pop(filename)
return bp_dict
def load_breakpoints(filename):
breakpoints = _load_all_breakpoints().get(filename, [])
if breakpoints and isinstance(breakpoints[0], int):
# Old breakpoints format
breakpoints = [(lineno, None) for lineno in breakpoints]
return breakpoints
def save_breakpoints(filename, breakpoints):
if not osp.isfile(filename):
return
bp_dict = _load_all_breakpoints()
bp_dict[filename] = breakpoints
CONF.set('run', 'breakpoints', bp_dict)
def clear_all_breakpoints():
CONF.set('run', 'breakpoints', {})
def clear_breakpoint(filename, lineno):
breakpoints = load_breakpoints(filename)
if breakpoints:
for breakpoint in breakpoints[:]:
if breakpoint[0] == lineno:
breakpoints.remove(breakpoint)
save_breakpoints(filename, breakpoints)
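# Editorial sketch (shape inferred from the helpers above, not documented
# Spyder API): the 'run/breakpoints' option maps filenames to lists of
# (lineno, condition) tuples, e.g.
#     {'/path/to/script.py': [(10, None), (42, 'x > 0')]}
# Older configs stored bare line numbers, which load_breakpoints() upgrades
# to (lineno, None) tuples.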
WINPDB_PATH = programs.find_program('winpdb')
class EditorConfigPage(PluginConfigPage):
def get_name(self):
return _("Editor")
def get_icon(self):
return ima.icon('edit')
def setup_page(self):
template_btn = self.create_button(_("Edit template for new modules"),
self.plugin.edit_template)
interface_group = QGroupBox(_("Interface"))
newcb = self.create_checkbox
showtabbar_box = newcb(_("Show tab bar"), 'show_tab_bar')
interface_layout = QVBoxLayout()
interface_layout.addWidget(showtabbar_box)
interface_group.setLayout(interface_layout)
display_group = QGroupBox(_("Source code"))
linenumbers_box = newcb(_("Show line numbers"), 'line_numbers')
blanks_box = newcb(_("Show blank spaces"), 'blank_spaces')
edgeline_box = newcb(_("Show vertical line after"), 'edge_line')
edgeline_spin = self.create_spinbox("", _("characters"),
'edge_line_column', 79, 1, 500)
edgeline_box.toggled.connect(edgeline_spin.spinbox.setEnabled)
edgeline_box.toggled.connect(edgeline_spin.slabel.setEnabled)
edgeline_spin.spinbox.setEnabled(self.get_option('edge_line'))
edgeline_spin.slabel.setEnabled(self.get_option('edge_line'))
currentline_box = newcb(_("Highlight current line"),
'highlight_current_line')
currentcell_box = newcb(_("Highlight current cell"),
'highlight_current_cell')
occurrence_box = newcb(_("Highlight occurrences after"),
'occurrence_highlighting')
occurrence_spin = self.create_spinbox("", _(" ms"),
'occurrence_highlighting/timeout',
min_=100, max_=1000000, step=100)
occurrence_box.toggled.connect(occurrence_spin.spinbox.setEnabled)
occurrence_box.toggled.connect(occurrence_spin.slabel.setEnabled)
occurrence_spin.spinbox.setEnabled(
self.get_option('occurrence_highlighting'))
occurrence_spin.slabel.setEnabled(
self.get_option('occurrence_highlighting'))
wrap_mode_box = newcb(_("Wrap lines"), 'wrap')
display_layout = QGridLayout()
display_layout.addWidget(linenumbers_box, 0, 0)
display_layout.addWidget(blanks_box, 1, 0)
display_layout.addWidget(edgeline_box, 2, 0)
display_layout.addWidget(edgeline_spin.spinbox, 2, 1)
display_layout.addWidget(edgeline_spin.slabel, 2, 2)
display_layout.addWidget(currentline_box, 3, 0)
display_layout.addWidget(currentcell_box, 4, 0)
display_layout.addWidget(occurrence_box, 5, 0)
display_layout.addWidget(occurrence_spin.spinbox, 5, 1)
display_layout.addWidget(occurrence_spin.slabel, 5, 2)
display_layout.addWidget(wrap_mode_box, 6, 0)
display_h_layout = QHBoxLayout()
display_h_layout.addLayout(display_layout)
display_h_layout.addStretch(1)
display_group.setLayout(display_h_layout)
run_group = QGroupBox(_("Run"))
saveall_box = newcb(_("Save all files before running script"),
'save_all_before_run')
run_selection_group = QGroupBox(_("Run selection"))
focus_box = newcb(_("Maintain focus in the Editor after running cells "
"or selections"), 'focus_to_editor')
introspection_group = QGroupBox(_("Introspection"))
rope_is_installed = programs.is_module_installed('rope')
if rope_is_installed:
completion_box = newcb(_("Automatic code completion"),
'codecompletion/auto')
case_comp_box = newcb(_("Case sensitive code completion"),
'codecompletion/case_sensitive')
comp_enter_box = newcb(_("Enter key selects completion"),
'codecompletion/enter_key')
calltips_box = newcb(_("Display balloon tips"), 'calltips')
gotodef_box = newcb(_("Link to object definition"),
'go_to_definition',
tip=_("If this option is enabled, clicking on an object\n"
"name (left-click + Ctrl key) will go this object\n"
"definition (if resolved)."))
else:
rope_label = QLabel(_("<b>Warning:</b><br>"
"The Python module <i>rope</i> is not "
"installed on this computer: calltips, "
"code completion and go-to-definition "
"features won't be available."))
rope_label.setWordWrap(True)
sourcecode_group = QGroupBox(_("Source code"))
closepar_box = newcb(_("Automatic insertion of parentheses, braces "
"and brackets"),
'close_parentheses')
close_quotes_box = newcb(_("Automatic insertion of closing quotes"),
'close_quotes')
add_colons_box = newcb(_("Automatic insertion of colons after 'for', "
"'if', 'def', etc"),
'add_colons')
autounindent_box = newcb(_("Automatic indentation after 'else', "
"'elif', etc."), 'auto_unindent')
indent_chars_box = self.create_combobox(_("Indentation characters: "),
((_("2 spaces"), '* *'),
(_("3 spaces"), '* *'),
(_("4 spaces"), '* *'),
(_("5 spaces"), '* *'),
(_("6 spaces"), '* *'),
(_("7 spaces"), '* *'),
(_("8 spaces"), '* *'),
(_("Tabulations"), '*\t*')), 'indent_chars')
tabwidth_spin = self.create_spinbox(_("Tab stop width:"), _("spaces"),
'tab_stop_width_spaces', 4, 1, 8, 1)
def enable_tabwidth_spin(index):
if index == 7: # Tabulations
tabwidth_spin.plabel.setEnabled(True)
tabwidth_spin.spinbox.setEnabled(True)
else:
tabwidth_spin.plabel.setEnabled(False)
tabwidth_spin.spinbox.setEnabled(False)
indent_chars_box.combobox.currentIndexChanged.connect(enable_tabwidth_spin)
tab_mode_box = newcb(_("Tab always indent"),
'tab_always_indent', default=False,
tip=_("If enabled, pressing Tab will always indent,\n"
"even when the cursor is not at the beginning\n"
"of a line (when this option is enabled, code\n"
"completion may be triggered using the alternate\n"
"shortcut: Ctrl+Space)"))
ibackspace_box = newcb(_("Intelligent backspace"),
'intelligent_backspace', default=True)
removetrail_box = newcb(_("Automatically remove trailing spaces "
"when saving files"),
'always_remove_trailing_spaces', default=False)
analysis_group = QGroupBox(_("Analysis"))
pep_url = '<a href="http://www.python.org/dev/peps/pep-0008/">PEP8</a>'
pep8_label = QLabel(_("<i>(Refer to the {} page)</i>").format(pep_url))
pep8_label.setOpenExternalLinks(True)
is_pyflakes = codeanalysis.is_pyflakes_installed()
is_pep8 = codeanalysis.get_checker_executable(
'pycodestyle') is not None
pyflakes_box = newcb(_("Real-time code analysis"),
'code_analysis/pyflakes', default=True,
tip=_("<p>If enabled, Python source code will be analyzed "
"using pyflakes, lines containing errors or "
"warnings will be highlighted.</p>"
"<p><u>Note</u>: add <b>analysis:ignore</b> in "
"a comment to ignore code analysis "
"warnings.</p>"))
pyflakes_box.setEnabled(is_pyflakes)
if not is_pyflakes:
pyflakes_box.setToolTip(_("Code analysis requires pyflakes %s+") %
codeanalysis.PYFLAKES_REQVER)
pep8_box = newcb(_("Real-time code style analysis"),
'code_analysis/pep8', default=False,
tip=_("<p>If enabled, Python source code will be analyzed "
"using pycodestyle, lines that are not following PEP8 "
"style guide will be highlighted.</p>"
"<p><u>Note</u>: add <b>analysis:ignore</b> in "
"a comment to ignore style analysis "
"warnings.</p>"))
pep8_box.setEnabled(is_pep8)
todolist_box = newcb(_("Code annotations (TODO, FIXME, XXX, HINT, TIP,"
" @todo, HACK, BUG, OPTIMIZE, !!!, ???)"),
'todo_list', default=True)
realtime_radio = self.create_radiobutton(
_("Perform analysis when "
"saving file and every"),
'realtime_analysis', True)
saveonly_radio = self.create_radiobutton(
_("Perform analysis only "
"when saving file"),
'onsave_analysis')
af_spin = self.create_spinbox("", _(" ms"), 'realtime_analysis/timeout',
min_=100, max_=1000000, step=100)
af_layout = QHBoxLayout()
af_layout.addWidget(realtime_radio)
af_layout.addWidget(af_spin)
run_layout = QVBoxLayout()
run_layout.addWidget(saveall_box)
run_group.setLayout(run_layout)
run_selection_layout = QVBoxLayout()
run_selection_layout.addWidget(focus_box)
run_selection_group.setLayout(run_selection_layout)
introspection_layout = QVBoxLayout()
if rope_is_installed:
introspection_layout.addWidget(calltips_box)
introspection_layout.addWidget(completion_box)
introspection_layout.addWidget(case_comp_box)
introspection_layout.addWidget(comp_enter_box)
introspection_layout.addWidget(gotodef_box)
else:
introspection_layout.addWidget(rope_label)
introspection_group.setLayout(introspection_layout)
analysis_layout = QVBoxLayout()
analysis_layout.addWidget(pyflakes_box)
analysis_pep_layout = QHBoxLayout()
analysis_pep_layout.addWidget(pep8_box)
analysis_pep_layout.addWidget(pep8_label)
analysis_layout.addLayout(analysis_pep_layout)
analysis_layout.addWidget(todolist_box)
analysis_layout.addLayout(af_layout)
analysis_layout.addWidget(saveonly_radio)
analysis_group.setLayout(analysis_layout)
sourcecode_layout = QVBoxLayout()
sourcecode_layout.addWidget(closepar_box)
sourcecode_layout.addWidget(autounindent_box)
sourcecode_layout.addWidget(add_colons_box)
sourcecode_layout.addWidget(close_quotes_box)
indent_tab_layout = QHBoxLayout()
indent_tab_grid_layout = QGridLayout()
indent_tab_grid_layout.addWidget(indent_chars_box.label, 0, 0)
indent_tab_grid_layout.addWidget(indent_chars_box.combobox, 0, 1)
indent_tab_grid_layout.addWidget(tabwidth_spin.plabel, 1, 0)
indent_tab_grid_layout.addWidget(tabwidth_spin.spinbox, 1, 1)
indent_tab_grid_layout.addWidget(tabwidth_spin.slabel, 1, 2)
indent_tab_layout.addLayout(indent_tab_grid_layout)
indent_tab_layout.addStretch(1)
sourcecode_layout.addLayout(indent_tab_layout)
sourcecode_layout.addWidget(tab_mode_box)
sourcecode_layout.addWidget(ibackspace_box)
sourcecode_layout.addWidget(removetrail_box)
sourcecode_group.setLayout(sourcecode_layout)
eol_group = QGroupBox(_("End-of-line characters"))
eol_label = QLabel(_("When opening a text file containing "
"mixed end-of-line characters (this may "
"raise syntax errors in the consoles "
"on Windows platforms), Spyder may fix the "
"file automatically."))
eol_label.setWordWrap(True)
check_eol_box = newcb(_("Fix automatically and show warning "
"message box"),
'check_eol_chars', default=True)
eol_layout = QVBoxLayout()
eol_layout.addWidget(eol_label)
eol_layout.addWidget(check_eol_box)
eol_group.setLayout(eol_layout)
tabs = QTabWidget()
tabs.addTab(self.create_tab(interface_group, display_group),
_("Display"))
tabs.addTab(self.create_tab(introspection_group, analysis_group),
_("Code Introspection/Analysis"))
tabs.addTab(self.create_tab(template_btn, run_group, run_selection_group,
sourcecode_group, eol_group),
_("Advanced settings"))
vlayout = QVBoxLayout()
vlayout.addWidget(tabs)
self.setLayout(vlayout)
class Editor(SpyderPluginWidget):
"""
Multi-file Editor widget
"""
CONF_SECTION = 'editor'
CONFIGWIDGET_CLASS = EditorConfigPage
TEMPFILE_PATH = get_conf_path('temp.py')
TEMPLATE_PATH = get_conf_path('template.py')
DISABLE_ACTIONS_WHEN_HIDDEN = False # SpyderPluginWidget class attribute
# Signals
run_in_current_ipyclient = Signal(str, str, str, bool, bool, bool, bool)
exec_in_extconsole = Signal(str, bool)
redirect_stdio = Signal(bool)
open_dir = Signal(str)
breakpoints_saved = Signal()
run_in_current_extconsole = Signal(str, str, str, bool, bool)
open_file_update = Signal(str)
def __init__(self, parent, ignore_last_opened_files=False):
if PYQT5:
SpyderPluginWidget.__init__(self, parent, main=parent)
else:
SpyderPluginWidget.__init__(self, parent)
self.__set_eol_chars = True
# Creating template if it doesn't already exist
if not osp.isfile(self.TEMPLATE_PATH):
if os.name == "nt":
shebang = []
else:
shebang = ['#!/usr/bin/env python' + ('2' if PY2 else '3')]
header = shebang + ['# -*- coding: utf-8 -*-', '"""', 'Created on %(date)s',
'', '@author: %(username)s', '"""', '', 'import tellurium as te',
'import roadrunner', '', 'r = te.loada("""', '', '""")','']
try:
encoding.write(os.linesep.join(header), self.TEMPLATE_PATH,
'utf-8')
except EnvironmentError:
pass
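        # Editorial note (derived from the header built above): on POSIX the
        # generated template.py starts with a '#!/usr/bin/env python3' (or
        # python2) shebang, then the coding line, a docstring carrying the
        # %(date)s and %(username)s placeholders, and the tellurium/roadrunner
        # boilerplate; on Windows the shebang is omitted.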
self.projects = None
self.outlineexplorer = None
self.help = None
self.editorstacks = None
self.editorwindows = None
self.editorwindows_to_be_created = None
self.file_dependent_actions = []
self.pythonfile_dependent_actions = []
self.dock_toolbar_actions = None
self.edit_menu_actions = None #XXX: find another way to notify Spyder
# (see spyder.py: 'update_edit_menu' method)
self.search_menu_actions = None #XXX: same thing ('update_search_menu')
self.stack_menu_actions = None
# Initialize plugin
self.initialize_plugin()
# Configuration dialog size
self.dialog_size = None
statusbar = self.main.statusBar()
self.readwrite_status = ReadWriteStatus(self, statusbar)
self.eol_status = EOLStatus(self, statusbar)
self.encoding_status = EncodingStatus(self, statusbar)
self.cursorpos_status = CursorPositionStatus(self, statusbar)
layout = QVBoxLayout()
self.dock_toolbar = QToolBar(self)
add_actions(self.dock_toolbar, self.dock_toolbar_actions)
layout.addWidget(self.dock_toolbar)
self.last_edit_cursor_pos = None
self.cursor_pos_history = []
self.cursor_pos_index = None
self.__ignore_cursor_position = True
self.editorstacks = []
self.last_focus_editorstack = {}
self.editorwindows = []
self.editorwindows_to_be_created = []
self.toolbar_list = None
self.menu_list = None
# Don't start IntrospectionManager when running tests because
# it consumes a lot of memory
if (running_under_pytest()
and not os.environ.get('SPY_TEST_USE_INTROSPECTION')):
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
self.introspector = Mock()
else:
self.introspector = IntrospectionManager(
extra_path=self.main.get_spyder_pythonpath())
# Setup new windows:
self.main.all_actions_defined.connect(self.setup_other_windows)
# Change module completions when PYTHONPATH changes
self.main.sig_pythonpath_changed.connect(self.set_path)
# Find widget
self.find_widget = FindReplace(self, enable_replace=True)
self.find_widget.hide()
self.find_widget.visibility_changed.connect(
lambda vs: self.rehighlight_cells())
self.register_widget_shortcuts(self.find_widget)
# Tabbed editor widget + Find/Replace widget
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
self.editorsplitter = EditorSplitter(self, self,
self.stack_menu_actions, first=True)
editor_layout.addWidget(self.editorsplitter)
editor_layout.addWidget(self.find_widget)
# Splitter: editor widgets (see above) + outline explorer
self.splitter = QSplitter(self)
self.splitter.setContentsMargins(0, 0, 0, 0)
self.splitter.addWidget(editor_widgets)
self.splitter.setStretchFactor(0, 5)
self.splitter.setStretchFactor(1, 1)
layout.addWidget(self.splitter)
self.setLayout(layout)
self.setFocusPolicy(Qt.ClickFocus)
# Editor's splitter state
state = self.get_option('splitter_state', None)
if state is not None:
self.splitter.restoreState( QByteArray().fromHex(
str(state).encode('utf-8')) )
self.recent_files = self.get_option('recent_files', [])
self.untitled_num = 0
# Parameters of last file execution:
self.__last_ic_exec = None # internal console
self.__last_ec_exec = None # external console
# File types and filters used by the Open dialog
self.edit_filetypes = None
self.edit_filters = None
self.__ignore_cursor_position = False
current_editor = self.get_current_editor()
if current_editor is not None:
filename = self.get_current_filename()
position = current_editor.get_position('cursor')
self.add_cursor_position_to_history(filename, position)
self.update_cursorpos_actions()
self.set_path()
def set_projects(self, projects):
self.projects = projects
@Slot()
def show_hide_projects(self):
if self.projects is not None:
dw = self.projects.dockwidget
if dw.isVisible():
dw.hide()
else:
dw.show()
dw.raise_()
self.switch_to_plugin()
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
for editorstack in self.editorstacks:
editorstack.set_outlineexplorer(self.outlineexplorer)
self.editorstacks[0].initialize_outlineexplorer()
self.outlineexplorer.edit_goto.connect(
lambda filenames, goto, word:
self.load(filenames=filenames, goto=goto, word=word,
editorwindow=self))
self.outlineexplorer.edit.connect(
lambda filenames:
self.load(filenames=filenames, editorwindow=self))
def set_help(self, help_plugin):
self.help = help_plugin
for editorstack in self.editorstacks:
editorstack.set_help(self.help)
#------ Private API --------------------------------------------------------
def restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
# Widget is now visible, we may center cursor on top level editor:
try:
self.get_current_editor().centerCursor()
except AttributeError:
pass
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
title = _('Editor')
if self.dockwidget:
filename = self.get_current_filename()
if self.dockwidget.dock_tabbar:
if filename and self.dockwidget.dock_tabbar.count() < 2:
title += ' - ' + to_text_string(filename)
else:
title += ' - ' + to_text_string(filename)
return title
def get_plugin_icon(self):
"""Return widget icon."""
return ima.icon('edit')
def get_focus_widget(self):
"""
Return the widget to give focus to.
        This happens when the plugin's dockwidget is raised to the top level.
"""
return self.get_current_editor()
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginWidget.visibility_changed(self, enable)
if self.dockwidget.isWindow():
self.dock_toolbar.show()
else:
self.dock_toolbar.hide()
if enable:
self.refresh_plugin()
self.update_plugin_title.emit()
def refresh_plugin(self):
"""Refresh editor plugin"""
editorstack = self.get_current_editorstack()
editorstack.refresh()
self.refresh_save_all_action()
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
state = self.splitter.saveState()
self.set_option('splitter_state', qbytearray_to_str(state))
filenames = []
editorstack = self.editorstacks[0]
active_project_path = None
if self.projects is not None:
active_project_path = self.projects.get_active_project_path()
if not active_project_path:
self.set_open_filenames()
else:
self.projects.set_project_filenames(
[finfo.filename for finfo in editorstack.data])
self.set_option('layout_settings',
self.editorsplitter.get_layout_settings())
self.set_option('windows_layout_settings',
[win.get_layout_settings() for win in self.editorwindows])
self.set_option('recent_files', self.recent_files)
try:
if not editorstack.save_if_changed(cancelable) and cancelable:
return False
else:
for win in self.editorwindows[:]:
win.close()
return True
except IndexError:
return True
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
# ---- File menu and toolbar ----
self.new_action = create_action(
self,
_("&New file..."),
icon=ima.icon('filenew'), tip=_("New file"),
triggered=self.new,
context=Qt.WidgetShortcut
)
self.register_shortcut(self.new_action, context="Editor",
name="New file", add_sc_to_tip=True)
self.open_last_closed_action = create_action(
self,
_("O&pen last closed"),
tip=_("Open last closed"),
triggered=self.open_last_closed
)
self.register_shortcut(self.open_last_closed_action, context="Editor",
name="Open last closed")
self.open_action = create_action(self, _("&Open..."),
icon=ima.icon('fileopen'), tip=_("Open file"),
triggered=self.load,
context=Qt.WidgetShortcut)
self.register_shortcut(self.open_action, context="Editor",
name="Open file", add_sc_to_tip=True)
self.revert_action = create_action(self, _("&Revert"),
icon=ima.icon('revert'), tip=_("Revert file from disk"),
triggered=self.revert)
self.save_action = create_action(self, _("&Save"),
icon=ima.icon('filesave'), tip=_("Save file"),
triggered=self.save,
context=Qt.WidgetShortcut)
self.register_shortcut(self.save_action, context="Editor",
name="Save file", add_sc_to_tip=True)
self.save_all_action = create_action(self, _("Sav&e all"),
icon=ima.icon('save_all'), tip=_("Save all files"),
triggered=self.save_all,
context=Qt.WidgetShortcut)
self.register_shortcut(self.save_all_action, context="Editor",
name="Save all", add_sc_to_tip=True)
save_as_action = create_action(self, _("Save &as..."), None,
ima.icon('filesaveas'), tip=_("Save current file as..."),
triggered=self.save_as,
context=Qt.WidgetShortcut)
self.register_shortcut(save_as_action, "Editor", "Save As")
save_copy_as_action = create_action(self, _("Save copy as..."), None,
ima.icon('filesaveas'), _("Save copy of current file as..."),
triggered=self.save_copy_as)
print_preview_action = create_action(self, _("Print preview..."),
tip=_("Print preview..."), triggered=self.print_preview)
self.print_action = create_action(self, _("&Print..."),
icon=ima.icon('print'), tip=_("Print current file..."),
triggered=self.print_file)
# Shortcut for close_action is defined in widgets/editor.py
self.close_action = create_action(self, _("&Close"),
icon=ima.icon('fileclose'), tip=_("Close current file"),
triggered=self.close_file)
self.close_all_action = create_action(self, _("C&lose all"),
icon=ima.icon('filecloseall'), tip=_("Close all opened files"),
triggered=self.close_all_files,
context=Qt.WidgetShortcut)
self.register_shortcut(self.close_all_action, context="Editor",
name="Close all")
# ---- Find menu and toolbar ----
_text = _("&Find text")
find_action = create_action(self, _text, icon=ima.icon('find'),
tip=_text, triggered=self.find,
context=Qt.WidgetShortcut)
self.register_shortcut(find_action, context="_",
name="Find text", add_sc_to_tip=True)
find_next_action = create_action(self, _("Find &next"),
icon=ima.icon('findnext'),
triggered=self.find_next,
context=Qt.WidgetShortcut)
self.register_shortcut(find_next_action, context="_",
name="Find next")
find_previous_action = create_action(self, _("Find &previous"),
icon=ima.icon('findprevious'),
triggered=self.find_previous,
context=Qt.WidgetShortcut)
self.register_shortcut(find_previous_action, context="_",
name="Find previous")
_text = _("&Replace text")
replace_action = create_action(self, _text, icon=ima.icon('replace'),
tip=_text, triggered=self.replace,
context=Qt.WidgetShortcut)
self.register_shortcut(replace_action, context="_",
name="Replace text")
# ---- Debug menu and toolbar ----
set_clear_breakpoint_action = create_action(self,
_("Set/Clear breakpoint"),
icon=ima.icon('breakpoint_big'),
triggered=self.set_or_clear_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_clear_breakpoint_action, context="Editor",
name="Breakpoint")
set_cond_breakpoint_action = create_action(self,
_("Set/Edit conditional breakpoint"),
icon=ima.icon('breakpoint_cond_big'),
triggered=self.set_or_edit_conditional_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_cond_breakpoint_action, context="Editor",
name="Conditional breakpoint")
clear_all_breakpoints_action = create_action(self,
_('Clear breakpoints in all files'),
triggered=self.clear_all_breakpoints)
self.winpdb_action = create_action(self, _("Debug with winpdb"),
triggered=self.run_winpdb)
self.winpdb_action.setEnabled(WINPDB_PATH is not None and PY2)
# --- Debug toolbar ---
debug_action = create_action(self, _("&Debug"),
icon=ima.icon('debug'),
tip=_("Debug file"),
triggered=self.debug_file)
self.register_shortcut(debug_action, context="_", name="Debug",
add_sc_to_tip=True)
debug_next_action = create_action(self, _("Step"),
icon=ima.icon('arrow-step-over'), tip=_("Run current line"),
triggered=lambda: self.debug_command("next"))
self.register_shortcut(debug_next_action, "_", "Debug Step Over",
add_sc_to_tip=True)
debug_continue_action = create_action(self, _("Continue"),
icon=ima.icon('arrow-continue'),
tip=_("Continue execution until next breakpoint"),
triggered=lambda: self.debug_command("continue"))
self.register_shortcut(debug_continue_action, "_", "Debug Continue",
add_sc_to_tip=True)
debug_step_action = create_action(self, _("Step Into"),
icon=ima.icon('arrow-step-in'),
tip=_("Step into function or method of current line"),
triggered=lambda: self.debug_command("step"))
self.register_shortcut(debug_step_action, "_", "Debug Step Into",
add_sc_to_tip=True)
debug_return_action = create_action(self, _("Step Return"),
icon=ima.icon('arrow-step-out'),
tip=_("Run until current function or method returns"),
triggered=lambda: self.debug_command("return"))
self.register_shortcut(debug_return_action, "_", "Debug Step Return",
add_sc_to_tip=True)
debug_exit_action = create_action(self, _("Stop"),
icon=ima.icon('stop_debug'), tip=_("Stop debugging"),
triggered=lambda: self.debug_command("exit"))
self.register_shortcut(debug_exit_action, "_", "Debug Exit",
add_sc_to_tip=True)
# --- Run toolbar ---
run_action = create_action(self, _("&Run"), icon=ima.icon('run'),
tip=_("Run file"),
triggered=self.run_file)
self.register_shortcut(run_action, context="_", name="Run",
add_sc_to_tip=True)
configure_action = create_action(self, _("&Configuration per file..."),
icon=ima.icon('run_settings'),
tip=_("Run settings"),
menurole=QAction.NoRole,
triggered=self.edit_run_configurations)
self.register_shortcut(configure_action, context="_",
name="Configure", add_sc_to_tip=True)
re_run_action = create_action(self, _("Re-run &last script"),
icon=ima.icon('run_again'),
tip=_("Run again last file"),
triggered=self.re_run_file)
self.register_shortcut(re_run_action, context="_",
name="Re-run last script",
add_sc_to_tip=True)
run_selected_action = create_action(self, _("Run &selection or "
"current line"),
icon=ima.icon('run_selection'),
tip=_("Run selection or "
"current line"),
triggered=self.run_selection,
context=Qt.WidgetShortcut)
self.register_shortcut(run_selected_action, context="Editor",
name="Run selection", add_sc_to_tip=True)
run_cell_action = create_action(self,
_("Run cell"),
icon=ima.icon('run_cell'),
shortcut=QKeySequence(RUN_CELL_SHORTCUT),
tip=_("Run current cell (Ctrl+Enter)\n"
"[Use #%% to create cells]"),
triggered=self.run_cell,
context=Qt.WidgetShortcut)
run_cell_advance_action = create_action(self,
_("Run cell and advance"),
icon=ima.icon('run_cell_advance'),
shortcut=QKeySequence(RUN_CELL_AND_ADVANCE_SHORTCUT),
tip=_("Run current cell and go to the next one "
"(Shift+Enter)"),
triggered=self.run_cell_and_advance,
context=Qt.WidgetShortcut)
re_run_last_cell_action = create_action(self,
_("Re-run last cell"),
tip=_("Re run last cell "),
triggered=self.re_run_last_cell,
context=Qt.WidgetShortcut)
self.register_shortcut(re_run_last_cell_action,
context="Editor",
name='re-run last cell',
add_sc_to_tip=True)
# --- Source code Toolbar ---
self.todo_list_action = create_action(self,
_("Show todo list"), icon=ima.icon('todo_list'),
tip=_("Show comments list (TODO/FIXME/XXX/HINT/TIP/@todo/"
"HACK/BUG/OPTIMIZE/!!!/???)"),
triggered=self.go_to_next_todo)
self.todo_menu = QMenu(self)
self.todo_list_action.setMenu(self.todo_menu)
self.todo_menu.aboutToShow.connect(self.update_todo_menu)
self.warning_list_action = create_action(self,
_("Show warning/error list"), icon=ima.icon('wng_list'),
tip=_("Show code analysis warnings/errors"),
triggered=self.go_to_next_warning)
self.warning_menu = QMenu(self)
self.warning_list_action.setMenu(self.warning_menu)
self.warning_menu.aboutToShow.connect(self.update_warning_menu)
self.previous_warning_action = create_action(self,
_("Previous warning/error"), icon=ima.icon('prev_wng'),
tip=_("Go to previous code analysis warning/error"),
triggered=self.go_to_previous_warning)
self.next_warning_action = create_action(self,
_("Next warning/error"), icon=ima.icon('next_wng'),
tip=_("Go to next code analysis warning/error"),
triggered=self.go_to_next_warning)
self.previous_edit_cursor_action = create_action(self,
_("Last edit location"), icon=ima.icon('last_edit_location'),
tip=_("Go to last edit location"),
triggered=self.go_to_last_edit_location,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_edit_cursor_action,
context="Editor",
name="Last edit location",
add_sc_to_tip=True)
self.previous_cursor_action = create_action(self,
_("Previous cursor position"), icon=ima.icon('prev_cursor'),
tip=_("Go to previous cursor position"),
triggered=self.go_to_previous_cursor_position,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_cursor_action,
context="Editor",
name="Previous cursor position",
add_sc_to_tip=True)
self.next_cursor_action = create_action(self,
_("Next cursor position"), icon=ima.icon('next_cursor'),
tip=_("Go to next cursor position"),
triggered=self.go_to_next_cursor_position,
context=Qt.WidgetShortcut)
self.register_shortcut(self.next_cursor_action,
context="Editor",
name="Next cursor position",
add_sc_to_tip=True)
# --- Edit Toolbar ---
self.toggle_comment_action = create_action(self,
_("Comment")+"/"+_("Uncomment"), icon=ima.icon('comment'),
tip=_("Comment current line or selection"),
triggered=self.toggle_comment, context=Qt.WidgetShortcut)
self.register_shortcut(self.toggle_comment_action, context="Editor",
name="Toggle comment")
blockcomment_action = create_action(self, _("Add &block comment"),
tip=_("Add block comment around "
"current line or selection"),
triggered=self.blockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(blockcomment_action, context="Editor",
name="Blockcomment")
unblockcomment_action = create_action(self,
_("R&emove block comment"),
            tip=_("Remove comment block around "
                  "current line or selection"),
triggered=self.unblockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(unblockcomment_action, context="Editor",
name="Unblockcomment")
# ----------------------------------------------------------------------
# The following action shortcuts are hard-coded in CodeEditor
# keyPressEvent handler (the shortcut is here only to inform user):
# (context=Qt.WidgetShortcut -> disable shortcut for other widgets)
self.indent_action = create_action(self,
_("Indent"), "Tab", icon=ima.icon('indent'),
tip=_("Indent current line or selection"),
triggered=self.indent, context=Qt.WidgetShortcut)
self.unindent_action = create_action(self,
_("Unindent"), "Shift+Tab", icon=ima.icon('unindent'),
tip=_("Unindent current line or selection"),
triggered=self.unindent, context=Qt.WidgetShortcut)
self.text_uppercase_action = create_action(self,
_("Toggle Uppercase"),
tip=_("Change to uppercase current line or selection"),
triggered=self.text_uppercase, context=Qt.WidgetShortcut)
self.register_shortcut(self.text_uppercase_action, context="Editor",
name="transform to uppercase")
self.text_lowercase_action = create_action(self,
_("Toggle Lowercase"),
tip=_("Change to lowercase current line or selection"),
triggered=self.text_lowercase, context=Qt.WidgetShortcut)
self.register_shortcut(self.text_lowercase_action, context="Editor",
name="transform to lowercase")
# ----------------------------------------------------------------------
self.win_eol_action = create_action(self,
_("Carriage return and line feed (Windows)"),
toggled=lambda checked: self.toggle_eol_chars('nt', checked))
self.linux_eol_action = create_action(self,
_("Line feed (UNIX)"),
toggled=lambda checked: self.toggle_eol_chars('posix', checked))
self.mac_eol_action = create_action(self,
_("Carriage return (Mac)"),
toggled=lambda checked: self.toggle_eol_chars('mac', checked))
eol_action_group = QActionGroup(self)
eol_actions = (self.win_eol_action, self.linux_eol_action,
self.mac_eol_action)
add_actions(eol_action_group, eol_actions)
eol_menu = QMenu(_("Convert end-of-line characters"), self)
add_actions(eol_menu, eol_actions)
trailingspaces_action = create_action(self,
_("Remove trailing spaces"),
triggered=self.remove_trailing_spaces)
self.showblanks_action = create_action(self, _("Show blank spaces"),
toggled=self.toggle_show_blanks)
fixindentation_action = create_action(self, _("Fix indentation"),
tip=_("Replace tab characters by space characters"),
triggered=self.fix_indentation)
gotoline_action = create_action(self, _("Go to line..."),
icon=ima.icon('gotoline'),
triggered=self.go_to_line,
context=Qt.WidgetShortcut)
self.register_shortcut(gotoline_action, context="Editor",
name="Go to line")
workdir_action = create_action(self,
_("Set console working directory"),
icon=ima.icon('DirOpenIcon'),
tip=_("Set current console (and file explorer) working "
"directory to current script directory"),
triggered=self.__set_workdir)
self.max_recent_action = create_action(self,
_("Maximum number of recent files..."),
triggered=self.change_max_recent_files)
self.clear_recent_action = create_action(self,
_("Clear this list"), tip=_("Clear recent files list"),
triggered=self.clear_recent_files)
# ---- File menu/toolbar construction ----
self.recent_file_menu = QMenu(_("Open &recent"), self)
self.recent_file_menu.aboutToShow.connect(self.update_recent_file_menu)
file_menu_actions = [self.new_action,
MENU_SEPARATOR,
self.open_action,
self.open_last_closed_action,
self.recent_file_menu,
MENU_SEPARATOR,
MENU_SEPARATOR,
self.save_action,
self.save_all_action,
save_as_action,
save_copy_as_action,
self.revert_action,
MENU_SEPARATOR,
print_preview_action,
self.print_action,
MENU_SEPARATOR,
self.close_action,
self.close_all_action,
MENU_SEPARATOR]
self.main.file_menu_actions += file_menu_actions
file_toolbar_actions = ([self.new_action, self.open_action,
self.save_action, self.save_all_action] +
self.main.file_toolbar_actions)
self.main.file_toolbar_actions = file_toolbar_actions
# ---- Find menu/toolbar construction ----
self.main.search_menu_actions = [find_action,
find_next_action,
find_previous_action,
replace_action]
self.main.search_toolbar_actions = [find_action,
find_next_action,
replace_action]
# ---- Edit menu/toolbar construction ----
self.edit_menu_actions = [self.toggle_comment_action,
blockcomment_action, unblockcomment_action,
self.indent_action, self.unindent_action,
self.text_uppercase_action,
self.text_lowercase_action]
self.main.edit_menu_actions += [MENU_SEPARATOR] + self.edit_menu_actions
edit_toolbar_actions = [self.toggle_comment_action,
self.unindent_action, self.indent_action]
self.main.edit_toolbar_actions += edit_toolbar_actions
# ---- Search menu/toolbar construction ----
self.search_menu_actions = [gotoline_action]
self.main.search_menu_actions += self.search_menu_actions
self.main.search_toolbar_actions += [gotoline_action]
# ---- Run menu/toolbar construction ----
run_menu_actions = [run_action, run_cell_action,
run_cell_advance_action,
re_run_last_cell_action, MENU_SEPARATOR,
run_selected_action, re_run_action,
configure_action, MENU_SEPARATOR]
self.main.run_menu_actions += run_menu_actions
run_toolbar_actions = [run_action, run_cell_action,
run_cell_advance_action, run_selected_action,
re_run_action]
self.main.run_toolbar_actions += run_toolbar_actions
# ---- Debug menu/toolbar construction ----
# NOTE: 'list_breakpoints' is used by the breakpoints
# plugin to add its "List breakpoints" action to this
# menu
debug_menu_actions = [debug_action,
debug_next_action,
debug_step_action,
debug_return_action,
debug_continue_action,
debug_exit_action,
MENU_SEPARATOR,
set_clear_breakpoint_action,
set_cond_breakpoint_action,
clear_all_breakpoints_action,
'list_breakpoints',
MENU_SEPARATOR,
self.winpdb_action]
self.main.debug_menu_actions += debug_menu_actions
debug_toolbar_actions = [debug_action, debug_next_action,
debug_step_action, debug_return_action,
debug_continue_action, debug_exit_action]
self.main.debug_toolbar_actions += debug_toolbar_actions
# ---- Source menu/toolbar construction ----
source_menu_actions = [eol_menu,
self.showblanks_action,
trailingspaces_action,
fixindentation_action,
MENU_SEPARATOR,
self.todo_list_action,
self.warning_list_action,
self.previous_warning_action,
self.next_warning_action,
MENU_SEPARATOR,
self.previous_edit_cursor_action,
self.previous_cursor_action,
self.next_cursor_action]
self.main.source_menu_actions += source_menu_actions
source_toolbar_actions = [self.todo_list_action,
self.warning_list_action,
self.previous_warning_action,
self.next_warning_action,
MENU_SEPARATOR,
self.previous_edit_cursor_action,
self.previous_cursor_action,
self.next_cursor_action]
self.main.source_toolbar_actions += source_toolbar_actions
# ---- Dock widget and file dependent actions ----
self.dock_toolbar_actions = (file_toolbar_actions +
[MENU_SEPARATOR] +
source_toolbar_actions +
[MENU_SEPARATOR] +
run_toolbar_actions +
[MENU_SEPARATOR] +
debug_toolbar_actions +
[MENU_SEPARATOR] +
edit_toolbar_actions)
self.pythonfile_dependent_actions = [run_action, configure_action,
set_clear_breakpoint_action,
set_cond_breakpoint_action,
debug_action, run_selected_action,
run_cell_action,
run_cell_advance_action,
re_run_last_cell_action,
blockcomment_action,
unblockcomment_action,
self.winpdb_action]
self.cythonfile_compatible_actions = [run_action, configure_action]
self.file_dependent_actions = self.pythonfile_dependent_actions + \
[self.save_action, save_as_action, save_copy_as_action,
print_preview_action, self.print_action,
self.save_all_action, gotoline_action, workdir_action,
self.close_action, self.close_all_action,
self.toggle_comment_action, self.revert_action,
self.indent_action, self.unindent_action]
self.stack_menu_actions = [gotoline_action, workdir_action]
return self.file_dependent_actions
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.restore_scrollbar_position.connect(
self.restore_scrollbar_position)
self.main.console.edit_goto.connect(self.load)
self.exec_in_extconsole.connect(self.main.execute_in_external_console)
self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
self.open_dir.connect(self.main.workingdirectory.chdir)
self.set_help(self.main.help)
if self.main.outlineexplorer is not None:
self.set_outlineexplorer(self.main.outlineexplorer)
editorstack = self.get_current_editorstack()
if not editorstack.data:
self.__load_temp_file()
self.main.add_dockwidget(self)
self.main.add_to_fileswitcher(self, editorstack.tabs, editorstack.data,
ima.icon('TextFileIcon'))
def update_font(self):
"""Update font from Preferences"""
font = self.get_plugin_font()
color_scheme = self.get_color_scheme()
for editorstack in self.editorstacks:
editorstack.set_default_font(font, color_scheme)
completion_size = CONF.get('main', 'completion/size')
for finfo in editorstack.data:
comp_widget = finfo.editor.completion_widget
comp_widget.setup_appearance(completion_size, font)
#------ Focus tabwidget
def __get_focus_editorstack(self):
fwidget = QApplication.focusWidget()
if isinstance(fwidget, EditorStack):
return fwidget
else:
for editorstack in self.editorstacks:
if editorstack.isAncestorOf(fwidget):
return editorstack
def set_last_focus_editorstack(self, editorwindow, editorstack):
self.last_focus_editorstack[editorwindow] = editorstack
self.last_focus_editorstack[None] = editorstack # very last editorstack
def get_last_focus_editorstack(self, editorwindow=None):
return self.last_focus_editorstack[editorwindow]
def remove_last_focus_editorstack(self, editorstack):
for editorwindow, widget in list(self.last_focus_editorstack.items()):
if widget is editorstack:
self.last_focus_editorstack[editorwindow] = None
def save_focus_editorstack(self):
editorstack = self.__get_focus_editorstack()
if editorstack is not None:
for win in [self]+self.editorwindows:
if win.isAncestorOf(editorstack):
self.set_last_focus_editorstack(win, editorstack)
# ------ Handling editorstacks
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
self.register_widget_shortcuts(editorstack)
if len(self.editorstacks) > 1 and self.main is not None:
            # The first editorstack is registered automatically with Spyder's
# main window through the `register_plugin` method. Only additional
# editors added by splitting need to be registered.
# See Issue #5057.
self.main.fileswitcher.sig_goto_file.connect(
editorstack.set_stack_index)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
self.set_last_focus_editorstack(self, editorstack)
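        # Only stacks beyond the first one are closable: the first
        # editorstack must always survive, since it holds the master copies
        # of all open files that the other stacks are cloned from.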
        editorstack.set_closable(len(self.editorstacks) > 1)
if self.outlineexplorer is not None:
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.readonly_changed)
editorstack.encoding_changed.connect(
self.encoding_status.encoding_changed)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.cursor_position_changed)
editorstack.sig_refresh_eol_chars.connect(self.eol_status.eol_changed)
editorstack.set_help(self.help)
editorstack.set_io_actions(self.new_action, self.open_action,
self.save_action, self.revert_action)
editorstack.set_tempfile_path(self.TEMPFILE_PATH)
editorstack.set_introspector(self.introspector)
settings = (
('set_pyflakes_enabled', 'code_analysis/pyflakes'),
('set_pep8_enabled', 'code_analysis/pep8'),
('set_todolist_enabled', 'todo_list'),
('set_realtime_analysis_enabled', 'realtime_analysis'),
('set_realtime_analysis_timeout', 'realtime_analysis/timeout'),
('set_blanks_enabled', 'blank_spaces'),
('set_linenumbers_enabled', 'line_numbers'),
('set_edgeline_enabled', 'edge_line'),
('set_edgeline_column', 'edge_line_column'),
('set_codecompletion_auto_enabled', 'codecompletion/auto'),
('set_codecompletion_case_enabled', 'codecompletion/case_sensitive'),
('set_codecompletion_enter_enabled', 'codecompletion/enter_key'),
('set_calltips_enabled', 'calltips'),
('set_go_to_definition_enabled', 'go_to_definition'),
('set_focus_to_editor', 'focus_to_editor'),
('set_close_parentheses_enabled', 'close_parentheses'),
('set_close_quotes_enabled', 'close_quotes'),
('set_add_colons_enabled', 'add_colons'),
('set_auto_unindent_enabled', 'auto_unindent'),
('set_indent_chars', 'indent_chars'),
('set_tab_stop_width_spaces', 'tab_stop_width_spaces'),
('set_wrap_enabled', 'wrap'),
('set_tabmode_enabled', 'tab_always_indent'),
('set_intelligent_backspace_enabled', 'intelligent_backspace'),
('set_highlight_current_line_enabled', 'highlight_current_line'),
('set_highlight_current_cell_enabled', 'highlight_current_cell'),
('set_occurrence_highlighting_enabled', 'occurrence_highlighting'),
('set_occurrence_highlighting_timeout', 'occurrence_highlighting/timeout'),
('set_checkeolchars_enabled', 'check_eol_chars'),
('set_tabbar_visible', 'show_tab_bar'),
('set_always_remove_trailing_spaces', 'always_remove_trailing_spaces'),
)
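        # Apply each (setter, option) pair: every editorstack setter is
        # called with the corresponding value from the plugin configuration.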
for method, setting in settings:
getattr(editorstack, method)(self.get_option(setting))
editorstack.set_help_enabled(CONF.get('help', 'connect/editor'))
color_scheme = self.get_color_scheme()
editorstack.set_default_font(self.get_plugin_font(), color_scheme)
editorstack.starting_long_process.connect(self.starting_long_process)
editorstack.ending_long_process.connect(self.ending_long_process)
# Redirect signals
editorstack.redirect_stdio.connect(
lambda state: self.redirect_stdio.emit(state))
editorstack.exec_in_extconsole.connect(
lambda text, option:
self.exec_in_extconsole.emit(text, option))
editorstack.update_plugin_title.connect(
lambda: self.update_plugin_title.emit())
editorstack.editor_focus_changed.connect(self.save_focus_editorstack)
editorstack.editor_focus_changed.connect(self.set_editorstack_for_introspection)
editorstack.editor_focus_changed.connect(self.main.plugin_focus_changed)
editorstack.zoom_in.connect(lambda: self.zoom(1))
editorstack.zoom_out.connect(lambda: self.zoom(-1))
editorstack.zoom_reset.connect(lambda: self.zoom(0))
editorstack.sig_new_file.connect(lambda s: self.new(text=s))
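        # `sig_new_file` is an overloaded Qt signal: the line above connects
        # its string overload, while `sig_new_file[()]` below selects the
        # zero-argument overload, used when no initial text is provided.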
editorstack.sig_new_file[()].connect(self.new)
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.create_new_window.connect(self.create_new_window)
editorstack.opened_files_list_changed.connect(
self.opened_files_list_changed)
editorstack.analysis_results_changed.connect(
self.analysis_results_changed)
editorstack.todo_results_changed.connect(self.todo_results_changed)
editorstack.update_code_analysis_actions.connect(
self.update_code_analysis_actions)
editorstack.update_code_analysis_actions.connect(
self.update_todo_actions)
editorstack.refresh_file_dependent_actions.connect(
self.refresh_file_dependent_actions)
editorstack.refresh_save_all_action.connect(self.refresh_save_all_action)
editorstack.sig_refresh_eol_chars.connect(self.refresh_eol_chars)
editorstack.save_breakpoints.connect(self.save_breakpoints)
editorstack.text_changed_at.connect(self.text_changed_at)
editorstack.current_file_changed.connect(self.current_file_changed)
editorstack.plugin_load.connect(self.load)
editorstack.plugin_load[()].connect(self.load)
editorstack.edit_goto.connect(self.load)
editorstack.sig_save_as.connect(self.save_as)
editorstack.sig_prev_edit_pos.connect(self.go_to_last_edit_location)
editorstack.sig_prev_cursor.connect(self.go_to_previous_cursor_position)
editorstack.sig_next_cursor.connect(self.go_to_next_cursor_position)
def unregister_editorstack(self, editorstack):
"""Removing editorstack only if it's not the last remaining"""
self.remove_last_focus_editorstack(editorstack)
if len(self.editorstacks) > 1:
index = self.editorstacks.index(editorstack)
self.editorstacks.pop(index)
return True
else:
# editorstack was not removed!
return False
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
for finfo in editorstack.data:
self.register_widget_shortcuts(finfo.editor)
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
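                # Block signals while closing the file so the close is not
                # re-broadcast to the other editorstacks, which would call
                # this slot recursively.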
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
def set_editorstack_for_introspection(self):
"""
Set the current editorstack to be used by the IntrospectionManager
instance
"""
editorstack = self.__get_focus_editorstack()
if editorstack is not None:
self.introspector.set_editor_widget(editorstack)
# Disconnect active signals
try:
self.introspector.sig_send_to_help.disconnect()
self.introspector.sig_edit_goto.disconnect()
except TypeError:
pass
            # Connect the signals to the current editorstack
self.introspector.sig_send_to_help.connect(editorstack.send_to_help)
self.introspector.sig_edit_goto.connect(
lambda fname, lineno, name:
editorstack.edit_goto.emit(fname, lineno, name))
#------ Handling editor windows
def setup_other_windows(self):
"""Setup toolbars and menus for 'New window' instances"""
self.toolbar_list = ((_("File toolbar"), "file_toolbar",
self.main.file_toolbar_actions),
(_("Search toolbar"), "search_toolbar",
self.main.search_menu_actions),
(_("Source toolbar"), "source_toolbar",
self.main.source_toolbar_actions),
(_("Run toolbar"), "run_toolbar",
self.main.run_toolbar_actions),
(_("Debug toolbar"), "debug_toolbar",
self.main.debug_toolbar_actions),
(_("Edit toolbar"), "edit_toolbar",
self.main.edit_toolbar_actions))
self.menu_list = ((_("&File"), self.main.file_menu_actions),
(_("&Edit"), self.main.edit_menu_actions),
(_("&Search"), self.main.search_menu_actions),
(_("Sour&ce"), self.main.source_menu_actions),
(_("&Run"), self.main.run_menu_actions),
(_("&Tools"), self.main.tools_menu_actions),
(_("&View"), []),
(_("&Help"), self.main.help_menu_actions))
# Create pending new windows:
for layout_settings in self.editorwindows_to_be_created:
win = self.create_new_window()
win.set_layout_settings(layout_settings)
def create_new_window(self):
oe_options = self.outlineexplorer.get_options()
window = EditorMainWindow(self, self.stack_menu_actions,
self.toolbar_list, self.menu_list,
show_fullpath=oe_options['show_fullpath'],
show_all_files=oe_options['show_all_files'],
show_comments=oe_options['show_comments'])
window.add_toolbars_to_menu("&View", window.get_toolbars())
window.load_toolbars()
window.resize(self.size())
window.show()
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
return window
def register_editorwindow(self, window):
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
self.editorwindows.pop(self.editorwindows.index(window))
#------ Accessors
def get_filenames(self):
return [finfo.filename for finfo in self.editorstacks[0].data]
def get_filename_index(self, filename):
return self.editorstacks[0].has_filename(filename)
def get_current_editorstack(self, editorwindow=None):
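        # Resolution order: the only stack when a single one exists, else
        # the currently focused stack, else the last-focused stack for the
        # given window, falling back to the first stack.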
if self.editorstacks is not None:
if len(self.editorstacks) == 1:
editorstack = self.editorstacks[0]
else:
editorstack = self.__get_focus_editorstack()
if editorstack is None or editorwindow is not None:
editorstack = self.get_last_focus_editorstack(editorwindow)
if editorstack is None:
editorstack = self.editorstacks[0]
return editorstack
def get_current_editor(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_editor()
def get_current_finfo(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_finfo()
def get_current_filename(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_filename()
def is_file_opened(self, filename=None):
return self.editorstacks[0].is_file_opened(filename)
def set_current_filename(self, filename, editorwindow=None):
"""Set focus to *filename* if this file has been opened
Return the editor instance associated to *filename*"""
editorstack = self.get_current_editorstack(editorwindow)
return editorstack.set_current_filename(filename)
def set_path(self):
for finfo in self.editorstacks[0].data:
finfo.path = self.main.get_spyder_pythonpath()
if self.introspector:
self.introspector.change_extra_path(
self.main.get_spyder_pythonpath())
#------ FileSwitcher API
def get_current_tab_manager(self):
"""Get the widget with the TabWidget attribute."""
return self.get_current_editorstack()
#------ Refresh methods
def refresh_file_dependent_actions(self):
"""Enable/disable file dependent actions
(only if dockwidget is visible)"""
if self.dockwidget and self.dockwidget.isVisible():
enable = self.get_current_editor() is not None
for action in self.file_dependent_actions:
action.setEnabled(enable)
def refresh_save_all_action(self):
"""Enable 'Save All' if there are files to be saved"""
editorstack = self.get_current_editorstack()
if editorstack:
state = any(finfo.editor.document().isModified() or finfo.newly_created
for finfo in editorstack.data)
self.save_all_action.setEnabled(state)
def update_warning_menu(self):
"""Update warning list menu"""
editorstack = self.get_current_editorstack()
check_results = editorstack.get_analysis_results()
self.warning_menu.clear()
filename = self.get_current_filename()
for message, line_number in check_results:
error = 'syntax' in message
text = message[:1].upper()+message[1:]
icon = ima.icon('error') if error else ima.icon('warning')
# QAction.triggered works differently for PySide and PyQt
if not API == 'pyside':
slot = lambda _checked, _l=line_number: self.load(filename, goto=_l)
else:
slot = lambda _l=line_number: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.warning_menu.addAction(action)
def analysis_results_changed(self):
"""
Synchronize analysis results between editorstacks
Refresh analysis navigation buttons
"""
editorstack = self.get_current_editorstack()
results = editorstack.get_analysis_results()
index = editorstack.get_stack_index()
if index != -1:
filename = editorstack.data[index].filename
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_analysis_results(filename, results)
self.update_code_analysis_actions()
def update_todo_menu(self):
"""Update todo list menu"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
self.todo_menu.clear()
filename = self.get_current_filename()
for text, line0 in results:
icon = ima.icon('todo')
# QAction.triggered works differently for PySide and PyQt
if not API == 'pyside':
slot = lambda _checked, _l=line0: self.load(filename, goto=_l)
else:
slot = lambda _l=line0: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.todo_menu.addAction(action)
self.update_todo_actions()
def todo_results_changed(self):
"""
Synchronize todo results between editorstacks
Refresh todo list navigation buttons
"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
index = editorstack.get_stack_index()
if index != -1:
filename = editorstack.data[index].filename
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_todo_results(filename, results)
self.update_todo_actions()
def refresh_eol_chars(self, os_name):
os_name = to_text_string(os_name)
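        # Temporarily disable the EOL actions' side effects while their
        # checked state is synced with the current file's line endings
        # (see toggle_eol_chars).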
self.__set_eol_chars = False
if os_name == 'nt':
self.win_eol_action.setChecked(True)
elif os_name == 'posix':
self.linux_eol_action.setChecked(True)
else:
self.mac_eol_action.setChecked(True)
self.__set_eol_chars = True
#------ Slots
def opened_files_list_changed(self):
"""
Opened files list has changed:
--> open/close file action
--> modification ('*' added to title)
--> current edited file has changed
"""
# Refresh Python file dependent actions:
editor = self.get_current_editor()
if editor:
python_enable = editor.is_python()
cython_enable = python_enable or (
programs.is_module_installed('Cython') and editor.is_cython())
for action in self.pythonfile_dependent_actions:
if action in self.cythonfile_compatible_actions:
enable = cython_enable
else:
enable = python_enable
if action is self.winpdb_action:
action.setEnabled(enable and WINPDB_PATH is not None)
else:
action.setEnabled(enable)
self.open_file_update.emit(self.get_current_filename())
def update_code_analysis_actions(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_analysis_results()
# Update code analysis buttons
state = (self.get_option('code_analysis/pyflakes') \
or self.get_option('code_analysis/pep8')) \
and results is not None and len(results)
for action in (self.warning_list_action, self.previous_warning_action,
self.next_warning_action):
action.setEnabled(state)
def update_todo_actions(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
state = self.get_option('todo_list') \
and results is not None and len(results)
self.todo_list_action.setEnabled(state)
def rehighlight_cells(self):
"""Rehighlight cells of current editor"""
editor = self.get_current_editor()
editor.rehighlight_cells()
QApplication.processEvents()
#------ Breakpoints
def save_breakpoints(self, filename, breakpoints):
filename = to_text_string(filename)
breakpoints = to_text_string(breakpoints)
filename = osp.normpath(osp.abspath(filename))
if breakpoints:
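            # `breakpoints` arrives as the text representation of a list
            # (it is passed through the signal as a string), so eval()
            # rebuilds the actual Python object here.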
breakpoints = eval(breakpoints)
else:
breakpoints = []
save_breakpoints(filename, breakpoints)
self.breakpoints_saved.emit()
#------ File I/O
def __load_temp_file(self):
"""Load temporary file from a text file in user home directory"""
if not osp.isfile(self.TEMPFILE_PATH):
# Creating temporary file
default = ['# -*- coding: utf-8 -*-', '"""',
'Tellurium oscillation', '"""',
'', 'import tellurium as te', 'import roadrunner',
'import antimony', '',
"r = te.loada ('''", 'model feedback()', ' // Reactions:',
' J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);',
' J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);',
' J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);',
' J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);',
' J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);','',
' // Species initializations:', ' S1 = 0; S2 = 0; S3 = 0;',
' S4 = 0; X0 = 10; X1 = 0;','', ' // Variable initialization:',
' VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;',
"end''')", '', 'result = r.simulate(0, 40, 500)', 'r.plot(result)', '']
text = os.linesep.join([encoding.to_unicode(qstr)
for qstr in default])
try:
encoding.write(to_text_string(text), self.TEMPFILE_PATH,
'utf-8')
except EnvironmentError:
self.new()
return
self.load(self.TEMPFILE_PATH)
@Slot()
def __set_workdir(self):
"""Set current script directory as working directory"""
fname = self.get_current_filename()
if fname is not None:
directory = osp.dirname(osp.abspath(fname))
self.open_dir.emit(directory)
def __add_recent_file(self, fname):
"""Add to recent file list"""
if fname is None:
return
if fname in self.recent_files:
self.recent_files.remove(fname)
self.recent_files.insert(0, fname)
if len(self.recent_files) > self.get_option('max_recent_files'):
self.recent_files.pop(-1)
def _clone_file_everywhere(self, finfo):
"""Clone file (*src_editor* widget) in all editorstacks
Cloning from the first editorstack in which every single new editor
is created (when loading or creating a new file)"""
for editorstack in self.editorstacks[1:]:
editor = editorstack.clone_editor_from(finfo, set_current=False)
self.register_widget_shortcuts(editor)
@Slot()
@Slot(str)
def new(self, fname=None, editorstack=None, text=None):
"""
Create a new file - Untitled
fname=None --> fname will be 'untitledXX.py' but do not create file
fname=<basestring> --> create file
"""
# If no text is provided, create default content
empty = False
try:
if text is None:
default_content = True
text, enc = encoding.read(self.TEMPLATE_PATH)
enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-',
text)
if enc_match:
enc = enc_match.group(1)
# Initialize template variables
# Windows
username = encoding.to_unicode_from_fs(
os.environ.get('USERNAME', ''))
# Linux, Mac OS X
if not username:
username = encoding.to_unicode_from_fs(
os.environ.get('USER', '-'))
VARS = {
'date': time.ctime(),
'username': username,
}
try:
text = text % VARS
except Exception:
pass
else:
default_content = False
enc = encoding.read(self.TEMPLATE_PATH)[1]
except (IOError, OSError):
text = ''
enc = 'utf-8'
default_content = True
empty = True
create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n)
# Creating editor widget
if editorstack is None:
current_es = self.get_current_editorstack()
else:
current_es = editorstack
created_from_here = fname is None
if created_from_here:
while True:
fname = create_fname(self.untitled_num)
self.untitled_num += 1
if not osp.isfile(fname):
break
basedir = getcwd_or_home()
if self.main.projects.get_active_project() is not None:
basedir = self.main.projects.get_active_project_path()
else:
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
fname = osp.abspath(osp.join(basedir, fname))
else:
# QString when triggered by a Qt signal
fname = osp.abspath(to_text_string(fname))
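        # If a file with this name is already open, try to close it first
        # and abort if the user cancels the close.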
index = current_es.has_filename(fname)
if index is not None and not current_es.close_file(index):
return
# Creating the editor widget in the first editorstack (the one that
# can't be destroyed), then cloning this editor widget in all other
# editorstacks:
finfo = self.editorstacks[0].new(fname, enc, text, default_content,
empty)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(finfo.filename)
self.register_widget_shortcuts(current_editor)
if not created_from_here:
self.save(force=True)
def edit_template(self):
"""Edit new file template"""
self.load(self.TEMPLATE_PATH)
def update_recent_file_menu(self):
"""Update recent file menu"""
recent_files = []
for fname in self.recent_files:
if self.is_file_opened(fname) is None and osp.isfile(fname):
recent_files.append(fname)
self.recent_file_menu.clear()
if recent_files:
for fname in recent_files:
action = create_action(self, fname,
icon=ima.icon('FileIcon'),
triggered=self.load)
action.setData(to_qvariant(fname))
self.recent_file_menu.addAction(action)
self.clear_recent_action.setEnabled(len(recent_files) > 0)
add_actions(self.recent_file_menu, (None, self.max_recent_action,
self.clear_recent_action))
@Slot()
def clear_recent_files(self):
"""Clear recent files list"""
self.recent_files = []
@Slot()
def change_max_recent_files(self):
"Change max recent files entries"""
editorstack = self.get_current_editorstack()
mrf, valid = QInputDialog.getInt(editorstack, _('Editor'),
_('Maximum number of recent files'),
self.get_option('max_recent_files'), 1, 35)
if valid:
self.set_option('max_recent_files', mrf)
@Slot()
@Slot(str)
@Slot(str, int, str)
@Slot(str, int, str, object)
def load(self, filenames=None, goto=None, word='', editorwindow=None,
processevents=True):
"""
Load a text file
editorwindow: load in this editorwindow (useful when clicking on
outline explorer with multiple editor windows)
processevents: determines if processEvents() should be called at the
end of this method (set to False to prevent keyboard events from
creeping through to the editor during debugging)
"""
editor0 = self.get_current_editor()
if editor0 is not None:
position0 = editor0.get_position('cursor')
filename0 = self.get_current_filename()
else:
position0, filename0 = None, None
if not filenames:
# Recent files action
action = self.sender()
if isinstance(action, QAction):
filenames = from_qvariant(action.data(), to_text_string)
if not filenames:
basedir = getcwd_or_home()
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
self.redirect_stdio.emit(False)
parent_widget = self.get_current_editorstack()
if filename0 is not None:
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(filename0)[1])
else:
selectedfilter = ''
if not running_under_pytest():
filenames, _sf = getopenfilenames(
parent_widget,
_("Open file"), basedir,
self.edit_filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails)
else:
# Use a Qt (i.e. scriptable) dialog for pytest
dialog = QFileDialog(parent_widget, _("Open file"),
options=QFileDialog.DontUseNativeDialog)
if dialog.exec_():
filenames = dialog.selectedFiles()
self.redirect_stdio.emit(True)
if filenames:
filenames = [osp.normpath(fname) for fname in filenames]
else:
return
focus_widget = QApplication.focusWidget()
if self.editorwindows and not self.dockwidget.isVisible():
# We override the editorwindow variable to force a focus on
# the editor window instead of the hidden editor dockwidget.
# See PR #5742.
if editorwindow not in self.editorwindows:
editorwindow = self.editorwindows[0]
editorwindow.setFocus()
editorwindow.raise_()
elif (self.dockwidget and not self.ismaximized
and not self.dockwidget.isAncestorOf(focus_widget)
and not isinstance(focus_widget, CodeEditor)):
self.dockwidget.setVisible(True)
self.dockwidget.setFocus()
self.dockwidget.raise_()
def _convert(fname):
fname = osp.abspath(encoding.to_unicode_from_fs(fname))
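            # On Windows, capitalize the drive letter so that comparisons
            # against already-open filenames are case-consistent.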
if os.name == 'nt' and len(fname) >= 2 and fname[1] == ':':
fname = fname[0].upper()+fname[1:]
return fname
if hasattr(filenames, 'replaceInStrings'):
# This is a QStringList instance (PyQt API #1), converting to list:
filenames = list(filenames)
if not isinstance(filenames, list):
filenames = [_convert(filenames)]
else:
filenames = [_convert(fname) for fname in list(filenames)]
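        # Normalize `goto`: a single line number becomes a one-element list;
        # a list whose length does not match `filenames` is discarded.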
if isinstance(goto, int):
goto = [goto]
elif goto is not None and len(goto) != len(filenames):
goto = None
for index, filename in enumerate(filenames):
# -- Do not open an already opened file
current_editor = self.set_current_filename(filename, editorwindow)
if current_editor is None:
# -- Not a valid filename:
if not osp.isfile(filename):
continue
# --
current_es = self.get_current_editorstack(editorwindow)
# Creating the editor widget in the first editorstack (the one
# that can't be destroyed), then cloning this editor widget in
# all other editorstacks:
finfo = self.editorstacks[0].load(filename, set_current=False)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(filename)
current_editor.set_breakpoints(load_breakpoints(filename))
self.register_widget_shortcuts(current_editor)
current_es.analyze_script()
self.__add_recent_file(filename)
if goto is not None: # 'word' is assumed to be None as well
current_editor.go_to_line(goto[index], word=word)
position = current_editor.get_position('cursor')
self.cursor_moved(filename0, position0, filename, position)
current_editor.clearFocus()
current_editor.setFocus()
current_editor.window().raise_()
if processevents:
QApplication.processEvents()
@Slot()
def print_file(self):
"""Print current file"""
editor = self.get_current_editor()
filename = self.get_current_filename()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_plugin_font('printer_header'))
printDialog = QPrintDialog(printer, editor)
if editor.has_selected_text():
printDialog.setOption(QAbstractPrintDialog.PrintSelection, True)
self.redirect_stdio.emit(False)
answer = printDialog.exec_()
self.redirect_stdio.emit(True)
if answer == QDialog.Accepted:
self.starting_long_process(_("Printing..."))
printer.setDocName(filename)
editor.print_(printer)
self.ending_long_process()
@Slot()
def print_preview(self):
"""Print preview for current file"""
from qtpy.QtPrintSupport import QPrintPreviewDialog
editor = self.get_current_editor()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_plugin_font('printer_header'))
preview = QPrintPreviewDialog(printer, self)
preview.setWindowFlags(Qt.Window)
preview.paintRequested.connect(lambda printer: editor.print_(printer))
self.redirect_stdio.emit(False)
preview.exec_()
self.redirect_stdio.emit(True)
@Slot()
def close_file(self):
"""Close current file"""
editorstack = self.get_current_editorstack()
editorstack.close_file()
@Slot()
def close_all_files(self):
"""Close all opened scripts"""
self.editorstacks[0].close_all_files()
@Slot()
def save(self, index=None, force=False):
"""Save file"""
editorstack = self.get_current_editorstack()
return editorstack.save(index=index, force=force)
@Slot()
def save_as(self):
"""Save *as* the currently edited file"""
editorstack = self.get_current_editorstack()
if editorstack.save_as():
fname = editorstack.get_current_filename()
self.__add_recent_file(fname)
@Slot()
def save_copy_as(self):
"""Save *copy as* the currently edited file"""
editorstack = self.get_current_editorstack()
editorstack.save_copy_as()
@Slot()
def save_all(self):
"""Save all opened files"""
self.get_current_editorstack().save_all()
@Slot()
def revert(self):
"""Revert the currently edited file from disk"""
editorstack = self.get_current_editorstack()
editorstack.revert()
@Slot()
def find(self):
"""Find slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show()
editorstack.find_widget.search_text.setFocus()
@Slot()
def find_next(self):
"""Fnd next slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.find_next()
@Slot()
def find_previous(self):
"""Find previous slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.find_previous()
@Slot()
def replace(self):
"""Replace slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show_replace()
def open_last_closed(self):
""" Reopens the last closed tab."""
editorstack = self.get_current_editorstack()
last_closed_files = editorstack.get_last_closed_files()
        if last_closed_files:
file_to_open = last_closed_files[0]
last_closed_files.remove(file_to_open)
editorstack.set_last_closed_files(last_closed_files)
self.load(file_to_open)
#------ Explorer widget
def close_file_from_name(self, filename):
"""Close file from its name"""
filename = osp.abspath(to_text_string(filename))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
self.editorstacks[0].close_file(index)
def removed(self, filename):
"""File was removed in file explorer widget or in project explorer"""
self.close_file_from_name(filename)
def removed_tree(self, dirname):
"""Directory was removed in project explorer widget"""
dirname = osp.abspath(to_text_string(dirname))
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
self.close_file_from_name(fname)
def renamed(self, source, dest):
"""File was renamed in file explorer widget or in project explorer"""
filename = osp.abspath(to_text_string(source))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
for editorstack in self.editorstacks:
editorstack.rename_in_data(filename,
new_filename=to_text_string(dest))
def renamed_tree(self, source, dest):
"""Directory was renamed in file explorer or in project explorer."""
dirname = osp.abspath(to_text_string(source))
tofile = to_text_string(dest)
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
new_filename = fname.replace(dirname, tofile)
self.renamed(source=fname, dest=new_filename)
#------ Source code
@Slot()
def indent(self):
"""Indent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.indent()
@Slot()
def unindent(self):
"""Unindent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unindent()
@Slot()
    def text_uppercase(self):
"""Change current line or selection to uppercase."""
editor = self.get_current_editor()
if editor is not None:
editor.transform_to_uppercase()
@Slot()
def text_lowercase(self):
"""Change current line or selection to lowercase."""
editor = self.get_current_editor()
if editor is not None:
editor.transform_to_lowercase()
@Slot()
def toggle_comment(self):
"""Comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.toggle_comment()
@Slot()
def blockcomment(self):
"""Block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.blockcomment()
@Slot()
def unblockcomment(self):
"""Un-block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unblockcomment()
@Slot()
def go_to_next_todo(self):
editor = self.get_current_editor()
position = editor.go_to_next_todo()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def go_to_next_warning(self):
editor = self.get_current_editor()
position = editor.go_to_next_warning()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def go_to_previous_warning(self):
editor = self.get_current_editor()
position = editor.go_to_previous_warning()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def run_winpdb(self):
"""Run winpdb to debug current file"""
if self.save():
fname = self.get_current_filename()
runconf = get_run_configuration(fname)
if runconf is None:
args = []
wdir = None
else:
args = runconf.get_arguments().split()
wdir = runconf.get_working_directory()
# Handle the case where wdir comes back as an empty string
# when the working directory dialog checkbox is unchecked.
# (subprocess "cwd" default is None, so empty str
# must be changed to None in this case.)
programs.run_program(WINPDB_PATH, [fname] + args, cwd=wdir or None)
def toggle_eol_chars(self, os_name, checked):
if checked:
editor = self.get_current_editor()
if self.__set_eol_chars:
editor.set_eol_chars(sourcecode.get_eol_chars_from_os_name(os_name))
@Slot(bool)
def toggle_show_blanks(self, checked):
editor = self.get_current_editor()
editor.set_blanks_enabled(checked)
@Slot()
def remove_trailing_spaces(self):
editorstack = self.get_current_editorstack()
editorstack.remove_trailing_spaces()
@Slot()
def fix_indentation(self):
editorstack = self.get_current_editorstack()
editorstack.fix_indentation()
#------ Cursor position history management
def update_cursorpos_actions(self):
self.previous_edit_cursor_action.setEnabled(
self.last_edit_cursor_pos is not None)
self.previous_cursor_action.setEnabled(
self.cursor_pos_index is not None and self.cursor_pos_index > 0)
self.next_cursor_action.setEnabled(self.cursor_pos_index is not None \
and self.cursor_pos_index < len(self.cursor_pos_history)-1)
def add_cursor_position_to_history(self, filename, position, fc=False):
if self.__ignore_cursor_position:
return
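        # Avoid duplicate history entries for the same file: reuse an
        # existing entry at the same position (or at the file start),
        # otherwise drop the stale entry before appending the new one.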
for index, (fname, pos) in enumerate(self.cursor_pos_history[:]):
if fname == filename:
if pos == position or pos == 0:
if fc:
self.cursor_pos_history[index] = (filename, position)
self.cursor_pos_index = index
self.update_cursorpos_actions()
return
else:
if self.cursor_pos_index >= index:
self.cursor_pos_index -= 1
self.cursor_pos_history.pop(index)
break
if self.cursor_pos_index is not None:
self.cursor_pos_history = \
self.cursor_pos_history[:self.cursor_pos_index+1]
self.cursor_pos_history.append((filename, position))
self.cursor_pos_index = len(self.cursor_pos_history)-1
self.update_cursorpos_actions()
def cursor_moved(self, filename0, position0, filename1, position1):
"""Cursor was just moved: 'go to'"""
if position0 is not None:
self.add_cursor_position_to_history(filename0, position0)
self.add_cursor_position_to_history(filename1, position1)
def text_changed_at(self, filename, position):
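        """Remember the last edit position for 'go to last edit location'."""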
self.last_edit_cursor_pos = (to_text_string(filename), position)
def current_file_changed(self, filename, position):
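        """Add the cursor position of the newly selected file to history."""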
self.add_cursor_position_to_history(to_text_string(filename), position,
fc=True)
@Slot()
def go_to_last_edit_location(self):
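        """Move the cursor to the position of the last edit."""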
if self.last_edit_cursor_pos is not None:
filename, position = self.last_edit_cursor_pos
if not osp.isfile(filename):
self.last_edit_cursor_pos = None
return
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
def __move_cursor_position(self, index_move):
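        """Move by index_move steps through the cursor position history."""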
if self.cursor_pos_index is None:
return
filename, _position = self.cursor_pos_history[self.cursor_pos_index]
        self.cursor_pos_history[self.cursor_pos_index] = (
            filename, self.get_current_editor().get_position('cursor'))
self.__ignore_cursor_position = True
old_index = self.cursor_pos_index
self.cursor_pos_index = min([
len(self.cursor_pos_history)-1,
max([0, self.cursor_pos_index+index_move])
])
filename, position = self.cursor_pos_history[self.cursor_pos_index]
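        # If the history entry points to a file that no longer exists,
        # drop it and restore the previous index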
if not osp.isfile(filename):
self.cursor_pos_history.pop(self.cursor_pos_index)
if self.cursor_pos_index < old_index:
old_index -= 1
self.cursor_pos_index = old_index
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
self.__ignore_cursor_position = False
self.update_cursorpos_actions()
@Slot()
def go_to_previous_cursor_position(self):
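        """Go to the previous position in the cursor history."""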
self.__move_cursor_position(-1)
@Slot()
def go_to_next_cursor_position(self):
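        """Go to the next position in the cursor history."""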
self.__move_cursor_position(1)
@Slot()
def go_to_line(self, line=None):
"""Open 'go to line' dialog"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.go_to_line(line)
@Slot()
def set_or_clear_breakpoint(self):
"""Set/Clear breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.set_or_clear_breakpoint()
@Slot()
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.set_or_edit_conditional_breakpoint()
@Slot()
def clear_all_breakpoints(self):
"""Clear breakpoints in all files"""
clear_all_breakpoints()
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
for data in editorstack.data:
data.editor.clear_breakpoints()
self.refresh_plugin()
def clear_breakpoint(self, filename, lineno):
"""Remove a single breakpoint"""
clear_breakpoint(filename, lineno)
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
index = self.is_file_opened(filename)
if index is not None:
editorstack.data[index].editor.add_remove_breakpoint(lineno)
def debug_command(self, command):
"""Debug actions"""
self.main.ipyconsole.write_to_stdin(command)
focus_widget = self.main.ipyconsole.get_focus_widget()
if focus_widget:
focus_widget.setFocus()
#------ Run Python script
@Slot()
def edit_run_configurations(self):
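        """Open the dialog to edit run configurations."""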
dialog = RunConfigDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
fname = osp.abspath(self.get_current_filename())
dialog.setup(fname)
if dialog.exec_():
fname = dialog.file_to_run
if fname is not None:
self.load(fname)
self.run_file()
@Slot()
def run_file(self, debug=False):
"""Run script inside current interpreter or in a new one"""
editorstack = self.get_current_editorstack()
if editorstack.save():
editor = self.get_current_editor()
fname = osp.abspath(self.get_current_filename())
# Get fname's dirname before we escape the single and double
# quotes (Fixes Issue #6771)
dirname = osp.dirname(fname)
# Escape single and double quotes in fname and dirname
# (Fixes Issue #2158)
fname = fname.replace("'", r"\'").replace('"', r'\"')
dirname = dirname.replace("'", r"\'").replace('"', r'\"')
runconf = get_run_configuration(fname)
if runconf is None:
dialog = RunConfigOneDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
dialog.setup(fname)
if CONF.get('run', 'open_at_least_once',
not running_under_pytest()):
                    # Open the Run Config dialog at least once: the first
                    # time a script is ever run in Spyder, so that the user
                    # sees it at least once and knows that it exists
show_dlg = True
CONF.set('run', 'open_at_least_once', False)
else:
# Open Run Config dialog only
# if ALWAYS_OPEN_FIRST_RUN_OPTION option is enabled
show_dlg = CONF.get('run', ALWAYS_OPEN_FIRST_RUN_OPTION)
if show_dlg and not dialog.exec_():
return
runconf = dialog.get_configuration()
args = runconf.get_arguments()
python_args = runconf.get_python_arguments()
interact = runconf.interact
post_mortem = runconf.post_mortem
current = runconf.current
systerm = runconf.systerm
clear_namespace = runconf.clear_namespace
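            # Choose the working directory: the file's directory, the
            # current working directory, or a fixed path from the run
            # configuration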
if runconf.file_dir:
wdir = dirname
elif runconf.cw_dir:
wdir = ''
elif osp.isdir(runconf.dir):
wdir = runconf.dir
else:
wdir = ''
python = True # Note: in the future, it may be useful to run
# something in a terminal instead of a Python interp.
self.__last_ec_exec = (fname, wdir, args, interact, debug,
python, python_args, current, systerm,
post_mortem, clear_namespace)
self.re_run_file()
if not interact and not debug:
                # If the external console dockwidget is hidden, it will be
                # raised to the top level, so focus will be given to the
                # current external shell automatically
                # (see the SpyderPluginWidget.visibility_changed method)
editor.setFocus()
def set_dialog_size(self, size):
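        """Remember the size of the run configuration dialog."""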
self.dialog_size = size
@Slot()
def debug_file(self):
"""Debug current script"""
self.run_file(debug=True)
@Slot()
def re_run_file(self):
"""Re-run last script"""
if self.get_option('save_all_before_run'):
self.save_all()
if self.__last_ec_exec is None:
return
(fname, wdir, args, interact, debug,
python, python_args, current, systerm,
post_mortem, clear_namespace) = self.__last_ec_exec
if not systerm:
self.run_in_current_ipyclient.emit(fname, wdir, args,
debug, post_mortem,
current, clear_namespace)
else:
self.main.open_external_console(fname, wdir, args, interact,
debug, python, python_args,
systerm, post_mortem)
@Slot()
def run_selection(self):
"""Run selection or current line in external console"""
editorstack = self.get_current_editorstack()
editorstack.run_selection()
@Slot()
def run_cell(self):
"""Run current cell"""
editorstack = self.get_current_editorstack()
editorstack.run_cell()
@Slot()
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
editorstack = self.get_current_editorstack()
editorstack.run_cell_and_advance()
@Slot()
def re_run_last_cell(self):
"""Run last executed cell."""
editorstack = self.get_current_editorstack()
editorstack.re_run_last_cell()
#------ Zoom in/out/reset
def zoom(self, factor):
"""Zoom in/out/reset"""
editor = self.get_current_editorstack().get_current_editor()
if factor == 0:
font = self.get_plugin_font()
editor.set_font(font)
else:
font = editor.font()
size = font.pointSize() + factor
if size > 0:
font.setPointSize(size)
editor.set_font(font)
editor.update_tab_stop_width_spaces()
#------ Options
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
if self.editorstacks is not None:
# --- syntax highlight and text rendering settings
color_scheme_n = 'color_scheme_name'
color_scheme_o = self.get_color_scheme()
currentline_n = 'highlight_current_line'
currentline_o = self.get_option(currentline_n)
currentcell_n = 'highlight_current_cell'
currentcell_o = self.get_option(currentcell_n)
occurrence_n = 'occurrence_highlighting'
occurrence_o = self.get_option(occurrence_n)
occurrence_timeout_n = 'occurrence_highlighting/timeout'
occurrence_timeout_o = self.get_option(occurrence_timeout_n)
focus_to_editor_n = 'focus_to_editor'
focus_to_editor_o = self.get_option(focus_to_editor_n)
for editorstack in self.editorstacks:
if color_scheme_n in options:
editorstack.set_color_scheme(color_scheme_o)
if currentline_n in options:
editorstack.set_highlight_current_line_enabled(
currentline_o)
if currentcell_n in options:
editorstack.set_highlight_current_cell_enabled(
currentcell_o)
if occurrence_n in options:
editorstack.set_occurrence_highlighting_enabled(occurrence_o)
if occurrence_timeout_n in options:
editorstack.set_occurrence_highlighting_timeout(
occurrence_timeout_o)
if focus_to_editor_n in options:
editorstack.set_focus_to_editor(focus_to_editor_o)
# --- everything else
tabbar_n = 'show_tab_bar'
tabbar_o = self.get_option(tabbar_n)
linenb_n = 'line_numbers'
linenb_o = self.get_option(linenb_n)
blanks_n = 'blank_spaces'
blanks_o = self.get_option(blanks_n)
edgeline_n = 'edge_line'
edgeline_o = self.get_option(edgeline_n)
edgelinecol_n = 'edge_line_column'
edgelinecol_o = self.get_option(edgelinecol_n)
wrap_n = 'wrap'
wrap_o = self.get_option(wrap_n)
tabindent_n = 'tab_always_indent'
tabindent_o = self.get_option(tabindent_n)
ibackspace_n = 'intelligent_backspace'
ibackspace_o = self.get_option(ibackspace_n)
removetrail_n = 'always_remove_trailing_spaces'
removetrail_o = self.get_option(removetrail_n)
autocomp_n = 'codecompletion/auto'
autocomp_o = self.get_option(autocomp_n)
case_comp_n = 'codecompletion/case_sensitive'
case_comp_o = self.get_option(case_comp_n)
enter_key_n = 'codecompletion/enter_key'
enter_key_o = self.get_option(enter_key_n)
calltips_n = 'calltips'
calltips_o = self.get_option(calltips_n)
gotodef_n = 'go_to_definition'
gotodef_o = self.get_option(gotodef_n)
closepar_n = 'close_parentheses'
closepar_o = self.get_option(closepar_n)
close_quotes_n = 'close_quotes'
close_quotes_o = self.get_option(close_quotes_n)
add_colons_n = 'add_colons'
add_colons_o = self.get_option(add_colons_n)
autounindent_n = 'auto_unindent'
autounindent_o = self.get_option(autounindent_n)
indent_chars_n = 'indent_chars'
indent_chars_o = self.get_option(indent_chars_n)
tab_stop_width_spaces_n = 'tab_stop_width_spaces'
tab_stop_width_spaces_o = self.get_option(tab_stop_width_spaces_n)
help_n = 'connect_to_oi'
help_o = CONF.get('help', 'connect/editor')
todo_n = 'todo_list'
todo_o = self.get_option(todo_n)
pyflakes_n = 'code_analysis/pyflakes'
pyflakes_o = self.get_option(pyflakes_n)
pep8_n = 'code_analysis/pep8'
pep8_o = self.get_option(pep8_n)
rt_analysis_n = 'realtime_analysis'
rt_analysis_o = self.get_option(rt_analysis_n)
rta_timeout_n = 'realtime_analysis/timeout'
rta_timeout_o = self.get_option(rta_timeout_n)
finfo = self.get_current_finfo()
for editorstack in self.editorstacks:
if tabbar_n in options:
editorstack.set_tabbar_visible(tabbar_o)
if linenb_n in options:
editorstack.set_linenumbers_enabled(linenb_o,
current_finfo=finfo)
if blanks_n in options:
editorstack.set_blanks_enabled(blanks_o)
self.showblanks_action.setChecked(blanks_o)
if edgeline_n in options:
editorstack.set_edgeline_enabled(edgeline_o)
if edgelinecol_n in options:
editorstack.set_edgeline_column(edgelinecol_o)
if wrap_n in options:
editorstack.set_wrap_enabled(wrap_o)
if tabindent_n in options:
editorstack.set_tabmode_enabled(tabindent_o)
if ibackspace_n in options:
editorstack.set_intelligent_backspace_enabled(ibackspace_o)
if removetrail_n in options:
editorstack.set_always_remove_trailing_spaces(removetrail_o)
if autocomp_n in options:
editorstack.set_codecompletion_auto_enabled(autocomp_o)
if case_comp_n in options:
editorstack.set_codecompletion_case_enabled(case_comp_o)
if enter_key_n in options:
editorstack.set_codecompletion_enter_enabled(enter_key_o)
if calltips_n in options:
editorstack.set_calltips_enabled(calltips_o)
if gotodef_n in options:
editorstack.set_go_to_definition_enabled(gotodef_o)
if closepar_n in options:
editorstack.set_close_parentheses_enabled(closepar_o)
if close_quotes_n in options:
editorstack.set_close_quotes_enabled(close_quotes_o)
if add_colons_n in options:
editorstack.set_add_colons_enabled(add_colons_o)
if autounindent_n in options:
editorstack.set_auto_unindent_enabled(autounindent_o)
if indent_chars_n in options:
editorstack.set_indent_chars(indent_chars_o)
if tab_stop_width_spaces_n in options:
editorstack.set_tab_stop_width_spaces(tab_stop_width_spaces_o)
if help_n in options:
editorstack.set_help_enabled(help_o)
if todo_n in options:
editorstack.set_todolist_enabled(todo_o,
current_finfo=finfo)
if pyflakes_n in options:
editorstack.set_pyflakes_enabled(pyflakes_o,
current_finfo=finfo)
if pep8_n in options:
editorstack.set_pep8_enabled(pep8_o, current_finfo=finfo)
if rt_analysis_n in options:
editorstack.set_realtime_analysis_enabled(rt_analysis_o)
if rta_timeout_n in options:
editorstack.set_realtime_analysis_timeout(rta_timeout_o)
            # We must update the current editor after the others:
            # (otherwise, the code analysis buttons' state would correspond
            # to the last editor instead of the current one)
if finfo is not None:
if todo_n in options and todo_o:
finfo.run_todo_finder()
if pyflakes_n in options or pep8_n in options:
finfo.run_code_analysis(pyflakes_o, pep8_o)
# --- Open files
    def get_open_filenames(self):
        """Get the list of open files in the current stack"""
        editorstack = self.editorstacks[0]
        return [finfo.filename for finfo in editorstack.data]
def set_open_filenames(self):
"""
Set the recent opened files on editor based on active project.
If no project is active, then editor filenames are saved, otherwise
the opened filenames are stored in the project config info.
"""
if self.projects is not None:
if not self.projects.get_active_project():
filenames = self.get_open_filenames()
self.set_option('filenames', filenames)
def setup_open_files(self):
"""Open the list of saved files per project"""
self.set_create_new_file_if_empty(False)
active_project_path = None
if self.projects is not None:
active_project_path = self.projects.get_active_project_path()
if active_project_path:
filenames = self.projects.get_project_filenames()
else:
filenames = self.get_option('filenames', default=[])
self.close_all_files()
        if filenames and any(osp.isfile(f) for f in filenames):
self.load(filenames)
layout = self.get_option('layout_settings', None)
if layout is not None:
self.editorsplitter.set_layout_settings(layout)
win_layout = self.get_option('windows_layout_settings', None)
if win_layout:
for layout_settings in win_layout:
self.editorwindows_to_be_created.append(layout_settings)
self.set_last_focus_editorstack(self, self.editorstacks[0])
else:
self.__load_temp_file()
self.set_create_new_file_if_empty(True)
def save_open_files(self):
"""Save the list of open files"""
self.set_option('filenames', self.get_open_filenames())
def set_create_new_file_if_empty(self, value):
"""Change the value of create_new_file_if_empty"""
for editorstack in self.editorstacks:
editorstack.create_new_file_if_empty = value
|
import unittest
from pants_test.contrib.python.checks.checker.plugin_test_base import CheckstylePluginTestBase
from pants.contrib.python.checks.checker.print_statements import PrintStatements
class PrintStatementsTest(CheckstylePluginTestBase):
plugin_type = PrintStatements
def test_print_override(self):
statement = """
from __future__ import print_function
print("I do what I want")
class Foo(object):
def print(self):
"I can do this because it's not a reserved word."
"""
self.assertNoNits(statement)
def test_print_function(self):
statement = """
print("I do what I want")
"""
self.assertNoNits(statement)
@unittest.skip(reason="#7979: Rework tests so that we can run this with Python 2.")
def test_print_statement(self):
statement = """
print["I do what I want"]
"""
self.assertNit(statement, "T607")
|