repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
mancoast/CPythonPyc_test | fail/325_test_ioctl.py | 87 | 3327 | import array
import unittest
from test.support import run_unittest, import_module, get_attribute
import os, struct
fcntl = import_module('fcntl')
termios = import_module('termios')
get_attribute(termios, 'TIOCGPGRP') #Can't run tests without this feature
# Module-level gate: all tests below need a readable controlling terminal,
# and TIOCGPGRP is only meaningful when we are in the foreground.
try:
    tty = open("/dev/tty", "rb")
except IOError:
    raise unittest.SkipTest("Unable to open /dev/tty")
else:
    # Skip if another process is in foreground
    r = fcntl.ioctl(tty, termios.TIOCGPGRP, " ")
    tty.close()
    rpgrp = struct.unpack("i", r)[0]
    # TIOCGPGRP reports the foreground process group; if backgrounded it can
    # report the session id instead, hence both are accepted.
    if rpgrp not in (os.getpgrp(), os.getsid(0)):
        raise unittest.SkipTest("Neither the process group nor the session "
                                "are attached to /dev/tty")
    # Keep the module namespace clean for the test cases below.
    del tty, r, rpgrp

# pty is optional (Unix-only); tests that need it skip when it is missing.
try:
    import pty
except ImportError:
    pty = None
class IoctlTests(unittest.TestCase):
    """Exercise fcntl.ioctl() against the controlling terminal and a pty."""

    def test_ioctl(self):
        # If this process has been put into the background, TIOCGPGRP returns
        # the session ID instead of the process group id.
        valid_ids = (os.getpgrp(), os.getsid(0))
        with open("/dev/tty", "rb") as tty:
            raw = fcntl.ioctl(tty, termios.TIOCGPGRP, " ")
            pgrp = struct.unpack("i", raw)[0]
            self.assertIn(pgrp, valid_ids)

    def _check_ioctl_mutate_len(self, nbytes=None):
        """Run TIOCGPGRP with a mutable buffer of the requested byte size and
        check the result is written back in place."""
        buf = array.array('i')
        intsize = buf.itemsize
        valid_ids = (os.getpgrp(), os.getsid(0))
        # A fill value unlikely to be in `valid_ids`
        fill = -12345
        if nbytes is None:
            buf.append(fill)
        else:
            # Extend the buffer so that it is exactly `nbytes` bytes long
            buf.extend([fill] * (nbytes // intsize))
            self.assertEqual(len(buf) * intsize, nbytes)  # sanity check
        with open("/dev/tty", "rb") as tty:
            ret = fcntl.ioctl(tty, termios.TIOCGPGRP, buf, 1)
        pgrp = buf[0]
        self.assertEqual(ret, 0)
        self.assertIn(pgrp, valid_ids)

    def test_ioctl_mutate(self):
        self._check_ioctl_mutate_len()

    def test_ioctl_mutate_1024(self):
        # Issue #9758: a mutable buffer of exactly 1024 bytes wouldn't be
        # copied back after the system call.
        self._check_ioctl_mutate_len(1024)

    def test_ioctl_mutate_2048(self):
        # Test with a larger buffer, just for the record.
        self._check_ioctl_mutate_len(2048)

    def test_ioctl_signed_unsigned_code_param(self):
        if not pty:
            raise unittest.SkipTest('pty module required')
        master_fd, slave_fd = pty.openpty()
        try:
            # Derive both a guaranteed-positive and a possibly-negative
            # representation of the same ioctl opcode.
            if termios.TIOCSWINSZ < 0:
                maybe_neg_opcode = termios.TIOCSWINSZ
                pos_opcode = termios.TIOCSWINSZ & 0xffffffff
            else:
                pos_opcode = termios.TIOCSWINSZ
                maybe_neg_opcode, = struct.unpack(
                    "i", struct.pack("I", termios.TIOCSWINSZ))
            winsz = struct.pack("HHHH", 80, 25, 0, 0)
            # test both with a positive and potentially negative ioctl code
            fcntl.ioctl(master_fd, pos_opcode, winsz)
            fcntl.ioctl(master_fd, maybe_neg_opcode, winsz)
        finally:
            os.close(master_fd)
            os.close(slave_fd)
def test_main():
    # run_unittest comes from test.support (imported at module top).
    run_unittest(IoctlTests)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
risicle/django | django/db/migrations/graph.py | 351 | 10956 | from __future__ import unicode_literals
import warnings
from collections import deque
from functools import total_ordering
from django.db.migrations.state import ProjectState
from django.utils.datastructures import OrderedSet
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import CircularDependencyError, NodeNotFoundError
# Warning text emitted when the recursive ancestor/descendant walk overflows
# the interpreter stack and we fall back to the iterative algorithm.
RECURSION_DEPTH_WARNING = (
    "Maximum recursion depth exceeded while generating migration graph, "
    "falling back to iterative approach. If you're experiencing performance issues, "
    "consider squashing migrations as described at "
    "https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations."
)
@python_2_unicode_compatible
@total_ordering
class Node(object):
    """
    A single node in the migration graph. Contains direct links to adjacent
    nodes in either direction.
    """
    def __init__(self, key):
        self.key = key          # presumably an (app_label, migration_name) tuple
        self.children = set()   # Nodes that depend on this one
        self.parents = set()    # Nodes this one depends on

    # Comparison delegates to the key, so a Node compares equal to its raw
    # key as well as to another Node; total_ordering derives the remaining
    # comparison operators from __eq__/__lt__.
    def __eq__(self, other):
        return self.key == other

    def __lt__(self, other):
        return self.key < other

    def __hash__(self):
        return hash(self.key)

    def __getitem__(self, item):
        # Allow tuple-style indexing straight through to the key.
        return self.key[item]

    def __str__(self):
        return str(self.key)

    def __repr__(self):
        return '<Node: (%r, %r)>' % self.key

    def add_child(self, child):
        self.children.add(child)

    def add_parent(self, parent):
        self.parents.add(parent)

    # Use manual caching, @cached_property effectively doubles the
    # recursion depth for each recursion.
    def ancestors(self):
        """
        Return the keys of this node's transitive dependencies, dependencies
        first, ending with self.key. Memoized in self.__dict__['_ancestors'];
        invalidated by MigrationGraph.clear_cache().
        """
        # Use self.key instead of self to speed up the frequent hashing
        # when constructing an OrderedSet.
        if '_ancestors' not in self.__dict__:
            ancestors = deque([self.key])
            for parent in sorted(self.parents):
                # extendleft prepends, so parents' chains come before self.key.
                ancestors.extendleft(reversed(parent.ancestors()))
            self.__dict__['_ancestors'] = list(OrderedSet(ancestors))
        return self.__dict__['_ancestors']

    # Use manual caching, @cached_property effectively doubles the
    # recursion depth for each recursion.
    def descendants(self):
        """
        Return the keys of this node's transitive dependents, dependents
        first, ending with self.key. Memoized like ancestors().
        """
        # Use self.key instead of self to speed up the frequent hashing
        # when constructing an OrderedSet.
        if '_descendants' not in self.__dict__:
            descendants = deque([self.key])
            for child in sorted(self.children):
                descendants.extendleft(reversed(child.descendants()))
            self.__dict__['_descendants'] = list(OrderedSet(descendants))
        return self.__dict__['_descendants']
@python_2_unicode_compatible
class MigrationGraph(object):
    """
    Represents the digraph of all migrations in a project.

    Each migration is a node, and each dependency is an edge. There are
    no implicit dependencies between numbered migrations - the numbering is
    merely a convention to aid file listing. Every new numbered migration
    has a declared dependency to the previous number, meaning that VCS
    branch merges can be detected and resolved.

    Migrations files can be marked as replacing another set of migrations -
    this is to support the "squash" feature. The graph handler isn't responsible
    for these; instead, the code to load them in here should examine the
    migration files and if the replaced migrations are all either unapplied
    or not present, it should ignore the replaced ones, load in just the
    replacing migration, and repoint any dependencies that pointed to the
    replaced migrations to point to the replacing one.

    A node should be a tuple: (app_path, migration_name). The tree special-cases
    things within an app - namely, root nodes and leaf nodes ignore dependencies
    to other apps.
    """

    def __init__(self):
        self.node_map = {}   # node key -> Node (holds parent/child links)
        self.nodes = {}      # node key -> migration implementation object
        self.cached = False  # True once per-node caches may be populated

    def add_node(self, key, implementation):
        # `implementation` is the object later used by make_state() via
        # mutate_state(); the Node only mirrors the key for graph traversal.
        node = Node(key)
        self.node_map[key] = node
        self.nodes[key] = implementation
        self.clear_cache()

    def add_dependency(self, migration, child, parent):
        """
        Record an edge: `child` depends on `parent`. Both endpoints must
        already be in the graph; `migration` is used only in error messages.

        Raises NodeNotFoundError if either endpoint is unknown.
        """
        if child not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent child node %r" % (migration, child),
                child
            )
        if parent not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent parent node %r" % (migration, parent),
                parent
            )
        self.node_map[child].add_parent(self.node_map[parent])
        self.node_map[parent].add_child(self.node_map[child])
        self.clear_cache()

    def clear_cache(self):
        # Any structural change invalidates every node's memoized
        # ancestor/descendant lists, so drop them all.
        if self.cached:
            for node in self.nodes:
                self.node_map[node].__dict__.pop('_ancestors', None)
                self.node_map[node].__dict__.pop('_descendants', None)
            self.cached = False

    def forwards_plan(self, target):
        """
        Given a node, returns a list of which previous nodes (dependencies)
        must be applied, ending with the node itself.

        This is the list you would follow if applying the migrations to
        a database.

        Raises NodeNotFoundError for an unknown target and
        CircularDependencyError (via ensure_not_cyclic) for a cyclic graph.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
        # Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic
        self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents))
        self.cached = True
        node = self.node_map[target]
        try:
            return node.ancestors()
        except RuntimeError:
            # fallback to iterative dfs
            warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
            return self.iterative_dfs(node)

    def backwards_plan(self, target):
        """
        Given a node, returns a list of which dependent nodes (dependencies)
        must be unapplied, ending with the node itself.

        This is the list you would follow if removing the migrations from
        a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
        # Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic
        self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children))
        self.cached = True
        node = self.node_map[target]
        try:
            return node.descendants()
        except RuntimeError:
            # fallback to iterative dfs
            warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
            return self.iterative_dfs(node, forwards=False)

    def iterative_dfs(self, start, forwards=True):
        """
        Iterative depth first search, for finding dependencies.

        Used as the fallback when the recursive ancestors()/descendants()
        walk exceeds the interpreter's recursion limit.
        """
        visited = deque()
        visited.append(start)
        if forwards:
            stack = deque(sorted(start.parents))
        else:
            stack = deque(sorted(start.children))
        while stack:
            node = stack.popleft()
            visited.appendleft(node)
            if forwards:
                children = sorted(node.parents, reverse=True)
            else:
                children = sorted(node.children, reverse=True)
            # reverse sorting is needed because prepending using deque.extendleft
            # also effectively reverses values
            stack.extendleft(children)
        return list(OrderedSet(visited))

    def root_nodes(self, app=None):
        """
        Returns all root nodes - that is, nodes with no dependencies inside
        their app. These are the starting point for an app.
        """
        roots = set()
        for node in self.nodes:
            # node[0] / key[0] index into the key tuple (the app part);
            # cross-app parents are ignored when deciding rootness.
            if (not any(key[0] == node[0] for key in self.node_map[node].parents)
                    and (not app or app == node[0])):
                roots.add(node)
        return sorted(roots)

    def leaf_nodes(self, app=None):
        """
        Returns all leaf nodes - that is, nodes with no dependents in their app.
        These are the "most current" version of an app's schema.

        Having more than one per app is technically an error, but one that
        gets handled further up, in the interactive command - it's usually the
        result of a VCS merge and needs some user input.
        """
        leaves = set()
        for node in self.nodes:
            if (not any(key[0] == node[0] for key in self.node_map[node].children)
                    and (not app or app == node[0])):
                leaves.add(node)
        return sorted(leaves)

    def ensure_not_cyclic(self, start, get_children):
        """
        Raise CircularDependencyError if the graph (walked via get_children)
        contains a cycle; return None otherwise.
        """
        # Algo from GvR:
        # http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html
        todo = set(self.nodes)
        while todo:
            node = todo.pop()
            stack = [node]
            while stack:
                top = stack[-1]
                for node in get_children(top):
                    if node in stack:
                        # The slice of the stack from the repeated node
                        # onwards is the cycle itself.
                        cycle = stack[stack.index(node):]
                        raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
                    if node in todo:
                        stack.append(node)
                        todo.remove(node)
                        break
                else:
                    # All children explored: retreat one level.
                    node = stack.pop()

    def __str__(self):
        return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()

    def __repr__(self):
        nodes, edges = self._nodes_and_edges()
        return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)

    def _nodes_and_edges(self):
        # Each parent link is one edge, so summing parents counts every edge once.
        return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())

    def make_state(self, nodes=None, at_end=True, real_apps=None):
        """
        Given a migration node or nodes, returns a complete ProjectState for it.
        If at_end is False, returns the state before the migration has run.
        If nodes is not provided, returns the overall most current project state.
        """
        if nodes is None:
            nodes = list(self.leaf_nodes())
        if len(nodes) == 0:
            return ProjectState()
        if not isinstance(nodes[0], tuple):
            # A single node key was passed in; normalize to a list of keys.
            nodes = [nodes]
        plan = []
        for node in nodes:
            for migration in self.forwards_plan(node):
                if migration not in plan:
                    if not at_end and migration in nodes:
                        # Stop just before the requested node(s) themselves.
                        continue
                    plan.append(migration)
        project_state = ProjectState(real_apps=real_apps)
        for node in plan:
            project_state = self.nodes[node].mutate_state(project_state, preserve=False)
        return project_state

    def __contains__(self, node):
        return node in self.nodes
| bsd-3-clause |
davenovak/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-xcode-gcc-clang.py | 254 | 1403 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that xcode-style GCC_... settings that require clang are handled
properly.
"""

import TestGyp

import os
import sys

# Only meaningful on macOS, where the xcode-style settings apply.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  CHDIR = 'xcode-gcc'
  test.run_gyp('test-clang.gyp', chdir=CHDIR)

  # Build both aliasing variants and verify their runtime output: the
  # executables print "1"/"0" depending on the strict-aliasing setting.
  test.build('test-clang.gyp', 'aliasing_yes', chdir=CHDIR)
  test.run_built_executable('aliasing_yes', chdir=CHDIR, stdout="1\n")
  test.build('test-clang.gyp', 'aliasing_no', chdir=CHDIR)
  test.run_built_executable('aliasing_no', chdir=CHDIR, stdout="0\n")

  # The default behavior changed: strict aliasing used to be off, now it's on
  # by default. The important part is that this is identical for all generators
  # (which it is). TODO(thakis): Enable this once the bots have a newer Xcode.
  #test.build('test-clang.gyp', 'aliasing_default', chdir=CHDIR)
  #test.run_built_executable('aliasing_default', chdir=CHDIR, stdout="1\n")
  # For now, just check the generated ninja file:
  if test.format == 'ninja':
    contents = open(test.built_file_path('obj/aliasing_default.ninja',
                                         chdir=CHDIR)).read()
    if 'strict-aliasing' in contents:
      test.fail_test()

  test.pass_test()
| gpl-3.0 |
13xforever/webserver | admin/CTK/CTK/Container.py | 5 | 1701 | # -*- coding: utf-8 -*-
# CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2009-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
class Container (Widget):
    """
    Base container widget: holds an ordered list of child widgets.
    Rendering a container renders its own widget output followed by the
    output of every child, in insertion order; with no children, only the
    base widget output is produced.
    """
    def __init__ (self):
        Widget.__init__ (self)
        self.child = []

    def __getitem__ (self, n):
        # Index straight into the ordered child list.
        return self.child[n]

    def __len__ (self):
        return len(self.child)

    def __nonzero__ (self):
        # It's an obj, no matter its child.
        return True

    def __iadd__ (self, widget):
        # `container += widget` appends a child; only Widgets are accepted.
        assert isinstance(widget, Widget)
        self.child.append (widget)
        return self

    def Empty (self):
        """Discard every child widget."""
        self.child = []

    def Render (self):
        """Render this widget, then accumulate each child's render output."""
        render = Widget.Render(self)
        for entry in self.child:
            render += entry.Render()
        return render
| gpl-2.0 |
skycucumber/xuemc | python/venv/lib/python2.7/site-packages/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Detection model for Bulgarian text in the ISO-8859-5 (Latin/Cyrillic)
# encoding; shares the bigram precedence matrix with the windows-1251 model.
Latin5BulgarianModel = {
    'charToOrderMap': Latin5_BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}

# Detection model for Bulgarian text in the windows-1251 encoding.
Win1251BulgarianModel = {
    'charToOrderMap': win1251BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-2.0 |
powerjg/gem5-ci-test | ext/ply/test/yacc_error1.py | 174 | 1530 | # -----------------------------------------------------------------------------
# yacc_error1.py
#
# Bad p_error() function
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules

# Operator precedence, lowest to highest. UMINUS is a fictitious token used
# only via the %prec override on the unary-minus rule.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
)

# dictionary of names (symbol table of assigned variables)
names = { }
def p_statement_assign(t):
    'statement : NAME EQUALS expression'
    # The docstring above is the grammar production ply parses - do not edit.
    # Store the evaluated right-hand side under the assigned name.
    names[t[1]] = t[3]
def p_statement_expr(t):
    'statement : expression'
    # A bare expression statement simply prints its value.
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
    | expression MINUS expression
    | expression TIMES expression
    | expression DIVIDE expression'''
    # The docstring above is the grammar (read by ply); keep it intact.
    # t[1] and t[3] are the operand values, t[2] is the operator lexeme.
    left, op, right = t[1], t[2], t[3]
    if op == '+':
        t[0] = left + right
    elif op == '-':
        t[0] = left - right
    elif op == '*':
        t[0] = left * right
    elif op == '/':
        t[0] = left / right
def p_expression_uminus(t):
    'expression : MINUS expression %prec UMINUS'
    # %prec UMINUS gives unary minus a higher precedence than binary MINUS.
    t[0] = -t[2]
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    # Parentheses only group; the value is that of the inner expression.
    t[0] = t[2]
def p_expression_number(t):
    'expression : NUMBER'
    # The expression's value is the NUMBER token's value.
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # Look the identifier up in the symbol table; unknown names evaluate
    # to 0 after reporting the problem (same contract as the original
    # try/except LookupError form - `names` is a plain dict).
    if t[1] in names:
        t[0] = names[t[1]]
    else:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t,s):
    # Deliberately wrong signature: ply requires p_error to take exactly one
    # argument. This file (yacc_error1.py) is a ply test fixture that
    # exercises the resulting validation error - do not "fix" the arity.
    print("Syntax error at '%s'" % t.value)

# Building the parser triggers ply's validation of the p_* functions above.
yacc.yacc()
| bsd-3-clause |
qiyuangong/leetcode | python/032_Longest_Valid_Parentheses.py | 2 | 1488 | import pdb
class Solution(object):
    def longestValidParentheses(self, s):
        """Return the length of the longest valid (well-formed) parentheses
        substring of s.

        Algorithm: use a stack of indices of unmatched '(' characters to
        mark every position that belongs to some matched pair, then the
        answer is the longest contiguous run of marked positions.
        O(n) time, O(n) space. (The previous commented-out DP alternative
        has been removed; this stack approach is the one in use.)

        :type s: str
        :rtype: int
        """
        matched = [False] * len(s)
        stack = []  # indices of as-yet-unmatched '(' characters
        for i, ch in enumerate(s):
            if ch == '(':
                stack.append(i)
            elif stack:
                # ch closes the most recent open '(': mark both endpoints.
                matched[i] = True
                matched[stack.pop()] = True
        # Longest run of consecutive matched positions.
        longest = current = 0
        for ok in matched:
            if ok:
                current += 1
                if current > longest:
                    longest = current
            else:
                current = 0
        return longest
if __name__ == '__main__':
    # Use the print() function form so the demo runs under both Python 2
    # and Python 3 (the old `print s...` statement is Python-2-only).
    s = Solution()
    # print(s.longestValidParentheses(")(((((()())()()))()(()))("))
    print(s.longestValidParentheses(')()())'))
| mit |
harisbal/pandas | pandas/tests/reshape/merge/test_merge_index_as_string.py | 7 | 5670 | import numpy as np
import pytest
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
@pytest.fixture
def df1():
    """Left-hand test frame: 'outer'/'inner' key columns plus values v1."""
    return DataFrame({
        'outer': [1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4],
        'inner': [1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2],
        'v1': np.linspace(0, 1, 11),
    })
@pytest.fixture
def df2():
    """Right-hand test frame: 'outer'/'inner' key columns plus values v2."""
    return DataFrame({
        'outer': [1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3],
        'inner': [1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3],
        'v2': np.linspace(10, 11, 12),
    })
@pytest.fixture(params=[[], ['outer'], ['outer', 'inner']])
def left_df(request, df1):
    """ Construct left test DataFrame with specified levels
    (any of 'outer', 'inner', and 'v1')"""
    levels = request.param
    # An empty params list means: leave the default RangeIndex in place.
    return df1.set_index(levels) if levels else df1
@pytest.fixture(params=[[], ['outer'], ['outer', 'inner']])
def right_df(request, df2):
    """ Construct right test DataFrame with specified levels
    (any of 'outer', 'inner', and 'v2')"""
    levels = request.param
    # An empty params list means: leave the default RangeIndex in place.
    return df2.set_index(levels) if levels else df2
def compute_expected(df_left, df_right,
                     on=None, left_on=None, right_on=None, how=None):
    """
    Compute the expected result of merging ``df_left`` with ``df_right``.

    The inputs may carry zero or more *named* index levels. The expectation
    is produced the long way round: index levels not involved in the merge
    are dropped, the involved ones are turned back into columns, a plain
    column merge is performed, and finally the levels common to both sides
    are restored as the result's index.

    Parameters
    ----------
    df_left : DataFrame
        The left DataFrame (may have zero or more named index levels)
    df_right : DataFrame
        The right DataFrame (may have zero or more named index levels)
    on : list of str
        The on parameter to the merge operation
    left_on : list of str
        The left_on parameter to the merge operation
    right_on : list of str
        The right_on parameter to the merge operation
    how : str
        The how parameter to the merge operation

    Returns
    -------
    DataFrame
        The expected merge result
    """
    # ``on`` is shorthand for identical left/right key lists.
    if on is not None:
        left_on = right_on = on

    # Named index levels on each input.
    left_levels = [name for name in df_left.index.names if name is not None]
    right_levels = [name for name in df_right.index.names if name is not None]

    # Levels that survive into the output index: merge keys that are index
    # levels on *both* sides.
    output_levels = [key for key in left_on
                     if key in left_levels and key in right_levels]

    # Discard index levels that play no part in the merge.
    drop_left = [name for name in left_levels if name not in left_on]
    if drop_left:
        df_left = df_left.reset_index(drop_left, drop=True)
    drop_right = [name for name in right_levels if name not in right_on]
    if drop_right:
        df_right = df_right.reset_index(drop_right, drop=True)

    # Turn the remaining (merge-key) index levels into ordinary columns.
    reset_left = [name for name in left_levels if name in left_on]
    if reset_left:
        df_left = df_left.reset_index(level=reset_left)
    reset_right = [name for name in right_levels if name in right_on]
    if reset_right:
        df_right = df_right.reset_index(level=reset_right)

    # Plain column-on-column merge.
    expected = df_left.merge(df_right,
                             left_on=left_on,
                             right_on=right_on,
                             how=how)

    # Restore the shared key levels as the output index.
    if output_levels:
        expected = expected.set_index(output_levels)

    return expected
@pytest.mark.parametrize('on,how',
                         [(['outer'], 'inner'),
                          (['inner'], 'left'),
                          (['outer', 'inner'], 'right'),
                          (['inner', 'outer'], 'outer')])
def test_merge_indexes_and_columns_on(left_df, right_df, on, how):
    # Merge on mixed index-level/column keys via ``on=`` and compare with
    # the explicitly computed expectation.
    result = left_df.merge(right_df, on=on, how=how)
    expected = compute_expected(left_df, right_df, on=on, how=how)
    assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize('left_on,right_on,how',
                         [(['outer'], ['outer'], 'inner'),
                          (['inner'], ['inner'], 'right'),
                          (['outer', 'inner'], ['outer', 'inner'], 'left'),
                          (['inner', 'outer'], ['inner', 'outer'], 'outer')])
def test_merge_indexes_and_columns_lefton_righton(
        left_df, right_df, left_on, right_on, how):
    # Merge on mixed index-level/column keys via explicit left_on/right_on
    # and compare with the explicitly computed expectation.
    result = left_df.merge(right_df,
                           left_on=left_on, right_on=right_on, how=how)
    expected = compute_expected(left_df, right_df,
                                left_on=left_on,
                                right_on=right_on,
                                how=how)
    assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize('left_index',
                         ['inner', ['inner', 'outer']])
def test_join_indexes_and_columns_on(df1, df2, left_index, join_type):
    # Join a frame indexed by ``left_index`` onto one indexed by
    # ['outer', 'inner'] using on= columns, and compare against the
    # reset-index/join/set-index equivalent.
    left_df = df1.set_index(left_index)
    right_df = df2.set_index(['outer', 'inner'])
    result = left_df.join(right_df, on=['outer', 'inner'], how=join_type,
                          lsuffix='_x', rsuffix='_y')
    expected = (left_df.reset_index()
                .join(right_df, on=['outer', 'inner'], how=join_type,
                      lsuffix='_x', rsuffix='_y')
                .set_index(left_index))
    assert_frame_equal(result, expected, check_like=True)
| bsd-3-clause |
escapewindow/signingscript | src/signingscript/vendored/mozbuild/mozbuild/jar.py | 2 | 23403 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''jarmaker.py provides a python class to package up chrome content by
processing jar.mn files.
See the documentation for jar.mn on MDC for further details on the format.
'''
from __future__ import absolute_import, print_function
import sys
import os
import errno
import re
import six
import logging
from time import localtime
from MozZipFile import ZipFile
from cStringIO import StringIO
from mozbuild.preprocessor import Preprocessor
from mozbuild.action.buildlist import addEntriesToListFile
from mozbuild.util import ensure_bytes
from mozpack.files import FileFinder
import mozpack.path as mozpath
if sys.platform == 'win32':
    # On Win32 the symlink output helper cannot use os.symlink, so it falls
    # back to a kernel32 hard link via ctypes (see OutputHelper_symlink).
    from ctypes import windll, WinError
    CreateHardLink = windll.kernel32.CreateHardLinkA
# Public API of this module.
__all__ = ['JarMaker']
class ZipEntry(object):
    '''File-like accumulator for a single entry of a jar archive.

    Writes are buffered in memory; close() flushes the buffered content
    into the underlying archive through ZipFile.writestr().
    '''

    def __init__(self, name, zipfile):
        self._name = name
        self._zipfile = zipfile
        self._buffer = StringIO()

    def write(self, content):
        '''Buffer the given content for this entry.'''
        self._buffer.write(content)

    def close(self):
        '''Flush all buffered content into the backing zip file.'''
        self._zipfile.writestr(self._name, self._buffer.getvalue())
def getModTime(aPath):
    '''Return the modification time of aPath as a localtime struct,
    or 0 when aPath is not an existing regular file.'''
    if not os.path.isfile(aPath):
        return 0
    return localtime(os.stat(aPath).st_mtime)
class JarManifestEntry(object):
    '''One "output (source)" file entry parsed from a jar.mn section.

    output: destination path inside the jar; source: where to find the
    file; is_locale: source came from a localization directory ("%" prefix);
    preprocess: entry was flagged with "*" for preprocessing.
    '''

    def __init__(self, output, source, is_locale=False, preprocess=False):
        for attr, value in (('output', output),
                            ('source', source),
                            ('is_locale', is_locale),
                            ('preprocess', preprocess)):
            setattr(self, attr, value)
class JarInfo(object):
    '''Everything collected for one jar section of a jar.mn file: the jar's
    base path and name, its relativesrcdir, its chrome manifest lines and
    its file entries.

    Constructed either from a (base, name) pair, or from another JarInfo
    whose base/name are copied (used when a section is continued with a
    fresh relativesrcdir).
    '''

    def __init__(self, base_or_jarinfo, name=None):
        if name is not None:
            assert not isinstance(base_or_jarinfo, JarInfo)
            self.base = base_or_jarinfo or ''
            # For compatibility with existing jar.mn files, a jar without a
            # base lands under chrome/.
            self.name = name if self.base else mozpath.join('chrome', name)
        else:
            assert isinstance(base_or_jarinfo, JarInfo)
            self.base = base_or_jarinfo.base
            self.name = base_or_jarinfo.name
        self.relativesrcdir = None
        self.chrome_manifests = []
        self.entries = []
class DeprecatedJarManifest(Exception):
    '''Raised when a jar.mn uses syntax that is no longer supported
    (currently the "+" overwrite prefix on an entry line).'''
    pass
class JarManifestParser(object):
    '''Stream parser for jar.mn manifests.

    Lines are fed in one at a time through write() (so a Preprocessor can
    use a parser instance directly as its output stream); the parser
    accumulates one JarInfo per jar section and yields them via iteration.
    '''
    # Blank lines and whole-line comments.
    ignore = re.compile('\s*(\#.*)?$')
    # Section headers: "name.jar:" or "[base/path] name.jar:" (or comment).
    jarline = re.compile('''
        (?:
            (?:\[(?P<base>[\w\d.\-\_\\\/{}@]+)\]\s*)? # optional [base/path]
            (?P<jarfile>[\w\d.\-\_\\\/{}]+).jar\: # filename.jar:
        |
            (?:\s*(\#.*)?) # comment
        )\s*$ # whitespaces
        ''', re.VERBOSE)
    # "relativesrcdir <path>:" lines.
    relsrcline = re.compile('relativesrcdir\s+(?P<relativesrcdir>.+?):')
    # "% <chrome manifest entry>" lines.
    regline = re.compile('\%\s+(.*)$')
    # Optional "*" (preprocess) and deprecated "+" (overwrite) prefixes.
    entryre = '(?P<optPreprocess>\*)?(?P<optOverwrite>\+?)\s+'
    # "<output>" or "<output> (<%source>)" entry lines.
    entryline = re.compile(
        entryre + ('(?P<output>[\w\d.\-\_\\\/\+\@]+)\s*'
                   '(\((?P<locale>\%?)(?P<source>[\w\d.\-\_\\\/\@\*]+)\))?\s*$')
    )
    def __init__(self):
        # Section currently being filled in; None before the first header.
        self._current_jar = None
        # All JarInfo sections parsed so far, in file order.
        self._jars = []
    def write(self, line):
        # A Preprocessor instance feeds the parser through calls to this method.
        # Ignore comments and empty lines
        if self.ignore.match(line):
            return
        # A jar manifest file can declare several different sections, each of
        # which applies to a given "jar file". Each of those sections starts
        # with "<name>.jar:", in which case the path is assumed relative to
        # a "chrome" directory, or "[<base/path>] <subpath/name>.jar:", where
        # a base directory is given (usually pointing at the root of the
        # application or addon) and the jar path is given relative to the base
        # directory.
        if self._current_jar is None:
            m = self.jarline.match(line)
            if not m:
                raise RuntimeError(line)
            if m.group('jarfile'):
                self._current_jar = JarInfo(m.group('base'),
                                            m.group('jarfile'))
                self._jars.append(self._current_jar)
            return
        # Within each section, there can be three different types of entries:
        # - indications of the relative source directory we pretend to be in
        # when considering localization files, in the following form;
        # "relativesrcdir <path>:"
        m = self.relsrcline.match(line)
        if m:
            # A new relativesrcdir on a non-empty section opens a fresh
            # JarInfo that shares the same base/name.
            if self._current_jar.chrome_manifests or self._current_jar.entries:
                self._current_jar = JarInfo(self._current_jar)
                self._jars.append(self._current_jar)
            self._current_jar.relativesrcdir = m.group('relativesrcdir')
            return
        # - chrome manifest entries, prefixed with "%".
        m = self.regline.match(line)
        if m:
            # Normalize whitespace and de-duplicate manifest lines.
            rline = ' '.join(m.group(1).split())
            if rline not in self._current_jar.chrome_manifests:
                self._current_jar.chrome_manifests.append(rline)
            return
        # - entries indicating files to be part of the given jar. They are
        # formed thusly:
        # "<dest_path>"
        # or
        # "<dest_path> (<source_path>)"
        # The <dest_path> is where the file(s) will be put in the chrome jar.
        # The <source_path> is where the file(s) can be found in the source
        # directory. The <source_path> may start with a "%" for files part
        # of a localization directory, in which case the "%" counts as the
        # locale.
        # Each entry can be prefixed with "*" for preprocessing.
        m = self.entryline.match(line)
        if m:
            if m.group('optOverwrite'):
                raise DeprecatedJarManifest(
                    'The "+" prefix is not supported anymore')
            self._current_jar.entries.append(JarManifestEntry(
                m.group('output'),
                m.group('source') or mozpath.basename(m.group('output')),
                is_locale=bool(m.group('locale')),
                preprocess=bool(m.group('optPreprocess')),
            ))
            return
        # Anything else terminates the current section; reparse the line as
        # a potential new section header.
        self._current_jar = None
        self.write(line)
    def __iter__(self):
        # Iterate over the parsed JarInfo sections.
        return iter(self._jars)
class JarMaker(object):
    '''JarMaker reads jar.mn files and processes those into jar files or
    flat directories, along with chrome.manifest files.
    '''
    def __init__(self, outputFormat='flat', useJarfileManifest=True,
                 useChromeManifest=False):
        # 'jar', 'flat' or 'symlink'; selects the OutputHelper_* class used.
        self.outputFormat = outputFormat
        # Whether chrome registration goes to <jarname>.manifest and/or
        # to chrome.manifest (see finalizeJar).
        self.useJarfileManifest = useJarfileManifest
        self.useChromeManifest = useChromeManifest
        self.pp = Preprocessor()
        self.topsourcedir = None
        self.sourcedirs = []
        # Locale search dirs; stays None unless given explicitly or derived
        # from relativesrcdir (see generateLocaleDirs).
        self.localedirs = None
        self.l10nbase = None
        self.l10nmerge = None
        self.relativesrcdir = None
        self.rootManifestAppId = None
        # Output paths already emitted; a duplicate output is an error.
        self._seen_output = set()
    def getCommandLineParser(self):
        '''Get a optparse.OptionParser for jarmaker.
        This OptionParser has the options for jarmaker as well as
        the options for the inner PreProcessor.
        '''
        # HACK, we need to unescape the string variables we get,
        # the perl versions didn't grok strings right
        p = self.pp.getCommandLineParser(unescapeDefines=True)
        p.add_option('-f', type='choice', default='jar',
                     choices=('jar', 'flat', 'symlink'),
                     help='fileformat used for output',
                     metavar='[jar, flat, symlink]',
                     )
        p.add_option('-v', action='store_true', dest='verbose',
                     help='verbose output')
        p.add_option('-q', action='store_false', dest='verbose',
                     help='verbose output')
        p.add_option('-e', action='store_true',
                     help='create chrome.manifest instead of jarfile.manifest'
                     )
        p.add_option('-s', type='string', action='append', default=[],
                     help='source directory')
        p.add_option('-t', type='string', help='top source directory')
        p.add_option('-c', '--l10n-src', type='string',
                     action='append', help='localization directory')
        p.add_option('--l10n-base', type='string', action='store',
                     help='base directory to be used for localization (requires relativesrcdir)'
                     )
        p.add_option('--locale-mergedir', type='string', action='store',
                     help='base directory to be used for l10n-merge '
                     '(requires l10n-base and relativesrcdir)'
                     )
        p.add_option('--relativesrcdir', type='string',
                     help='relativesrcdir to be used for localization')
        p.add_option('-d', type='string', help='base directory')
        p.add_option('--root-manifest-entry-appid', type='string',
                     help='add an app id specific root chrome manifest entry.'
                     )
        return p
    # NOTE(review): doZip is accepted but never used in this method.
    def finalizeJar(self, jardir, jarbase, jarname, chromebasepath, register, doZip=True):
        '''Helper method to write out the chrome registration entries to
        jarfile.manifest or chrome.manifest, or both.
        The actual file processing is done in updateManifest.
        '''
        # rewrite the manifest, if entries given
        if not register:
            return
        chromeManifest = os.path.join(jardir, jarbase, 'chrome.manifest')
        if self.useJarfileManifest:
            self.updateManifest(os.path.join(jardir, jarbase,
                                             jarname + '.manifest'),
                                chromebasepath.format(''), register)
            if jarname != 'chrome':
                # Reference the per-jar manifest from chrome.manifest.
                addEntriesToListFile(chromeManifest,
                                     ['manifest {0}.manifest'.format(jarname)])
        if self.useChromeManifest:
            chromebase = os.path.dirname(jarname) + '/'
            self.updateManifest(chromeManifest,
                                chromebasepath.format(chromebase), register)
        # If requested, add a root chrome manifest entry (assumed to be in the parent directory
        # of chromeManifest) with the application specific id. In cases where we're building
        # lang packs, the root manifest must know about application sub directories.
        if self.rootManifestAppId:
            rootChromeManifest = \
                os.path.join(os.path.normpath(os.path.dirname(chromeManifest)),
                             '..', 'chrome.manifest')
            rootChromeManifest = os.path.normpath(rootChromeManifest)
            chromeDir = \
                os.path.basename(os.path.dirname(os.path.normpath(chromeManifest)))
            logging.info("adding '%s' entry to root chrome manifest appid=%s"
                         % (chromeDir, self.rootManifestAppId))
            addEntriesToListFile(rootChromeManifest,
                                 ['manifest %s/chrome.manifest application=%s'
                                  % (chromeDir,
                                     self.rootManifestAppId)])
    def updateManifest(self, manifestPath, chromebasepath, register):
        '''updateManifest replaces the % in the chrome registration entries
        with the given chrome base path, and updates the given manifest file.
        '''
        # dict.fromkeys de-duplicates the rewritten entries.
        myregister = dict.fromkeys(map(lambda s: s.replace('%',
                                                           chromebasepath), register))
        addEntriesToListFile(manifestPath, six.iterkeys(myregister))
    def makeJar(self, infile, jardir):
        '''makeJar is the main entry point to JarMaker.
        It takes the input file, the output directory, the source dirs and the
        top source dir as argument, and optionally the l10n dirs.
        '''
        # making paths absolute, guess srcdir if file and add to sourcedirs
        def _normpath(p): return os.path.normpath(os.path.abspath(p))
        self.topsourcedir = _normpath(self.topsourcedir)
        self.sourcedirs = [_normpath(p) for p in self.sourcedirs]
        if self.localedirs:
            self.localedirs = [_normpath(p) for p in self.localedirs]
        elif self.relativesrcdir:
            self.localedirs = \
                self.generateLocaleDirs(self.relativesrcdir)
        # NOTE(review): basestring is Python 2-only; under Python 3 a str
        # infile would raise NameError here -- confirm intended interpreter.
        if isinstance(infile, basestring):
            logging.info('processing ' + infile)
            self.sourcedirs.append(_normpath(os.path.dirname(infile)))
        pp = self.pp.clone()
        # The parser doubles as the preprocessor's output stream.
        pp.out = JarManifestParser()
        pp.do_include(infile)
        for info in pp.out:
            self.processJarSection(info, jardir)
    def generateLocaleDirs(self, relativesrcdir):
        '''Return the locale directories to search, in priority order:
        l10n-merge dir, l10n base dir, then the in-tree en-US dir (the
        latter only when merging, or when not localized at all).
        '''
        if os.path.basename(relativesrcdir) == 'locales':
            # strip locales
            l10nrelsrcdir = os.path.dirname(relativesrcdir)
        else:
            l10nrelsrcdir = relativesrcdir
        locdirs = []
        # generate locales dirs, merge, l10nbase, en-US
        if self.l10nmerge:
            locdirs.append(os.path.join(self.l10nmerge, l10nrelsrcdir))
        if self.l10nbase:
            locdirs.append(os.path.join(self.l10nbase, l10nrelsrcdir))
        if self.l10nmerge or not self.l10nbase:
            # add en-US if we merge, or if it's not l10n
            locdirs.append(os.path.join(self.topsourcedir,
                                        relativesrcdir, 'en-US'))
        return locdirs
    def processJarSection(self, jarinfo, jardir):
        '''Internal method called by makeJar to actually process a section
        of a jar.mn file.
        '''
        # chromebasepath is used for chrome registration manifests
        # {0} is getting replaced with chrome/ for chrome.manifest, and with
        # an empty string for jarfile.manifest
        chromebasepath = '{0}' + os.path.basename(jarinfo.name)
        if self.outputFormat == 'jar':
            chromebasepath = 'jar:' + chromebasepath + '.jar!'
        chromebasepath += '/'
        jarfile = os.path.join(jardir, jarinfo.base, jarinfo.name)
        jf = None
        if self.outputFormat == 'jar':
            # jar
            jarfilepath = jarfile + '.jar'
            try:
                os.makedirs(os.path.dirname(jarfilepath))
            except OSError as error:
                if error.errno != errno.EEXIST:
                    raise
            jf = ZipFile(jarfilepath, 'a', lock=True)
            outHelper = self.OutputHelper_jar(jf)
        else:
            outHelper = getattr(self, 'OutputHelper_'
                                + self.outputFormat)(jarfile)
        if jarinfo.relativesrcdir:
            self.localedirs = self.generateLocaleDirs(jarinfo.relativesrcdir)
        for e in jarinfo.entries:
            self._processEntryLine(e, outHelper, jf)
        self.finalizeJar(jardir, jarinfo.base, jarinfo.name, chromebasepath,
                         jarinfo.chrome_manifests)
        if jf is not None:
            jf.close()
    def _processEntryLine(self, e, outHelper, jf):
        '''Resolve and emit a single JarManifestEntry: expand wildcard
        sources recursively, locate the source file in the locale/topsrc/
        source directories, then preprocess, copy or symlink it into
        outHelper.
        '''
        out = e.output
        src = e.source
        # pick the right sourcedir -- l10n, topsrc or src
        if e.is_locale:
            # If the file is a Fluent l10n resource, we want to skip the
            # 'en-US' fallbacking.
            #
            # To achieve that, we're testing if we have more than one localedir,
            # and if the last of those has 'en-US' in it.
            # If that's the case, we're removing the last one.
            if (e.source.endswith('.ftl') and
                len(self.localedirs) > 1 and
                'en-US' in self.localedirs[-1]):
                src_base = self.localedirs[:-1]
            else:
                src_base = self.localedirs
        elif src.startswith('/'):
            # path/in/jar/file_name.xul (/path/in/sourcetree/file_name.xul)
            # refers to a path relative to topsourcedir, use that as base
            # and strip the leading '/'
            src_base = [self.topsourcedir]
            src = src[1:]
        else:
            # use srcdirs and the objdir (current working dir) for relative paths
            src_base = self.sourcedirs + [os.getcwd()]
        if '*' in src:
            # Wildcard source: re-emit one entry per matched file.
            def _prefix(s):
                for p in s.split('/'):
                    if '*' not in p:
                        yield p + '/'
            prefix = ''.join(_prefix(src))
            emitted = set()
            for _srcdir in src_base:
                finder = FileFinder(_srcdir)
                for path, _ in finder.find(src):
                    # If the path was already seen in one of the other source
                    # directories, skip it. That matches the non-wildcard case
                    # below, where we pick the first existing file.
                    reduced_path = path[len(prefix):]
                    if reduced_path in emitted:
                        continue
                    emitted.add(reduced_path)
                    e = JarManifestEntry(
                        mozpath.join(out, reduced_path),
                        path,
                        is_locale=e.is_locale,
                        preprocess=e.preprocess,
                    )
                    self._processEntryLine(e, outHelper, jf)
            return
        # check if the source file exists
        realsrc = None
        for _srcdir in src_base:
            if os.path.isfile(os.path.join(_srcdir, src)):
                realsrc = os.path.join(_srcdir, src)
                break
        if realsrc is None:
            if jf is not None:
                jf.close()
            raise RuntimeError('File "{0}" not found in {1}'.format(src,
                                                                    ', '.join(src_base)))
        if out in self._seen_output:
            raise RuntimeError('%s already added' % out)
        self._seen_output.add(out)
        if e.preprocess:
            outf = outHelper.getOutput(out)
            inf = open(realsrc)
            pp = self.pp.clone()
            # CSS can't use the default '#' preprocessor marker.
            if src[-4:] == '.css':
                pp.setMarker('%')
            pp.out = outf
            pp.do_include(inf)
            pp.failUnused(realsrc)
            outf.close()
            inf.close()
            return
        # copy or symlink if newer
        if getModTime(realsrc) > outHelper.getDestModTime(e.output):
            if self.outputFormat == 'symlink':
                outHelper.symlink(realsrc, out)
                return
            outf = outHelper.getOutput(out)
            # open in binary mode, this can be images etc
            inf = open(realsrc, 'rb')
            outf.write(inf.read())
            outf.close()
            inf.close()
    class OutputHelper_jar(object):
        '''Provide getDestModTime and getOutput for a given jarfile.'''
        def __init__(self, jarfile):
            self.jarfile = jarfile
        def getDestModTime(self, aPath):
            try:
                info = self.jarfile.getinfo(aPath)
                return info.date_time
            except Exception:
                # Entry not present (or unreadable): treat as never built.
                return 0
        def getOutput(self, name):
            return ZipEntry(name, self.jarfile)
    class OutputHelper_flat(object):
        '''Provide getDestModTime and getOutput for a given flat
        output directory. The helper method ensureDirFor is used by
        the symlink subclass.
        '''
        def __init__(self, basepath):
            self.basepath = basepath
        def getDestModTime(self, aPath):
            return getModTime(os.path.join(self.basepath, aPath))
        def getOutput(self, name):
            out = self.ensureDirFor(name)
            # remove previous link or file
            try:
                os.remove(out)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
            return open(out, 'wb')
        def ensureDirFor(self, name):
            # Create the parent directory of the output path if needed.
            out = os.path.join(self.basepath, name)
            outdir = os.path.dirname(out)
            if not os.path.isdir(outdir):
                try:
                    os.makedirs(outdir)
                except OSError as error:
                    if error.errno != errno.EEXIST:
                        raise
            return out
    class OutputHelper_symlink(OutputHelper_flat):
        '''Subclass of OutputHelper_flat that provides a helper for
        creating a symlink including creating the parent directories.
        '''
        def symlink(self, src, dest):
            out = self.ensureDirFor(dest)
            # remove previous link or file
            try:
                os.remove(out)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
            if sys.platform != 'win32':
                os.symlink(src, out)
            else:
                # On Win32, use ctypes to create a hardlink
                rv = CreateHardLink(ensure_bytes(out), ensure_bytes(src), None)
                if rv == 0:
                    raise WinError()
def main(args=None):
    '''Command-line entry point: parse options and run JarMaker.makeJar.

    NOTE(review): when args defaults to sys.argv, argv[0] is passed to
    parse_args as a positional argument too -- confirm callers pass a
    trimmed argument list.
    '''
    args = args or sys.argv
    jm = JarMaker()
    p = jm.getCommandLineParser()
    (options, args) = p.parse_args(args)
    jm.outputFormat = options.f
    jm.sourcedirs = options.s
    jm.topsourcedir = options.t
    if options.e:
        # -e: write chrome.manifest instead of per-jar manifests.
        jm.useChromeManifest = True
        jm.useJarfileManifest = False
    if options.l10n_base:
        # l10n-base requires relativesrcdir and excludes l10n-src.
        if not options.relativesrcdir:
            p.error('relativesrcdir required when using l10n-base')
        if options.l10n_src:
            p.error('both l10n-src and l10n-base are not supported')
        jm.l10nbase = options.l10n_base
        jm.relativesrcdir = options.relativesrcdir
        jm.l10nmerge = options.locale_mergedir
        if jm.l10nmerge and not os.path.isdir(jm.l10nmerge):
            logging.warning("WARNING: --locale-mergedir passed, but '%s' does not exist. "
                            "Ignore this message if the locale is complete." % jm.l10nmerge)
    elif options.locale_mergedir:
        p.error('l10n-base required when using locale-mergedir')
    jm.localedirs = options.l10n_src
    if options.root_manifest_entry_appid:
        jm.rootManifestAppId = options.root_manifest_entry_appid
    # Map -v/-q onto the logging level (default INFO).
    noise = logging.INFO
    if options.verbose is not None:
        noise = options.verbose and logging.DEBUG or logging.WARN
    if sys.version_info[:2] > (2, 3):
        logging.basicConfig(format='%(message)s')
    else:
        logging.basicConfig()
    logging.getLogger().setLevel(noise)
    topsrc = options.t
    topsrc = os.path.normpath(os.path.abspath(topsrc))
    # With no positional argument, read the jar.mn from stdin.
    if not args:
        infile = sys.stdin
    else:
        (infile, ) = args
    jm.makeJar(infile, options.d)
| mpl-2.0 |
eeshangarg/oh-mainline | vendor/packages/docutils/test/test_writers/test_html4css1_misc.py | 16 | 7496 | #! /usr/bin/env python
# coding: utf-8
# $Id: test_html4css1_misc.py 7630 2013-03-15 22:27:04Z milde $
# Authors: Lea Wiemann, Dmitry Shachnev, Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous HTML writer tests.
"""
from __init__ import DocutilsTestSupport
from docutils import core
from docutils._compat import b
class EncodingTestCase(DocutilsTestSupport.StandardTestCase):

    def test_xmlcharrefreplace(self):
        # xmlcharrefreplace should be the default output-encoding error
        # handler: the euro sign cannot be encoded in latin1, so it must
        # appear as a character reference in the output.
        overrides = {'output_encoding': 'latin1',
                     'stylesheet': '',
                     '_disable_config': True}
        result = core.publish_string(u'EUR = \u20ac',
                                     writer_name='html4css1',
                                     settings_overrides=overrides)
        self.assertIn(b('EUR = €'), result)
class SettingsTestCase(DocutilsTestSupport.StandardTestCase):
    '''Check stylesheet-related settings of the html4css1 writer.'''
    # Minimal reStructuredText source used as input for every test.
    data = 'test'
    def test_default_stylesheet(self):
        # default style sheet, embedded
        mysettings = {'_disable_config': True,}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('Default cascading style sheet '
                      'for the HTML output of Docutils.', styles)
    def test_default_stylesheet_linked(self):
        # default style sheet, linked
        mysettings = {'_disable_config': True,
                      'embed_stylesheet': False}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
    def test_math_stylesheet_linked(self):
        # default + math style sheet, linked
        mysettings = {'_disable_config': True,
                      'embed_stylesheet': False,
                      'stylesheet_path': 'html4css1.css, math.css'}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
        self.assertIn('docutils/writers/html4css1/math.css', styles)
    def test_custom_stylesheet_linked(self):
        # default + custom style sheet, linked
        mysettings = {'_disable_config': True,
                      'embed_stylesheet': False,
                      'stylesheet_path': 'html4css1.css, '
                      'data/ham.css'}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
        self.assertIn('href="data/ham.css"', styles)
    def test_custom_stylesheet_dir(self):
        # stylesheets resolved against the given stylesheet_dirs, linked
        mysettings = {'_disable_config': True,
                      'embed_stylesheet': False,
                      'stylesheet_dirs': ('../docutils/writers/html4css1/',
                                          'data'),
                      'stylesheet_path': 'html4css1.css, ham.css'}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
        self.assertIn('href="data/ham.css"', styles)
    def test_custom_stylesheet_dir_embedded(self):
        # stylesheet resolved against stylesheet_dirs, embedded inline
        mysettings = {'_disable_config': True,
                      'embed_stylesheet': True,
                      'stylesheet_dirs': ('../docutils/writers/html4css1/',
                                          'data'),
                      'stylesheet_path': 'ham.css'}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('dl.docutils dd {\n margin-bottom: 0.5em }', styles)
class MathTestCase(DocutilsTestSupport.StandardTestCase):
    """Attention: This class tests the current implementation of maths support
    which is open to change in future Docutils releases. """
    # Template for the script tag the writer emits for MathJax.
    mathjax_script = '<script type="text/javascript" src="%s">'
    default_mathjax_url = ('http://cdn.mathjax.org/mathjax/latest/MathJax.js'
                           '?config=TeX-AMS-MML_HTMLorMML')
    custom_mathjax_url = ('file:///usr/share/javascript/mathjax/MathJax.js'
                          '?config=TeX-AMS-MML_HTMLorMML')
    # Minimal source containing one math role.
    data = ':math:`42`'
    def test_math_output_default(self):
        # HTML with math.css stylesheet (since 0.11)
        mysettings = {'_disable_config': True,}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertIn('convert LaTeX equations to HTML output.', styles)
    def test_math_output_mathjax(self):
        # Explicitly specifying math_output=MathJax, case insensitively
        # use default MathJax URL
        mysettings = {'_disable_config': True,
                      'math_output': 'MathJax'}
        head = core.publish_parts(self.data, writer_name='html4css1',
                                  settings_overrides=mysettings)['head']
        self.assertIn(self.mathjax_script % self.default_mathjax_url, head)
    def test_math_output_mathjax_custom(self):
        # Customizing MathJax URL
        mysettings = {'_disable_config': True,
                      'math_output':
                      'mathjax %s' % self.custom_mathjax_url}
        head = core.publish_parts(self.data, writer_name='html4css1',
                                  settings_overrides=mysettings)['head']
        self.assertIn(self.mathjax_script % self.custom_mathjax_url, head)
    def test_math_output_html(self):
        # math_output=HTML must not pull in the MathJax script.
        mysettings = {'_disable_config': True,
                      'math_output': 'HTML'}
        head = core.publish_parts(self.data, writer_name='html4css1',
                                  settings_overrides=mysettings)['head']
        # There should be no MathJax script when math_output is not MathJax
        self.assertNotIn('MathJax.js', head)
    def test_math_output_html_stylesheet(self):
        # Additional stylesheets given after "HTML" are linked as well.
        mysettings = {'_disable_config': True,
                      'math_output': 'HTML math.css,custom/style.css',
                      'stylesheet_dirs': ('.', 'functional/input/data'),
                      'embed_stylesheet': False}
        styles = core.publish_parts(self.data, writer_name='html4css1',
                                    settings_overrides=mysettings)['stylesheet']
        self.assertEqual(u"""\
<link rel="stylesheet" href="functional/input/data/html4css1.css" type="text/css" />
<link rel="stylesheet" href="functional/input/data/math.css" type="text/css" />
<link rel="stylesheet" href="custom/style.css" type="text/css" />
""", styles)
    def test_math_output_mathjax_no_math(self):
        mysettings = {'_disable_config': True,
                      'math_output': 'MathJax'}
        # There should be no math script when text does not contain math
        head = core.publish_parts('No math.', writer_name='html4css1')['head']
        self.assertNotIn('MathJax', head)
if __name__ == '__main__':
    # Allow running this test module directly.
    import unittest
    unittest.main()
| agpl-3.0 |
SaschaMester/delicium | tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py | 176 | 9706 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dromaeo benchmark automation script.
Script runs dromaeo tests in browsers specified by --browser switch and saves
results to a spreadsheet on docs.google.com.
Prerequisites:
1. Install Google Data APIs Python Client Library from
http://code.google.com/p/gdata-python-client.
2. Checkout Dromaeo benchmark from
http://src.chromium.org/svn/trunk/src/chrome/test/data/dromaeo and provide
local path to it in --dromaeo_home switch.
3. Create a spreadsheet at http://docs.google.com and specify its name in
--spreadsheet switch
Benchmark results are presented in the following format:
browser | date time
test 1 name|m11|...|m1n|test 1 average mean| |e11|...|e1n|test 1 average error
test 2 name|m21|...|m2n|test 2 average mean| |e21|...|e2n|test 2 average error
...
Here mij is mean run/s in individual dromaeo test i during benchmark run j,
eij is error in individual dromaeo test i during benchmark run j.
Example usage:
dromaeo_benchmark_runner.py -b "E:\chromium\src\chrome\Release\chrome.exe"
-b "C:\Program Files (x86)\Safari\safari.exe"
-b "C:\Program Files (x86)\Opera 10.50 pre-alpha\opera.exe" -n 1
-d "E:\chromium\src\chrome\test\data\dromaeo" -f dom -e example@gmail.com
"""
import getpass
import json
import os
import re
import subprocess
import time
import urlparse
from optparse import OptionParser
from BaseHTTPServer import HTTPServer
import SimpleHTTPServer
import gdata.spreadsheet.service
# Column budget enforced in SpreadsheetWriter.PrepareSpreadsheet.
max_spreadsheet_columns = 20
# Per-test properties reported by dromaeo; one column group per property.
test_props = ['mean', 'error']
def ParseArguments():
  """Parse the benchmark runner's command line from sys.argv.

  Returns the optparse options object; raises Exception when the
  mandatory -d/--dromaeo_home option is missing.
  """
  option_specs = [
      (('-b', '--browser'),
       dict(action='append', dest='browsers',
            help='list of browsers to test')),
      (('-n', '--run_count'),
       dict(dest='run_count', type='int', default=5,
            help='number of runs')),
      (('-d', '--dromaeo_home'),
       dict(dest='dromaeo_home',
            help='directory with your dromaeo files')),
      (('-p', '--port'),
       dict(dest='port', type='int', default=8080,
            help='http server port')),
      (('-f', '--filter'),
       dict(dest='filter', default='dom',
            help='dromaeo suite filter')),
      (('-e', '--email'),
       dict(dest='email',
            help='your google docs account')),
      (('-s', '--spreadsheet'),
       dict(dest='spreadsheet_title', default='dromaeo',
            help='your google docs spreadsheet name')),
  ]
  parser = OptionParser()
  for flags, kwargs in option_specs:
    parser.add_option(*flags, **kwargs)
  (options, _) = parser.parse_args()
  if not options.dromaeo_home:
    raise Exception('please specify dromaeo_home')
  return options
def KillProcessByName(process_name):
  """Kill the process whose executable path equals process_name.

  Windows-only: the pid is looked up via `wmic` output and the process is
  terminated with `taskkill`. Does nothing when no matching process is
  found.
  """
  listing = subprocess.Popen('wmic process get processid, executablepath',
                             stdout=subprocess.PIPE)
  output = str(listing.communicate()[0])
  match = re.search(re.escape(process_name) + '\s+(\d+)', output)
  if match:
    subprocess.call('taskkill /pid %s' % match.group(1))
class SpreadsheetWriter(object):
  "Utility class for storing benchmarking results in Google spreadsheets."
  def __init__(self, email, spreadsheet_title):
    '''Login to google docs and search for spreadsheet'''
    # Cached ClientLogin token, so the password is only asked for once.
    self.token_file = os.path.expanduser("~/.dromaeo_bot_auth_token")
    self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
    authenticated = False
    # First try the cached token; fall back to interactive login below.
    if os.path.exists(self.token_file):
      token = ''
      try:
        file = open(self.token_file, 'r')
        token = file.read()
        file.close()
        self.gd_client.SetClientLoginToken(token)
        # Probe the token with a real request before trusting it.
        self.gd_client.GetSpreadsheetsFeed()
        authenticated = True
      except (IOError, gdata.service.RequestError):
        pass
    if not authenticated:
      self.gd_client.email = email
      self.gd_client.password = getpass.getpass('Password for %s: ' % email)
      self.gd_client.source = 'python robot for dromaeo'
      self.gd_client.ProgrammaticLogin()
      token = self.gd_client.GetClientLoginToken()
      try:
        file = open(self.token_file, 'w')
        file.write(token)
        file.close()
      except (IOError):
        pass
      # NOTE: 0600 is Python 2 octal syntax (0o600 in Python 3).
      os.chmod(self.token_file, 0600)
    # Search for the spreadsheet with title = spreadsheet_title.
    spreadsheet_feed = self.gd_client.GetSpreadsheetsFeed()
    for spreadsheet in spreadsheet_feed.entry:
      if spreadsheet.title.text == spreadsheet_title:
        self.spreadsheet_key = spreadsheet.id.text.rsplit('/', 1)[1]
    # NOTE(review): if no title matches, self.spreadsheet_key is never
    # assigned, so this check raises AttributeError rather than the
    # intended Exception -- confirm and fix upstream.
    if not self.spreadsheet_key:
      raise Exception('Spreadsheet %s not found' % spreadsheet_title)
    # Get the key of the first worksheet in spreadsheet.
    worksheet_feed = self.gd_client.GetWorksheetsFeed(self.spreadsheet_key)
    self.worksheet_key = worksheet_feed.entry[0].id.text.rsplit('/', 1)[1]
  def _InsertRow(self, row):
    # Map positional values onto the 'c0'..'cn' column titles created by
    # PrepareSpreadsheet, so a whole row is one InsertRow request.
    row = dict([('c' + str(i), row[i]) for i in xrange(len(row))])
    self.gd_client.InsertRow(row, self.spreadsheet_key, self.worksheet_key)
  def _InsertBlankRow(self):
    # A string of '-' acts as a visual separator row.
    self._InsertRow('-' * self.columns_count)
  def PrepareSpreadsheet(self, run_count):
    """Update cells in worksheet topmost row with service information.
    Calculate column count corresponding to run_count and create worksheet
    column titles [c0, c1, ...] in the topmost row to speed up spreadsheet
    updates (it allows to insert a whole row with a single request)
    """
    # Calculate the number of columns we need to present all test results.
    self.columns_count = (run_count + 2) * len(test_props)
    if self.columns_count > max_spreadsheet_columns:
      # Google spreadsheet has just max_spreadsheet_columns columns.
      max_run_count = max_spreadsheet_columns / len(test_props) - 2
      raise Exception('maximum run count is %i' % max_run_count)
    # Create worksheet column titles [c0, c1, ..., cn].
    for i in xrange(self.columns_count):
      self.gd_client.UpdateCell(1, i + 1, 'c' + str(i), self.spreadsheet_key,
                                self.worksheet_key)
  def WriteColumnTitles(self, run_count):
    "Create titles for test results (mean 1, mean 2, ..., average mean, ...)"
    row = []
    for prop in test_props:
      # Blank spacer column before each property group.
      row.append('')
      for i in xrange(run_count):
        row.append('%s %i' % (prop, i + 1))
      row.append('average ' + prop)
    self._InsertRow(row)
  def WriteBrowserBenchmarkTitle(self, browser_name):
    "Create browser benchmark title (browser name, date time)"
    self._InsertBlankRow()
    self._InsertRow([browser_name, time.strftime('%d.%m.%Y %H:%M:%S')])
  def WriteBrowserBenchmarkResults(self, test_name, test_data):
    "Insert a row with single test results"
    row = []
    for prop in test_props:
      # Test name leads the row; later property groups get a blank spacer.
      if not row:
        row.append(test_name)
      else:
        row.append('')
      row.extend([str(x) for x in test_data[prop]])
      row.append(str(sum(test_data[prop]) / len(test_data[prop])))
    self._InsertRow(row)
class DromaeoHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Serves the dromaeo files and captures benchmark results posted back."""

  def do_POST(self):
    """Acknowledges the POST and stashes the decoded 'data' field on the
    server object for the main loop to pick up."""
    # Reply immediately so the browser is not left waiting.
    self.send_response(200)
    self.end_headers()
    self.wfile.write("<HTML>POST OK.<BR><BR>")
    # Read the url-encoded request body and decode it into a dict of lists.
    content_length = int(self.headers.getheader('content-length'))
    form = urlparse.parse_qs(self.rfile.read(content_length))
    self.server.got_post = True
    self.server.post_data = form['data']
class BenchmarkResults(object):
  """Accumulates dromaeo test results across multiple browser runs."""

  def __init__(self):
    # Maps test name -> {property name -> list of per-run values}.
    self.data = {}

  def ProcessBrowserPostData(self, data):
    """Parses one run's JSON payload and appends its values to self.data."""
    for test in json.loads(data[0]):
      name = test['name']
      if name not in self.data:
        # Test is encountered for the first time: start empty series.
        self.data[name] = dict((prop, []) for prop in test_props)
      series = self.data[name]
      for prop in test_props:
        # Some browsers omit properties (workaround for Opera 10.5);
        # record -1 as a placeholder in that case.
        series[prop].append(test.get(prop, -1))
def main():
options = ParseArguments()
# Start sever with dromaeo.
os.chdir(options.dromaeo_home)
server = HTTPServer(('', options.port), DromaeoHandler)
# Open and prepare spreadsheet on google docs.
spreadsheet_writer = SpreadsheetWriter(options.email,
options.spreadsheet_title)
spreadsheet_writer.PrepareSpreadsheet(options.run_count)
spreadsheet_writer.WriteColumnTitles(options.run_count)
for browser in options.browsers:
browser_name = os.path.splitext(os.path.basename(browser))[0]
spreadsheet_writer.WriteBrowserBenchmarkTitle(browser_name)
benchmark_results = BenchmarkResults()
for run_number in xrange(options.run_count):
print '%s run %i' % (browser_name, run_number + 1)
# Run browser.
test_page = 'http://localhost:%i/index.html?%s&automated&post_json' % (
options.port, options.filter)
browser_process = subprocess.Popen('%s "%s"' % (browser, test_page))
server.got_post = False
server.post_data = None
# Wait until POST request from browser.
while not server.got_post:
server.handle_request()
benchmark_results.ProcessBrowserPostData(server.post_data)
# Kill browser.
KillProcessByName(browser)
browser_process.wait()
# Insert test results into spreadsheet.
for (test_name, test_data) in benchmark_results.data.iteritems():
spreadsheet_writer.WriteBrowserBenchmarkResults(test_name, test_data)
server.socket.close()
return 0
# Script entry point: the process exit status is main()'s return value.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
lotharwissler/bioinformatics | python/blast/remove-from-blastout.py | 1 | 3024 | #!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # comand line argument handling
from low import * # custom functions, written by myself
# =============================================================================
def show_help():
  """Prints the program parameter list and usage information, then exits
  with status 1."""
  usage_lines = [
      "usage: " + sys.argv[0] + " -f <path> -i <path>",
      " ",
      " option description",
      " -h help (this text here)",
      " -b blastout file (-m 8)",
      " -i file with the IDs to keep",
      " ",
  ]
  for line in usage_lines:
    stdout(line)
  sys.exit(1)
# =============================================================================
def handle_arguments():
  """Verifies the presence of all necessary arguments and returns them as
  a dict; prints usage and exits on any problem."""
  if len(sys.argv) == 1:
    stderr( "no arguments provided." )
    show_help()
  try:  # check for the right arguments
    keys, values = getopt.getopt( sys.argv[1:], "hi:b:" )
  except getopt.GetoptError:
    stderr( "invalid arguments provided." )
    show_help()
  args = {'verbose': 0}
  for key, value in keys:
    if key == '-b': args['in-blastout'] = value
    if key == '-i': args['in-ids'] = value
  # Both input files are mandatory ...
  for name in ('in-blastout', 'in-ids'):
    if name not in args:
      stderr( name + " file missing." )
      show_help()
  # ... and must exist on disk.
  for name in ('in-blastout', 'in-ids'):
    if not file_exists( args.get(name) ):
      stderr( name + " file does not exist." )
      show_help()
  return args
# =============================================================================
def get_ids_to_remove( args ):
  """
  Reads the in-ids file and returns a hash whose keys are the IDs to
  remove. A leading '>' (fasta header marker) is stripped from each line.
  """
  ids = {}
  handle = open( args.get('in-ids'), 'r' )
  for row in handle:
    ids[ row.rstrip().replace('>', '') ] = 1
  handle.close()
  return ids
# =============================================================================
def reduce_blastout( args, rmids ):
"""
reads in in-fasta and creates out-fasta that only contains the records
whose id is contained in the hash keepids.
"""
retained = 0
fo = open( args.get('in-blastout') )
for line in fo:
line = line.rstrip()
if len(line) == 0: continue
hid, qid = line.split("\t")[0:2]
if rmids.has_key(hid) or rmids.has_key(qid): continue
print line
retained += 1
fo.close()
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
# Parse arguments, load the IDs to drop, then filter the blast output to
# stdout (lines with a removed ID are skipped).
args = handle_arguments( )
rmids = get_ids_to_remove( args )
reduce_blastout( args, rmids )
| mit |
306777HC/libforensics | unittests/tests/win/shell/link/objects.py | 13 | 47648 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the lf.win.shell.link.objects module."""
# stdlib imports
from io import BytesIO
from uuid import UUID
from unittest import TestCase
from os.path import join
# local imports
from lf.dec import ByteIStream, RawIStream
from lf.time import FILETIMETodatetime
from lf.win.objects import LCID, GUIDToUUID
from lf.win.con.objects import COORD
from lf.win.shell.objects import SHITEMID, ITEMIDLIST
from lf.win.shell.link.ctypes import (
file_attributes, link_flags, shell_link_header, domain_relative_obj_id
)
from lf.win.shell.link.objects import (
ShellLink, FileAttributes, LinkFlags, ShellLinkHeader, StringData,
LinkInfo, VolumeID, CNRL, ExtraDataBlock, ConsoleProps, ConsoleFEProps,
DarwinProps, ExpandableStringsDataBlock, EnvironmentProps,
IconEnvironmentProps, KnownFolderProps, PropertyStoreProps, ShimProps,
SpecialFolderProps, DomainRelativeObjId, TrackerProps,
VistaAndAboveIDListProps, TerminalBlock, ExtraDataBlockFactory,
StringDataSet
)
__docformat__ = "restructuredtext en"
# Public API of this module: one TestCase per shell-link structure.
__all__ = [
    "ShellLinkHeaderTestCase", "FileAttributesTestCase", "LinkFlagsTestCase",
    "LinkInfoTestCase", "VolumeIDTestCase", "CNRLTestCase",
    "StringDataTestCase", "ExtraDataBlockTestCase", "ConsolePropsTestCase",
    "ConsoleFEPropsTestCase", "DarwinPropsTestCase",
    "ExpandableStringsDataBlockTestCase", "EnvironmentPropsTestCase",
    "IconEnvironmentPropsTestCase", "KnownFolderPropsTestCase",
    "PropertyStorePropsTestCase", "ShimPropsTestCase",
    "SpecialFolderPropsTestCase", "DomainRelativeObjIdTestCase",
    "TrackerPropsTestCase", "VistaAndAboveIDListPropsTestCase",
    "TerminalBlockTestCase", "ExtraDataBlockFactoryTestCase"
]
class StringDataTestCase(TestCase):
    """Tests StringData.from_stream against good/bad sizes and encodings."""

    def test_from_stream(self):
        """Parses each sample twice (implicit and explicit offset 0) and
        covers every combination of (good/bad declared size) x
        (well-formed/ill-formed data) x (is_unicode True/False)."""
        ae = self.assertEqual
        # good size, good data format, is_unicode = True
        stream = ByteIStream(b"\x04\x00a\x00b\x00c\x00d\x00")
        sd1 = StringData.from_stream(stream, is_unicode=True)
        sd2 = StringData.from_stream(stream, 0, True)
        ae(sd1.size, 10)
        ae(sd2.size, 10)
        ae(sd1.char_count, 4)
        ae(sd1.string, "abcd")
        ae(sd2.char_count, 4)
        ae(sd2.string, "abcd")
        # good size, good data format, is_unicode = False
        stream = ByteIStream(b"\x04\x00a\x01cd")
        sd1 = StringData.from_stream(stream, is_unicode=False)
        sd2 = StringData.from_stream(stream, 0, False)
        ae(sd1.size, 6)
        ae(sd2.size, 6)
        ae(sd1.char_count, 4)
        ae(sd1.string, b"a\x01cd")
        ae(sd2.char_count, 4)
        ae(sd2.string, b"a\x01cd")
        # good size, bad data format, is_unicode = True
        # (0xD800 little-endian is an unpaired UTF-16 surrogate)
        stream = ByteIStream(b"\x02\x00\x00\xD8\x00\xD8")
        sd1 = StringData.from_stream(stream, is_unicode=True)
        sd2 = StringData.from_stream(stream, 0, True)
        ae(sd1.size, 6)
        ae(sd2.size, 6)
        ae(sd1.char_count, 2)
        ae(sd1.string, "\x00")
        ae(sd2.char_count, 2)
        ae(sd2.string, "\x00")
        # good size, bad data format, is_unicode = False
        stream = ByteIStream(b"\x02\x00\x00\xD8\x00\xD8")
        sd1 = StringData.from_stream(stream, is_unicode=False)
        sd2 = StringData.from_stream(stream, 0, is_unicode=False)
        ae(sd1.size, 4)
        ae(sd2.size, 4)
        ae(sd1.char_count, 2)
        ae(sd1.string, b"\x00\xD8")
        ae(sd2.char_count, 2)
        ae(sd2.string, b"\x00\xD8")
        # bad size, good data format, is_unicode = True
        # (declared char_count of 4 exceeds the 2 bytes actually present)
        stream = ByteIStream(b"\x04\x00a\x01")
        sd1 = StringData.from_stream(stream, is_unicode=True)
        sd2 = StringData.from_stream(stream, 0, True)
        ae(sd1.size, 10)
        ae(sd2.size, 10)
        ae(sd1.char_count, 4)
        ae(sd1.string, "\u0161")
        ae(sd2.char_count, 4)
        ae(sd2.string, "\u0161")
        # bad size, good data format, is_unicode = False
        stream = ByteIStream(b"\x04\x00a\x01")
        sd1 = StringData.from_stream(stream, is_unicode=False)
        sd2 = StringData.from_stream(stream, 0, False)
        ae(sd1.size, 6)
        ae(sd2.size, 6)
        ae(sd1.char_count, 4)
        ae(sd1.string, b"a\x01")
        ae(sd2.char_count, 4)
        ae(sd2.string, b"a\x01")
        # bad size, bad data format, is_unicode = True
        stream = ByteIStream(b"\x04\x00\x00\xD8")
        sd1 = StringData.from_stream(stream, is_unicode=True)
        sd2 = StringData.from_stream(stream, 0, True)
        ae(sd1.size, 10)
        ae(sd2.size, 10)
        ae(sd1.char_count, 4)
        ae(sd1.string, b"\x00\xD8")
        ae(sd2.char_count, 4)
        ae(sd2.string, b"\x00\xD8")
        # bad size, bad data format, is_unicode = False
        stream = ByteIStream(b"\x04\x00\x00\xD8")
        sd1 = StringData.from_stream(stream, is_unicode=False)
        sd2 = StringData.from_stream(stream, 0, False)
        ae(sd1.size, 6)
        ae(sd2.size, 6)
        ae(sd1.char_count, 4)
        ae(sd1.string, b"\x00\xD8")
        ae(sd2.char_count, 4)
        ae(sd2.string, b"\x00\xD8")
    # end def test_from_stream
# end class StringDataTestCase
class FileAttributesTestCase(TestCase):
    """Tests FileAttributes creation from both streams and ctypes.

    The expected flag values for the two bit patterns are shared between
    test_from_stream and test_from_ctype, so they live in private checker
    methods instead of being duplicated.
    """

    def _check_attrs_5555(self, attrs):
        """Asserts the attributes decoded from the bytes 55 55 55 55."""
        ae = self.assertEqual
        ae(attrs.read_only, 1)
        ae(attrs.hidden, 0)
        ae(attrs.system, 1)
        ae(attrs.directory, 1)
        ae(attrs.archive, 0)
        ae(attrs.normal, 0)
        ae(attrs.temp, 1)
        ae(attrs.sparse, 0)
        ae(attrs.reparse_point, 1)
        ae(attrs.compressed, 0)
        ae(attrs.offline, 1)
        ae(attrs.not_content_indexed, 0)
        ae(attrs.encrypted, 1)
    # end def _check_attrs_5555

    def _check_attrs_AAAA(self, attrs):
        """Asserts the attributes decoded from the bytes AA AA AA AA."""
        ae = self.assertEqual
        ae(attrs.read_only, 0)
        ae(attrs.hidden, 1)
        ae(attrs.system, 0)
        ae(attrs.directory, 0)
        ae(attrs.archive, 1)
        ae(attrs.normal, 1)
        ae(attrs.temp, 0)
        ae(attrs.sparse, 1)
        ae(attrs.reparse_point, 0)
        ae(attrs.compressed, 1)
        ae(attrs.offline, 0)
        ae(attrs.not_content_indexed, 1)
        ae(attrs.encrypted, 0)
    # end def _check_attrs_AAAA

    def test_from_stream(self):
        attrs = FileAttributes.from_stream(ByteIStream(b"\x55\x55\x55\x55"))
        self._check_attrs_5555(attrs)
        attrs = FileAttributes.from_stream(ByteIStream(b"\xAA\xAA\xAA\xAA"))
        self._check_attrs_AAAA(attrs)
    # end def test_from_stream

    def test_from_ctype(self):
        ctype = file_attributes.from_buffer_copy(b"\x55\x55\x55\x55")
        self._check_attrs_5555(FileAttributes.from_ctype(ctype))
        ctype = file_attributes.from_buffer_copy(b"\xAA\xAA\xAA\xAA")
        self._check_attrs_AAAA(FileAttributes.from_ctype(ctype))
    # end def test_from_ctype
# end class FileAttributesTestCase
class LinkFlagsTestCase(TestCase):
    """Tests LinkFlags creation from both streams and ctypes.

    The expected flag values for the two bit patterns are shared between
    test_from_stream and test_from_ctype, so they live in private checker
    methods instead of being duplicated.
    """

    def _check_flags_5555(self, flags):
        """Asserts the flags decoded from the bytes 55 55 55 55."""
        ae = self.assertEqual
        ae(flags.has_idlist, 1)
        ae(flags.has_link_info, 0)
        ae(flags.has_name, 1)
        ae(flags.has_relative_path, 0)
        ae(flags.has_working_dir, 1)
        ae(flags.has_args, 0)
        ae(flags.has_icon_location, 1)
        ae(flags.is_unicode, 0)
        ae(flags.force_no_link_info, 1)
        ae(flags.has_exp_string, 0)
        ae(flags.run_in_separate_proc, 1)
        ae(flags.has_logo3_id, 0)
        ae(flags.has_darwin_id, 1)
        ae(flags.run_as_user, 0)
        ae(flags.has_exp_icon, 1)
        ae(flags.no_pidl_alias, 0)
        ae(flags.force_unc_name, 1)
        ae(flags.run_with_shim_layer, 0)
        ae(flags.force_no_link_track, 1)
        ae(flags.enable_target_metadata, 0)
        ae(flags.disable_link_path_tracking, 1)
        ae(flags.disable_known_folder_rel_tracking, 0)
        ae(flags.no_kf_alias, 1)
        ae(flags.allow_link_to_link, 0)
        ae(flags.unalias_on_save, 1)
        ae(flags.prefer_environment_path, 0)
        ae(flags.keep_local_idlist_for_unc_target, 1)
    # end def _check_flags_5555

    def _check_flags_AAAA(self, flags):
        """Asserts the flags decoded from the bytes AA AA AA AA."""
        ae = self.assertEqual
        ae(flags.has_idlist, 0)
        ae(flags.has_link_info, 1)
        ae(flags.has_name, 0)
        ae(flags.has_relative_path, 1)
        ae(flags.has_working_dir, 0)
        ae(flags.has_args, 1)
        ae(flags.has_icon_location, 0)
        ae(flags.is_unicode, 1)
        ae(flags.force_no_link_info, 0)
        ae(flags.has_exp_string, 1)
        ae(flags.run_in_separate_proc, 0)
        ae(flags.has_logo3_id, 1)
        ae(flags.has_darwin_id, 0)
        ae(flags.run_as_user, 1)
        ae(flags.has_exp_icon, 0)
        ae(flags.no_pidl_alias, 1)
        ae(flags.force_unc_name, 0)
        ae(flags.run_with_shim_layer, 1)
        ae(flags.force_no_link_track, 0)
        ae(flags.enable_target_metadata, 1)
        ae(flags.disable_link_path_tracking, 0)
        ae(flags.disable_known_folder_rel_tracking, 1)
        ae(flags.no_kf_alias, 0)
        ae(flags.allow_link_to_link, 1)
        ae(flags.unalias_on_save, 0)
        ae(flags.prefer_environment_path, 1)
        ae(flags.keep_local_idlist_for_unc_target, 0)
    # end def _check_flags_AAAA

    def test_from_stream(self):
        stream = ByteIStream(b"\x55\x55\x55\x55")
        self._check_flags_5555(LinkFlags.from_stream(stream))
        stream = ByteIStream(b"\xAA\xAA\xAA\xAA")
        self._check_flags_AAAA(LinkFlags.from_stream(stream))
    # end def test_from_stream

    def test_from_ctype(self):
        ctype = link_flags.from_buffer_copy(b"\x55\x55\x55\x55")
        self._check_flags_5555(LinkFlags.from_ctype(ctype))
        ctype = link_flags.from_buffer_copy(b"\xAA\xAA\xAA\xAA")
        self._check_flags_AAAA(LinkFlags.from_ctype(ctype))
    # end def test_from_ctype
# end class LinkFlagsTestCase
class ExtraDataBlockTestCase(TestCase):
    """Tests ExtraDataBlock.from_stream."""

    def test_from_stream(self):
        """Parses each sample twice (implicit and explicit offset 0); the
        second sample's declared size (0xFF) exceeds the bytes available."""
        ae = self.assertEqual
        stream = ByteIStream(b"\x0A\x00\x00\x00\x01\x02\x03\x04\x64\x53")
        edb1 = ExtraDataBlock.from_stream(stream)
        edb2 = ExtraDataBlock.from_stream(stream, 0)
        ae(edb1.size, 0xA)
        ae(edb2.size, 0xA)
        ae(edb1.sig, 0x04030201)
        ae(edb2.sig, 0x04030201)
        ae(edb1.data, b"\x64\x53")
        ae(edb2.data, b"\x64\x53")
        stream = ByteIStream(b"\xFF\x00\x00\x00\x01\x02\x03\x04\x64\x53")
        edb1 = ExtraDataBlock.from_stream(stream)
        edb2 = ExtraDataBlock.from_stream(stream, 0)
        ae(edb1.size, 0xFF)
        ae(edb2.size, 0xFF)
        ae(edb1.sig, 0x04030201)
        ae(edb2.sig, 0x04030201)
        ae(edb1.data, b"\x64\x53")
        ae(edb2.data, b"\x64\x53")
    # end def test_from_stream
# end class ExtraDataBlockTestCase
class ConsolePropsTestCase(TestCase):
    """Tests ConsoleProps.from_stream."""

    def test_from_stream(self):
        """Parses a 204-byte counting pattern twice (implicit and explicit
        offset 0) and checks every decoded field on both objects."""
        ae = self.assertEqual
        stream = ByteIStream(bytes([x for x in range(204)]))
        cp1 = ConsoleProps.from_stream(stream)
        cp2 = ConsoleProps.from_stream(stream, 0)
        for cp in (cp1, cp2):
            ae(cp.size, 0x03020100)
            ae(cp.sig, 0x07060504)
            ae(cp.fill_attributes, 0x0908)
            ae(cp.popup_fill_attributes, 0x0B0A)
            coord_data = b"\x0C\x0D\x0E\x0F"
            coord = COORD.from_stream(ByteIStream(coord_data))
            # Fixed: previously asserted against cp2 instead of the loop
            # variable, so cp1.screen_buffer_size was never checked.
            ae(cp.screen_buffer_size, coord)
            coord_data = b"\x10\x11\x12\x13"
            coord = COORD.from_stream(ByteIStream(coord_data))
            ae(cp.window_size, coord)
            coord_data = b"\x14\x15\x16\x17"
            coord = COORD.from_stream(ByteIStream(coord_data))
            ae(cp.window_origin, coord)
            ae(cp.font, 0x1B1A1918)
            ae(cp.input_buf_size, 0x1F1E1D1C)
            ae(cp.font_size, 0x23222120)
            ae(cp.font_family, 0x27262524)
            ae(cp.font_weight, 0x2B2A2928)
            # Face name occupies the 64 bytes at 0x2C, decoded as UTF-16LE.
            face_name = bytes([x for x in range(0x2C, 0x6C)])
            face_name = face_name.decode("utf_16_le")
            ae(cp.face_name, face_name)
            ae(cp.cursor_size, 0x6F6E6D6C)
            ae(cp.full_screen, 0x73727170)
            ae(cp.quick_edit, 0x77767574)
            ae(cp.insert_mode, 0x7B7A7978)
            ae(cp.auto_position, 0x7F7E7D7C)
            ae(cp.history_buf_size, 0x83828180)
            ae(cp.history_buf_count, 0x87868584)
            ae(cp.history_no_dup, 0x8B8A8988)
            color_table = [
                0x8F8E8D8C, 0x93929190, 0x97969594, 0x9B9A9998,
                0x9F9E9D9C, 0xA3A2A1A0, 0xA7A6A5A4, 0xABAAA9A8,
                0xAFAEADAC, 0xB3B2B1B0, 0xB7B6B5B4, 0xBBBAB9B8,
                0xBFBEBDBC, 0xC3C2C1C0, 0xC7C6C5C4, 0xCBCAC9C8,
            ]
            ae(cp.color_table, color_table)
        # end for
    # end def test_from_stream
# end class ConsolePropsTestCase
class ConsoleFEPropsTestCase(TestCase):
    """Tests ConsoleFEProps.from_stream."""

    def test_from_stream(self):
        """Parses a 12-byte counting pattern twice (implicit and explicit
        offset 0) and checks size, signature, and the code page LCID."""
        ae = self.assertEqual
        stream = ByteIStream(bytes([x for x in range(12)]))
        cfep1 = ConsoleFEProps.from_stream(stream)
        cfep2 = ConsoleFEProps.from_stream(stream, 0)
        ae(cfep1.size, 0x03020100)
        ae(cfep2.size, 0x03020100)
        ae(cfep1.sig, 0x07060504)
        ae(cfep2.sig, 0x07060504)
        lcid = LCID.from_stream(ByteIStream(b"\x08\x09\x0A\x0B"))
        ae(cfep1.code_page, lcid)
        ae(cfep2.code_page, lcid)
    # end def test_from_stream
# end class ConsoleFEPropsTestCase
class DarwinPropsTestCase(TestCase):
    """Tests DarwinProps.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # block size
        data.extend(b"\x04\x05\x06\x07")  # block sig
        data.extend([0x41] * 260)  # darwin_data_ansi
        data.extend([0xEB, 0xFE] * 260)  # darwin_data_uni
        stream = ByteIStream(data)
        dp1 = DarwinProps.from_stream(stream)
        dp2 = DarwinProps.from_stream(stream, 0)
        ae(dp1.size, 0x03020100)
        ae(dp2.size, 0x03020100)
        ae(dp1.sig, 0x07060504)
        ae(dp2.sig, 0x07060504)
        ae(dp1.darwin_data_ansi, b"\x41" * 260)
        ae(dp2.darwin_data_ansi, b"\x41" * 260)
        # Expected unicode value: UTF-16LE decode, truncated at first NUL.
        darwin_data_uni = bytes([0xEB, 0xFE] * 260)
        darwin_data_uni = darwin_data_uni.decode("utf_16_le", "ignore")
        darwin_data_uni = darwin_data_uni.split("\x00", 1)[0]
        ae(dp1.darwin_data_uni, darwin_data_uni)
        ae(dp2.darwin_data_uni, darwin_data_uni)
    # end def test_from_stream
# end class DarwinPropsTestCase
class ExpandableStringsDataBlockTestCase(TestCase):
    """Tests ExpandableStringsDataBlock.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # block size
        data.extend(b"\x04\x05\x06\x07")  # block sig
        data.extend([0x41] * 260)  # target_ansi
        data.extend([0xEB, 0xFE] * 260)  # target_uni
        stream = ByteIStream(data)
        esdb1 = ExpandableStringsDataBlock.from_stream(stream)
        esdb2 = ExpandableStringsDataBlock.from_stream(stream, 0)
        ae(esdb1.size, 0x03020100)
        ae(esdb2.size, 0x03020100)
        ae(esdb1.sig, 0x07060504)
        ae(esdb2.sig, 0x07060504)
        ae(esdb1.target_ansi, b"\x41" * 260)
        ae(esdb2.target_ansi, b"\x41" * 260)
        # Expected unicode value: UTF-16LE decode, truncated at first NUL.
        target_uni = bytes([0xEB, 0xFE] * 260)
        target_uni = target_uni.decode("utf_16_le", "ignore")
        target_uni = target_uni.split("\x00", 1)[0]
        ae(esdb1.target_uni, target_uni)
        ae(esdb2.target_uni, target_uni)
    # end def test_from_stream
# end class ExpandableStringsDataBlockTestCase
class EnvironmentPropsTestCase(TestCase):
    """Tests EnvironmentProps.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # block size
        data.extend(b"\x04\x05\x06\x07")  # block sig
        data.extend([0x41] * 260)  # target_ansi
        data.extend([0xEB, 0xFE] * 260)  # target_uni
        stream = ByteIStream(data)
        ep1 = EnvironmentProps.from_stream(stream)
        ep2 = EnvironmentProps.from_stream(stream, 0)
        ae(ep1.size, 0x03020100)
        ae(ep2.size, 0x03020100)
        ae(ep1.sig, 0x07060504)
        ae(ep2.sig, 0x07060504)
        ae(ep1.target_ansi, b"\x41" * 260)
        ae(ep2.target_ansi, b"\x41" * 260)
        # Expected unicode value: UTF-16LE decode, truncated at first NUL.
        target_uni = bytes([0xEB, 0xFE] * 260)
        target_uni = target_uni.decode("utf_16_le", "ignore")
        target_uni = target_uni.split("\x00", 1)[0]
        ae(ep1.target_uni, target_uni)
        ae(ep2.target_uni, target_uni)
    # end def test_from_stream
# end class EnvironmentPropsTestCase
class IconEnvironmentPropsTestCase(TestCase):
    """Tests IconEnvironmentProps.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # block size
        data.extend(b"\x04\x05\x06\x07")  # block sig
        data.extend([0x41] * 260)  # target_ansi
        data.extend([0xEB, 0xFE] * 260)  # target_uni
        stream = ByteIStream(data)
        iep1 = IconEnvironmentProps.from_stream(stream)
        iep2 = IconEnvironmentProps.from_stream(stream, 0)
        ae(iep1.size, 0x03020100)
        ae(iep2.size, 0x03020100)
        ae(iep1.sig, 0x07060504)
        ae(iep2.sig, 0x07060504)
        ae(iep1.target_ansi, b"\x41" * 260)
        ae(iep2.target_ansi, b"\x41" * 260)
        # Expected unicode value: UTF-16LE decode, truncated at first NUL.
        target_uni = bytes([0xEB, 0xFE] * 260)
        target_uni = target_uni.decode("utf_16_le", "ignore")
        target_uni = target_uni.split("\x00", 1)[0]
        ae(iep1.target_uni, target_uni)
        ae(iep2.target_uni, target_uni)
    # end def test_from_stream
# end class IconEnvironmentPropsTestCase
class KnownFolderPropsTestCase(TestCase):
    """Tests KnownFolderProps.from_stream."""

    def test_from_stream(self):
        """Parses a 28-byte counting pattern twice (implicit and explicit
        offset 0); bytes 8-23 form the known folder GUID."""
        ae = self.assertEqual
        data = bytes([x for x in range(28)])
        stream = ByteIStream(data)
        kfp1 = KnownFolderProps.from_stream(stream)
        kfp2 = KnownFolderProps.from_stream(stream, 0)
        ae(kfp1.size, 0x03020100)
        ae(kfp2.size, 0x03020100)
        ae(kfp1.sig, 0x07060504)
        ae(kfp2.sig, 0x07060504)
        kf_id = GUIDToUUID.from_stream(ByteIStream(data[8:24]))
        ae(kfp1.kf_id, kf_id)
        ae(kfp2.kf_id, kf_id)
        ae(kfp1.offset, 0x1B1A1918)
        ae(kfp2.offset, 0x1B1A1918)
    # end def test_from_stream
# end class KnownFolderPropsTestCase
class PropertyStorePropsTestCase(TestCase):
    """Tests PropertyStoreProps.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(b"\x10\x00\x00\x00")  # block size
        data.extend(b"\x00\x01\x02\x03")  # block sig
        data.extend(b"\x04\x05\x06\x07\x08\x09\x0A\x0B")  # data
        stream = ByteIStream(data)
        psp1 = PropertyStoreProps.from_stream(stream)
        psp2 = PropertyStoreProps.from_stream(stream, 0)
        ae(psp1.size, 0x10)
        ae(psp2.size, 0x10)
        ae(psp1.sig, 0x03020100)
        ae(psp2.sig, 0x03020100)
        # The serialized property store payload is kept as raw bytes.
        ae(psp1.property_store, b"\x04\x05\x06\x07\x08\x09\x0A\x0B")
        ae(psp2.property_store, b"\x04\x05\x06\x07\x08\x09\x0A\x0B")
    # end def test_from_stream
# end class PropertyStorePropsTestCase
class ShimPropsTestCase(TestCase):
    """Tests ShimProps.from_stream."""

    def test_from_stream(self):
        """Parses samples twice (implicit and explicit offset 0); the
        second sample's declared size (0xFF) exceeds the bytes available.
        The trailing bytes 64 53 decode (UTF-16LE) to U+5364."""
        ae = self.assertEqual
        stream = ByteIStream(b"\x0A\x00\x00\x00\x01\x02\x03\x04\x64\x53")
        sp1 = ShimProps.from_stream(stream)
        sp2 = ShimProps.from_stream(stream, 0)
        ae(sp1.size, 0xA)
        ae(sp2.size, 0xA)
        ae(sp1.sig, 0x04030201)
        ae(sp2.sig, 0x04030201)
        ae(sp1.layer_name, "\u5364")
        ae(sp2.layer_name, "\u5364")
        stream = ByteIStream(b"\xFF\x00\x00\x00\x01\x02\x03\x04\x64\x53")
        sp1 = ShimProps.from_stream(stream)
        sp2 = ShimProps.from_stream(stream, 0)
        ae(sp1.size, 0xFF)
        ae(sp2.size, 0xFF)
        ae(sp1.sig, 0x04030201)
        ae(sp2.sig, 0x04030201)
        ae(sp1.layer_name, "\u5364")
        ae(sp2.layer_name, "\u5364")
    # end def test_from_stream
# end class ShimPropsTestCase
class SpecialFolderPropsTestCase(TestCase):
    """Tests SpecialFolderProps.from_stream."""

    def test_from_stream(self):
        """Parses a 16-byte counting pattern twice (implicit and explicit
        offset 0) and checks every decoded field on both objects."""
        ae = self.assertEqual
        stream = ByteIStream(bytes(range(16)))
        first = SpecialFolderProps.from_stream(stream)
        second = SpecialFolderProps.from_stream(stream, 0)
        for sfp in (first, second):
            ae(sfp.size, 0x03020100)
            ae(sfp.sig, 0x07060504)
            ae(sfp.sf_id, 0x0B0A0908)
            ae(sfp.offset, 0x0F0E0D0C)
        # end for
    # end def test_from_stream
# end class SpecialFolderPropsTestCase
class DomainRelativeObjIdTestCase(TestCase):
    """Tests DomainRelativeObjId creation from streams and ctypes."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(bytes([x for x in range(16)]))  # volume
        data.extend(bytes([x for x in range(15, -1, -1)]))  # object
        stream = ByteIStream(data)
        droid1 = DomainRelativeObjId.from_stream(stream)
        droid2 = DomainRelativeObjId.from_stream(stream, 0)
        # GUIDs are stored little-endian, hence bytes_le.
        ae(droid1.volume, UUID(bytes_le=data[:16]))
        ae(droid2.volume, UUID(bytes_le=data[:16]))
        ae(droid1.object, UUID(bytes_le=data[16:]))
        ae(droid2.object, UUID(bytes_le=data[16:]))
    # end def test_from_stream

    def test_from_ctype(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(bytes([x for x in range(16)]))
        data.extend(bytes([x for x in range(15, -1, -1)]))
        droid = domain_relative_obj_id.from_buffer_copy(data)
        droid = DomainRelativeObjId.from_ctype(droid)
        ae(droid.volume, UUID(bytes_le=data[:16]))
        ae(droid.object, UUID(bytes_le=data[16:]))
    # end def test_from_ctype
# end class DomainRelativeObjIdTestCase
class TrackerPropsTestCase(TestCase):
    """Tests TrackerProps.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # block size
        data.extend(b"\x04\x05\x06\x07")  # block sig
        data.extend(b"\x50\x00\x00\x00")  # length
        data.extend(b"\x0B\x0A\x09\x08")  # version
        data.extend(b"abcdefgh")  # machine_id
        data.extend(bytes([x for x in range(32)]))  # droid (two GUIDs)
        data.extend(bytes([x for x in range(31, -1, -1)]))  # droid_birth
        stream = ByteIStream(data)
        tp1 = TrackerProps.from_stream(stream)
        tp2 = TrackerProps.from_stream(stream, 0)
        ae(tp1.size, 0x03020100)
        ae(tp2.size, 0x03020100)
        ae(tp1.sig, 0x07060504)
        ae(tp2.sig, 0x07060504)
        ae(tp1.length, 0x50)
        ae(tp2.length, 0x50)
        ae(tp1.machine_id, b"abcdefgh")
        ae(tp2.machine_id, b"abcdefgh")
        # droid occupies bytes 24-55, droid_birth bytes 56 onward.
        droid_stream = ByteIStream(data[24:56])
        droid = DomainRelativeObjId.from_stream(droid_stream)
        ae(tp1.droid, droid)
        ae(tp2.droid, droid)
        droid_stream = ByteIStream(data[56:])
        droid = DomainRelativeObjId.from_stream(droid_stream)
        ae(tp1.droid_birth, droid)
        ae(tp2.droid_birth, droid)
    # end def test_from_stream
# end class TrackerPropsTestCase
class VistaAndAboveIDListPropsTestCase(TestCase):
    """Tests VistaAndAboveIDListProps.from_stream.

    Note: unused fixtures (little_item/big_item and their SHITEMID
    counterparts) that were never referenced by any assertion have been
    removed as dead code.
    """

    def test_from_stream(self):
        ae = self.assertEqual
        # Raw item bytes: <2-byte size><data>, terminated by a null item.
        item1 = b"\x06\x00\x01\x02\x03\x04"
        item2 = b"\x05\x00\x05\x06\x07"
        item3 = b"\x04\x00\x08\x09"
        null_item = b"\x00\x00"
        # Expected parsed counterparts.
        itemid1 = SHITEMID((6, 6, b"\x01\x02\x03\x04"))
        itemid2 = SHITEMID((5, 5, b"\x05\x06\x07"))
        itemid3 = SHITEMID((4, 4, b"\x08\x09"))
        null_itemid = SHITEMID((2, 0, None))
        data = bytearray()
        data.extend(b"\x00\x01\x02\x03")  # block size
        data.extend(b"\x04\x05\x06\x07")  # block signature
        data.extend(b"".join([item1, item2, item3, null_item]))
        stream = ByteIStream(data)
        vaaidlp1 = VistaAndAboveIDListProps.from_stream(stream)
        vaaidlp2 = VistaAndAboveIDListProps.from_stream(stream, 0)
        ae(vaaidlp1.size, 0x03020100)
        ae(vaaidlp2.size, 0x03020100)
        ae(vaaidlp1.sig, 0x07060504)
        ae(vaaidlp2.sig, 0x07060504)
        id_list = [itemid1, itemid2, itemid3, null_itemid]
        ae(vaaidlp1.idlist.mkid, id_list)
        ae(vaaidlp2.idlist.mkid, id_list)
    # end def test_from_stream
# end class VistaAndAboveIDListPropsTestCase
class TerminalBlockTestCase(TestCase):
    """Tests direct construction of TerminalBlock."""

    def test__init__(self):
        terminal = TerminalBlock((0, None))
        self.assertEqual(terminal.size, 0)
        self.assertEqual(terminal.sig, None)
    # end def test__init__
# end class TerminalBlockTestCase
class ExtraDataBlockFactoryTestCase(TestCase):
    """Tests for ExtraDataBlockFactory.make_blocks.

    Builds one raw binary block of every known extra-data type,
    concatenates them into a single stream, and checks that the factory
    yields each block parsed by its dedicated *Props class, in order.
    """

    def test_make_blocks(self):
        ae = self.assertEqual
        # This will hold the entire stream of blocks.
        data = bytearray()
        # Make the ConsoleProps
        console_props_data = bytearray()
        # block size
        console_props_data.extend(b"\xCC\x00\x00\x00")
        # block signature
        console_props_data.extend(b"\x02\x00\x00\xA0")
        # fill attributes (bright green on black)
        console_props_data.extend(b"\x0A\x00")
        # popup fill attributes (bright red on bright green)
        console_props_data.extend(b"\xB4\x00")
        # screen buffer size (x,y)
        console_props_data.extend(b"\x64\x53\xEB\xFE")
        # window size (x,y)
        console_props_data.extend(b"\x41\x41\x42\x42")
        # window origin (x, y)
        console_props_data.extend(b"\xAD\xBA\x0D\xF0")
        # font
        console_props_data.extend(b"\xAA\xBB\xCC\xDD")
        # input_buf_size
        console_props_data.extend(b"\xFF\xFF\x00\x00")
        # Font size
        console_props_data.extend(b"\x10\x00\x00\x00")
        # Font family (modern)
        console_props_data.extend(b"\x30\x00\x00\x00")
        # Font weight (bold)
        console_props_data.extend(b"\x35\x19\x00\x00")
        # Face name (32 chars -> 64 bytes of UTF-16-LE)
        face_name = "thisisnotthedatayouarelookingfor"
        console_props_data.extend(face_name.encode("utf_16_le"))
        # Cursor size (medium)
        console_props_data.extend(b"\x1A\x00\x00\x00")
        # Full screen (yes)
        console_props_data.extend(b"\x01\x00\x00\x00")
        # Quick edit (yes)
        console_props_data.extend(b"\x08\x00\x00\x00")
        # Insert mode (yes)
        console_props_data.extend(b"\x98\xBA\xDC\xFE")
        # Auto position (yes)
        console_props_data.extend(b"\x02\x00\x00\x00")
        # History buffer size
        console_props_data.extend(b"\xFF\x00\x00\x00")
        # Number of history buffers
        console_props_data.extend(b"\x04\x00\x00\x00")
        # HistoryNoDup (duplicates allowed)
        console_props_data.extend(b"\x03\x00\x00\x00")
        # Color table (64 arbitrary byte values)
        console_props_data.extend([(x % 256) for x in range(1000, 1064)])
        console_props = \
            ConsoleProps.from_stream(ByteIStream(console_props_data))
        # Make the ConsoleFEProps
        console_fe_props_data = bytearray()
        console_fe_props_data.extend(b"\x0C\x00\x00\x00")  # block size
        console_fe_props_data.extend(b"\x04\x00\x00\xA0")  # block signature
        console_fe_props_data.extend(b"\x04\x04\x04\x00")  # LCID (zh-TW_radstr)
        console_fe_props = \
            ConsoleFEProps.from_stream(ByteIStream(console_fe_props_data))
        # Make the DarwinProps
        darwin_props_data = bytearray()
        # block size
        darwin_props_data.extend(b"\x14\x03\x00\x00")
        # Darwin data ANSI (259 bytes + NUL)
        darwin_props_data.extend(b"\x06\x00\x00\xA0")
        # NOTE(review): comment/data order above follows the original:
        # the second extend is the block signature.
        # Darwin data ANSI
        darwin_props_data.extend(b"".join([b"\x41" * 259, b"\x00"]))
        # Darwin data unicode (259 UTF-16 chars + NUL)
        darwin_props_data.extend(b"".join([b"\x41\x00" * 259, b"\x00\x00"]))
        darwin_props = DarwinProps.from_stream(ByteIStream(darwin_props_data))
        # Make the EnvironmentProps
        environment_props_data = bytearray()
        # block size
        environment_props_data.extend(b"\x14\x03\x00\x00")
        # block signature
        environment_props_data.extend(b"\x01\x00\x00\xA0")
        # Target ANSI
        environment_props_data.extend(b"".join([b"\x41" * 259, b"\x00"]))
        # Target unicode
        environment_props_data.extend(b"\x41\x00" * 260)
        environment_props = EnvironmentProps.from_stream(ByteIStream(
            environment_props_data
        ))
        # Make the IconEnvironmentProps
        icon_environment_props_data = bytearray()
        # block size
        icon_environment_props_data.extend(b"\x14\x03\x00\x00")
        # block signature
        icon_environment_props_data.extend(b"\x07\x00\x00\xA0")
        # Target ANSI
        icon_environment_props_data.extend(b"".join([b"\x41" * 259, b"\x00"]))
        # Target unicode
        icon_environment_props_data.extend(b"\x41\x00" * 260)
        icon_environment_props = IconEnvironmentProps.from_stream(
            ByteIStream(icon_environment_props_data)
        )
        # Make the KnownFolderProps
        known_folder_props_data = bytearray()
        known_folder_props_data.extend(b"\x1C\x00\x00\x00")  # block size
        known_folder_props_data.extend(b"\x0B\x00\x00\xA0")  # block signature
        known_folder_props_data.extend([x for x in range(16)])  # kf_id
        known_folder_props_data.extend(b"\x01\x02\x03\x04")  # offset
        known_folder_props = \
            KnownFolderProps.from_stream(ByteIStream(known_folder_props_data))
        # Make the PropertyStoreProps
        property_store_props_data = bytearray()
        # block size
        property_store_props_data.extend(b"\x10\x00\x00\x00")
        # block signature
        property_store_props_data.extend(b"\x09\x00\x00\xA0")
        # property store
        property_store_props_data.extend([x for x in range(32, 40)])
        property_store_props = PropertyStoreProps.from_stream(
            ByteIStream(property_store_props_data)
        )
        # Make the ShimProps
        shim_props_data = bytearray()
        shim_props_data.extend(b"\x90\x00\x00\x00")  # block size
        shim_props_data.extend(b"\x08\x00\x00\xA0")  # block signature
        shim_props_data.extend(b"a\x00b\x00c\x00d\x00" * 17)  # layer name
        shim_props = ShimProps.from_stream(ByteIStream(shim_props_data))
        # Make the SpecialFolderProps
        special_folder_props_data = bytearray()
        special_folder_props_data.extend(b"\x10\x00\x00\x00")  # block size
        special_folder_props_data.extend(b"\x05\x00\x00\xA0")  # block signature
        special_folder_props_data.extend(b"\x53\x64\x53\x64")  # sf_id
        special_folder_props_data.extend(b"\x32\x54\x76\x98")  # offset
        special_folder_props = SpecialFolderProps.from_stream(
            ByteIStream(special_folder_props_data)
        )
        # Make the TrackerProps
        tracker_props_data = bytearray()
        tracker_props_data.extend(b"\x60\x00\x00\x00")  # block size
        tracker_props_data.extend(b"\x03\x00\x00\xA0")  # block signature
        tracker_props_data.extend(b"\x58\x00\x00\x00")  # length
        tracker_props_data.extend(b"\x00\x00\x00\x00")  # version
        tracker_props_data.extend(b"0123456789012345")  # machine id
        tracker_props_data.extend([x for x in range(128, 160)])  # droid
        tracker_props_data.extend([x for x in range(160, 192)])  # droid birth
        tracker_props = \
            TrackerProps.from_stream(ByteIStream(tracker_props_data))
        # Make the VistaAndAboveIDListProps
        item1 = b"\x05\x00\x0A\x0B\x0C"
        item2 = b"\x03\x00\xFF"
        null_item = b"\x00\x00"
        id_list = b"".join([item1, item2, null_item])
        vista_and_above_id_list_props_data = bytearray()
        # block size
        vista_and_above_id_list_props_data.extend(b"\x12\x00\x00\x00")
        # block signature
        vista_and_above_id_list_props_data.extend(b"\x0C\x00\x00\xA0")
        # id list
        vista_and_above_id_list_props_data.extend(id_list)
        vista_and_above_id_list_props = VistaAndAboveIDListProps.from_stream(
            ByteIStream(vista_and_above_id_list_props_data)
        )
        # Concatenate every raw block into one stream for the factory.
        data = bytearray()
        data.extend(b"".join([
            console_props_data,
            console_fe_props_data,
            darwin_props_data,
            environment_props_data,
            icon_environment_props_data,
            known_folder_props_data,
            property_store_props_data,
            shim_props_data,
            special_folder_props_data,
            tracker_props_data,
            vista_and_above_id_list_props_data
        ]))
        stream = ByteIStream(data)
        # The factory should yield the parsed blocks in stream order.
        ref_properties = [
            console_props,
            console_fe_props,
            darwin_props,
            environment_props,
            icon_environment_props,
            known_folder_props,
            property_store_props,
            shim_props,
            special_folder_props,
            tracker_props,
            vista_and_above_id_list_props
        ]
        test_properties = list(ExtraDataBlockFactory.make_blocks(stream))
        ae(test_properties, ref_properties)
    # end def test_make_blocks
# end class ExtraDataBlockFactoryTestCase
class ShellLinkTestCase(TestCase):
    """Tests for the top-level ShellLink parser."""

    def test__init__(self):
        """Parse two reference .lnk files and compare each component
        against the output of the individual component parsers.

        NOTE(review): depends on the fixture files under data/lnk.
        """
        ae = self.assertEqual
        filename0 = join("data", "lnk", "shortcut_to_local_exe.lnk")
        filename1 = join("data", "lnk", "shortcut_to_mapped_exe.lnk")
        stream0 = RawIStream(filename0)
        stream1 = RawIStream(filename1)
        sl0 = ShellLink(stream0)
        sl1 = ShellLink(stream0, 0)
        sl2 = ShellLink(stream1)
        sl3 = ShellLink(stream1, 0)
        header0 = ShellLinkHeader.from_stream(stream0, 0)
        header1 = ShellLinkHeader.from_stream(stream1, 0)
        ae(sl0.header, header0)
        ae(sl1.header, header0)
        ae(sl2.header, header1)
        ae(sl3.header, header1)
        idlist0 = ITEMIDLIST.from_stream(stream0, 78)
        ae(sl0.idlist, idlist0)
        ae(sl1.idlist, idlist0)
        ae(sl2.idlist, None)  # the mapped-drive shortcut has no id list
        ae(sl3.idlist, None)
        li0 = LinkInfo.from_stream(stream0, 285)
        li1 = LinkInfo.from_stream(stream1, 76)
        ae(sl0.link_info, li0)
        ae(sl1.link_info, li0)
        ae(sl2.link_info, li1)
        ae(sl3.link_info, li1)
        sds0 = StringDataSet((
            StringData((16, 7, "comment")),
            # FIX: was "..\\..\\..\\Windows\PING.EXE" -- "\P" is an
            # invalid escape sequence (DeprecationWarning since Python
            # 3.6); the string value is identical with "\\P".
            StringData((52, 25, "..\\..\\..\\Windows\\PING.EXE")),
            StringData((32, 15, "c:\\start-in-dir")),
            StringData((30, 14, "arg1 arg2 arg3")),
            StringData((68, 33, "%SystemRoot%\\system32\\SHELL32.dll"))
        ))
        sds1 = StringDataSet((
            None,
            None,
            StringData((40, 19, "X:\\windows\\system32")),
            None,
            None
        ))
        ae(sl0.string_data, sds0)
        ae(sl1.string_data, sds0)
        ae(sl2.string_data, sds1)
        ae(sl3.string_data, sds1)
        edbs0 = list(ExtraDataBlockFactory.make_blocks(stream0, 549))
        edbs1 = list(ExtraDataBlockFactory.make_blocks(stream1, 217))
        ae(sl0.extra_data, edbs0)
        ae(sl1.extra_data, edbs0)
        ae(sl2.extra_data, edbs1)
        ae(sl3.extra_data, edbs1)
    # end def test__init__
# end class ShellLinkTestCase
class LinkInfoTestCase(TestCase):
    """Tests for LinkInfo.from_stream."""

    def test_from_stream(self):
        """Parse a LinkInfo structure with both a VolumeID and a CNRL."""
        # FIX: removed the unused ain/ainn (assertIsNone/assertIsNotNone)
        # aliases that were never referenced.
        ae = self.assertEqual
        data0 = bytearray()
        # link info header
        data0.extend(b"\x78\x00\x00\x00")  # link info size
        data0.extend(b"\x24\x00\x00\x00")  # link info header size
        data0.extend(b"\x13\x00\x00\x00")  # link info flags
        data0.extend(b"\x24\x00\x00\x00")  # volume id offset
        data0.extend(b"\x3C\x00\x00\x00")  # local base path offset
        data0.extend(b"\x40\x00\x00\x00")  # cnrl offset
        data0.extend(b"\x6C\x00\x00\x00")  # common path suffix offset
        data0.extend(b"\x70\x00\x00\x00")  # local base path offset unicode
        data0.extend(b"\x74\x00\x00\x00")  # common path suffix offset unicode
        # volume id
        data0.extend(b"\x18\x00\x00\x00")  # volume id size
        data0.extend(b"\x01\x02\x03\x04")  # drive type
        data0.extend(b"\x05\x06\x07\x08")  # drive serial number
        data0.extend(b"\x14\x00\x00\x00")  # volume label offset
        data0.extend(b"\x14\x00\x00\x00")  # volume label offset (unicode)
        data0.extend(b"d\x00\x00\x00")  # data
        # local base path
        data0.extend(b"ijk\x00")
        # cnrl
        data0.extend(b"\x2C\x00\x00\x00")  # cnrl size
        data0.extend(b"\x03\x00\x00\x00")  # cnrl flags
        data0.extend(b"\x1C\x00\x00\x00")  # net name offset
        data0.extend(b"\x20\x00\x00\x00")  # device name offset
        data0.extend(b"\x64\x53\x64\x53")  # network provider type
        data0.extend(b"\x24\x00\x00\x00")  # net name offset unicode
        data0.extend(b"\x28\x00\x00\x00")  # device name offset unicode
        data0.extend(b"abc\x00")  # net name
        data0.extend(b"def\x00")  # device name
        data0.extend(b"g\x00\x00\x00")  # net name unicode
        data0.extend(b"h\x00\x00\x00")  # device name unicode
        # common path suffix
        data0.extend(b"lmn\x00")
        # local base path unicode
        data0.extend(b"o\x00\x00\x00")
        # common path suffix unicode
        data0.extend(b"p\x00\x00\x00")
        stream0 = ByteIStream(data0)
        stream1 = ByteIStream(b"".join([b"\x64\x53", data0]))
        li0 = LinkInfo.from_stream(stream0)
        li1 = LinkInfo.from_stream(stream1, 2)
        # Expected sub-structures, parsed from the same byte layout.
        volid = VolumeID((0x18, 0x04030201, 0x08070605, 0x14, 0x14, "d"))
        cnrl = CNRL((
            0x2C,
            1,
            1,
            0x1C,
            0x20,
            0x53645364,
            0x24,
            0x28,
            b"abc",
            b"def",
            "g",
            "h"
        ))
        # Both parses (offset 0 and explicit offset 2) must agree.
        for li in (li0, li1):
            ae(li.size, 0x78)
            ae(li.header_size, 0x24)
            ae(li.vol_id_and_local_base_path, 1)
            ae(li.cnrl_and_path_suffix, 1)
            ae(li.vol_id_offset, 0x24)
            ae(li.local_base_path_offset, 0x3C)
            ae(li.cnrl_offset, 0x40)
            ae(li.path_suffix_offset, 0x6C)
            ae(li.local_base_path_offset_uni, 0x70)
            ae(li.path_suffix_offset_uni, 0x74)
            ae(li.vol_id, volid)
            ae(li.cnrl, cnrl)
    # end def test_from_stream
# end class LinkInfoTestCase
class VolumeIDTestCase(TestCase):
    """Tests for VolumeID.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual
        # data0: label offset 0x10 => no unicode label offset field.
        data0 = bytearray()
        data0.extend(b"\x14\x00\x00\x00")  # volume id size
        data0.extend(b"\x01\x02\x03\x04")  # drive type
        data0.extend(b"\x05\x06\x07\x08")  # drive serial number
        data0.extend(b"\x10\x00\x00\x00")  # volume label offset
        data0.extend(b"abc\x00")  # data
        # data1: label offset 0x14 => unicode label offset field present.
        data1 = bytearray()
        data1.extend(b"\x18\x00\x00\x00")  # volume id size
        data1.extend(b"\x01\x02\x03\x04")  # drive type
        data1.extend(b"\x05\x06\x07\x08")  # drive serial number
        data1.extend(b"\x14\x00\x00\x00")  # volume label offset
        data1.extend(b"\x14\x00\x00\x00")  # volume label offset (unicode)
        data1.extend(b"d\x00\x00\x00")  # data
        stream0 = ByteIStream(data0)
        stream1 = ByteIStream(b"".join([b"\x64\x53", data0]))
        stream2 = ByteIStream(data1)
        stream3 = ByteIStream(b"".join([b"\x64\x53", data1]))
        volid0 = VolumeID.from_stream(stream0)
        volid1 = VolumeID.from_stream(stream1, 2)
        volid2 = VolumeID.from_stream(stream2)
        volid3 = VolumeID.from_stream(stream3, 2)
        ae(volid0.size, 0x14)
        ae(volid1.size, 0x14)
        ae(volid2.size, 0x18)
        ae(volid3.size, 0x18)
        ae(volid0.drive_type, 0x04030201)
        ae(volid1.drive_type, 0x04030201)
        ae(volid2.drive_type, 0x04030201)
        ae(volid3.drive_type, 0x04030201)
        ae(volid0.drive_serial_num, 0x08070605)
        ae(volid1.drive_serial_num, 0x08070605)
        ae(volid2.drive_serial_num, 0x08070605)
        ae(volid3.drive_serial_num, 0x08070605)
        ae(volid0.volume_label_offset, 0x10)
        ae(volid1.volume_label_offset, 0x10)
        ae(volid2.volume_label_offset, 0x14)
        # FIX: this assertion previously re-checked volid2 (copy-paste
        # error), leaving volid3.volume_label_offset untested.
        ae(volid3.volume_label_offset, 0x14)
        ae(volid0.volume_label_offset_uni, None)
        ae(volid1.volume_label_offset_uni, None)
        ae(volid2.volume_label_offset_uni, 0x14)
        ae(volid3.volume_label_offset_uni, 0x14)
        ae(volid0.volume_label, b"abc")
        ae(volid1.volume_label, b"abc")
        ae(volid2.volume_label, "d")
        ae(volid3.volume_label, "d")
    # end def test_from_stream
# end class VolumeIDTestCase
class CNRLTestCase(TestCase):
    """Tests for CNRL.from_stream."""

    def test_from_stream(self):
        ae = self.assertEqual

        # Variant with unicode name fields present.
        data0 = bytearray()
        data0.extend(b"\x2C\x00\x00\x00")  # cnrl size
        data0.extend(b"\x03\x00\x00\x00")  # cnrl flags
        data0.extend(b"\x1C\x00\x00\x00")  # net name offset
        data0.extend(b"\x20\x00\x00\x00")  # device name offset
        data0.extend(b"\x64\x53\x64\x53")  # network provider type
        data0.extend(b"\x24\x00\x00\x00")  # net name offset unicode
        data0.extend(b"\x28\x00\x00\x00")  # device name offset unicode
        data0.extend(b"abc\x00")  # net name
        data0.extend(b"def\x00")  # device name
        data0.extend(b"g\x00\x00\x00")  # net name unicode
        data0.extend(b"h\x00\x00\x00")  # device name unicode

        # ANSI-only variant (no unicode offsets or names).
        data1 = bytearray()
        data1.extend(b"\x1C\x00\x00\x00")  # cnrl size
        data1.extend(b"\x03\x00\x00\x00")  # cnrl flags
        data1.extend(b"\x14\x00\x00\x00")  # net name offset
        data1.extend(b"\x18\x00\x00\x00")  # device name offset
        data1.extend(b"\x64\x53\x64\x53")  # network provider type
        data1.extend(b"abc\x00")  # net name
        data1.extend(b"def\x00")  # device name

        stream0 = ByteIStream(data0)
        stream1 = ByteIStream(b"".join([b"\x64\x53", data0]))
        stream2 = ByteIStream(data1)
        stream3 = ByteIStream(b"".join([b"\x64\x53", data1]))
        cnrl0 = CNRL.from_stream(stream0)
        cnrl1 = CNRL.from_stream(stream1, 2)
        cnrl2 = CNRL.from_stream(stream2)
        cnrl3 = CNRL.from_stream(stream3, 2)

        # Fields shared by every parsed CNRL.
        for cnrl in (cnrl0, cnrl1, cnrl2, cnrl3):
            ae(cnrl.valid_device, 1)
            ae(cnrl.valid_net_type, 1)
            ae(cnrl.net_type, 0x53645364)
            ae(cnrl.net_name, b"abc")
            ae(cnrl.device_name, b"def")

        # Fields specific to the unicode-bearing variant.
        for cnrl in (cnrl0, cnrl1):
            ae(cnrl.size, 0x2C)
            ae(cnrl.net_name_offset, 0x1C)
            ae(cnrl.device_name_offset, 0x20)
            ae(cnrl.net_name_offset_uni, 0x24)
            ae(cnrl.net_name_uni, "g")
            ae(cnrl.device_name_uni, "h")

        # Fields specific to the ANSI-only variant.
        for cnrl in (cnrl2, cnrl3):
            ae(cnrl.size, 0x1C)
            ae(cnrl.net_name_offset, 0x14)
            ae(cnrl.device_name_offset, 0x18)
            ae(cnrl.net_name_offset_uni, None)
            ae(cnrl.net_name_uni, None)
            ae(cnrl.device_name_uni, None)
    # end def test_from_stream
# end class CNRLTestCase
| lgpl-3.0 |
mbrukman/mapnik | scons/scons-local-2.3.6/SCons/Tool/tlib.py | 4 | 1861 | """SCons.Tool.tlib
XXX
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tlib.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import SCons.Tool
import SCons.Tool.bcc32
import SCons.Util
def generate(env):
    """Add Builders and construction variables for the Borland tlib
    archiver to an Environment.

    FIX: this docstring previously appeared *after* the first
    statement, making it a dead string literal rather than the
    function's docstring.
    """
    # tlib ships with the Borland bcc32 toolchain; locate it the same way.
    SCons.Tool.bcc32.findIt('tlib', env)
    SCons.Tool.createStaticLibBuilder(env)
    env['AR'] = 'tlib'
    env['ARFLAGS'] = SCons.Util.CLVar('')
    env['ARCOM'] = '$AR $TARGET $ARFLAGS /a $SOURCES'
    env['LIBPREFIX'] = ''
    env['LIBSUFFIX'] = '.lib'
def exists(env):
    """Return the location of the tlib archiver (or a falsy value if it
    cannot be found), so SCons can decide whether this tool applies."""
    tool_path = SCons.Tool.bcc32.findIt('tlib', env)
    return tool_path
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
chokribr/invenio | invenio/modules/accounts/upgrades/accounts_2014_11_07_usergroup_name_column_unique.py | 14 | 1968 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Upgrade Usergroup table."""
from invenio.legacy.dbquery import run_sql
from invenio.modules.upgrader.api import op
from sqlalchemy.exc import OperationalError
depends_on = ['invenio_release_1_1_0']
def info():
    """Return a one-line, human-readable description of this upgrade."""
    return "Upgrade Usergroup table"
def do_upgrade():
    """Replace the old usergroup name indexes with a unique one."""
    # Either of the old (non-unique) indexes may be absent depending on
    # how the table was originally created; ignore a failed drop.
    for old_index in ('ix_usergroup_name', 'name'):
        try:
            op.drop_index(old_index, table_name='usergroup')
        except OperationalError:
            pass
    op.create_index(op.f('ix_usergroup_name'), 'usergroup', ['name'],
                    unique=True)
def estimate():
    """Return the expected runtime of this upgrade, in seconds."""
    return 1
def pre_upgrade():
    """Abort the upgrade if duplicate group names would break the new
    unique index."""
    duplicates = run_sql("""
        SELECT name, count(name) as count
        FROM usergroup
        GROUP BY name HAVING count > 1;
        """)
    if duplicates:
        raise RuntimeError("Integrity problem in the table Usergroup",
                           "Duplicate Usergroup name")
def post_upgrade():
    """Run post-upgrade checks (optional)."""
    # Nothing to verify after this upgrade.
    return None
| gpl-2.0 |
Yen-HuaChen/STA663-Final-Project | pygraphviz/pygraphviz/agraph.py | 1 | 58897 | # -*- coding: utf-8 -*-
"""
A Python interface to Graphviz.
"""
# Copyright (C) 2006-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Manos Renieris, http://www.cs.brown.edu/~er/
# Distributed with BSD license.
# All rights reserved, see LICENSE for details.
from __future__ import print_function
import re
import shlex
import subprocess
import sys
import threading
import warnings
from collections import MutableMapping
from . import graphviz as gv
_DEFAULT_ENCODING = 'UTF-8'
_PY2 = sys.version_info[0] == 2
_TEXT_TYPE = unicode if _PY2 else str
_STRING_TYPES = (basestring,) if _PY2 else (str,)
def is_string_like(obj):
    """Return True if *obj* is a string (text on Python 3; text or
    ``basestring`` subclasses on Python 2)."""
    return isinstance(obj, _STRING_TYPES)
class PipeReader(threading.Thread):
    """Read and write pipes using threads.
    """

    def __init__(self, result, pipe):
        # result: a shared list that accumulates the chunks read; the
        # caller reads it back after join()ing the thread.
        # pipe: a readable file-like object (e.g. a subprocess pipe).
        threading.Thread.__init__(self)
        self.result = result
        self.pipe = pipe

    def run(self):
        # Drain the pipe until EOF, appending each chunk to self.result.
        # The pipe is always closed, even if read() raises.
        try:
            while True:
                chunk = self.pipe.read()
                if not chunk:
                    break
                self.result.append(chunk)
        finally:
            self.pipe.close()
class _Action(object):
find, create = 0, 1
# Raised when dot-format graph data cannot be parsed.
class DotError(ValueError):
    """Dot data parsing error"""
class AGraph(object):
"""Class for Graphviz agraph type.
Example use
>>> from pygraphviz import *
>>> G=AGraph()
>>> G=AGraph(directed=True)
>>> G=AGraph("file.dot") # doctest: +SKIP
Graphviz graph keyword parameters are processed so you may add
them like
>>> G=AGraph(landscape='true',ranksep='0.1')
or alternatively
>>> G=AGraph()
>>> G.graph_attr.update(landscape='true',ranksep='0.1')
and
>>> G.node_attr.update(color='red')
>>> G.edge_attr.update(len='2.0',color='blue')
See http://www.graphviz.org/doc/info/attrs.html
for a list of attributes.
Keyword parameters:
thing is a generic input type (filename, string, handle to pointer,
dictionary of dictionaries). An attempt is made to automaticaly
detect the type so you may write for example:
>>> d={'1': {'2': None}, '2': {'1': None, '3': None}, '3': {'2': None}}
>>> A=AGraph(d)
>>> s=A.to_string()
>>> B=AGraph(s)
>>> h=B.handle
>>> C=AGraph(h)
Parameters::
name: Name for the graph
strict: True|False (True for simple graphs)
directed: True|False
data: Dictionary of dictionaries or dictionary of lists
representing nodes or edges to load into initial graph
string: String containing a dot format graph
handle: Swig pointer to an agraph_t data structure
"""
    def __init__(self, thing=None,
                 filename=None, data=None, string=None, handle=None,
                 name='', strict=True, directed=False, **attr):
        """Initialize the graph from one of several input kinds.

        See the class docstring for the meaning of the keyword
        parameters; ``thing`` is auto-detected as a dict, swig handle,
        dot string, or filename.
        """
        self.handle = None  # assign first in case the __init__ bombs
        # initialization can take no arguments (gives empty graph) or
        # a file name
        # a string of graphviz dot language
        # a swig pointer (handle) to a graph
        # a dict of dicts (or dict of lists) data structure
        self.has_layout = False  # avoid creating members outside of init
        # backward compatibility: 'file' keyword is an alias for filename
        filename = attr.pop('file', filename)
        # guess input type if specified as first (nonkeyword) argument
        if thing is not None:
            # can't specify first argument and also file,data,string,handle
            filename = None
            data = None
            string = None
            handle = None
            if isinstance(thing, dict):
                data = thing  # a dictionary of dictionaries (or lists)
            elif hasattr(thing, 'own'):  # a Swig pointer - graph handle
                handle = thing
            elif is_string_like(thing):
                # NOTE(review): non-raw string containing \s -- relies on
                # Python leaving unknown escapes intact; a raw string
                # would be cleaner but the pattern is unchanged here.
                pattern = re.compile('(strict)?\s*(graph|digraph).*{.*}\s*',
                                     re.DOTALL)
                if pattern.match(thing):
                    string = thing  # this is a dot format graph in a string
                else:
                    filename = thing  # assume this is a file name
            else:
                raise TypeError('Unrecognized input %s' % thing)
        if handle is not None:
            # if handle was specified, reference it
            self.handle = handle
        elif filename is not None:
            # load new graph from file (creates self.handle)
            self.read(filename)
        elif string is not None:
            # load new graph from string (creates self.handle)
            # get the charset from the string to properly encode it for
            # writing to the temporary file in from_string()
            match = re.search(r'charset\s*=\s*"([^"]+)"', string)
            if match is not None:
                self.encoding = match.group(1)
            else:
                self.encoding = _DEFAULT_ENCODING
            self.from_string(string)
        else:
            # no handle, need to
            self.handle = None
        if self.handle is not None:
            # the handle was specified or created
            # get the encoding from the "charset" graph attribute
            item = gv.agget(self.handle, b'charset')
            if item is not None:
                self.encoding = item
            else:
                self.encoding = _DEFAULT_ENCODING
        else:
            # no handle was specified or created
            # get encoding from the "charset" kwarg
            self.encoding = attr.get('charset', _DEFAULT_ENCODING)
            try:
                if name is None:
                    name = ''
                # instantiate a new, empty graph
                self.handle = gv.agraphnew(name.encode(self.encoding),
                                           strict, directed)
            except TypeError:
                raise TypeError("Graph name must be a string: %s" % name)
            # encoding is already set but if it was specified explicitly
            # as an attr, then set it explicitly for the graph
            if 'charset' in attr:
                gv.agattr_label(self.handle, 0, 'charset', self.encoding)
            # if data is specified, populate the newly created graph
            if data is not None:
                # load from dict of dicts or dict of lists
                for node in data:
                    for nbr in data[node]:
                        self.add_edge(node, nbr)
                self.add_nodes_from(data.keys())
        # throw away the charset attribute, if one exists,
        # since we've already set it, and now it should not be changed
        if 'charset' in attr:
            del attr['charset']
        # assign any attributes specified through keywords
        self.graph_attr = Attribute(self.handle, 0)  # default graph attributes
        self.graph_attr.update(attr)  # apply attributes passed to init
        self.node_attr = Attribute(self.handle, 1)  # default node attributes
        self.edge_attr = Attribute(self.handle, 2)  # default edge attribtes
    def __enter__(self):
        # Support "with AGraph(...) as G:".
        return self

    def __exit__(self, ext_type, exc_value, traceback):
        # Release the underlying graphviz graph on context exit.
        self.close()

    if _PY2:
        def __unicode__(self):
            return self.string()

        def __str__(self):
            # Python 2: str() must return bytes; encode the dot text.
            return unicode(self).encode(self.encoding, 'replace')
    else:
        def __str__(self):
            # Python 3: the dot-language representation of the graph.
            return self.string()

    def __repr__(self):
        name = gv.agnameof(self.handle)
        if name is None:
            return '<AGraph %s>' % self.handle
        return '<AGraph %s %s>' % (name, self.handle)

    def __eq__(self, other):
        # two graphs are equal if they have exact same string representation
        # this is not graph isomorphism
        return self.string() == other.string()

    def __hash__(self):
        # hash the string representation for id
        return hash(self.string())

    def __iter__(self):
        # provide "for n in G"
        return self.nodes_iter()

    def __contains__(self, n):
        # provide "n in G"
        return self.has_node(n)

    def __len__(self):
        return self.number_of_nodes()

    def __getitem__(self, n):
        # "G[n]" returns nodes attached to n
        return self.neighbors(n)

    # not implemented, but could be...
    # def __setitem__(self,u,v):
    #    self.add_edge(u,v)

    def get_name(self):
        # Graph name as text, decoded with the graph's charset;
        # None for an anonymous graph.
        name = gv.agnameof(self.handle)
        if name is not None:
            name = name.decode(self.encoding)
        return name

    name = property(get_name)
    def add_node(self, n, **attr):
        """Add a single node n.

        If n is not a string, conversion to a string will be attempted.
        String conversion will work if n has valid string representation
        (try str(n) if you are unsure).

        >>> G=AGraph()
        >>> G.add_node('a')
        >>> G.nodes() # doctest: +IGNORE_UNICODE
        [u'a']
        >>> G.add_node(1) # will be converted to a string
        >>> G.nodes() # doctest: +IGNORE_UNICODE
        [u'a', u'1']

        Attributes can be added to nodes on creation or updated after creation
        (attribute values must be strings)

        >>> G.add_node(2,color='red')

        See http://www.graphviz.org/doc/info/attrs.html
        for a list of attributes.

        Anonymous Graphviz nodes are currently not implemented.
        """
        if not is_string_like(n):
            n = str(n)
        n = n.encode(self.encoding)
        # Reuse the node if it already exists; otherwise create it.
        try:
            nh = gv.agnode(self.handle, n, _Action.find)
        except KeyError:
            nh = gv.agnode(self.handle, n, _Action.create)
        node = Node(self, nh=nh)
        node.attr.update(**attr)

    def add_nodes_from(self, nbunch, **attr):
        """Add nodes from a container nbunch.

        nbunch can be any iterable container such as a list or dictionary

        >>> G=AGraph()
        >>> nlist=['a','b',1,'spam']
        >>> G.add_nodes_from(nlist)
        >>> sorted(G.nodes()) # doctest: +IGNORE_UNICODE
        [u'1', u'a', u'b', u'spam']

        Attributes can be added to nodes on creation or updated after creation

        >>> G.add_nodes_from(nlist, color='red') # set all nodes in nlist red
        """
        for n in nbunch:
            self.add_node(n, **attr)

    def remove_node(self, n):
        """Remove the single node n.

        Attempting to remove a node that isn't in the graph will produce
        an error.

        >>> G=AGraph()
        >>> G.add_node('a')
        >>> G.remove_node('a')
        """
        if not is_string_like(n):
            n = str(n)
        n = n.encode(self.encoding)
        try:
            nh = gv.agnode(self.handle, n, _Action.find)
            gv.agdelnode(self.handle, nh)
        except KeyError:
            # re-raise with a readable (decoded) node name
            raise KeyError("Node %s not in graph." % n.decode(self.encoding))

    delete_node = remove_node  # backward-compatible alias

    def remove_nodes_from(self, nbunch):
        """Remove nodes from a container nbunch.

        nbunch can be any iterable container such as a list or dictionary

        >>> G=AGraph()
        >>> nlist=['a','b',1,'spam']
        >>> G.add_nodes_from(nlist)
        >>> G.remove_nodes_from(nlist)
        """
        for n in nbunch:
            self.remove_node(n)

    delete_nodes_from = remove_nodes_from  # backward-compatible alias
def nodes_iter(self):
"""Return an iterator over all the nodes in the graph.
Note: modifying the graph structure while iterating over
the nodes may produce unpredictable results. Use nodes()
as an alternative.
"""
nh = gv.agfstnode(self.handle)
while nh is not None:
yield Node(self, nh=nh)
nh = gv.agnxtnode(self.handle, nh)
raise StopIteration
iternodes = nodes_iter
    def nodes(self):
        """Return a list of all nodes in the graph."""
        return list(self.nodes_iter())

    def number_of_nodes(self):
        """Return the number of nodes in the graph."""
        return gv.agnnodes(self.handle)

    def order(self):
        """Return the number of nodes in the graph."""
        # Graph-theory alias for number_of_nodes().
        return self.number_of_nodes()
def has_node(self, n):
"""Return True if n is in the graph or False if not.
>>> G=AGraph()
>>> G.add_node('a')
>>> G.has_node('a')
True
>>> 'a' in G # same as G.has_node('a')
True
"""
try:
node = Node(self, n)
return True
except KeyError:
return False
    def get_node(self, n):
        """Return a node object (Node) corresponding to node n.

        >>> G=AGraph()
        >>> G.add_node('a')
        >>> node=G.get_node('a')
        >>> print(node)
        a
        """
        # Raises KeyError if n is not in the graph.
        return Node(self, n)
def add_edge(self, u, v=None, key=None, **attr):
"""Add a single edge between nodes u and v.
If the nodes u and v are not in the graph they will added.
If u and v are not strings, conversion to a string will be attempted.
String conversion will work if u and v have valid string representation
(try str(u) if you are unsure).
>>> G=AGraph()
>>> G.add_edge('a','b')
>>> G.edges() # doctest: +IGNORE_UNICODE
[(u'a', u'b')]
The optional key argument allows assignment of a key to the
edge. This is especially useful to distinguish between
parallel edges in multi-edge graphs (strict=False).
>>> G=AGraph(strict=False)
>>> G.add_edge('a','b','first')
>>> G.add_edge('a','b','second')
>>> sorted(G.edges(keys=True)) # doctest: +IGNORE_UNICODE
[(u'a', u'b', u'first'), (u'a', u'b', u'second')]
Attributes can be added when edges are created or updated after creation
>>> G.add_edge('a','b',color='green')
Attributes must be valid strings.
See http://www.graphviz.org/doc/info/attrs.html
for a list of attributes.
"""
if v is None:
(u, v) = u # no v given, assume u is an edge tuple
try:
uh = Node(self, u).handle
except:
self.add_node(u)
uh = Node(self, u).handle
try:
vh = Node(self, v).handle
except:
self.add_node(v)
vh = Node(self, v).handle
if key is not None:
if not is_string_like(key):
key = str(key)
key = key.encode(self.encoding)
try:
# new
eh = gv.agedge(self.handle, uh, vh, key, _Action.create)
except KeyError:
# for strict graph, or already added
eh = gv.agedge(self.handle, uh, vh, key, _Action.find)
e = Edge(self, eh=eh)
e.attr.update(**attr)
    def add_edges_from(self, ebunch, **attr):
        """Add edges to graph from a container ebunch.

        ebunch is a container of edges such as a list or dictionary.

        >>> G=AGraph()
        >>> elist=[('a','b'),('b','c')]
        >>> G.add_edges_from(elist)

        Attributes can be added when edges are created or updated after creation

        >>> G.add_edges_from(elist, color='green')
        """
        for e in ebunch:
            self.add_edge(e, **attr)

    def get_edge(self, u, v, key=None):
        """Return an edge object (Edge) corresponding to edge (u,v).

        >>> G=AGraph()
        >>> G.add_edge('a','b')
        >>> edge=G.get_edge('a','b')
        >>> print(edge) # doctest: +IGNORE_UNICODE
        (u'a', u'b')

        With optional key argument will only get edge matching (u,v,key).
        """
        # Raises KeyError if the edge is not in the graph.
        return Edge(self, u, v, key)

    def remove_edge(self, u, v=None, key=None):
        """Remove edge between nodes u and v from the graph.

        With optional key argument will only remove an edge
        matching (u,v,key).
        """
        if v is None:
            (u, v) = u  # no v given, assume u is an edge tuple
        e = Edge(self, u, v, key)
        try:
            gv.agdeledge(self.handle, e.handle)
        except KeyError:
            raise KeyError("Edge %s-%s not in graph." % (u, v))

    delete_edge = remove_edge  # backward-compatible alias

    def remove_edges_from(self, ebunch):
        """Remove edges from ebunch (a container of edges)."""
        for e in ebunch:
            self.remove_edge(e)

    delete_edges_from = remove_edges_from  # backward-compatible alias

    def has_edge(self, u, v=None, key=None):
        """Return True an edge u-v is in the graph or False if not.

        >>> G=AGraph()
        >>> G.add_edge('a','b')
        >>> G.has_edge('a','b')
        True

        Optional key argument will restrict match to edges (u,v,key).
        """
        if v is None:
            (u, v) = u  # no v given, assume u is an edge tuple
        try:
            Edge(self, u, v, key)
            return True
        except KeyError:
            return False

    def edges(self, nbunch=None, keys=False):
        """Return list of edges in the graph.

        If the optional nbunch (container of nodes) is provided, only
        edges adjacent to nodes in nbunch will be returned.

        >>> G=AGraph()
        >>> G.add_edge('a','b')
        >>> G.add_edge('c','d')
        >>> print(sorted(G.edges())) # doctest: +IGNORE_UNICODE
        [(u'a', u'b'), (u'c', u'd')]
        >>> print(G.edges('a')) # doctest: +IGNORE_UNICODE
        [(u'a', u'b')]
        """
        return list(self.edges_iter(nbunch=nbunch, keys=keys))
def has_neighbor(self, u, v, key=None):
"""Return True if u has an edge to v or False if not.
>>> G=AGraph()
>>> G.add_edge('a','b')
>>> G.has_neighbor('a','b')
True
Optional key argument will only find edges (u,v,key).
"""
return self.has_edge(u, v)
def neighbors_iter(self, n):
"""Return iterator over the nodes attached to n.
Note: modifying the graph structure while iterating over
node neighbors may produce unpredictable results. Use neighbors()
as an alternative.
"""
n = Node(self, n)
nh = n.handle
eh = gv.agfstedge(self.handle, nh)
while eh is not None:
(s, t) = Edge(self, eh=eh)
if s == n:
yield Node(self, t)
else:
yield Node(self, s)
eh = gv.agnxtedge(self.handle, eh, nh)
raise StopIteration
def neighbors(self, n):
"""Return a list of the nodes attached to n."""
return list(self.neighbors_iter(n))
iterneighbors = neighbors_iter
def out_edges_iter(self, nbunch=None, keys=False):
"""Return iterator over out edges in the graph.
If the optional nbunch (container of nodes) only out edges
adjacent to nodes in nbunch will be returned.
Note: modifying the graph structure while iterating over
edges may produce unpredictable results. Use out_edges()
as an alternative.
"""
if nbunch is None: # all nodes
nh = gv.agfstnode(self.handle)
while nh is not None:
eh = gv.agfstout(self.handle, nh)
while eh is not None:
e = Edge(self, eh=eh)
if keys:
yield (e[0], e[1], e.name)
else:
yield e
eh = gv.agnxtout(self.handle, eh)
nh = gv.agnxtnode(self.handle, nh)
elif nbunch in self: # if nbunch is a single node
n = Node(self, nbunch)
nh = n.handle
eh = gv.agfstout(self.handle, nh)
while eh is not None:
e = Edge(self, eh=eh)
if keys:
yield (e[0], e[1], e.name)
else:
yield e
eh = gv.agnxtout(self.handle, eh)
else: # if nbunch is a sequence of nodes
try:
bunch = [n for n in nbunch if n in self]
except TypeError:
raise TypeError("nbunch is not a node or a sequence of nodes.")
for n in nbunch:
try:
nh = Node(self, n).handle
except KeyError:
continue
eh = gv.agfstout(self.handle, nh)
while eh is not None:
e = Edge(self, eh=eh)
if keys:
yield (e[0], e[1], e.name)
else:
yield e
eh = gv.agnxtout(self.handle, eh)
raise StopIteration
iteroutedges = out_edges_iter
def in_edges_iter(self, nbunch=None, keys=False):
"""Return iterator over out edges in the graph.
If the optional nbunch (container of nodes) only out edges
adjacent to nodes in nbunch will be returned.
Note: modifying the graph structure while iterating over
edges may produce unpredictable results. Use in_edges()
as an alternative.
"""
if nbunch is None: # all nodes
nh = gv.agfstnode(self.handle)
while nh is not None:
eh = gv.agfstin(self.handle, nh)
while eh is not None:
e = Edge(self, eh=eh)
if keys:
yield (e[0], e[1], e.name)
else:
yield e
eh = gv.agnxtin(self.handle, eh)
nh = gv.agnxtnode(self.handle, nh)
elif nbunch in self: # if nbunch is a single node
n = Node(self, nbunch)
nh = n.handle
eh = gv.agfstin(self.handle, nh)
while eh is not None:
e = Edge(self, eh=eh)
if keys:
yield (e[0], e[1], e.name)
else:
yield e
eh = gv.agnxtin(self.handle, eh)
else: # if nbunch is a sequence of nodes
try:
bunch = [n for n in nbunch if n in self]
except TypeError:
raise TypeError("nbunch is not a node or a sequence of nodes.")
for n in nbunch:
try:
nh = Node(self, n).handle
except KeyError:
continue
eh = gv.agfstin(self.handle, nh)
while eh is not None:
e = Edge(self, eh=eh)
if keys:
yield (e[0], e[1], e.name)
else:
yield e
eh = gv.agnxtin(self.handle, eh)
raise StopIteration
def edges_iter(self, nbunch=None, keys=False):
"""Return iterator over edges in the graph.
If the optional nbunch (container of nodes) only edges
adjacent to nodes in nbunch will be returned.
Note: modifying the graph structure while iterating over
edges may produce unpredictable results. Use edges()
as an alternative.
"""
if nbunch is None: # all nodes
for e in self.out_edges_iter(keys=keys):
yield e
elif nbunch in self: # only one node
for e in self.out_edges_iter(nbunch, keys=keys):
yield e
for e in self.in_edges_iter(nbunch, keys=keys):
if e != (nbunch, nbunch):
yield e
else: # a group of nodes
used = set()
for e in self.out_edges_iter(nbunch, keys=keys):
yield e
used.add(e)
for e in self.in_edges_iter(nbunch, keys=keys):
if e not in used:
yield e
iterinedges = in_edges_iter
iteredges = edges_iter
def out_edges(self, nbunch=None, keys=False):
"""Return list of out edges in the graph.
If the optional nbunch (container of nodes) only out edges
adjacent to nodes in nbunch will be returned.
"""
return list(self.out_edges_iter(nbunch=nbunch, keys=keys))
def in_edges(self, nbunch=None, keys=False):
"""Return list of in edges in the graph.
If the optional nbunch (container of nodes) only in edges
adjacent to nodes in nbunch will be returned.
"""
return list(self.in_edges_iter(nbunch=nbunch, keys=keys))
def predecessors_iter(self, n):
"""Return iterator over predecessor nodes of n.
Note: modifying the graph structure while iterating over
node predecessors may produce unpredictable results. Use
predecessors() as an alternative.
"""
n = Node(self, n)
nh = n.handle
eh = gv.agfstin(self.handle, nh)
while eh is not None:
(s, t) = Edge(self, eh=eh)
if s == n:
yield Node(self, t)
else:
yield Node(self, s)
eh = gv.agnxtin(self.handle, eh)
raise StopIteration
iterpred = predecessors_iter
def successors_iter(self, n):
"""Return iterator over successor nodes of n.
Note: modifying the graph structure while iterating over
node successors may produce unpredictable results. Use
successors() as an alternative.
"""
n = Node(self, n)
nh = n.handle
eh = gv.agfstout(self.handle, nh)
while eh is not None:
(s, t) = Edge(self, eh=eh)
if s == n:
yield Node(self, t)
else:
yield Node(self, s)
eh = gv.agnxtout(self.handle, eh)
raise StopIteration
itersucc = successors_iter
def successors(self, n):
"""Return list of successor nodes of n."""
return list(self.successors_iter(n))
def predecessors(self, n):
"""Return list of predecessor nodes of n."""
return list(self.predecessors_iter(n))
# digraph definitions
out_neighbors = successors
in_neighbors = predecessors
    def degree_iter(self, nbunch=None, indeg=True, outdeg=True):
        """Return an iterator over the degree of the nodes given in
        nbunch container.

        Returns pairs of (node,degree).
        """
        # prepare nbunch: normalize None / single node / sequence into a
        # list of Node objects that are members of this graph
        if nbunch is None:  # include all nodes via iterator
            bunch = [n for n in self.nodes_iter()]
        elif nbunch in self:  # if nbunch is a single node
            bunch = [Node(self, nbunch)]
        else:  # if nbunch is a sequence of nodes
            try:
                bunch = [Node(self, n) for n in nbunch if n in self]
            except TypeError:
                raise TypeError("nbunch is not a node or a sequence of nodes.")
        for n in bunch:
            # gv.agdegree returns in-degree, out-degree or their sum
            # depending on the indeg/outdeg flags
            yield (Node(self, n), gv.agdegree(self.handle,
                                              n.handle, indeg, outdeg))

    def in_degree_iter(self, nbunch=None):
        """Return an iterator over the in-degree of the nodes given in
        nbunch container.

        Returns pairs of (node,degree).
        """
        return self.degree_iter(nbunch, indeg=True, outdeg=False)

    def out_degree_iter(self, nbunch=None):
        """Return an iterator over the out-degree of the nodes given in
        nbunch container.

        Returns pairs of (node,degree).
        """
        return self.degree_iter(nbunch, indeg=False, outdeg=True)

    # legacy iterator aliases
    iteroutdegree = out_degree_iter
    iterindegree = in_degree_iter
def out_degree(self, nbunch=None, with_labels=False):
"""Return the out-degree of nodes given in nbunch container.
Using optional with_labels=True returns a dictionary
keyed by node with value set to the degree.
"""
if with_labels:
return dict(self.out_degree_iter(nbunch))
else:
dlist = list(d for n, d in self.out_degree_iter(nbunch))
if nbunch in self:
return dlist[0]
else:
return dlist
def in_degree(self, nbunch=None, with_labels=False):
"""Return the in-degree of nodes given in nbunch container.
Using optional with_labels=True returns a dictionary
keyed by node with value set to the degree.
"""
if with_labels:
return dict(self.in_degree_iter(nbunch))
else:
dlist = list(d for n, d in self.in_degree_iter(nbunch))
if nbunch in self:
return dlist[0]
else:
return dlist
def reverse(self):
"""Return copy of directed graph with edge directions reversed."""
if self.directed:
# new empty DiGraph
H = self.__class__(strict=self.strict, directed=True, name=self.name)
H.graph_attr.update(self.graph_attr)
H.node_attr.update(self.node_attr)
H.edge_attr.update(self.edge_attr)
for n in self.nodes():
H.add_node(n)
new_n = Node(H, n)
new_n.attr.update(n.attr)
for e in self.edges():
(u, v) = e
H.add_edge(v, u)
uv = H.get_edge(v, u)
uv.attr.update(e.attr)
return H
else:
return self
def degree(self, nbunch=None, with_labels=False):
"""Return the degree of nodes given in nbunch container.
Using optional with_labels=True returns a dictionary
keyed by node with value set to the degree.
"""
if with_labels:
return dict(self.degree_iter(nbunch))
else:
dlist = list(d for n, d in self.degree_iter(nbunch))
if nbunch in self:
return dlist[0]
else:
return dlist
iterdegree = degree_iter
def number_of_edges(self):
"""Return the number of edges in the graph."""
return gv.agnedges(self.handle)
    def clear(self):
        """Remove all nodes, edges, and attributes from the graph."""
        self.remove_edges_from(self.edges())
        self.remove_nodes_from(self.nodes())
        # now "close" existing graph and create a new graph
        # (preserve name/strict/directed so the replacement matches)
        name = gv.agnameof(self.handle)
        strict = self.strict
        directed = self.directed
        gv.agclose(self.handle)
        self.handle = gv.agraphnew(name, strict, directed)
        # re-point the attribute proxies at the fresh underlying graph,
        # otherwise they would keep referencing the closed handle
        self.graph_attr.handle = self.handle
        self.node_attr.handle = self.handle
        self.edge_attr.handle = self.handle

    def close(self):
        """Release the underlying graphviz data for this graph."""
        # may be useful to clean up graphviz data
        # this should completely remove all of the existing graphviz data
        gv.agclose(self.handle)
    def copy(self):
        """Return a copy of the graph."""
        # Round-trip through a dot-format temp file: write self out and
        # construct a new graph of the same class from it.
        from tempfile import TemporaryFile
        fh = TemporaryFile()
        # Cover TemporaryFile wart: on 'nt' we need the file member
        if hasattr(fh, 'file'):
            fhandle = fh.file
        else:
            fhandle = fh
        self.write(fhandle)
        fh.seek(0)
        # NOTE(review): fh is never closed explicitly; it is reclaimed on
        # garbage collection -- confirm this is intentional.
        return self.__class__(filename=fhandle)
def add_path(self, nlist):
"""Add the path of nodes given in nlist."""
fromv = nlist.pop(0)
while len(nlist) > 0:
tov = nlist.pop(0)
self.add_edge(fromv, tov)
fromv = tov
def add_cycle(self, nlist):
"""Add the cycle of nodes given in nlist."""
self.add_path(nlist + [nlist[0]])
    def prepare_nbunch(self, nbunch=None):
        # private function to build bunch from nbunch
        # Returns an iterable of nodes:
        #   nbunch is None     -> all nodes (iterator)
        #   nbunch is a node   -> one-element list
        #   nbunch is iterable -> list of members of the graph only
        if nbunch is None:  # include all nodes via iterator
            bunch = self.nodes_iter()
        elif nbunch in self:  # if nbunch is a single node
            bunch = [Node(self, nbunch)]
        else:  # if nbunch is a sequence of nodes
            try:  # capture error for nonsequence/iterator entries.
                bunch = [Node(self, n) for n in nbunch if n in self]
                # bunch=(n for n in nbunch if n in self) # need python 2.4
            except TypeError:
                raise TypeError("nbunch is not a node or a sequence of nodes.")
        return bunch
    def add_subgraph(self, nbunch=None, name=None, **attr):
        """Return subgraph induced by nodes in nbunch.

        With ``name`` the subgraph is created (or found) under that
        name; ``attr`` supplies initial graph attributes.  When
        ``nbunch`` is None an empty subgraph is returned.
        """
        if name is not None:
            # graphviz expects an encoded (bytes) name
            name = name.encode(self.encoding)
        try:
            handle = gv.agsubg(self.handle, name, _Action.create)
        except TypeError:
            raise TypeError("Subgraph name must be a string: %s" % name.decode(self.encoding))
        H = self.__class__(strict=self.strict,
                           directed=self.directed,
                           handle=handle, name=name,
                           **attr)
        if nbunch is None: return H
        # add induced subgraph on nodes in nbunch
        bunch = self.prepare_nbunch(nbunch)
        for n in bunch:
            node = Node(self, n)
            nh = gv.agsubnode(handle, node.handle, _Action.create)
        # add every edge whose endpoints both landed in the subgraph
        for (u, v, k) in self.edges(keys=True):
            if u in H and v in H:
                edge = Edge(self, u, v, k)
                eh = gv.agsubedge(handle, edge.handle, _Action.create)
        return H

    def remove_subgraph(self, name):
        """Remove subgraph with given name."""
        try:
            handle = gv.agsubg(self.handle, name.encode(self.encoding),
                               _Action.find)
        except TypeError:
            raise TypeError("Subgraph name must be a string: %s" % name)
        if handle is None:
            raise KeyError("Subgraph %s not in graph." % name)
        gv.agdelsubg(self.handle, handle)

    # legacy aliases
    delete_subgraph = remove_subgraph
    subgraph = add_subgraph

    def subgraph_parent(self, nbunch=None, name=None):
        """Return parent graph of subgraph or None if graph is root graph.
        """
        handle = gv.agparent(self.handle)
        if handle is None:
            return None
        H = self.__class__(strict=self.strict,
                           directed=self.directed,
                           handle=handle,
                           name=name)
        return H

    def subgraph_root(self, nbunch=None, name=None):
        """Return root graph of subgraph or None if graph is root graph.
        """
        handle = gv.agroot(self.handle)
        if handle is None:
            return None
        H = self.__class__(strict=self.strict,
                           directed=self.directed,
                           handle=handle, name=name)
        return H

    def get_subgraph(self, name):
        """Return existing subgraph with specified name or None if it
        doesn't exist.
        """
        try:
            handle = gv.agsubg(self.handle, name.encode(self.encoding)
                               , _Action.find)
        except TypeError:
            raise TypeError("Subgraph name must be a string: %s" % name)
        if handle is None:
            return None
        H = self.__class__(strict=self.strict,
                           directed=self.directed,
                           handle=handle)
        return H
def subgraphs_iter(self):
"""Iterator over subgraphs."""
handle = gv.agfstsubg(self.handle)
while handle is not None:
yield self.__class__(strict=self.strict,
directed=self.directed,
handle=handle)
handle = gv.agnxtsubg(handle)
raise StopIteration
def subgraphs(self):
"""Return a list of all subgraphs in the graph."""
return list(self.subgraphs_iter())
# directed, undirected tests and conversions
def is_strict(self):
"""Return True if graph is strict or False if not.
Strict graphs do not allow parallel edges or self loops.
"""
if gv.agisstrict(self.handle) == 1:
return True
else:
return False
strict = property(is_strict)
def is_directed(self):
"""Return True if graph is directed or False if not."""
if gv.agisdirected(self.handle) == 1:
return True
else:
return False
directed = property(is_directed)
def is_undirected(self):
"""Return True if graph is undirected or False if not."""
if gv.agisundirected(self.handle) == 1:
return True
else:
return False
def to_undirected(self):
"""Return undirected copy of graph."""
if not self.directed:
return self.copy()
else:
U = AGraph(strict=self.strict)
U.graph_attr.update(self.graph_attr)
U.node_attr.update(self.node_attr)
U.edge_attr.update(self.edge_attr)
for n in self.nodes():
U.add_node(n)
new_n = Node(U, n)
new_n.attr.update(n.attr)
for e in self.edges():
(u, v) = e
U.add_edge(u, v)
uv = U.get_edge(u, v)
uv.attr.update(e.attr)
return U
def to_directed(self, **kwds):
"""Return directed copy of graph.
Each undirected edge u-v is represented as two directed
edges u->v and v->u.
"""
if not self.directed:
D = AGraph(strict=self.strict, directed=True)
D.graph_attr.update(self.graph_attr)
D.node_attr.update(self.node_attr)
D.edge_attr.update(self.edge_attr)
for n in self.nodes():
D.add_node(n)
new_n = Node(D, n)
new_n.attr.update(n.attr)
for e in self.edges():
(u, v) = e
D.add_edge(u, v)
D.add_edge(v, u)
uv = D.get_edge(u, v)
vu = D.get_edge(v, u)
uv.attr.update(e.attr)
uv.attr.update(e.attr)
vu.attr.update(e.attr)
return D
else:
return self.copy()
# io
    def read(self, path):
        """Read graph from dot format file on path.

        path can be a file name or file handle

        use::

            G.read('file.dot')
        """
        fh = self._get_fh(path)
        try:
            # discard any graph we already hold before re-reading
            if self.handle is not None:
                gv.agclose(self.handle)
            try:
                self.handle = gv.agread(fh, None)
            except ValueError:
                # gv.agread signals a parse failure with ValueError
                raise DotError
        except IOError:
            print("IO error reading file")
        # NOTE(review): a handle opened here from a path is not closed
        # explicitly -- confirm callers do not rely on immediate release.

    def write(self, path=None):
        """Write graph in dot format to file on path.

        path can be a file name or file handle; defaults to sys.stdout.

        use::

            G.write('file.dot')
        """
        if path is None:
            path = sys.stdout
        fh = self._get_fh(path, 'w')
        try:
            gv.agwrite(self.handle, fh)
        except IOError:
            print("IO error writing file")
        finally:
            # close only handles we opened ourselves (a path was given);
            # caller-supplied file objects are left open
            if hasattr(fh, 'close') and not hasattr(path, 'write'):
                fh.close()
def string_nop(self):
"""Return a string (unicode) representation of graph in dot format."""
# this will fail for graphviz-2.8 because of a broken nop
# so use tempfile version below
return self.draw(format='dot', prog='nop').decode(self.encoding)
def to_string(self):
"""Return a string (unicode) representation of graph in dot format."""
from tempfile import TemporaryFile
fh = TemporaryFile()
# Cover TemporaryFile wart: on 'nt' we need the file member
if hasattr(fh, 'file'):
self.write(fh.file)
else:
self.write(fh)
fh.seek(0)
data = fh.read()
fh.close()
return data.decode(self.encoding)
    def string(self):
        """Return a string (unicode) representation of graph in dot format."""
        # string_nop() renders via the graphviz ``nop`` program;
        # to_string() is the temp-file alternative kept for reference.
        # return self.to_string()
        return self.string_nop()

    def from_string(self, string):
        """Load a graph from a string in dot format.

        Overwrites any existing graph.

        To make a new graph from a string use

        >>> s='digraph {1 -> 2}'
        >>> A=AGraph()
        >>> t=A.from_string(s)
        >>> A=AGraph(string=s) # specify s is a string
        >>> A=AGraph(s) # s assumed to be a string during initialization
        """
        # allow either unicode or encoded string
        try:
            string = string.decode(self.encoding)
        except (UnicodeEncodeError, AttributeError):
            pass
        # round-trip through a temp file because gv.agread wants a real
        # file handle rather than an in-memory string
        from tempfile import TemporaryFile
        fh = TemporaryFile()
        fh.write(string.encode(self.encoding))
        fh.seek(0)
        # Cover TemporaryFile wart: on 'nt' we need the file member
        if hasattr(fh, 'file'):
            self.read(fh.file)
        else:
            self.read(fh)
        fh.close()
        return self
def _get_prog(self, prog):
# private: get path of graphviz program
progs = ['neato', 'dot', 'twopi', 'circo', 'fdp', 'nop',
'wc', 'acyclic', 'gvpr', 'gvcolor', 'ccomps', 'sccmap', 'tred',
'sfdp']
if prog not in progs:
raise ValueError("Program %s is not one of: %s." %
(prog, ', '.join(progs)))
try: # user must pick one of the graphviz programs...
runprog = self._which(prog)
except:
raise ValueError("Program %s not found in path." % prog)
return runprog
    def _run_prog(self, prog='nop', args=''):
        """Apply graphviz program to graph and return the result as a string.

        >>> A = AGraph()
        >>> s = A._run_prog() # doctest: +SKIP
        >>> s = A._run_prog(prog='acyclic') # doctest: +SKIP

        Use keyword args to add additional arguments to graphviz programs.
        """
        # quote the program path so paths with spaces survive shlex.split
        runprog = r'"%s"' % self._get_prog(prog)
        cmd = ' '.join([runprog, args])
        dotargs = shlex.split(cmd)
        p = subprocess.Popen(dotargs,
                             shell=False,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=False)
        (child_stdin,
         child_stdout,
         child_stderr) = (p.stdin, p.stdout, p.stderr)
        # Use threading to avoid blocking: reader threads drain stdout and
        # stderr while we feed stdin, so no pipe can fill and deadlock
        data = []
        errors = []
        threads = [PipeReader(data, child_stdout),
                   PipeReader(errors, child_stderr)]
        for t in threads:
            t.start()
        self.write(child_stdin)
        child_stdin.close()
        for t in threads:
            t.join()
        if not data:
            # nothing on stdout: treat the stderr text as a hard failure
            raise IOError(b"".join(errors).decode(self.encoding))
        if len(errors) > 0:
            # stdout produced output; surface stderr as a warning only
            warnings.warn(b"".join(errors).decode(self.encoding), RuntimeWarning)
        return b"".join(data)
def layout(self, prog='neato', args=''):
"""Assign positions to nodes in graph.
Optional prog=['neato'|'dot'|'twopi'|'circo'|'fdp'|'nop']
will use specified graphviz layout method.
>>> A=AGraph()
>>> A.layout() # uses neato
>>> A.layout(prog='dot')
Use keyword args to add additional arguments to graphviz programs.
The layout might take a long time on large graphs.
"""
fmt = 'dot'
data = self._run_prog(prog, ' '.join([args, "-T", fmt]))
self.from_string(data)
self.has_layout = True
return
def tred(self, args='', copy=False):
"""Transitive reduction of graph. Modifies existing graph.
To create a new graph use
>>> A=AGraph()
>>> B=A.tred(copy=True) # doctest: +SKIP
See the graphviz "tred" program for details of the algorithm.
"""
data = self._run_prog('tred', args)
if copy:
return self.__class__(string=data.decode(self.encoding))
else:
return self.from_string(data)
def acyclic(self, args='', copy=False):
"""Reverse sufficient edges in digraph to make graph acyclic.
Modifies existing graph.
To create a new graph use
>>> A=AGraph()
>>> B=A.acyclic(copy=True) # doctest: +SKIP
See the graphviz "acyclic" program for details of the algorithm.
"""
data = self._run_prog('acyclic', args)
if copy:
return self.__class__(string=data.decode(self.encoding))
else:
return self.from_string(data)
def draw(self, path=None, format=None, prog=None, args=''):
"""Output graph to path in specified format.
An attempt will be made to guess the output format based on the file
extension of `path`. If that fails, then the `format` parameter will
be used.
Note, if `path` is a file object returned by a call to os.fdopen(),
then the method for discovering the format will not work. In such
cases, one should explicitly set the `format` parameter; otherwise, it
will default to 'dot'.
Formats (not all may be available on every system depending on
how Graphviz was built)
'canon', 'cmap', 'cmapx', 'cmapx_np', 'dia', 'dot',
'fig', 'gd', 'gd2', 'gif', 'hpgl', 'imap', 'imap_np',
'ismap', 'jpe', 'jpeg', 'jpg', 'mif', 'mp', 'pcl', 'pdf',
'pic', 'plain', 'plain-ext', 'png', 'ps', 'ps2', 'svg',
'svgz', 'vml', 'vmlz', 'vrml', 'vtx', 'wbmp', 'xdot', 'xlib'
If prog is not specified and the graph has positions
(see layout()) then no additional graph positioning will
be performed.
Optional prog=['neato'|'dot'|'twopi'|'circo'|'fdp'|'nop']
will use specified graphviz layout method.
>>> G = AGraph()
>>> G.layout()
# use current node positions, output ps in 'file.ps'
>>> G.draw('file.ps')
# use dot to position, output png in 'file'
>>> G.draw('file', format='png',prog='dot')
# use keyword 'args' to pass additional arguments to graphviz
>>> G.draw('test.ps',prog='twopi',args='-Gepsilon=1')
The layout might take a long time on large graphs.
"""
import os
# try to guess format from extension
if format is None and path is not None:
p = path
# in case we got a file handle get its name instead
if not is_string_like(p):
p = path.name
format = os.path.splitext(p)[-1].lower()[1:]
if format is None or format == '':
format = 'dot'
if prog is None:
if self.has_layout:
prog = 'neato'
args += "-n2"
else:
raise AttributeError(
"""Graph has no layout information, see layout() or specify prog=%s.""" %
("|".join(['neato', 'dot', 'twopi', 'circo', 'fdp', 'nop'])))
else:
if self.number_of_nodes() > 1000:
sys.stderr.write(
"Warning: graph has %s nodes...layout may take a long time.\n" %
self.number_of_nodes())
if prog == 'nop': # nop takes no switches
args = ''
else:
args = ' '.join([args, "-T" + format])
data = self._run_prog(prog, args)
if path is not None:
fh = self._get_fh(path, 'w+b')
fh.write(data)
if is_string_like(path):
fh.close()
d = None
else:
d = data
return d
# some private helper functions
    def _get_fh(self, path, mode='r'):
        """ Return a file handle for given path.

        Path can be a string or a file handle.
        Attempt to uncompress/compress files ending in '.gz' and '.bz2'.
        """
        import os
        if is_string_like(path):
            if path.endswith('.gz'):
                # import gzip
                # fh = gzip.open(path,mode=mode) # doesn't return real fh
                fh = os.popen("gzcat " + path)  # probably not portable
            elif path.endswith('.bz2'):
                # import bz2
                # fh = bz2.BZ2File(path,mode=mode) # doesn't return real fh
                fh = os.popen("bzcat " + path)  # probably not portable
            else:
                fh = open(path, mode=mode)
        elif hasattr(path, 'write'):
            # Note, mode of file handle is unchanged.
            fh = path
        else:
            raise TypeError('path must be a string or file handle.')
        return fh
def _which(self, name):
"""Searches for name in exec path and returns full path"""
import os
import glob
paths = os.environ["PATH"]
if os.name == "nt":
exe = ".exe"
else:
exe = ""
for path in paths.split(os.pathsep):
match = glob.glob(os.path.join(path, name + exe))
if match:
return match[0]
raise ValueError("No prog %s in path." % name)
class Node(_TEXT_TYPE):
    """Node object based on unicode.

    If G is a graph

    >>> G=AGraph()

    then

    >>> G.add_node(1)

    will create a node object labeled by the string "1".
    To get the object use

    >>> node=Node(G,1)

    or

    >>> node=G.get_node(1)

    The node object is derived from a string and can be manipulated as such.

    Each node has attributes that can be directly accessed through
    the attr dictionary:

    >>> node.attr['color']='red'
    """

    def __new__(self, graph, name=None, nh=None):
        # When a graphviz node handle ``nh`` is supplied, recover the
        # node's name from it; otherwise look the node up by name.
        if nh is not None:
            n = super(Node, self).__new__(self, gv.agnameof(nh), graph.encoding)
        else:
            n = super(Node, self).__new__(self, name)
            try:
                nh = gv.agnode(graph.handle, n.encode(graph.encoding), _Action.find)
            except KeyError:
                raise KeyError("Node %s not in graph." % n)
        n.ghandle = graph.handle
        # per-node attribute proxy (atype 1 == node attributes)
        n.attr = ItemAttribute(nh, 1)
        n.handle = nh
        n.encoding = graph.encoding
        return n

    def get_handle(self):
        """Return pointer to graphviz node object."""
        return gv.agnode(self.ghandle, self.encode(self.encoding), _Action.find)
    # handle=property(get_handle)

    def get_name(self):
        # gv returns bytes; decode with the graph's encoding
        name = gv.agnameof(self.handle)
        if name is not None:
            name = name.decode(self.encoding)
        return name

    name = property(get_name)
class Edge(tuple):
    """Edge object based on tuple.

    If G is a graph

    >>> G=AGraph()

    then

    >>> G.add_edge(1,2)

    will add the edge 1-2 to the graph.

    >>> edge=Edge(G,1,2)

    or

    >>> edge=G.get_edge(1,2)

    will get the edge object.

    An optional key can be used

    >>> G.add_edge(2,3,'spam')
    >>> edge=Edge(G,2,3,'spam')

    The edge is represented as a tuple (u,v) or (u,v,key)
    and can be manipulated as such.

    Each edge has attributes that can be directly accessed through
    the attr dictionary:

    >>> edge.attr['color']='red'
    """

    def __new__(self, graph, source=None, target=None, key=None, eh=None):
        # edge handle given, reconstruct node objects from its endpoints
        if eh is not None:
            (source, target) = (gv.agtail(eh), gv.aghead(eh))
            s = Node(graph, nh=source)
            t = Node(graph, nh=target)
        # no edge handle, search for edge and construct object
        else:
            s = Node(graph, source)
            t = Node(graph, target)
            if key is not None:
                # graphviz wants an encoded (bytes) key
                if not is_string_like(key):
                    key = str(key)
                key = key.encode(graph.encoding)
            try:
                eh = gv.agedge(graph.handle,
                               s.handle,
                               t.handle,
                               key,
                               _Action.find)
            except KeyError:
                raise KeyError("Edge %s-%s not in graph." % (source, target))
        tp = tuple.__new__(self, (s, t))
        tp.ghandle = graph.handle
        tp.handle = eh
        # per-edge attribute proxy (atype 3 == edge attributes)
        tp.attr = ItemAttribute(eh, 3)
        tp.encoding = graph.encoding
        return tp

    def get_name(self):
        # the edge "name" is graphviz's key for the edge (may be None)
        name = gv.agnameof(self.handle)
        if name is not None:
            name = name.decode(self.encoding)
        return name

    name = property(get_name)
    key = property(get_name)
class Attribute(MutableMapping):
    """Default attributes for graphs.

    Assigned on initialization of AGraph class.
    and manipulated through the class data.

    >>> G=AGraph() # initialize, G.graph_attr, G.node_attr, G.edge_attr
    >>> G.graph_attr['splines']='true'
    >>> G.node_attr['shape']='circle'
    >>> G.edge_attr['color']='red'

    See
    http://graphviz.org/doc/info/attrs.html
    for a list of all attributes.
    """
    # use for graph, node, and edge default attributes
    # atype:graph=0, node=1,edge=3

    def __init__(self, handle, atype):
        self.handle = handle
        self.type = atype
        # get the encoding from the root graph's 'charset' attribute,
        # falling back to the module default when it is unset
        ghandle = gv.agraphof(handle)
        root_handle = gv.agroot(ghandle)  # get root graph
        try:
            ah = gv.agattr(root_handle, 0, b'charset', None)
            self.encoding = gv.agattrdefval(ah)
        except KeyError:
            self.encoding = _DEFAULT_ENCODING

    def __setitem__(self, name, value):
        if name == 'charset' and self.type == 0:
            # the charset determines how all other attributes are
            # encoded/decoded, so changing it after the fact is unsafe
            raise ValueError('Graph charset is immutable!')
        if not is_string_like(value):
            value = str(value)
        ghandle = gv.agroot(self.handle)  # get root graph
        if ghandle == self.handle:
            # root graph: declare the default attribute directly
            gv.agattr_label(self.handle, self.type,
                            name.encode(self.encoding),
                            value.encode(self.encoding))
        else:
            # subgraph: set via the root with an empty fallback default
            gv.agsafeset_label(ghandle, self.handle,
                               name.encode(self.encoding),
                               value.encode(self.encoding), b'')

    def __getitem__(self, name):
        item = gv.agget(self.handle, name.encode(self.encoding))
        if item is None:
            # not set locally; fall back to the declared default value
            ah = gv.agattr(self.handle, self.type,
                           name.encode(self.encoding),
                           None)
            item = gv.agattrdefval(ah)
        return item.decode(self.encoding)

    def __delitem__(self, name):
        # graphviz has no true delete; reset the default to empty
        gv.agattr(self.handle, self.type, name.encode(self.encoding), b'')

    def __contains__(self, name):
        try:
            self.__getitem__(name)
            return True
        except:
            # NOTE(review): bare except also hides unrelated errors; a
            # KeyError-only catch would likely suffice -- confirm.
            return False

    def __len__(self):
        return len(list(self.__iter__()))

    def has_key(self, name):
        # legacy dict-style membership test
        return self.__contains__(name)

    def keys(self):
        return list(self.__iter__())

    def __iter__(self):
        for (k, v) in self.iteritems():
            yield k

    def iteritems(self):
        # Walk graphviz's attribute list for this attribute type.
        # NOTE(review): loop termination relies on gv.agnxtattr signalling
        # exhaustion (the KeyError caught below is documented as coming
        # from gv.agattrdefval) -- confirm against the swig wrapper.
        ah = None
        while True:
            try:
                ah = gv.agnxtattr(self.handle, self.type, ah)
                yield (gv.agattrname(ah).decode(self.encoding),
                       gv.agattrdefval(ah).decode(self.encoding))
            except KeyError:  # gv.agattrdefval returned KeyError, skip
                continue
class ItemAttribute(Attribute):
    """Attributes for individual nodes and edges.

    Assigned on initialization of Node or Edge classes
    and manipulated through the class data.

    >>> G=AGraph()
    >>> G.add_edge('a','b')
    >>> n=Node(G,'a')
    >>> n.attr['shape']='circle'
    >>> e=Edge(G,'a','b')
    >>> e.attr['color']='red'

    See
    http://graphviz.org/doc/info/attrs.html
    for a list of all attributes.
    """
    # use for individual item attributes - either a node or an edge
    # graphs and default node and edge attributes use Attribute

    def __init__(self, handle, atype):
        self.handle = handle
        self.type = atype
        self.ghandle = gv.agraphof(handle)
        # get the encoding from the root graph's 'charset' attribute,
        # falling back to the module default when it is unset
        root_handle = gv.agroot(self.ghandle)  # get root graph
        try:
            ah = gv.agattr(root_handle, 0, b'charset', None)
            self.encoding = gv.agattrdefval(ah)
        except KeyError:
            self.encoding = _DEFAULT_ENCODING

    def __setitem__(self, name, value):
        if not is_string_like(value):
            value = str(value)
        if self.type == 1 and name == 'label':
            # graphviz's built-in default node label is the node name
            default = '\\N'
        else:
            default = ''
        gv.agsafeset_label(self.ghandle, self.handle,
                           name.encode(self.encoding),
                           value.encode(self.encoding),
                           default.encode(self.encoding))

    def __getitem__(self, name):
        val = gv.agget(self.handle, name.encode(self.encoding))
        if val is not None:
            val = val.decode(self.encoding)
        return val

    def __delitem__(self, name):
        # reset to the empty string; graphviz has no true delete
        gv.agset(self.handle, name.encode(self.encoding), b'')

    def iteritems(self):
        # Yield only attributes whose value differs from the declared
        # default for this item.
        # NOTE(review): loop termination relies on gv.agnxtattr signalling
        # exhaustion -- confirm against the swig wrapper.
        ah = None
        while 1:
            try:
                ah = gv.agnxtattr(self.ghandle, self.type, ah)
                value = gv.agxget(self.handle, ah)
                try:
                    defval = gv.agattrdefval(ah)  # default value
                    if defval == value:
                        continue  # don't report default
                except:  # no default, gv.getattrdefval raised error
                    pass
                # unique value for this edge
                yield (gv.agattrname(ah).decode(self.encoding),
                       value.decode(self.encoding))
            except KeyError:  # gv.agxget returned KeyError, skip
                continue
def _test_suite():
    """Build the doctest-based test suite for the package."""
    import doctest
    doc_files = ['tests/graph.txt',
                 'tests/attributes.txt',
                 'tests/layout_draw.txt',
                 'tests/subgraph.txt']
    suite = doctest.DocFileSuite(*doc_files, package='pygraphviz')
    doctest.testmod()  # test docstrings in module
    return suite
if __name__ == "__main__":
    import os
    import sys
    import unittest

    if sys.version_info[:2] < (2, 4):
        print("Python version 2.4 or later required for tests (%d.%d detected)." % sys.version_info[:2])
        sys.exit(-1)
    # prepend the package directory (relative to this file) to sys.path
    nxbase = os.path.join(sys.path[0], os.pardir)
    sys.path.insert(0, nxbase)
    unittest.TextTestRunner().run(_test_suite())
| mit |
strint/tensorflow | tensorflow/python/training/monitored_session_test.py | 11 | 53058 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
  """Scaffold tests."""

  def test_nothing_created_before_finalize(self):
    # Scaffold is lazy: no default ops or saver exist until finalize().
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)

  def test_defaults_empty_graph(self):
    # finalize() fills every unset field with a default op/saver.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.Variable(1, name='my_var')
      variables.Variable(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.test_session() as sess:
        # ready ops report the names of still-uninitialized variables.
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))

  def test_defaults_no_variables(self):
    # Defaults are still created even when the graph has no variables.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))

  def test_caches_values(self):
    # Two scaffolds finalized in the same graph share the cached defaults.
    with ops.Graph().as_default():
      variables.Variable([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)

  def test_raise_error_if_more_than_one_cached_item(self):
    # Ambiguity in the SAVERS collection must be reported, not guessed at.
    with ops.Graph().as_default():
      variables.Variable([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()

  def test_uses_passed_values(self):
    # Explicitly-passed fields survive finalize() untouched.
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(saver, scaffold.saver)

  def test_graph_is_finalized(self):
    # finalize() freezes the graph: further op creation must fail.
    with ops.Graph().as_default():
      variables.Variable([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        constant_op.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """Hook that records every callback invocation for later inspection."""

  def __init__(self):
    self.should_stop = False  # when True, after_run() requests a stop
    self.request = None  # value returned from before_run()
    self.call_counter = collections.Counter()
    self.last_run_context = None
    self.last_run_values = None

  def begin(self):
    self.call_counter.update(['begin'])

  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter.update(['after_create_session'])

  def before_run(self, run_context):
    self.call_counter.update(['before_run'])
    self.last_run_context = run_context
    return self.request

  def after_run(self, run_context, run_values):
    self.call_counter.update(['after_run'])
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()

  def end(self, session):
    self.call_counter.update(['end'])
class MonitoredTrainingSessionTest(test.TestCase):
  """Tests MonitoredTrainingSession."""

  def test_saving_restoring_checkpoint(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(gstep))

  def test_summaries_steps(self):
    # Step-triggered summary saving: 101 steps with save_summaries_steps=100
    # guarantees at least one save.
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100) as session:
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_summaries_secs(self):
    # Time-triggered summary saving: the sleep exceeds save_summaries_secs,
    # so the summary writer fires during the subsequent steps.
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1) as session:
        session.run(new_gstep)
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_custom_saving(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # save_checkpoint_secs=0 disables the built-in checkpoint saver; only
      # the chief-only custom hook runs.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    self._count = n

  def _check_stop(self):
    # Tick the countdown on each call; report "stop" once it reaches zero.
    if self._count != 0:
      self._count -= 1
      return False
    return True
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests."""

  def test_properties(self):
    # Graph and session string must be forwarded from the wrapped session.
    with self.test_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      # Use assertEqual: assertEquals is a deprecated alias (removed in
      # Python 3.12).
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_uses_check_stop(self):
    # should_stop() consults the subclass _check_stop() hook.
    with self.test_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_delegates_to_wrapped_session(self):
    with self.test_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  def test_close_twice(self):
    # Closing an already-closed wrapped session must be a harmless no-op.
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Poll `coord` until it requests a stop, sleeping briefly between polls."""
  while True:
    if coord.should_stop():
      return
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests."""

  def test_properties(self):
    with self.test_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      # Use assertEqual: assertEquals is a deprecated alias (removed in
      # Python 3.12).
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  def test_should_stop_on_coord_stop(self):
    # A stop requested via the coordinator is visible through the session.
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  def test_dont_request_stop_on_exception_in_main_thread(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      # An exception raised in the main thread must not stop the coordinator.
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  def test_stop_threads_on_close_after_exception(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      coord_sess.close()
      # close() must stop the coordinator and join every registered thread.
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    self._sess = sess
    self._count = n  # number of successful run() calls before aborting

  def close(self):
    """No-op: nothing to release in the mock."""
    pass

  def run(self, *args, **kwargs):
    """Delegate to the real session until the abort countdown expires."""
    if self._count == 0:
      raise errors_impl.AbortedError('Aborted at N', None, None)
    self._count -= 1
    return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(test.TestCase):
  """_RecoverableSession tests."""

  class _SessionReturner(object):
    """Trivial session creator that always hands back the same session."""

    def __init__(self, sess):
      self._sess = sess

    def create_session(self):
      return self._sess

  def test_properties(self):
    with self.test_session() as sess:
      constant_op.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      # Use assertEqual: assertEquals is a deprecated alias (removed in
      # Python 3.12).
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))

  def test_recovery(self):
    with self.test_session() as sess:

      class StackSessionCreator(object):
        """Session creator that pops pre-built aborting sessions in order."""

        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]

        def create_session(self):
          return self.sessions_to_use.pop(0)

      c = constant_op.constant(0)
      v = array_ops.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery. The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session uses these 3 sessions in sequence by
      # passing a factory that pops from the session_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that records the keyword arguments of each run call."""

  def __init__(self, sess):
    super(FakeSession, self).__init__(sess)
    self.args_called = {}

  def run(self, fetches, **kwargs):
    # Remember everything except the fetches, then delegate with fetches
    # only, since the test passes the other arguments directly.
    self.args_called = dict(kwargs)
    return super(FakeSession, self).run(fetches)
class HookedSessionTest(test.TestCase):
  """Tests of _HookedSession."""

  def testRunPassesAllArguments(self):
    # feed_dict/options/run_metadata must reach the underlying session
    # unchanged when no hooks interfere.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })

  def testCallsHooksBeginEnd(self):
    # _HookedSession drives only the per-run callbacks; begin/end and
    # after_create_session belong to the owning MonitoredSession.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      mon_sess.run(a_tensor)
      for hook in [mock_hook, mock_hook2]:
        self.assertEqual(
            hook.last_run_values,
            session_run_hook.SessionRunValues(
                results=None,
                options=config_pb2.RunOptions(),
                run_metadata=config_pb2.RunMetadata()))
        self.assertEqual(hook.last_run_context.original_args,
                         session_run_hook.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        self.assertEqual(hook.call_counter['begin'], 0)
        self.assertEqual(hook.call_counter['after_create_session'], 0)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)

  def testShouldStop(self):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())
      # One hook requesting a stop is enough to stop the session.
      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())

  def testFetchesHookRequests(self):
    # Extra fetches requested by hooks are run alongside the user fetches
    # and delivered back to each hook in its run values.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      another_tensor = constant_op.constant([5], name='another_tensor')
      third_tensor = constant_op.constant([10], name='third_tensor')
      mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
      mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
      sess.run(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor)
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])

  def testOnlyHooksHaveFeeds(self):
    # Feeds contributed by different hooks are merged into one feed_dict.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])

  def testBothHooksAndUserHaveFeeds(self):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      c_tensor = constant_op.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)

  def testHooksFeedConflicts(self):
    # Two hooks feeding the same tensor is an error, not a silent override.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)

  def testHooksAndUserFeedConflicts(self):
    # A hook feed colliding with a user feed is likewise an error.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Hook that raises an Exception at step N."""

  def __init__(self, n, ex):
    self.n = n  # countdown of before_run calls until the raise
    self.ex = ex  # exception instance (or class) to raise
    self.raised = False

  def before_run(self, run_context):
    # Count down; fire the exception exactly once, when the counter
    # first reaches zero.
    self.n -= 1
    if self.n != 0 or self.raised:
      return None
    self.raised = True
    raise self.ex
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch):
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch
    # Options/metadata observed on each run, in call order.
    self.run_options_list = []
    self.run_metadata_list = []

  def before_run(self, run_context):
    # Build custom RunOptions (including a debug tensor watch) and hand
    # them to the session via SessionRunArgs.
    options = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs)
    options.debug_options.debug_tensor_watch_opts.extend(
        [self._debug_tensor_watch])
    return session_run_hook.SessionRunArgs(None, None, options=options)

  def after_run(self, run_context, run_values):
    # Record what the run actually used/produced for later assertions.
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
# This set of tests, verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
# Use a hook to save the model every 100 steps. It also saves it at
# the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
# Tests that we silently retry on abort during initialization.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
self.init_raised_aborted_error = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
if not self.init_raised_aborted_error:
self.init_raised_aborted_error = True
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(
init_fn=_init_fn))) as session:
self.assertFalse(session.should_stop())
self.assertEqual(0, session.run(gstep))
self.assertTrue(self.init_raised_aborted_error)
def _retry_test(self, ex):
# Tests that we silently retry on error. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, ex)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically retries and restart from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised: the "with" block
    # exits without propagating the exception.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # Unreachable: run() above must raise; fail loudly if it did not.
        self.assertTrue(False)
      self.assertTrue(session.should_stop())
  def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised: the "with"
    # block exits without propagating the exception.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, StopIteration)
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises StopIteration. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # Unreachable: run() above must raise; fail loudly if it did not.
        self.assertTrue(False)
      self.assertTrue(session.should_stop())
  def test_regular_exception_pass_through_run(self):
    # Tests that regular exceptions just pass through a "with
    # MonitoredSession" block and set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(0, session.run(gstep))
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # This triggers the hook and raises the exception
          session.run(do_step)
          # We should not hit this: the exception must propagate.
          self.assertFalse(True)
      self.assertTrue(hook.raised)
      self.assertTrue(session.should_stop())
  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # pass through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode. The exception surfaces on exit from the
    # "with" block, not from run() itself.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)
  def test_regular_exception_reported_to_coord_pass_through_return(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
      self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.
  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that a session whose "with" body completes without raising is
    # closed and put into should_stop() mode on exit.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
  def test_raises_regular_exceptions_in_with_body(self):
    # Tests that regular exceptions raised in the "with" body are seen
    # outside the block, and that the session is closed afterwards.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
  def test_keep_finalized_graph_as_finalized(self):
    # If the graph was already finalized (here by Scaffold.finalize()) before
    # the session was created, it must stay finalized after the session exits.
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      self.assertTrue(g.finalized)
  def test_merge_run_options_from_hooks(self):
    """Test for rewriting RunOptions and observing RunMetadata with hooks.

    Two hooks each request their own RunOptions; the session must merge them
    (scalar fields: last hook wins / maximum semantics, repeated fields:
    concatenated) and deliver RunMetadata back to the hooks.
    """
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      watch_a = debug_pb2.DebugTensorWatch(
          node_name='my_const',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
      watch_b = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
      with monitored_session.MonitoredSession(
          hooks=[hook_a, hook_b]) as session:
        self.assertEqual(42, session.run(my_const))
        # trace_level=3 should have overridden trace_level=2;
        # timeout_in_ms=60000 should have overridden 30000;
        # output_partition_graphs=True should have overridden False.
        # The two debug tensor watches should have been merged.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[watch_a, watch_b]))
            ],
            hook_b.run_options_list)
        self.assertEqual(1, len(hook_b.run_metadata_list))
        self.assertTrue(
            isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
  def test_merge_caller_and_hook_run_options(self):
    """Test that RunOptions from caller and hooks can be merged properly."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      hook_watch = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        caller_watch = debug_pb2.DebugTensorWatch(
            node_name='my_const',
            output_slot=0,
            debug_ops=['DebugIdentity'],
            debug_urls=[])
        caller_options = config_pb2.RunOptions(
            trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
        caller_options.debug_options.debug_tensor_watch_opts.extend(
            [caller_watch])
        self.assertEqual(42, session.run(my_const, options=caller_options))
        # trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override from the caller.
        # output_partition_graph=True from the caller should override False
        # from the hook.
        # The two debug watches from the caller and the hook should be merged,
        # in that order.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[caller_watch, hook_watch]))
            ],
            hook.run_options_list)
        self.assertEqual(1, len(hook.run_metadata_list))
        self.assertTrue(
            isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
class SingularMonitoredSessionTest(test.TestCase):
  """Tests SingularMonitoredSession.

  SingularMonitoredSession differs from MonitoredSession in that it does NOT
  recover from AbortedError/UnavailableError, and it exposes the underlying
  raw session via raw_session().
  """
  def test_handles_initialization(self):
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, following statement raises an error.
        self.assertEqual(0, session.run(a_var))
  def test_do_not_handle_aborted_error(self):
    # Unlike MonitoredSession, AbortedError must propagate to the caller.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      class _RaiseAbortedHook(session_run_hook.SessionRunHook):
        def before_run(self, run_context):
          raise errors_impl.AbortedError(None, None, 'Abort')
      with monitored_session.SingularMonitoredSession(
          hooks=[_RaiseAbortedHook()]) as session:
        with self.assertRaises(errors_impl.AbortedError):
          self.assertEqual(0, session.run(gstep))
      with self.assertRaises(errors_impl.AbortedError):
        with monitored_session.SingularMonitoredSession(
            hooks=[_RaiseAbortedHook()]) as session:
          self.assertEqual(0, session.run(gstep))
  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.SingularMonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # Unreachable: run() above must raise.
        self.assertTrue(False)
      self.assertTrue(session.should_stop())
  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # pass through a "run()" call within a "with SingularMonitoredSession"
    # block and set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.SingularMonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)
  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that a session whose "with" body completes without raising is
    # closed on exit; raw_session() then returns None.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.SingularMonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertEqual(None, session.raw_session())
  def test_graph(self):
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)
  def test_raw_session(self):
    # raw_session() exposes the underlying tf.Session while open.
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
hcsturix74/django | django/db/models/sql/compiler.py | 48 | 50737 | import re
import warnings
from itertools import chain
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.six.moves import zip
class SQLCompiler(object):
    def __init__(self, query, connection, using):
        """
        Bind the compiler to `query`, a database `connection` wrapper, and
        the database alias `using`.
        """
        self.query = query
        self.connection = connection
        self.using = using
        # Cache of quoted names; '*' must never be quoted.
        self.quote_cache = {'*': '*'}
        # The select, klass_info, and annotations are needed by QuerySet.iterator()
        # these are set as a side-effect of executing the query. Note that we calculate
        # separately a list of extra select columns needed for grammatical correctness
        # of the query, but these columns are not included in self.select.
        self.select = None
        self.annotation_col_map = None
        self.klass_info = None
        # Matches "<expression> ASC|DESC<tail>" so that an ORDER BY item can
        # be stripped to its bare expression for duplicate detection.
        self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
        self.subquery = False
    def setup_query(self):
        """
        Compute self.select, self.klass_info, self.annotation_col_map and
        self.col_count as a side effect, ensuring the query has an initial
        table alias first.
        """
        if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
            self.query.get_initial_alias()
        self.select, self.klass_info, self.annotation_col_map = self.get_select()
        self.col_count = len(self.select)
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.

        Returns an (extra_select, order_by, group_by) triple; also sets
        self.where and self.having as a side effect.
        """
        self.setup_query()
        order_by = self.get_order_by()
        # Split the WHERE tree into filter conditions and aggregate (HAVING)
        # conditions.
        self.where, self.having = self.query.where.split_having()
        extra_select = self.get_extra_select(order_by, self.select)
        group_by = self.get_group_by(self.select + extra_select, order_by)
        return extra_select, order_by, group_by
    def get_group_by(self, select, order_by):
        """
        Returns a list of 2-tuples of form (sql, params) for the GROUP BY
        clause.

        The logic of what exactly the GROUP BY clause contains is hard
        to describe in other words than "if it passes the test suite,
        then it is correct".
        """
        # Some examples:
        #     SomeModel.objects.annotate(Count('somecol'))
        #     GROUP BY: all fields of the model
        #
        #     SomeModel.objects.values('name').annotate(Count('somecol'))
        #     GROUP BY: name
        #
        #     SomeModel.objects.annotate(Count('somecol')).values('name')
        #     GROUP BY: all cols of the model
        #
        #     SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
        #     GROUP BY: name, pk
        #
        #     SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
        #     GROUP BY: name, pk
        #
        # In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can't be ever restricted to a smaller set, but additional columns in
        # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
        # the end result is that it is impossible to force the query to have
        # a chosen GROUP BY clause - you can almost do this by using the form:
        #     .values(*wanted_cols).annotate(AnAggregate())
        # but any later annotations, extra selects, values calls that
        # refer some column outside of the wanted_cols, order_by, or even
        # filter calls can alter the GROUP BY clause.

        # The query.group_by is either None (no GROUP BY at all), True
        # (group by select fields), or a list of expressions to be added
        # to the group by.
        if self.query.group_by is None:
            return []
        expressions = []
        if self.query.group_by is not True:
            # If the group by is set to a list (by .values() call most likely),
            # then we need to add everything in it to the GROUP BY clause.
            # Backwards compatibility hack for setting query.group_by. Remove
            # when we have public API way of forcing the GROUP BY clause.
            # Converts string references to expressions.
            for expr in self.query.group_by:
                if not hasattr(expr, 'as_sql'):
                    expressions.append(self.query.resolve_ref(expr))
                else:
                    expressions.append(expr)
        # Note that even if the group_by is set, it is only the minimal
        # set to group by. So, we need to add cols in select, order_by, and
        # having into the select in any case.
        for expr, _, _ in select:
            cols = expr.get_group_by_cols()
            for col in cols:
                expressions.append(col)
        for expr, (sql, params, is_ref) in order_by:
            # Aggregates belong in HAVING, never in GROUP BY.
            if expr.contains_aggregate:
                continue
            # We can skip References to select clause, as all expressions in
            # the select clause are already part of the group by.
            if is_ref:
                continue
            expressions.extend(expr.get_source_expressions())
        having_group_by = self.having.get_group_by_cols() if self.having else ()
        for expr in having_group_by:
            expressions.append(expr)
        result = []
        seen = set()
        expressions = self.collapse_group_by(expressions, having_group_by)
        # Deduplicate on the compiled (sql, params) pair, preserving order.
        for expr in expressions:
            sql, params = self.compile(expr)
            if (sql, tuple(params)) not in seen:
                result.append((sql, params))
                seen.add((sql, tuple(params)))
        return result
    def collapse_group_by(self, expressions, having):
        """
        Shrink the GROUP BY expression list on backends where grouping by a
        primary key can stand in for the table's other columns. Returns the
        (possibly reduced) expression list.
        """
        # If the DB can group by primary key, then group by the primary key of
        # query's main model. Note that for PostgreSQL the GROUP BY clause must
        # include the primary key of every table, but for MySQL it is enough to
        # have the main table's primary key.
        if self.connection.features.allows_group_by_pk:
            # The logic here is: if the main model's primary key is in the
            # query, then set new_expressions to that field. If that happens,
            # then also add having expressions to group by.
            pk = None
            for expr in expressions:
                # Is this a reference to query's base table primary key? If the
                # expression isn't a Col-like, then skip the expression.
                if (getattr(expr, 'target', None) == self.query.model._meta.pk and
                        getattr(expr, 'alias', None) == self.query.tables[0]):
                    pk = expr
                    break
            if pk:
                # MySQLism: Columns in HAVING clause must be added to the GROUP BY.
                expressions = [pk] + [expr for expr in expressions if expr in having]
        elif self.connection.features.allows_group_by_selected_pks:
            # Filter out all expressions associated with a table's primary key
            # present in the grouped columns. This is done by identifying all
            # tables that have their primary key included in the grouped
            # columns and removing non-primary key columns referring to them.
            pks = {expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key}
            aliases = {expr.alias for expr in pks}
            expressions = [
                expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
            ]
        return expressions
    def get_select(self):
        """
        Returns three values:
        - a list of 3-tuples of (expression, (sql, params), alias)
        - a klass_info structure,
        - a dictionary of annotations

        The (sql, params) is what the expression will produce, and alias is the
        "AS alias" for the column (possibly None).

        The klass_info structure contains the following information:
        - Which model to instantiate
        - Which columns for that model are present in the query (by
          position of the select clause).
        - related_klass_infos: [f, klass_info] to descent into

        The annotations is a dictionary of {'attname': column position} values.
        """
        select = []
        klass_info = None
        annotations = {}
        # Running index of the next output column position.
        select_idx = 0
        for alias, (sql, params) in self.query.extra_select.items():
            annotations[alias] = select_idx
            select.append((RawSQL(sql, params), alias))
            select_idx += 1
        # query.select (explicit columns) and default_cols (all model fields)
        # are mutually exclusive.
        assert not (self.query.select and self.query.default_cols)
        if self.query.default_cols:
            select_list = []
            for c in self.get_default_columns():
                select_list.append(select_idx)
                select.append((c, None))
                select_idx += 1
            klass_info = {
                'model': self.query.model,
                'select_fields': select_list,
            }
        # self.query.select is a special case. These columns never go to
        # any model.
        for col in self.query.select:
            select.append((col, None))
            select_idx += 1
        for alias, annotation in self.query.annotation_select.items():
            annotations[alias] = select_idx
            select.append((annotation, alias))
            select_idx += 1
        if self.query.select_related:
            related_klass_infos = self.get_related_selections(select)
            klass_info['related_klass_infos'] = related_klass_infos
            # Propagate parent select_fields down to children that were
            # selected "from parent" so inherited columns aren't reloaded.
            def get_select_from_parent(klass_info):
                for ki in klass_info['related_klass_infos']:
                    if ki['from_parent']:
                        ki['select_fields'] = (klass_info['select_fields'] +
                                               ki['select_fields'])
                    get_select_from_parent(ki)
            get_select_from_parent(klass_info)
        ret = []
        for col, alias in select:
            ret.append((col, self.compile(col, select_format=True), alias))
        return ret, klass_info, annotations
    def get_order_by(self):
        """
        Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for
        the ORDER BY clause.

        The order_by clause can alter the select clause (for example it
        can add aliases to clauses that do not yet have one, or it can
        add totally new select clauses).
        """
        # Ordering source precedence: extra(order_by=...) > explicit
        # .order_by() > model Meta.ordering.
        if self.query.extra_order_by:
            ordering = self.query.extra_order_by
        elif not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            ordering = (self.query.order_by or self.query.get_meta().ordering or [])
        if self.query.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            # reverse() was applied: flip the meaning of ASC/DESC.
            asc, desc = ORDER_DIR['DESC']
        order_by = []
        for pos, field in enumerate(ordering):
            if hasattr(field, 'resolve_expression'):
                # An expression object (e.g. F() or OrderBy) was passed in.
                if not isinstance(field, OrderBy):
                    field = field.asc()
                if not self.query.standard_ordering:
                    field.reverse_ordering()
                order_by.append((field, False))
                continue
            if field == '?':  # random
                order_by.append((OrderBy(Random()), False))
                continue
            col, order = get_order_dir(field, asc)
            descending = True if order == 'DESC' else False
            if col in self.query.annotation_select:
                # Reference to expression in SELECT clause
                order_by.append((
                    OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
                    True))
                continue
            if col in self.query.annotations:
                # References to an expression which is masked out of the SELECT clause
                order_by.append((
                    OrderBy(self.query.annotations[col], descending=descending),
                    False))
                continue
            if '.' in field:
                # This came in through an extra(order_by=...) addition. Pass it
                # on verbatim.
                table, col = col.split('.', 1)
                order_by.append((
                    OrderBy(
                        RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
                        descending=descending
                    ), False))
                continue
            if not self.query._extra or col not in self.query._extra:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                order_by.extend(self.find_ordering_name(
                    field, self.query.get_meta(), default_order=asc))
            else:
                if col not in self.query.extra_select:
                    order_by.append((
                        OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
                        False))
                else:
                    order_by.append((
                        OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
                        True))
        result = []
        seen = set()
        for expr, is_ref in order_by:
            resolved = expr.resolve_expression(
                self.query, allow_joins=True, reuse=None)
            sql, params = self.compile(resolved)
            # Don't add the same column twice, but the order direction is
            # not taken into account so we strip it. When this entire method
            # is refactored into expressions, then we can check each part as we
            # generate it.
            without_ordering = self.ordering_parts.search(sql).group(1)
            if (without_ordering, tuple(params)) in seen:
                continue
            seen.add((without_ordering, tuple(params)))
            result.append((resolved, (sql, params, is_ref)))
        return result
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
    def __call__(self, name):
        """
        Backwards-compatibility shim so that calling a SQLCompiler is equivalent to
        calling its quote_name_unless_alias method.

        Deprecated; scheduled for removal in Django 1.10.
        """
        warnings.warn(
            "Calling a SQLCompiler directly is deprecated. "
            "Call compiler.quote_name_unless_alias instead.",
            RemovedInDjango110Warning, stacklevel=2)
        return self.quote_name_unless_alias(name)
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format and not self.subquery:
return node.output_field.select_format(self, sql, params)
return sql, params
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # A slice where low == high can match no rows; emit no SQL at all.
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self.subquery = subquery
        refcounts_before = self.query.alias_refcount.copy()
        try:
            extra_select, order_by, group_by = self.pre_sql_setup()
            if with_limits and self.query.low_mark == self.query.high_mark:
                return '', ()
            distinct_fields = self.get_distinct()
            # This must come after 'select', 'ordering', and 'distinct' -- see
            # docstring of get_from_clause() for details.
            from_, f_params = self.get_from_clause()
            where, w_params = self.compile(self.where) if self.where is not None else ("", [])
            having, h_params = self.compile(self.having) if self.having is not None else ("", [])
            params = []
            result = ['SELECT']
            if self.query.distinct:
                result.append(self.connection.ops.distinct_sql(distinct_fields))
            out_cols = []
            col_idx = 1
            for _, (s_sql, s_params), alias in self.select + extra_select:
                if alias:
                    s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
                elif with_col_aliases:
                    # Generate sequential Col1, Col2, ... aliases on demand.
                    s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
                    col_idx += 1
                params.extend(s_params)
                out_cols.append(s_sql)
            result.append(', '.join(out_cols))
            result.append('FROM')
            result.extend(from_)
            params.extend(f_params)
            if where:
                result.append('WHERE %s' % where)
                params.extend(w_params)
            grouping = []
            for g_sql, g_params in group_by:
                grouping.append(g_sql)
                params.extend(g_params)
            if grouping:
                if distinct_fields:
                    raise NotImplementedError(
                        "annotate() + distinct(fields) is not implemented.")
                if not order_by:
                    order_by = self.connection.ops.force_no_ordering()
                result.append('GROUP BY %s' % ', '.join(grouping))
            if having:
                result.append('HAVING %s' % having)
                params.extend(h_params)
            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                result.append('ORDER BY %s' % ', '.join(ordering))
            if with_limits:
                if self.query.high_mark is not None:
                    result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
                if self.query.low_mark:
                    if self.query.high_mark is None:
                        # OFFSET without LIMIT needs the backend's "no limit"
                        # sentinel value, if it has one.
                        val = self.connection.ops.no_limit_value()
                        if val:
                            result.append('LIMIT %d' % val)
                    result.append('OFFSET %d' % self.query.low_mark)
            if self.query.select_for_update and self.connection.features.has_select_for_update:
                if self.connection.get_autocommit():
                    raise TransactionManagementError(
                        "select_for_update cannot be used outside of a transaction."
                    )
                # If we've been asked for a NOWAIT query but the backend does
                # not support it, raise a DatabaseError otherwise we could get
                # an unexpected deadlock.
                nowait = self.query.select_for_update_nowait
                if nowait and not self.connection.features.has_select_for_update_nowait:
                    raise DatabaseError('NOWAIT is not supported on this database backend.')
                result.append(self.connection.ops.for_update_sql(nowait=nowait))
            return ' '.join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql(subquery=True)
    def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Returns a list of column expressions (one per concrete field),
        joined to the appropriate table aliases.
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        # Deferred-loading bookkeeping: {model: set of attnames to load}.
        only_load = self.deferred_to_columns()
        if not start_alias:
            start_alias = self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}
        for field in opts.concrete_fields:
            model = field.model._meta.concrete_model
            # A proxy model will have a different model and concrete_model. We
            # will assign None if the field belongs to this model.
            if model == opts.model:
                model = None
            if from_parent and model is not None and issubclass(
                    from_parent._meta.concrete_model, model._meta.concrete_model):
                # Avoid loading data for already loaded parents.
                # We end up here in the case select_related() resolution
                # proceeds from parent model to child model. In that case the
                # parent model data is already present in the SELECT clause,
                # and we want to avoid reloading the same data again.
                continue
            if field.model in only_load and field.attname not in only_load[field.model]:
                # Field was deferred; skip it.
                continue
            alias = self.query.join_parent_model(opts, model, start_alias,
                                                 seen_models)
            column = field.get_col(alias)
            result.append(column)
        return result
    def get_distinct(self):
        """
        Returns a quoted list of fields to use in DISTINCT ON part of the query.

        Note that this method can alter the tables in the query, and thus it
        must be called before get_from_clause().
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = []
        opts = self.query.get_meta()
        for name in self.query.distinct_fields:
            # Resolve 'field1__field2' paths into joined-table targets.
            parts = name.split(LOOKUP_SEP)
            _, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
            targets, alias, _ = self.query.trim_joins(targets, joins, path)
            for target in targets:
                result.append("%s.%s" % (qn(alias), qn2(target.column)))
        return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = True if order == 'DESC' else False
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by and get_distinct.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or (cur_depth == 1 and f.name in requested):
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (convs, expression) in converters.items():
value = row[pos]
for converter in convs:
value = converter(value, expression, self.connection, self.query.context)
row[pos] = value
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
converters = None
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
    def has_results(self):
        """
        Backends (e.g. NoSQL) can override this in order to use optimized
        versions of "query has any results."
        """
        # This is always executed on a query clone, so we can modify self.query
        # Replace the select list with the constant 1 (aliased 'a') so the
        # database does as little work as possible; we only care whether any
        # row exists at all.
        self.query.add_extra({'a': 1}, None, None, None, None, None)
        self.query.set_extra_mask(['a'])
        # execute_sql(SINGLE) yields one row or a falsy value if none matched.
        return bool(self.execute_sql(SINGLE))
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery). It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        if not result_type:
            result_type = NO_RESULTS
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            # The filters describe an empty set: skip the database round-trip
            # entirely and return an empty result of the appropriate shape.
            if result_type == MULTI:
                return iter([])
            else:
                return
        cursor = self.connection.cursor()
        try:
            cursor.execute(sql, params)
        except Exception:
            # Make sure the cursor is closed, then re-raise the original
            # exception unchanged.
            cursor.close()
            raise
        if result_type == CURSOR:
            # Caller didn't specify a result_type, so just give them back the
            # cursor to process (and close).
            return cursor
        if result_type == SINGLE:
            try:
                val = cursor.fetchone()
                if val:
                    # Restrict the row to this compiler's column count.
                    return val[0:self.col_count]
                return val
            finally:
                # done with the cursor
                cursor.close()
        if result_type == NO_RESULTS:
            cursor.close()
            return

        # MULTI: stream chunks of rows lazily; cursor_iter closes the cursor
        # when iteration finishes.
        result = cursor_iter(
            cursor, self.connection.features.empty_fetchmany_value,
            self.col_count
        )
        if not self.connection.features.can_use_chunked_reads:
            try:
                # If we are using non-chunked reads, we return the same data
                # structure as normally, but ensure it is all read into memory
                # before going any further.
                return list(result)
            finally:
                # done with the cursor
                cursor.close()
        return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
    """Compiler for INSERT statements."""

    def __init__(self, *args, **kwargs):
        # Set to True by execute_sql() when the caller wants the inserted
        # row's primary key back.
        self.return_id = False
        super(SQLInsertCompiler, self).__init__(*args, **kwargs)

    def placeholder(self, field, val):
        """Return the SQL placeholder text for one value of ``field``."""
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'

    def as_sql(self):
        """Build the INSERT statement(s) as a list of (sql, params) pairs."""
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.get_meta()
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join(qn(f.column) for f in fields))

        if has_fields:
            # One row of prepared values per object being inserted.
            params = values = [
                [
                    f.get_db_prep_save(
                        getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
                        connection=self.connection
                    ) for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            # No fields given: insert one database-default pk value per object.
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
                    not self.return_id and self.connection.features.has_bulk_insert)

        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
            # Oracle Spatial needs to remove some values due to #10888
            params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            # Skip empty r_fmt to allow subclasses to customize behavior for
            # 3rd party backends. Refs #19096.
            if r_fmt:
                result.append(r_fmt % col)
                params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple(v for val in values for v in val))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]

    def execute_sql(self, return_id=False):
        """Run the INSERT(s); optionally return the new row's primary key."""
        # Returning an id only makes sense when inserting a single object.
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        with self.connection.cursor() as cursor:
            for sql, params in self.as_sql():
                cursor.execute(sql, params)
            if not (return_id and cursor):
                return
            if self.connection.features.can_return_id_from_insert:
                return self.connection.ops.fetch_returned_insert_id(cursor)
            return self.connection.ops.last_insert_id(cursor,
                    self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
    """Compiler for DELETE statements."""

    def as_sql(self):
        """
        Create the SQL for this delete query. Return the SQL string and the
        tuple of parameters.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        quote = self.quote_name_unless_alias
        sql_parts = ['DELETE FROM %s' % quote(self.query.tables[0])]
        where_sql, where_params = self.compile(self.query.where)
        if where_sql:
            sql_parts.append('WHERE %s' % where_sql)
        return ' '.join(sql_parts), tuple(where_params)
class SQLUpdateCompiler(SQLCompiler):
    """Compiler for UPDATE statements."""

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.query.values:
            # Nothing to update.
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'resolve_expression'):
                # Expression values are resolved against this query; joins
                # and aggregates are not allowed in an UPDATE.
                val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
                if val.contains_aggregate:
                    raise FieldError("Aggregate functions are not allowed in this query")
            elif hasattr(val, 'prepare_database_save'):
                # Model instances are reduced to the related column value.
                if field.remote_field:
                    val = field.get_db_prep_save(
                        val.prepare_database_save(field),
                        connection=self.connection,
                    )
                else:
                    raise TypeError("Database is trying to update a relational field "
                                    "of type %s with a value of type %s. Make sure "
                                    "you are setting the correct relations" %
                                    (field.__class__.__name__, val.__class__.__name__))
            else:
                val = field.get_db_prep_save(val, connection=self.connection)

            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self, self.connection)
            else:
                placeholder = '%s'
            name = field.column
            if hasattr(val, 'as_sql'):
                # Compilable expression: inline its SQL and parameters.
                sql, params = self.compile(val)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                # NULL is written literally; no parameter needed.
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.compile(self.query.where)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)

    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query. The "primary update query" is the first
        non-empty query that is executed. Row counts for any subsequent,
        related queries are not available.
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        try:
            rows = cursor.rowcount if cursor else 0
            is_empty = cursor is None
        finally:
            if cursor:
                cursor.close()
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty and aux_rows:
                # The first non-empty query supplies the reported row count.
                rows = aux_rows
                is_empty = False
        return rows

    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.

        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        refcounts_before = self.query.alias_refcount.copy()
        # Ensure base table is in the query
        self.query.get_initial_alias()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            # Single-table update with no cascading updates: nothing to do.
            return
        # Build a pk-only SELECT clone used to identify the rows to update.
        query = self.query.clone(klass=Query)
        query.select_related = False
        query.clear_ordering(True)
        query._extra = {}
        query.select = []
        query.add_fields([query.get_meta().pk.name])
        super(SQLUpdateCompiler, self).pre_sql_setup()

        must_pre_select = count > 1 and not self.connection.features.update_can_self_select

        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend(r[0] for r in rows)
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.query.add_filter(('pk__in', query))
        self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
    """Compiler that selects annotations over a wrapped inner subquery."""

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        # Empty SQL for the inner query is a marker that the inner query
        # isn't going to produce any results. This can happen when doing
        # LIMIT 0 queries (generated by qs[:0]) for example.
        if not self.query.subquery:
            raise EmptyResultSet
        sql, params = [], []
        for annotation in self.query.annotation_select.values():
            ann_sql, ann_params = self.compile(annotation, select_format=True)
            sql.append(ann_sql)
            params.extend(ann_params)
        # One result column per annotation; callers slice rows by col_count.
        self.col_count = len(self.query.annotation_select)
        sql = ', '.join(sql)
        params = tuple(params)

        sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
        params = params + self.query.sub_params
        return sql, params
def cursor_iter(cursor, sentinel, col_count):
    """
    Yield blocks of rows from a cursor, each row trimmed to ``col_count``
    columns, and ensure the cursor is closed when done.
    """
    try:
        fetch_chunk = cursor.fetchmany
        chunk = fetch_chunk(GET_ITERATOR_CHUNK_SIZE)
        # Stop as soon as the backend returns its "no more rows" value.
        while chunk != sentinel:
            yield [row[0:col_count] for row in chunk]
            chunk = fetch_chunk(GET_ITERATOR_CHUNK_SIZE)
    finally:
        cursor.close()
| bsd-3-clause |
pp-mo/iris | lib/iris/tests/unit/experimental/representation/test_CubeListRepresentation.py | 5 | 2373 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.cube.CubeRepresentation` class."""
from html import escape
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.cube import CubeList
import iris.tests.stock as stock
from iris.experimental.representation import CubeListRepresentation
@tests.skip_data
class Test__instantiation(tests.IrisTest):
    def setUp(self):
        self.cubes = CubeList([stock.simple_3d()])
        self.representer = CubeListRepresentation(self.cubes)

    def test_ids(self):
        # The representer records the identity of the exact CubeList it wraps.
        expected_id = id(self.cubes)
        self.assertEqual(expected_id, self.representer.cubelist_id)
@tests.skip_data
class Test_make_content(tests.IrisTest):
    def setUp(self):
        self.cubes = CubeList([stock.simple_3d(), stock.lat_lon_cube()])
        # Include characters that must be HTML-escaped in the output.
        self.cubes[0].rename("name & <html>")
        self.representer = CubeListRepresentation(self.cubes)
        self.content = self.representer.make_content()

    def test_repr_len(self):
        # One content entry is produced per cube.
        self.assertEqual(len(self.cubes), len(self.content))

    def test_summary_lines(self):
        # Each cube's (escaped) name appears in its own content entry.
        for cube, content in zip(self.cubes, self.content):
            self.assertIn(escape(cube.name()), content)

    def test__cube_name_summary_consistency(self):
        # Just check the first cube in the CubeList.
        single_cube_html = self.content[0]
        # Get a "prettified" cube name, as it should be in the cubelist repr.
        raw_name = self.cubes[0].name()
        pretty_name = raw_name.strip().replace("_", " ").title()
        self.assertIn(escape(pretty_name), single_cube_html)
@tests.skip_data
class Test_repr_html(tests.IrisTest):
    def setUp(self):
        self.cubes = CubeList([stock.simple_3d(), stock.lat_lon_cube()])
        self.representer = CubeListRepresentation(self.cubes)

    def test_html_length(self):
        html = self.representer.repr_html()
        # One <button> tag is emitted per cube.
        button_count = html.count("<button")
        self.assertEqual(len(self.cubes), button_count)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
Iotic-Labs/py-application-examples | agent_tutorial_code/getting_started/6.3_advertise_feed_share.py | 1 | 6926 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 Iotic Labs Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-application-examples/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# INFO-----------------------------------------------------------------------------------------------------------------
# 6.3_advertise_feed_share.py 12/02/2016 Iotic Labs.
# Creates a new Thing (or connects an existing one).
# Prints information about that Thing (Thing object, lid, guid).
# Adds Thing metadata and tags.
# Advertises a feed.
# Adds feed metadata and tags
# Makes public
# Shares timestamp as test data
# PYTHON2 COMPATIBILITY -----------------------------------------------------------------------------------------------
from __future__ import unicode_literals, print_function # pylint: disable=unused-import
# LOGGING -------------------------------------------------------------------------------------------------------------
# Logging set to only CRITICAL messages by default. To see more, use logging.INFO, or to see loads, logging.DEBUG
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)03d %(levelname)s [%(name)s] {%(threadName)s} %(message)s',
level=logging.CRITICAL)
# IMPORTS -------------------------------------------------------------------------------------------------------------
import time
# IOTIC AGENT IMPORTS -------------------------------------------------------------------------------------------------
from IoticAgent import IOT
# ---------------------------------------------------------------------------------------------------------------------
def create_thing(client):
    """Create (or reconnect to) our Thing and print its details."""
    print("Creating my Thing")
    # Note:
    # Calling 'create_thing' will create a new Thing, unless the Thing local
    # ID (lid) is already in use. If it is, it will connect your script to
    # that Thing.
    my_thing = client.create_thing('My_Feed_Advertiser')  # GIVE IT A NAME

    # Print some information about your Thing
    print("About my Thing")
    print("My Thing object:", my_thing)
    print("My Thing local ID (lid):", my_thing.lid)
    print("My Thing globally unique ID (guid):", my_thing.guid)
    return my_thing
def set_public(my_thing):
    """Make the Thing visible to everyone."""
    print("Setting to public")
    my_thing.set_public(True)  # to make it private, change to False
def create_feed(my_thing):
    """Advertise a new Feed on the Thing and print its details."""
    print("Creating a Feed")
    my_feed = my_thing.create_feed('My_New_Feed')

    # Print some information about the Feed you created
    print("About my Feed")
    print("My Feed object:", my_feed)
    print("My Feed local point ID (pid):", my_feed.pid)
    print("My Feed global point ID (guid):", my_feed.guid)
    print("Check this is a feed or control:", my_feed.foc)
    return my_feed
def add_thing_metadata(my_thing):
    """Attach label, description and location metadata to the Thing."""
    print("Adding Thing metadata")
    with my_thing.get_meta() as my_metadata:
        # create a label
        print("Setting default language label")
        my_metadata.set_label('My Feed Advertiser')  # uses default language
        # create a description
        print("Setting default language description")
        my_metadata.set_description('This Thing advertises a Feed and shares a timestamp.')  # uses default language
        # create a location
        print("Setting location")
        latitude = 51.5136
        longitude = -0.0981
        my_metadata.set_location(latitude, longitude)

        # Read back and print what was just set.
        print("Returned labels:")
        print(my_metadata.get_labels())
        print("Returned descriptions:")
        print(my_metadata.get_descriptions())
        my_lat, my_lon = my_metadata.get_location()
        print("Returned location lat = %f & lon = %f" % (my_lat, my_lon))
def add_thing_tags(my_thing):
    """Tag the Thing so it can be found by search."""
    print("Adding Thing tags")
    # If language (lang) is not set, it will add the tags using the default.
    # Requires a list of strings without spaces.
    print("Creating default language Thing tags")
    my_thing.create_tag(['HelloWorld', 'Advertiser', 'Timestamp'])

    # List the tags just created (returns nothing on create).
    print("List of Tags for this Thing:")
    print(my_thing.list_tag())
def add_feed_metadata(my_feed):
    """Attach label and description metadata to the Feed."""
    print("Adding Feed metadata")
    with my_feed.get_meta() as my_metadata:
        # create a label
        print("Setting default language Feed label")
        my_metadata.set_label('My New Feed')  # uses default language
        # create a description
        # (message fixed: it previously repeated "label" by copy-paste)
        print("Setting default language Feed description")
        my_metadata.set_description('"Shares timestamp as test feed"')  # uses default language

        # Read back and print what was just set.
        print("Returned labels:")
        print(my_metadata.get_labels())
        print("Returned descriptions:")
        print(my_metadata.get_descriptions())
def share_data(my_feed):
    """Share the current local time as an ISO-8601 style timestamp."""
    print("Sharing Values")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
    my_feed.share({"timestamp": timestamp})
# MAIN -------------------------------------------------------------------------------------------------------------
def main():
    """Connect an agent, set up the Thing and Feed, then share data forever."""
    with IOT.Client('Feed_Advertise.ini') as client:
        my_thing = create_thing(client)
        my_feed = create_feed(my_thing)
        set_public(my_thing)
        add_thing_metadata(my_thing)
        add_thing_tags(my_thing)
        add_feed_metadata(my_feed)
        while True:
            try:
                print("Main running, press ctrl+c to quit.")
                share_data(my_feed)
                time.sleep(10)  # share a fresh timestamp every 10 seconds
            except KeyboardInterrupt:
                # Ctrl+C exits the loop; the 'with' block closes the client.
                break
# RUN --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
main()
# END --------------------------------------------------------------------------------------------------
| apache-2.0 |
ikalnytskyi/sphinxcontrib-openapi | sphinxcontrib/openapi/schema_utils.py | 1 | 4446 | """OpenAPI schema utility functions."""
from io import StringIO
# Fallback example values, keyed by OpenAPI/JSON Schema "type".
_DEFAULT_EXAMPLES = {
    "string": "string",
    "integer": 1,
    "number": 1.0,
    "boolean": True,
    "array": [],
}

# Fallback example strings, keyed by OpenAPI "format".
_DEFAULT_STRING_EXAMPLES = {
    "date": "2020-01-01",
    "date-time": "2020-01-01T01:01:01Z",
    "password": "********",
    "byte": "QG1pY2hhZWxncmFoYW1ldmFucw==",
    "ipv4": "127.0.0.1",
    "ipv6": "::1",
}


def example_from_schema(schema):
    """
    Generates an example request/response body from the provided schema.

    >>> schema = {
    ...     "type": "object",
    ...     "required": ["id", "name"],
    ...     "properties": {
    ...         "id": {
    ...             "type": "integer",
    ...             "format": "int64"
    ...         },
    ...         "name": {
    ...             "type": "string",
    ...             "example": "John Smith"
    ...         },
    ...         "tag": {
    ...             "type": "string"
    ...         }
    ...     }
    ... }
    >>> example = example_from_schema(schema)
    >>> assert example == {
    ...     "id": 1,
    ...     "name": "John Smith",
    ...     "tag": "string"
    ... }
    """
    # If an example was provided then we use that
    if "example" in schema:
        return schema["example"]
    elif "oneOf" in schema:
        return example_from_schema(schema["oneOf"][0])
    elif "anyOf" in schema:
        return example_from_schema(schema["anyOf"][0])
    elif "allOf" in schema:
        # Combine schema examples
        example = {}
        for sub_schema in schema["allOf"]:
            example.update(example_from_schema(sub_schema))
        return example
    elif "enum" in schema:
        return schema["enum"][0]
    elif schema.get("type") == "object" or "properties" in schema:
        # Checked before the "no type" fallback below so that schemas which
        # declare "properties" but omit "type" still yield an object example
        # (previously that fallback made this clause unreachable for them).
        return {
            prop: example_from_schema(prop_schema)
            for prop, prop_schema in schema.get("properties", {}).items()
        }
    elif "type" not in schema:
        # Any type
        return _DEFAULT_EXAMPLES["integer"]
    elif schema["type"] == "array":
        items = schema["items"]
        min_length = schema.get("minItems", 0)
        max_length = schema.get("maxItems", max(min_length, 2))
        assert min_length <= max_length
        # Try generate at least 2 example array items
        gen_length = min(2, max_length) if min_length <= 2 else min_length

        example_items = []
        if items == {}:
            # Any-type arrays
            example_items.extend(_DEFAULT_EXAMPLES.values())
        elif isinstance(items, dict) and "oneOf" in items:
            # Mixed-type arrays
            example_items.append(_DEFAULT_EXAMPLES[sorted(items["oneOf"])[0]])
        else:
            example_items.append(example_from_schema(items))

        # Generate array containing example_items and satisfying min_length
        # and max_length, cycling through the examples as needed.
        return [example_items[i % len(example_items)] for i in range(gen_length)]
    elif schema["type"] == "string":
        example_string = _DEFAULT_STRING_EXAMPLES.get(
            schema.get("format", None), _DEFAULT_EXAMPLES["string"]
        )
        min_length = schema.get("minLength", 0)
        max_length = schema.get("maxLength", max(min_length, len(example_string)))
        assert 0 <= min_length <= max_length
        if min_length <= len(example_string) <= max_length:
            return example_string
        # Repeat (or truncate) the example string to a length that satisfies
        # the constraints.
        gen_length = (
            min(len(example_string), max_length)
            if min_length <= len(example_string)
            else min_length
        )
        repeats = -(-gen_length // len(example_string))  # ceiling division
        return (example_string * repeats)[:gen_length]
    elif schema["type"] in ("integer", "number"):
        example = _DEFAULT_EXAMPLES[schema["type"]]
        if "minimum" in schema and "maximum" in schema:
            # Take average
            example = schema["minimum"] + (schema["maximum"] - schema["minimum"]) / 2
        elif "minimum" in schema and example <= schema["minimum"]:
            example = schema["minimum"] + 1
        elif "maximum" in schema and example >= schema["maximum"]:
            example = schema["maximum"] - 1
        return float(example) if schema["type"] == "number" else int(example)
    else:
        return _DEFAULT_EXAMPLES[schema["type"]]
| bsd-2-clause |
richardfergie/googleads-python-lib | googleads/errors.py | 4 | 3865 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors used by the Google Ads Client Library."""
class GoogleAdsError(Exception):
  """Base class for every exception this library raises.

  Catching this type is sufficient to handle any error originating from the
  Google Ads client library.
  """
class GoogleAdsValueError(GoogleAdsError):
  """Raised when a caller passes an invalid value to a library function."""
class AdWordsReportError(GoogleAdsError):
  """Signals that an AdWords report download request failed.

  Attributes:
    code: The HTTP status code with which the report failed.
    error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError
        (Python 3) describing the failure.
    content: The raw HTTP response body; for example a 404 page or an XML
        error message returned by the AdWords report service.
  """

  def __init__(self, code, error, content, message=None):
    """Initializes an AdWordsReportError.

    Args:
      code: The HTTP status code number that was returned.
      error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError
          (Python 3) describing the failure.
      content: The HTTP response body as a string.
      [optional]
      message: A user-friendly error message string. If one is not provided,
          a default message will be used.
    """
    # Fall back to a generic message mentioning the HTTP status code.
    if not message:
      message = 'AdWords report download failed with HTTP status code: %s' % code
    super(AdWordsReportError, self).__init__(message)
    self.code = code
    self.error = error
    self.content = content
class AdWordsReportBadRequestError(AdWordsReportError):
  """Signals a bad request was made to the AdWords report service.

  Attributes:
    type: A string identifying what type of error this is.
    trigger: A string containing the value from your request that caused the
        problem.
    field_path: A string showing where, in the report's fields, the trigger
        can be found.
  """

  def __init__(self, type_, trigger, field_path, code, error, content):
    """Initializes an AdWordsReportBadRequestError.

    Args:
      type_: A string identifying what type of error this is.
      trigger: A string containing the value from your request that caused
          the problem.
      field_path: A string showing where, in the report's fields, the
          trigger can be found.
      code: The HTTP status code number that was returned.
      error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError
          (Python 3) describing the failure.
      content: The HTTP response body as a string.
    """
    # Build the detailed message first, then delegate to the parent error.
    detail = 'Type: %s\nTrigger: %s\nField Path: %s' % (
        type_, trigger, field_path)
    super(AdWordsReportBadRequestError, self).__init__(
        code, error, content, detail)
    self.type = type_
    self.trigger = trigger
    self.field_path = field_path
class DfpReportError(GoogleAdsError):
  """Raised when a DFP report download request fails.

  Attributes:
    report_job_id: The ID of the report job which failed.
  """

  def __init__(self, report_job_id):
    """Initializes a DfpReportError.

    Args:
      report_job_id: The ID of the report job which failed.
    """
    failure_message = (
        'DFP report job failed. The ID of the failed report is: %s'
        % report_job_id)
    super(DfpReportError, self).__init__(failure_message)
    self.report_job_id = report_job_id
| apache-2.0 |
fldc/CouchPotatoServer | libs/rsa/parallel.py | 196 | 2212 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for parallel computation on multiple cores.
Introduced in Python-RSA 3.1.
.. note::
Requires Python 2.6 or newer.
'''
from __future__ import print_function
import multiprocessing as mp
import rsa.prime
import rsa.randnum
def _find_prime(nbits, pipe):
    '''Worker body: sends one prime of at most ``nbits`` bits down ``pipe``.

    Runs in a child process; loops drawing random odd candidates until one
    passes the primality test, writes it to the pipe and exits.
    '''

    while True:
        # Force the low bit so the candidate is always odd.
        candidate = rsa.randnum.read_random_int(nbits) | 1

        if rsa.prime.is_prime(candidate):
            pipe.send(candidate)
            return
def getprime(nbits, poolsize):
    '''Returns a prime number that can be stored in 'nbits' bits.

    Works in multiple threads at the same time.

    >>> p = getprime(128, 3)
    >>> rsa.prime.is_prime(p-1)
    False
    >>> rsa.prime.is_prime(p)
    True
    >>> rsa.prime.is_prime(p+1)
    False

    >>> from rsa import common
    >>> common.bit_size(p) == 128
    True

    '''

    (pipe_recv, pipe_send) = mp.Pipe(duplex=False)

    # Create processes; each worker races to find a prime of `nbits` bits.
    procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
             for _ in range(poolsize)]
    # Plain loops, not throwaway list comprehensions: these calls are made
    # purely for their side effects.
    for proc in procs:
        proc.start()

    # The first worker to find a prime wins.
    result = pipe_recv.recv()

    # The remaining workers are no longer needed.
    for proc in procs:
        proc.terminate()

    return result
__all__ = ['getprime']

if __name__ == '__main__':
    # Self-test: repeat the module doctests until a failure occurs.
    # The announcement now matches the actual loop bound below (the old
    # message claimed 1000 runs while the loop only performs 100).
    print('Running doctests 100x or until failure')
    import doctest

    for count in range(100):
        (failures, tests) = doctest.testmod()
        if failures:
            break

        # Progress indicator every ten successful rounds.
        if count and count % 10 == 0:
            print('%i times' % count)

    print('Doctests done')
| gpl-3.0 |
stefanv/aandete | app/lib/pygments/lexers/erlang.py | 25 | 18936 | # -*- coding: utf-8 -*-
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
'ElixirLexer']
line_re = re.compile('.*?\n')
class ErlangLexer(RegexLexer):
    """
    For the Erlang functional programming language.

    Blame Jeremy Thurgood (http://jerith.za.net/).

    .. versionadded:: 0.9
    """

    name = 'Erlang'
    aliases = ['erlang']
    filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
    mimetypes = ['text/x-erlang']

    keywords = (
        'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
        'let', 'of', 'query', 'receive', 'try', 'when',
    )

    builtins = (  # See erlang(3) man page
        'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
        'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
        'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
        'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
        'float', 'float_to_list', 'fun_info', 'fun_to_list',
        'function_exported', 'garbage_collect', 'get', 'get_keys',
        'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
        'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
        'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
        'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
        'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
        'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
        'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
        'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
        'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
        'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
        'pid_to_list', 'port_close', 'port_command', 'port_connect',
        'port_control', 'port_call', 'port_info', 'port_to_list',
        'process_display', 'process_flag', 'process_info', 'purge_module',
        'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
        'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
        'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
        'spawn_opt', 'split_binary', 'start_timer', 'statistics',
        'suspend_process', 'system_flag', 'system_info', 'system_monitor',
        'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
        'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
        'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
    )

    operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
    word_operators = (
        'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
        'div', 'not', 'or', 'orelse', 'rem', 'xor'
    )

    atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"

    variable_re = r'(?:[A-Z_]\w*)'

    esc_char_re = r'[bdefnrstv\'"\\]'
    esc_octal_re = r'[0-7][0-7]?[0-7]?'
    esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
    esc_ctrl_re = r'\^[a-zA-Z]'
    escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'

    macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'

    base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'%.*\n', Comment),
            (words(keywords, suffix=r'\b'), Keyword),
            (words(builtins, suffix=r'\b'), Name.Builtin),
            (words(word_operators, suffix=r'\b'), Operator.Word),
            (r'^-', Punctuation, 'directive'),
            (operators, Operator),
            (r'"', String, 'string'),
            (r'<<', Name.Label),
            (r'>>', Name.Label),
            ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
            ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
            # Floats must be tried before plain integers, and the decimal
            # point must be escaped: the previous rule order (integer first)
            # made the float rule unreachable, so '3.14' tokenized as
            # Integer, Punctuation, Integer, and the unescaped '.' matched
            # any character at all.
            (r'[+-]?\d+\.\d+', Number.Float),
            (r'[+-]?\d+', Number.Integer),
            (r'[]\[:_@\".{}()|;,]', Punctuation),
            (variable_re, Name.Variable),
            (atom_re, Name),
            (r'\?'+macro_re, Name.Constant),
            (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
            (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),

            # Erlang script shebang
            (r'\A#!.+\n', Comment.Hashbang),

            # EEP 43: Maps
            # http://www.erlang.org/eeps/eep-0043.html
            (r'#\{', Punctuation, 'map_key'),
        ],
        'string': [
            (escape_re, String.Escape),
            (r'"', String, '#pop'),
            (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
            (r'[^"\\~]+', String),
            (r'~', String),
        ],
        'directive': [
            (r'(define)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
            (r'(record)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
            (atom_re, Name.Entity, '#pop'),
        ],
        'map_key': [
            include('root'),
            (r'=>', Punctuation, 'map_val'),
            (r':=', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
    }
class ErlangShellLexer(Lexer):
    """
    Highlights interactive erl sessions (Erlang shell transcripts).

    .. versionadded:: 1.1
    """
    name = 'Erlang erl session'
    aliases = ['erl']
    filenames = ['*.erl-sh']
    mimetypes = ['text/x-erl-shellsession']

    _prompt_re = re.compile(r'\d+>(?=\s|\Z)')

    def get_tokens_unprocessed(self, text):
        code_lexer = ErlangLexer(**self.options)

        buffered = ''       # code accumulated after prompts, lexed lazily
        prompt_tokens = []  # insertions re-injecting the prompt text
        for match in line_re.finditer(text):
            line = match.group()
            prompt = self._prompt_re.match(line)
            if prompt is not None:
                cut = prompt.end()
                prompt_tokens.append((len(buffered),
                                      [(0, Generic.Prompt, line[:cut])]))
                buffered += line[cut:]
                continue
            # A non-prompt line ends the current code run: flush the
            # buffered code through the Erlang lexer first.
            if buffered:
                for tok in do_insertions(
                        prompt_tokens,
                        code_lexer.get_tokens_unprocessed(buffered)):
                    yield tok
                buffered = ''
                prompt_tokens = []
            if line.startswith('*'):
                yield match.start(), Generic.Traceback, line
            else:
                yield match.start(), Generic.Output, line
        if buffered:
            for tok in do_insertions(
                    prompt_tokens,
                    code_lexer.get_tokens_unprocessed(buffered)):
                yield tok
def gen_elixir_string_rules(name, symbol, token):
    """Build the lexer state for an interpolated Elixir string delimited by
    *symbol*, keyed as ``'string_<name>'``."""
    rules = [
        (r'[^#%s\\]+' % (symbol,), token),
        include('escapes'),
        (r'\\.', token),
        (r'(%s)' % (symbol,), bygroups(token), "#pop"),
        include('interpol')
    ]
    return {'string_' + name: rules}
def gen_elixir_sigstr_rules(term, token, interpol=True):
    """Build the rule list for a sigil string closed by *term*; when
    *interpol* is true the body also honors escapes and interpolation."""
    if interpol:
        rules = [
            (r'[^#%s\\]+' % (term,), token),
            include('escapes'),
            (r'\\.', token),
            (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
            include('interpol')
        ]
    else:
        rules = [
            (r'[^%s\\]+' % (term,), token),
            (r'\\.', token),
            (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
        ]
    return rules
class ElixirLexer(RegexLexer):
    """
    For the `Elixir language <http://elixir-lang.org>`_.

    .. versionadded:: 1.5
    """

    name = 'Elixir'
    aliases = ['elixir', 'ex', 'exs']
    filenames = ['*.ex', '*.exs']
    mimetypes = ['text/x-elixir']

    # Word lists used by get_tokens_unprocessed() below to reclassify
    # generic Name tokens after the regex pass.
    KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
    KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
    BUILTIN = (
        'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
        'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
    )
    BUILTIN_DECLARATION = (
        'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
        'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
    )

    BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
    CONSTANT = ('nil', 'true', 'false')

    PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')

    # Operators grouped by length; longest are matched first in 'root'.
    OPERATORS3 = (
        '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
        '~>>', '<~>', '|~>', '<|>',
    )
    OPERATORS2 = (
        '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
        '->', '<-', '|', '.', '=', '~>', '<~',
    )
    OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')

    PUNCTUATION = (
        '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
    )

    # Post-filter: the regex rules emit most identifiers as plain Name;
    # reclassify them here so the word lists above stay in one place.
    def get_tokens_unprocessed(self, text):
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.KEYWORD:
                    yield index, Keyword, value
                elif value in self.KEYWORD_OPERATOR:
                    yield index, Operator.Word, value
                elif value in self.BUILTIN:
                    yield index, Keyword, value
                elif value in self.BUILTIN_DECLARATION:
                    yield index, Keyword.Declaration, value
                elif value in self.BUILTIN_NAMESPACE:
                    yield index, Keyword.Namespace, value
                elif value in self.CONSTANT:
                    yield index, Name.Constant, value
                elif value in self.PSEUDO_VAR:
                    yield index, Name.Builtin.Pseudo, value
                else:
                    yield index, token, value
            else:
                yield index, token, value

    # Executed once at class-body time (via tokens.update() below) to build
    # the states for sigils such as ~r/.../ and the ~s""" heredoc forms.
    def gen_elixir_sigil_rules():
        # all valid sigil terminators (excluding heredocs)
        terminators = [
            (r'\{', r'\}', 'cb'),
            (r'\[', r'\]', 'sb'),
            (r'\(', r'\)', 'pa'),
            (r'<', r'>', 'ab'),
            (r'/', r'/', 'slas'),
            (r'\|', r'\|', 'pipe'),
            ('"', '"', 'quot'),
            ("'", "'", 'apos'),
        ]

        # heredocs have slightly different rules
        triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]

        token = String.Other
        states = {'sigils': []}

        for term, name in triquotes:
            # Lowercase sigil letters interpolate; uppercase ones do not.
            states['sigils'] += [
                (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
                    (name + '-end', name + '-intp')),
                (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
                    (name + '-end', name + '-no-intp')),
            ]

            states[name + '-end'] = [
                (r'[a-zA-Z]+', token, '#pop'),
                default('#pop'),
            ]
            states[name + '-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_interpol'),
            ]
            states[name + '-no-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_no_interpol'),
            ]

        for lterm, rterm, name in terminators:
            states['sigils'] += [
                (r'~[a-z]' + lterm, token, name + '-intp'),
                (r'~[A-Z]' + lterm, token, name + '-no-intp'),
            ]
            states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)
            states[name + '-no-intp'] = \
                gen_elixir_sigstr_rules(rterm, token, interpol=False)

        return states

    op3_re = "|".join(re.escape(s) for s in OPERATORS3)
    op2_re = "|".join(re.escape(s) for s in OPERATORS2)
    op1_re = "|".join(re.escape(s) for s in OPERATORS1)
    ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
    punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
    alnum = '\w'  # NOTE(review): non-raw '\w' works because \w is not a string escape; consider r'\w'
    name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
    modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
    complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
    special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'

    long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
    hex_char_re = r'(\\x[\da-fA-F]{1,2})'
    escape_char_re = r'(\\[abdefnrstv])'

    # Rule order inside each state matters: earlier rules win.
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*$', Comment.Single),

            # Various kinds of characters
            (r'(\?)' + long_hex_char_re,
                bygroups(String.Char,
                         String.Escape, Number.Hex, String.Escape)),
            (r'(\?)' + hex_char_re,
                bygroups(String.Char, String.Escape)),
            (r'(\?)' + escape_char_re,
                bygroups(String.Char, String.Escape)),
            (r'\?\\?.', String.Char),

            # '::' has to go before atoms
            (r':::', String.Symbol),
            (r'::', Operator),

            # atoms
            (r':' + special_atom_re, String.Symbol),
            (r':' + complex_name_re, String.Symbol),
            (r':"', String.Symbol, 'string_double_atom'),
            (r":'", String.Symbol, 'string_single_atom'),

            # [keywords: ...]
            (r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
                bygroups(String.Symbol, Punctuation)),

            # @attributes
            (r'@' + name_re, Name.Attribute),

            # identifiers
            (name_re, Name),
            (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),

            # operators and punctuation
            (op3_re, Operator),
            (op2_re, Operator),
            (punctuation_re, Punctuation),
            (r'&\d', Name.Entity),   # anon func arguments
            (op1_re, Operator),

            # numbers
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[\da-fA-F]+', Number.Hex),
            (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
            (r'\d(_?\d)*', Number.Integer),

            # strings and heredocs
            (r'"""\s*', String.Heredoc, 'heredoc_double'),
            (r"'''\s*$", String.Heredoc, 'heredoc_single'),
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),

            include('sigils'),

            (r'%\{', Punctuation, 'map_key'),
            (r'\{', Punctuation, 'tuple'),
        ],
        'heredoc_double': [
            (r'^\s*"""', String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_single': [
            (r"^\s*'''", String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_interpol': [
            (r'[^#\\\n]+', String.Heredoc),
            include('escapes'),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
            include('interpol'),
        ],
        'heredoc_no_interpol': [
            (r'[^\\\n]+', String.Heredoc),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
        ],
        'escapes': [
            (long_hex_char_re,
                bygroups(String.Escape, Number.Hex, String.Escape)),
            (hex_char_re, String.Escape),
            (escape_char_re, String.Escape),
        ],
        'interpol': [
            (r'#\{', String.Interpol, 'interpol_string'),
        ],
        'interpol_string': [
            (r'\}', String.Interpol, "#pop"),
            include('root')
        ],
        'map_key': [
            include('root'),
            (r':', Punctuation, 'map_val'),
            (r'=>', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
        'tuple': [
            include('root'),
            (r'\}', Punctuation, '#pop'),
        ],
    }
    # Splice the generated string/atom and sigil states into the table.
    tokens.update(gen_elixir_string_rules('double', '"', String.Double))
    tokens.update(gen_elixir_string_rules('single', "'", String.Single))
    tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
    tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
    tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
    """
    For Elixir interactive console (iex) output like:

    .. sourcecode:: iex

        iex> [head | tail] = [1,2,3]
        [1,2,3]
        iex> head
        1
        iex> tail
        [2,3]
        iex> [head | tail]
        [1,2,3]
        iex> length [head | tail]
        3

    .. versionadded:: 1.5
    """

    name = 'Elixir iex session'
    aliases = ['iex']
    mimetypes = ['text/x-elixir-shellsession']

    _prompt_re = re.compile('(iex|\.{3})(\(\d+\))?> ')

    def get_tokens_unprocessed(self, text):
        ex_lexer = ElixirLexer(**self.options)

        pending = ''       # buffered code following prompts / error headers
        saw_error = False  # inside a '** (...)' error report?
        prompts = []       # insertions re-injecting prompt/error text
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(u'** '):
                # Error header: insert it before the buffered code while the
                # trailing newline still flows through the code buffer.
                saw_error = True
                prompts.append((len(pending),
                                [(0, Generic.Error, line[:-1])]))
                pending += line[-1:]
                continue
            prompt = self._prompt_re.match(line)
            if prompt is not None:
                saw_error = False
                cut = prompt.end()
                prompts.append((len(pending),
                                [(0, Generic.Prompt, line[:cut])]))
                pending += line[cut:]
                continue
            # Output line ends the code run: flush buffered code first.
            if pending:
                for tok in do_insertions(
                        prompts, ex_lexer.get_tokens_unprocessed(pending)):
                    yield tok
                pending = ''
                prompts = []
            yield match.start(), (Generic.Error if saw_error
                                  else Generic.Output), line
        if pending:
            for tok in do_insertions(
                    prompts, ex_lexer.get_tokens_unprocessed(pending)):
                yield tok
| bsd-3-clause |
sonaht/ansible | lib/ansible/modules/network/interface/net_lldp_interface.py | 9 | 2296 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: net_lldp_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage LLDP interfaces configuration on network devices
description:
- This module provides declarative management of LLDP interfaces
configuration on network devices.
options:
name:
description:
- Name of the interface LLDP should be configured on.
aggregate:
description: List of interfaces LLDP should be configured on.
purge:
description:
- Purge interfaces not defined in the aggregate parameter.
default: no
state:
description:
- State of the LLDP configuration.
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
"""
EXAMPLES = """
- name: Configure LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: present
- name: Disable LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: disabled
- name: Enable LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: enabled
- name: Delete LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set service lldp eth1 disable
"""
| gpl-3.0 |
caosmo/pip | pip/_vendor/requests/packages/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Bundles the TIS-620 (Thai) language model in the structure chardet's
# single-byte charset prober expects.
TIS620ThaiModel = {
    'charToOrderMap': TIS620CharToOrderMap,    # byte value -> frequency-order rank
    'precedenceMatrix': ThaiLangModel,         # character-pair likelihood classes
    'mTypicalPositiveRatio': 0.926386,         # matches the ratio noted in the table header
    'keepEnglishLetter': False,
    'charsetName': "TIS-620"
}

# flake8: noqa
| mit |
SomethingExplosive/android_external_chromium_org | tools/android/findbugs_plugin/test/run_findbugs_plugin_tests.py | 109 | 1401 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is used to test the findbugs plugin, it calls
# build/android/pylib/utils/findbugs.py to analyze the classes in
# org.chromium.tools.findbugs.plugin package, and expects to get the same
# issue with those in expected_result.txt.
#
# Useful command line:
# --rebaseline to generate the expected_result.txt, please make sure don't
# remove the expected result of exsting tests.
import optparse
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..', '..',
'build', 'android')))
from pylib import constants
from pylib.utils import findbugs
def main(argv):
    """Run findbugs over org.chromium.tools.findbugs.plugin.* test classes.

    Compares the issues reported by the findbugs plugin against the
    checked-in expected_result.txt (or a caller-supplied known-bugs file).

    Args:
      argv: Full command-line argument vector; argv[0] is the program name.

    Returns:
      The exit status from findbugs.Run: 0 on a match with the expected
      results, non-zero otherwise.
    """
    parser = findbugs.GetCommonParser()
    # Bug fix: honor the argv parameter instead of letting optparse silently
    # re-read sys.argv. Behavior is unchanged for the existing
    # `main(sys.argv)` caller, but main() is now drivable programmatically.
    options, _ = parser.parse_args(argv[1:])

    if not options.known_bugs:
        # Default to the expected results checked in next to this test.
        options.known_bugs = os.path.join(constants.DIR_SOURCE_ROOT, 'tools',
                                          'android', 'findbugs_plugin', 'test',
                                          'expected_result.txt')

    if not options.only_analyze:
        # Restrict analysis to the plugin's own test package.
        options.only_analyze = 'org.chromium.tools.findbugs.plugin.*'

    return findbugs.Run(options)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| bsd-3-clause |
TalShafir/ansible | test/units/parsing/test_unquote.py | 298 | 1602 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.quoting import unquote
import pytest
# (input, expected) pairs driving test_unquote below. Each case exercises a
# distinct quoting style: bare values, fully single-/double-quoted values,
# embedded quotes of the other kind, escaped quotes, and strings that are
# only partially wrapped in quotes.
UNQUOTE_DATA = (
    (u'1', u'1'),
    (u'\'1\'', u'1'),
    (u'"1"', u'1'),
    (u'"1 \'2\'"', u'1 \'2\''),
    (u'\'1 "2"\'', u'1 "2"'),
    (u'\'1 \'2\'\'', u'1 \'2\''),
    # A trailing backslash before the closing quote leaves the string
    # unchanged -- the outer quotes are NOT stripped.
    (u'"1\\"', u'"1\\"'),
    (u'\'1\\\'', u'\'1\\\''),
    (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
    (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
    # A lone quote character comes back as-is.
    (u'"', u'"'),
    (u'\'', u'\''),
    # Not entirely sure these are good but they match the current
    # behaviour
    (u'"1""2"', u'1""2'),
    (u'\'1\'\'2\'', u'1\'\'2'),
    (u'"1" 2 "3"', u'1" 2 "3'),
    (u'"1"\'2\'"3"', u'1"\'2\'"3'),
)
@pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA)
def test_unquote(quoted, expected):
    """unquote() must strip matching outer quotes exactly as tabulated."""
    actual = unquote(quoted)
    assert actual == expected
| gpl-3.0 |
tangentlabs/django-fancypages | fancypages/utils/decorators.py | 2 | 2379 | import urlparse
from functools import wraps
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.views import redirect_to_login
def staff_member_required(view_func, login_url=None):
    """
    Ensure that the user is a logged-in staff member.

    * If not authenticated, redirect to a specified login URL.
    * If not staff, show a 403 page

    Based on the decorator of the same name in
    django.contrib.admin.views.decorators, but superior in that it allows
    a custom redirect URL to be specified.
    """
    if login_url is None:
        login_url = getattr(settings, 'LOGIN_URL')

    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        user = request.user
        # Active staff members go straight through to the wrapped view.
        if user.is_active and user.is_staff:
            return view_func(request, *args, **kwargs)
        # Authenticated but non-staff (or inactive) users get a 403.
        if user.is_authenticated():
            raise PermissionDenied
        # Anonymous users are sent to the login page. Use a relative "next"
        # path when the login URL shares this request's scheme and host.
        path = request.build_absolute_uri()
        login_scheme, login_netloc = urlparse.urlparse(login_url)[:2]
        current_scheme, current_netloc = urlparse.urlparse(path)[:2]
        scheme_ok = not login_scheme or login_scheme == current_scheme
        netloc_ok = not login_netloc or login_netloc == current_netloc
        if scheme_ok and netloc_ok:
            path = request.get_full_path()
        messages.warning(request, _("You must log in to access this page"))
        return redirect_to_login(path, login_url, REDIRECT_FIELD_NAME)
    return wrapper
def login_forbidden(view_func, template_name='login_forbidden.html',
                    status=403):
    """
    Only allow anonymous users to access this view.
    """
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        # Authenticated users are shown the "forbidden" template instead of
        # the wrapped view.
        if request.user.is_authenticated():
            return render(request, template_name, status=status)
        return view_func(request, *args, **kwargs)
    return wrapper
| bsd-3-clause |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/iprg_member_broker.py | 16 | 107641 | from ..broker import Broker
class IprgMemberBroker(Broker):
    """Broker for the ``iprg_members`` controller of the NetMRI API.

    Exposes query operations (show, index, search, find) over HSRP/VRRP
    group membership records.
    """

    # Controller name; presumably combined by Broker._get_method_fullname
    # into fully-qualified API method names such as "iprg_members/show".
    controller = "iprg_members"
def show(self, **kwargs):
"""Shows the details for the specified iprg member.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgMemberID: The internal NetMRI identifier for this HSRP/VRRP membership.
:type IprgMemberID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg member methods. The listed methods will be called on each iprg member returned and included in the output. Available methods are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprg_member: The iprg member identified by the specified IprgMemberID.
:rtype iprg_member: IprgMember
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
    def index(self, **kwargs):
        """Lists the available iprg members. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

        **Inputs**

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param DeviceID: The internal NetMRI identifier of the device configured with this HSRP/VRRP membership.
        :type DeviceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param DeviceID: The internal NetMRI identifier of the device configured with this HSRP/VRRP membership.
        :type DeviceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param InterfaceID: The internal NetMRI identifier of the interface configured with this HSRP/VRRP membership.
        :type InterfaceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param InterfaceID: The internal NetMRI identifier of the interface configured with this HSRP/VRRP membership.
        :type InterfaceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgID: The internal NetMRI identifier for the HSRP/VRRP group to which this membership pertains.
        :type IprgID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgID: The internal NetMRI identifier for the HSRP/VRRP group to which this membership pertains.
        :type IprgID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberID: The internal NetMRI identifier for this HSRP/VRRP membership.
        :type IprgMemberID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberID: The internal NetMRI identifier for this HSRP/VRRP membership.
        :type IprgMemberID: Array of Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
        :type DeviceGroupID: Array of Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param timestamp: The data returned will represent the iprg members as of this date and time. If omitted, the result will indicate the most recently collected data.
        :type timestamp: DateTime

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param methods: A list of iprg member methods. The listed methods will be called on each iprg member returned and included in the output. Available methods are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg, infradevice.
        :type methods: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg.
        :type include: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` 0

        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
        :type start: Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` 1000

        :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
        :type limit: Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` IprgMemberID

        :param sort: The data field(s) to use for sorting the output. Default is IprgMemberID. Valid values are IprgMemberID, IprgMemberStartTime, IprgMemberEndTime, IprgMemberTimestamp, IprgMemberChangedCols, IprgID, InterfaceID, DeviceID, DataSourceID, IprgMemberPriority, IprgMemberPreemptInd, IprgMemberPreemptDelay, IprgMemberUseConfiguredTimersInd, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric, IprgMemberUseConfigVirtualIPInd, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveDeviceID, IprgMemberActiveInterfaceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyDeviceID, IprgMemberStandbyInterfaceID, IprgMemberState.
        :type sort: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` asc

        :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
        :type dir: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param select: The list of attributes to return for each IprgMember. Valid values are IprgMemberID, IprgMemberStartTime, IprgMemberEndTime, IprgMemberTimestamp, IprgMemberChangedCols, IprgID, InterfaceID, DeviceID, DataSourceID, IprgMemberPriority, IprgMemberPreemptInd, IprgMemberPreemptDelay, IprgMemberUseConfiguredTimersInd, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric, IprgMemberUseConfigVirtualIPInd, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveDeviceID, IprgMemberActiveInterfaceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyDeviceID, IprgMemberStandbyInterfaceID, IprgMemberState. If empty or omitted, all attributes will be returned.
        :type select: Array

        | ``api version min:`` 2.8
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
        :type goto_field: String

        | ``api version min:`` 2.8
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
        :type goto_value: String

        **Outputs**

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :return iprg_members: An array of the IprgMember objects that match the specified input criteria.
        :rtype iprg_members: Array of IprgMember
        """
        # Resolve "iprg_members/index" and issue a paginated list request;
        # kwargs are passed through unmodified as API parameters.
        return self.api_list_request(self._get_method_fullname("index"), kwargs)
    def search(self, **kwargs):
        """Lists the available iprg members matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.

        **Inputs**

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
        :type DataSourceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
        :type DataSourceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param DeviceID: The internal NetMRI identifier of the device configured with this HSRP/VRRP membership.
        :type DeviceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param DeviceID: The internal NetMRI identifier of the device configured with this HSRP/VRRP membership.
        :type DeviceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param InterfaceID: The internal NetMRI identifier of the interface configured with this HSRP/VRRP membership.
        :type InterfaceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param InterfaceID: The internal NetMRI identifier of the interface configured with this HSRP/VRRP membership.
        :type InterfaceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgID: The internal NetMRI identifier for the HSRP/VRRP group to which this membership pertains.
        :type IprgID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgID: The internal NetMRI identifier for the HSRP/VRRP group to which this membership pertains.
        :type IprgID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveDeviceID: The internal NetMRI identifier of the device that this router believes is the current active/master router.
        :type IprgMemberActiveDeviceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveDeviceID: The internal NetMRI identifier of the device that this router believes is the current active/master router.
        :type IprgMemberActiveDeviceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveIPDotted: The IP address for the active or master router, according to this device, in dotted (or colon-delimited for IPv6) format.
        :type IprgMemberActiveIPDotted: String

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveIPDotted: The IP address for the active or master router, according to this device, in dotted (or colon-delimited for IPv6) format.
        :type IprgMemberActiveIPDotted: Array of String

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveIPNumeric: The numerical IP address for the active or master router, according to this device.
        :type IprgMemberActiveIPNumeric: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveIPNumeric: The numerical IP address for the active or master router, according to this device.
        :type IprgMemberActiveIPNumeric: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveInterfaceID: The internal NetMRI identifier of the interface corresponding to the active/master IP address, if available.
        :type IprgMemberActiveInterfaceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberActiveInterfaceID: The internal NetMRI identifier of the interface corresponding to the active/master IP address, if available.
        :type IprgMemberActiveInterfaceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
        :type IprgMemberChangedCols: String

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
        :type IprgMemberChangedCols: Array of String

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberConfiguredHelloTime: The configured hello or advertise interval.
        :type IprgMemberConfiguredHelloTime: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberConfiguredHelloTime: The configured hello or advertise interval.
        :type IprgMemberConfiguredHelloTime: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberConfiguredHoldTime: The configured hold time or master down interval.
        :type IprgMemberConfiguredHoldTime: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberConfiguredHoldTime: The configured hold time or master down interval.
        :type IprgMemberConfiguredHoldTime: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
        :type IprgMemberEndTime: DateTime

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
        :type IprgMemberEndTime: Array of DateTime

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberID: The internal NetMRI identifier for this HSRP/VRRP membership.
        :type IprgMemberID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberID: The internal NetMRI identifier for this HSRP/VRRP membership.
        :type IprgMemberID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberLearnedHelloTime: The learned hello or advertise interval.
        :type IprgMemberLearnedHelloTime: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberLearnedHelloTime: The learned hello or advertise interval.
        :type IprgMemberLearnedHelloTime: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberLearnedHoldTime: The learned hold time or master down interval.
        :type IprgMemberLearnedHoldTime: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberLearnedHoldTime: The learned hold time or master down interval.
        :type IprgMemberLearnedHoldTime: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberPreemptDelay: The number of seconds after booting that preemption is enabled. This prevents a router with an unpopulated routing table from preempting until the routing table is ready.
        :type IprgMemberPreemptDelay: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberPreemptDelay: The number of seconds after booting that preemption is enabled. This prevents a router with an unpopulated routing table from preempting until the routing table is ready.
        :type IprgMemberPreemptDelay: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberPreemptInd: A flag indicating whether this router is configured to preempt lower priority routers.
        :type IprgMemberPreemptInd: Boolean

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberPreemptInd: A flag indicating whether this router is configured to preempt lower priority routers.
        :type IprgMemberPreemptInd: Array of Boolean

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberPriority: The router's priority in this HSRP or VRRP.
        :type IprgMemberPriority: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberPriority: The router's priority in this HSRP or VRRP.
        :type IprgMemberPriority: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyDeviceID: The internal NetMRI identifier of the device that this router believes is the current standby/backup router.
        :type IprgMemberStandbyDeviceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyDeviceID: The internal NetMRI identifier of the device that this router believes is the current standby/backup router.
        :type IprgMemberStandbyDeviceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyIPDotted: The IP address for the standby or backup router, according to this device, in dotted (or colon-delimited for IPv6) format.
        :type IprgMemberStandbyIPDotted: String

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyIPDotted: The IP address for the standby or backup router, according to this device, in dotted (or colon-delimited for IPv6) format.
        :type IprgMemberStandbyIPDotted: Array of String

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyIPNumeric: The numerical IP address for the standby or backup router, according to this device.
        :type IprgMemberStandbyIPNumeric: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyIPNumeric: The numerical IP address for the standby or backup router, according to this device.
        :type IprgMemberStandbyIPNumeric: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyInterfaceID: The internal NetMRI identifier of the interface corresponding to the standby/backup IP address, if available.
        :type IprgMemberStandbyInterfaceID: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStandbyInterfaceID: The internal NetMRI identifier of the interface corresponding to the standby/backup IP address, if available.
        :type IprgMemberStandbyInterfaceID: Array of Integer

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStartTime: The starting effective time of this revision of the record.
        :type IprgMemberStartTime: DateTime

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberStartTime: The starting effective time of this revision of the record.
        :type IprgMemberStartTime: Array of DateTime

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberState: The current HSRP or VRRP state of this router for this HSRP/VRRP group.
        :type IprgMemberState: String

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberState: The current HSRP or VRRP state of this router for this HSRP/VRRP group.
        :type IprgMemberState: Array of String

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberTimestamp: The date and time this record was collected or calculated.
        :type IprgMemberTimestamp: DateTime

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberTimestamp: The date and time this record was collected or calculated.
        :type IprgMemberTimestamp: Array of DateTime

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberUseConfigVirtualIPInd: A flag indicating whether to use the configured or learned virtual IP value.
        :type IprgMemberUseConfigVirtualIPInd: Boolean

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberUseConfigVirtualIPInd: A flag indicating whether to use the configured or learned virtual IP value.
        :type IprgMemberUseConfigVirtualIPInd: Array of Boolean

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberUseConfiguredTimersInd: A flag indicating whether to use the configured or learned timer values.
        :type IprgMemberUseConfiguredTimersInd: Boolean

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberUseConfiguredTimersInd: A flag indicating whether to use the configured or learned timer values.
        :type IprgMemberUseConfiguredTimersInd: Array of Boolean

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberVirtualIPDotted: The virtual IP address for the associated HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format.
        :type IprgMemberVirtualIPDotted: String

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberVirtualIPDotted: The virtual IP address for the associated HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format.
        :type IprgMemberVirtualIPDotted: Array of String

        | ``api version min:`` 2.3
        | ``api version max:`` 2.4
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberVirtualIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
        :type IprgMemberVirtualIPNumeric: Integer

        | ``api version min:`` 2.5
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param IprgMemberVirtualIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
        :type IprgMemberVirtualIPNumeric: Array of Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
        :type DeviceGroupID: Array of Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param timestamp: The data returned will represent the iprg members as of this date and time. If omitted, the result will indicate the most recently collected data.
        :type timestamp: DateTime

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param methods: A list of iprg member methods. The listed methods will be called on each iprg member returned and included in the output. Available methods are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg, infradevice.
        :type methods: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg.
        :type include: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` 0

        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
        :type start: Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` 1000

        :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
        :type limit: Integer

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` IprgMemberID

        :param sort: The data field(s) to use for sorting the output. Default is IprgMemberID. Valid values are IprgMemberID, IprgMemberStartTime, IprgMemberEndTime, IprgMemberTimestamp, IprgMemberChangedCols, IprgID, InterfaceID, DeviceID, DataSourceID, IprgMemberPriority, IprgMemberPreemptInd, IprgMemberPreemptDelay, IprgMemberUseConfiguredTimersInd, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric, IprgMemberUseConfigVirtualIPInd, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveDeviceID, IprgMemberActiveInterfaceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyDeviceID, IprgMemberStandbyInterfaceID, IprgMemberState.
        :type sort: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` asc

        :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
        :type dir: Array of String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param select: The list of attributes to return for each IprgMember. Valid values are IprgMemberID, IprgMemberStartTime, IprgMemberEndTime, IprgMemberTimestamp, IprgMemberChangedCols, IprgID, InterfaceID, DeviceID, DataSourceID, IprgMemberPriority, IprgMemberPreemptInd, IprgMemberPreemptDelay, IprgMemberUseConfiguredTimersInd, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric, IprgMemberUseConfigVirtualIPInd, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveDeviceID, IprgMemberActiveInterfaceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyDeviceID, IprgMemberStandbyInterfaceID, IprgMemberState. If empty or omitted, all attributes will be returned.
        :type select: Array

        | ``api version min:`` 2.8
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
        :type goto_field: String

        | ``api version min:`` 2.8
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
        :type goto_value: String

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param query: This value will be matched against iprg members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, InterfaceID, IprgID, IprgMemberActiveDeviceID, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveInterfaceID, IprgMemberChangedCols, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberEndTime, IprgMemberID, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberPreemptDelay, IprgMemberPreemptInd, IprgMemberPriority, IprgMemberStandbyDeviceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyInterfaceID, IprgMemberStartTime, IprgMemberState, IprgMemberTimestamp, IprgMemberUseConfigVirtualIPInd, IprgMemberUseConfiguredTimersInd, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric.
        :type query: String

        | ``api version min:`` 2.3
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
        :type xml_filter: String

        **Outputs**

        | ``api version min:`` None
        | ``api version max:`` None
        | ``required:`` False
        | ``default:`` None

        :return iprg_members: An array of the IprgMember objects that match the specified input criteria.
        :rtype iprg_members: Array of IprgMember
        """
        # Resolve "iprg_members/search" and issue a paginated list request;
        # all filter criteria in kwargs are forwarded as API parameters.
        return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available iprg members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, InterfaceID, IprgID, IprgMemberActiveDeviceID, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveInterfaceID, IprgMemberChangedCols, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberEndTime, IprgMemberID, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberPreemptDelay, IprgMemberPreemptInd, IprgMemberPriority, IprgMemberStandbyDeviceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyInterfaceID, IprgMemberStartTime, IprgMemberState, IprgMemberTimestamp, IprgMemberUseConfigVirtualIPInd, IprgMemberUseConfiguredTimersInd, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier of the device configured with this HSRP/VRRP membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier of the interface configured with this HSRP/VRRP membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgID: The operator to apply to the field IprgID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgID: The internal NetMRI identifier for the HSRP/VRRP group to which this membership pertains. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgID: If op_IprgID is specified, the field named in this input will be compared to the value in IprgID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgID must be specified if op_IprgID is specified.
:type val_f_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgID: If op_IprgID is specified, this value will be compared to the value in IprgID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgID must be specified if op_IprgID is specified.
:type val_c_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberActiveDeviceID: The operator to apply to the field IprgMemberActiveDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberActiveDeviceID: The internal NetMRI identifier of the device that this router believes is the current active/master router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberActiveDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberActiveDeviceID: If op_IprgMemberActiveDeviceID is specified, the field named in this input will be compared to the value in IprgMemberActiveDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberActiveDeviceID must be specified if op_IprgMemberActiveDeviceID is specified.
:type val_f_IprgMemberActiveDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberActiveDeviceID: If op_IprgMemberActiveDeviceID is specified, this value will be compared to the value in IprgMemberActiveDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberActiveDeviceID must be specified if op_IprgMemberActiveDeviceID is specified.
:type val_c_IprgMemberActiveDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberActiveIPDotted: The operator to apply to the field IprgMemberActiveIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberActiveIPDotted: The IP address for the active or master router, according to this device, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberActiveIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberActiveIPDotted: If op_IprgMemberActiveIPDotted is specified, the field named in this input will be compared to the value in IprgMemberActiveIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberActiveIPDotted must be specified if op_IprgMemberActiveIPDotted is specified.
:type val_f_IprgMemberActiveIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberActiveIPDotted: If op_IprgMemberActiveIPDotted is specified, this value will be compared to the value in IprgMemberActiveIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberActiveIPDotted must be specified if op_IprgMemberActiveIPDotted is specified.
:type val_c_IprgMemberActiveIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberActiveIPNumeric: The operator to apply to the field IprgMemberActiveIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberActiveIPNumeric: The numerical IP address for the active or master router, according to this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberActiveIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberActiveIPNumeric: If op_IprgMemberActiveIPNumeric is specified, the field named in this input will be compared to the value in IprgMemberActiveIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberActiveIPNumeric must be specified if op_IprgMemberActiveIPNumeric is specified.
:type val_f_IprgMemberActiveIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberActiveIPNumeric: If op_IprgMemberActiveIPNumeric is specified, this value will be compared to the value in IprgMemberActiveIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberActiveIPNumeric must be specified if op_IprgMemberActiveIPNumeric is specified.
:type val_c_IprgMemberActiveIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberActiveInterfaceID: The operator to apply to the field IprgMemberActiveInterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberActiveInterfaceID: The internal NetMRI identifier of the interface corresponding to the active/master IP address, if available. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberActiveInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberActiveInterfaceID: If op_IprgMemberActiveInterfaceID is specified, the field named in this input will be compared to the value in IprgMemberActiveInterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberActiveInterfaceID must be specified if op_IprgMemberActiveInterfaceID is specified.
:type val_f_IprgMemberActiveInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberActiveInterfaceID: If op_IprgMemberActiveInterfaceID is specified, this value will be compared to the value in IprgMemberActiveInterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberActiveInterfaceID must be specified if op_IprgMemberActiveInterfaceID is specified.
:type val_c_IprgMemberActiveInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberChangedCols: The operator to apply to the field IprgMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberChangedCols: If op_IprgMemberChangedCols is specified, the field named in this input will be compared to the value in IprgMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberChangedCols must be specified if op_IprgMemberChangedCols is specified.
:type val_f_IprgMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberChangedCols: If op_IprgMemberChangedCols is specified, this value will be compared to the value in IprgMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberChangedCols must be specified if op_IprgMemberChangedCols is specified.
:type val_c_IprgMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberConfiguredHelloTime: The operator to apply to the field IprgMemberConfiguredHelloTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberConfiguredHelloTime: The configured hello or advertise interval. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberConfiguredHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberConfiguredHelloTime: If op_IprgMemberConfiguredHelloTime is specified, the field named in this input will be compared to the value in IprgMemberConfiguredHelloTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberConfiguredHelloTime must be specified if op_IprgMemberConfiguredHelloTime is specified.
:type val_f_IprgMemberConfiguredHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberConfiguredHelloTime: If op_IprgMemberConfiguredHelloTime is specified, this value will be compared to the value in IprgMemberConfiguredHelloTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberConfiguredHelloTime must be specified if op_IprgMemberConfiguredHelloTime is specified.
:type val_c_IprgMemberConfiguredHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberConfiguredHoldTime: The operator to apply to the field IprgMemberConfiguredHoldTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberConfiguredHoldTime: The configured hold time or master down interval. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberConfiguredHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberConfiguredHoldTime: If op_IprgMemberConfiguredHoldTime is specified, the field named in this input will be compared to the value in IprgMemberConfiguredHoldTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberConfiguredHoldTime must be specified if op_IprgMemberConfiguredHoldTime is specified.
:type val_f_IprgMemberConfiguredHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberConfiguredHoldTime: If op_IprgMemberConfiguredHoldTime is specified, this value will be compared to the value in IprgMemberConfiguredHoldTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberConfiguredHoldTime must be specified if op_IprgMemberConfiguredHoldTime is specified.
:type val_c_IprgMemberConfiguredHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberEndTime: The operator to apply to the field IprgMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberEndTime: If op_IprgMemberEndTime is specified, the field named in this input will be compared to the value in IprgMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberEndTime must be specified if op_IprgMemberEndTime is specified.
:type val_f_IprgMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberEndTime: If op_IprgMemberEndTime is specified, this value will be compared to the value in IprgMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberEndTime must be specified if op_IprgMemberEndTime is specified.
:type val_c_IprgMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberID: The operator to apply to the field IprgMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberID: The internal NetMRI identifier for this HSRP/VRRP membership. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberID: If op_IprgMemberID is specified, the field named in this input will be compared to the value in IprgMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberID must be specified if op_IprgMemberID is specified.
:type val_f_IprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberID: If op_IprgMemberID is specified, this value will be compared to the value in IprgMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberID must be specified if op_IprgMemberID is specified.
:type val_c_IprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberLearnedHelloTime: The operator to apply to the field IprgMemberLearnedHelloTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberLearnedHelloTime: The learned hello or advertise interval. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberLearnedHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberLearnedHelloTime: If op_IprgMemberLearnedHelloTime is specified, the field named in this input will be compared to the value in IprgMemberLearnedHelloTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberLearnedHelloTime must be specified if op_IprgMemberLearnedHelloTime is specified.
:type val_f_IprgMemberLearnedHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberLearnedHelloTime: If op_IprgMemberLearnedHelloTime is specified, this value will be compared to the value in IprgMemberLearnedHelloTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberLearnedHelloTime must be specified if op_IprgMemberLearnedHelloTime is specified.
:type val_c_IprgMemberLearnedHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberLearnedHoldTime: The operator to apply to the field IprgMemberLearnedHoldTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberLearnedHoldTime: The learned hold time or master down interval. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberLearnedHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberLearnedHoldTime: If op_IprgMemberLearnedHoldTime is specified, the field named in this input will be compared to the value in IprgMemberLearnedHoldTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberLearnedHoldTime must be specified if op_IprgMemberLearnedHoldTime is specified.
:type val_f_IprgMemberLearnedHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberLearnedHoldTime: If op_IprgMemberLearnedHoldTime is specified, this value will be compared to the value in IprgMemberLearnedHoldTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberLearnedHoldTime must be specified if op_IprgMemberLearnedHoldTime is specified.
:type val_c_IprgMemberLearnedHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberPreemptDelay: The operator to apply to the field IprgMemberPreemptDelay. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberPreemptDelay: The number of seconds after booting that preemption is enabled. This prevents a router with an unpopulated routing table from preempting until the routing table is ready. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberPreemptDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberPreemptDelay: If op_IprgMemberPreemptDelay is specified, the field named in this input will be compared to the value in IprgMemberPreemptDelay using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberPreemptDelay must be specified if op_IprgMemberPreemptDelay is specified.
:type val_f_IprgMemberPreemptDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberPreemptDelay: If op_IprgMemberPreemptDelay is specified, this value will be compared to the value in IprgMemberPreemptDelay using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberPreemptDelay must be specified if op_IprgMemberPreemptDelay is specified.
:type val_c_IprgMemberPreemptDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberPreemptInd: The operator to apply to the field IprgMemberPreemptInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberPreemptInd: A flag indicating whether this router is configured to preempt lower priority routers. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberPreemptInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberPreemptInd: If op_IprgMemberPreemptInd is specified, the field named in this input will be compared to the value in IprgMemberPreemptInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberPreemptInd must be specified if op_IprgMemberPreemptInd is specified.
:type val_f_IprgMemberPreemptInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberPreemptInd: If op_IprgMemberPreemptInd is specified, this value will be compared to the value in IprgMemberPreemptInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberPreemptInd must be specified if op_IprgMemberPreemptInd is specified.
:type val_c_IprgMemberPreemptInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberPriority: The operator to apply to the field IprgMemberPriority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberPriority: The router's priority in this HSRP or VRRP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberPriority: If op_IprgMemberPriority is specified, the field named in this input will be compared to the value in IprgMemberPriority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberPriority must be specified if op_IprgMemberPriority is specified.
:type val_f_IprgMemberPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberPriority: If op_IprgMemberPriority is specified, this value will be compared to the value in IprgMemberPriority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberPriority must be specified if op_IprgMemberPriority is specified.
:type val_c_IprgMemberPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberStandbyDeviceID: The operator to apply to the field IprgMemberStandbyDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberStandbyDeviceID: The internal NetMRI identifier of the device that this router believes is the current standby/backup router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberStandbyDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberStandbyDeviceID: If op_IprgMemberStandbyDeviceID is specified, the field named in this input will be compared to the value in IprgMemberStandbyDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberStandbyDeviceID must be specified if op_IprgMemberStandbyDeviceID is specified.
:type val_f_IprgMemberStandbyDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberStandbyDeviceID: If op_IprgMemberStandbyDeviceID is specified, this value will be compared to the value in IprgMemberStandbyDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberStandbyDeviceID must be specified if op_IprgMemberStandbyDeviceID is specified.
:type val_c_IprgMemberStandbyDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberStandbyIPDotted: The operator to apply to the field IprgMemberStandbyIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberStandbyIPDotted: The IP address for the standby or backup router, according to this device, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberStandbyIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberStandbyIPDotted: If op_IprgMemberStandbyIPDotted is specified, the field named in this input will be compared to the value in IprgMemberStandbyIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberStandbyIPDotted must be specified if op_IprgMemberStandbyIPDotted is specified.
:type val_f_IprgMemberStandbyIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberStandbyIPDotted: If op_IprgMemberStandbyIPDotted is specified, this value will be compared to the value in IprgMemberStandbyIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberStandbyIPDotted must be specified if op_IprgMemberStandbyIPDotted is specified.
:type val_c_IprgMemberStandbyIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberStandbyIPNumeric: The operator to apply to the field IprgMemberStandbyIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberStandbyIPNumeric: The numerical IP address for the standby or backup router, according to this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberStandbyIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberStandbyIPNumeric: If op_IprgMemberStandbyIPNumeric is specified, the field named in this input will be compared to the value in IprgMemberStandbyIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberStandbyIPNumeric must be specified if op_IprgMemberStandbyIPNumeric is specified.
:type val_f_IprgMemberStandbyIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberStandbyIPNumeric: If op_IprgMemberStandbyIPNumeric is specified, this value will be compared to the value in IprgMemberStandbyIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberStandbyIPNumeric must be specified if op_IprgMemberStandbyIPNumeric is specified.
:type val_c_IprgMemberStandbyIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberStandbyInterfaceID: The operator to apply to the field IprgMemberStandbyInterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberStandbyInterfaceID: The internal NetMRI identifier of the interface corresponding to the standby/backup IP address, if available. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberStandbyInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberStandbyInterfaceID: If op_IprgMemberStandbyInterfaceID is specified, the field named in this input will be compared to the value in IprgMemberStandbyInterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberStandbyInterfaceID must be specified if op_IprgMemberStandbyInterfaceID is specified.
:type val_f_IprgMemberStandbyInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberStandbyInterfaceID: If op_IprgMemberStandbyInterfaceID is specified, this value will be compared to the value in IprgMemberStandbyInterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberStandbyInterfaceID must be specified if op_IprgMemberStandbyInterfaceID is specified.
:type val_c_IprgMemberStandbyInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberStartTime: The operator to apply to the field IprgMemberStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberStartTime: If op_IprgMemberStartTime is specified, the field named in this input will be compared to the value in IprgMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberStartTime must be specified if op_IprgMemberStartTime is specified.
:type val_f_IprgMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberStartTime: If op_IprgMemberStartTime is specified, this value will be compared to the value in IprgMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberStartTime must be specified if op_IprgMemberStartTime is specified.
:type val_c_IprgMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberState: The operator to apply to the field IprgMemberState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberState: The current HSRP or VRRP state of this router for this HSRP/VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberState: If op_IprgMemberState is specified, the field named in this input will be compared to the value in IprgMemberState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberState must be specified if op_IprgMemberState is specified.
:type val_f_IprgMemberState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberState: If op_IprgMemberState is specified, this value will be compared to the value in IprgMemberState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberState must be specified if op_IprgMemberState is specified.
:type val_c_IprgMemberState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberTimestamp: The operator to apply to the field IprgMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberTimestamp: If op_IprgMemberTimestamp is specified, the field named in this input will be compared to the value in IprgMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberTimestamp must be specified if op_IprgMemberTimestamp is specified.
:type val_f_IprgMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberTimestamp: If op_IprgMemberTimestamp is specified, this value will be compared to the value in IprgMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberTimestamp must be specified if op_IprgMemberTimestamp is specified.
:type val_c_IprgMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberUseConfigVirtualIPInd: The operator to apply to the field IprgMemberUseConfigVirtualIPInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberUseConfigVirtualIPInd: A flag indicating whether to use the configured or learned virtual IP value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberUseConfigVirtualIPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberUseConfigVirtualIPInd: If op_IprgMemberUseConfigVirtualIPInd is specified, the field named in this input will be compared to the value in IprgMemberUseConfigVirtualIPInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberUseConfigVirtualIPInd must be specified if op_IprgMemberUseConfigVirtualIPInd is specified.
:type val_f_IprgMemberUseConfigVirtualIPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberUseConfigVirtualIPInd: If op_IprgMemberUseConfigVirtualIPInd is specified, this value will be compared to the value in IprgMemberUseConfigVirtualIPInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberUseConfigVirtualIPInd must be specified if op_IprgMemberUseConfigVirtualIPInd is specified.
:type val_c_IprgMemberUseConfigVirtualIPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberUseConfiguredTimersInd: The operator to apply to the field IprgMemberUseConfiguredTimersInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberUseConfiguredTimersInd: A flag indicating whether to use the configured or learned timer values. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberUseConfiguredTimersInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberUseConfiguredTimersInd: If op_IprgMemberUseConfiguredTimersInd is specified, the field named in this input will be compared to the value in IprgMemberUseConfiguredTimersInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberUseConfiguredTimersInd must be specified if op_IprgMemberUseConfiguredTimersInd is specified.
:type val_f_IprgMemberUseConfiguredTimersInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberUseConfiguredTimersInd: If op_IprgMemberUseConfiguredTimersInd is specified, this value will be compared to the value in IprgMemberUseConfiguredTimersInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberUseConfiguredTimersInd must be specified if op_IprgMemberUseConfiguredTimersInd is specified.
:type val_c_IprgMemberUseConfiguredTimersInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberVirtualIPDotted: The operator to apply to the field IprgMemberVirtualIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberVirtualIPDotted: The virtual IP address for the associated HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberVirtualIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberVirtualIPDotted: If op_IprgMemberVirtualIPDotted is specified, the field named in this input will be compared to the value in IprgMemberVirtualIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberVirtualIPDotted must be specified if op_IprgMemberVirtualIPDotted is specified.
:type val_f_IprgMemberVirtualIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberVirtualIPDotted: If op_IprgMemberVirtualIPDotted is specified, this value will be compared to the value in IprgMemberVirtualIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberVirtualIPDotted must be specified if op_IprgMemberVirtualIPDotted is specified.
:type val_c_IprgMemberVirtualIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberVirtualIPNumeric: The operator to apply to the field IprgMemberVirtualIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberVirtualIPNumeric: The numerical value of the HSRP/VRRP virtual IP address. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberVirtualIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberVirtualIPNumeric: If op_IprgMemberVirtualIPNumeric is specified, the field named in this input will be compared to the value in IprgMemberVirtualIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberVirtualIPNumeric must be specified if op_IprgMemberVirtualIPNumeric is specified.
:type val_f_IprgMemberVirtualIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberVirtualIPNumeric: If op_IprgMemberVirtualIPNumeric is specified, this value will be compared to the value in IprgMemberVirtualIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberVirtualIPNumeric must be specified if op_IprgMemberVirtualIPNumeric is specified.
:type val_c_IprgMemberVirtualIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprg members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg member methods. The listed methods will be called on each iprg member returned and included in the output. Available methods are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_interface, active_router, standby_router, standby_interface, data_source, device, interface, iprg.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgMemberID
:param sort: The data field(s) to use for sorting the output. Default is IprgMemberID. Valid values are IprgMemberID, IprgMemberStartTime, IprgMemberEndTime, IprgMemberTimestamp, IprgMemberChangedCols, IprgID, InterfaceID, DeviceID, DataSourceID, IprgMemberPriority, IprgMemberPreemptInd, IprgMemberPreemptDelay, IprgMemberUseConfiguredTimersInd, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric, IprgMemberUseConfigVirtualIPInd, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveDeviceID, IprgMemberActiveInterfaceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyDeviceID, IprgMemberStandbyInterfaceID, IprgMemberState.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IprgMember. Valid values are IprgMemberID, IprgMemberStartTime, IprgMemberEndTime, IprgMemberTimestamp, IprgMemberChangedCols, IprgID, InterfaceID, DeviceID, DataSourceID, IprgMemberPriority, IprgMemberPreemptInd, IprgMemberPreemptDelay, IprgMemberUseConfiguredTimersInd, IprgMemberConfiguredHelloTime, IprgMemberConfiguredHoldTime, IprgMemberLearnedHelloTime, IprgMemberLearnedHoldTime, IprgMemberVirtualIPDotted, IprgMemberVirtualIPNumeric, IprgMemberUseConfigVirtualIPInd, IprgMemberActiveIPDotted, IprgMemberActiveIPNumeric, IprgMemberActiveDeviceID, IprgMemberActiveInterfaceID, IprgMemberStandbyIPDotted, IprgMemberStandbyIPNumeric, IprgMemberStandbyDeviceID, IprgMemberStandbyInterfaceID, IprgMemberState. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprg_members: An array of the IprgMember objects that match the specified input criteria.
:rtype iprg_members: Array of IprgMember
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
    """Fetch the collector NetMRI (DataSource) that collected this record.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The collector NetMRI that collected this data record.
    :rtype: DataSource
    """
    # Resolve the fully-qualified broker method name, then issue the call.
    endpoint = self._get_method_fullname("data_source")
    return self.api_request(endpoint, kwargs)
def iprg(self, **kwargs):
    """Fetch the HSRP/VRRP group (Iprg) to which this membership pertains.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The HSRP/VRRP group to which this membership pertains.
    :rtype: Iprg
    """
    # Delegate to the generic request machinery with the resolved name.
    endpoint = self._get_method_fullname("iprg")
    return self.api_request(endpoint, kwargs)
def interface(self, **kwargs):
    """Fetch the Interface configured with this HSRP/VRRP membership.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The interface configured with this HSRP/VRRP membership.
    :rtype: Interface
    """
    endpoint = self._get_method_fullname("interface")
    return self.api_request(endpoint, kwargs)
def active_router(self, **kwargs):
    """Fetch the Device this router believes is the active/master device.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The device that this router believes is the current active
        or master device.
    :rtype: Device
    """
    endpoint = self._get_method_fullname("active_router")
    return self.api_request(endpoint, kwargs)
def standby_router(self, **kwargs):
    """Fetch the Device this router believes is the standby/backup device.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The device that this router believes is the current standby
        or backup device.
    :rtype: Device
    """
    endpoint = self._get_method_fullname("standby_router")
    return self.api_request(endpoint, kwargs)
def active_interface(self, **kwargs):
    """Fetch the Interface matching the active/master IP, if available.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The interface object corresponding to the active/master IP
        address, if available.
    :rtype: Interface
    """
    endpoint = self._get_method_fullname("active_interface")
    return self.api_request(endpoint, kwargs)
def standby_interface(self, **kwargs):
    """Fetch the Interface matching the standby/backup IP, if available.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The interface object corresponding to the standby/backup IP
        address, if available.
    :rtype: Interface
    """
    endpoint = self._get_method_fullname("standby_interface")
    return self.api_request(endpoint, kwargs)
def infradevice(self, **kwargs):
    """Fetch the InfraDevice configured with this HSRP/VRRP membership.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The device configured with this HSRP/VRRP membership.
    :rtype: InfraDevice
    """
    endpoint = self._get_method_fullname("infradevice")
    return self.api_request(endpoint, kwargs)
def device(self, **kwargs):
    """Fetch the Device configured with this HSRP/VRRP membership.

    :param IprgMemberID: The internal NetMRI identifier for this
        HSRP/VRRP membership (required).
    :type IprgMemberID: Integer
    :return: The device configured with this HSRP/VRRP membership.
    :rtype: Device
    """
    endpoint = self._get_method_fullname("device")
    return self.api_request(endpoint, kwargs)
| apache-2.0 |
ropez/pytest | testing/test_runner.py | 167 | 20633 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import _pytest._code
import os
import py
import pytest
import sys
from _pytest import runner, main
class TestSetupState:
    """Unit tests for the finalizer/teardown bookkeeping of runner.SetupState."""

    def test_setup(self, testdir):
        # A finalizer registered for a prepared item runs on teardown.
        state = runner.SetupState()
        item = testdir.getitem("def test_func(): pass")
        sentinel = [1]
        state.prepare(item)
        state.addfinalizer(sentinel.pop, colitem=item)
        assert sentinel
        state._pop_and_teardown()
        assert not sentinel

    def test_teardown_exact_stack_empty(self, testdir):
        # Tearing down with an empty stack must be a harmless no-op,
        # no matter how many times it is invoked.
        item = testdir.getitem("def test_func(): pass")
        state = runner.SetupState()
        for _ in range(3):
            state.teardown_exact(item, None)

    def test_setup_fails_and_failure_is_cached(self, testdir):
        item = testdir.getitem("""
            def setup_module(mod):
                raise ValueError(42)
            def test_func(): pass
        """)  # noqa
        state = runner.SetupState()
        # The first prepare() fails; the cached failure is re-raised on retry.
        with pytest.raises(ValueError):
            state.prepare(item)
        with pytest.raises(ValueError):
            state.prepare(item)

    def test_teardown_multiple_one_fails(self, testdir):
        calls = []

        def fin_first():
            calls.append('fin1')

        def fin_broken():
            raise Exception('oops')

        def fin_last():
            calls.append('fin3')

        item = testdir.getitem("def test_func(): pass")
        state = runner.SetupState()
        state.addfinalizer(fin_first, item)
        state.addfinalizer(fin_broken, item)
        state.addfinalizer(fin_last, item)
        # Finalizers run LIFO; a failure in the middle must not stop the rest.
        with pytest.raises(Exception) as err:
            state._callfinalizers(item)
        assert err.value.args == ('oops',)
        assert calls == ['fin3', 'fin1']

    def test_teardown_multiple_fail(self, testdir):
        # Ensure the first exception raised during teardown is the one which
        # is re-raised.  Ideally both would be reported, however.
        def fin_outer():
            raise Exception('oops1')

        def fin_inner():
            raise Exception('oops2')

        item = testdir.getitem("def test_func(): pass")
        state = runner.SetupState()
        state.addfinalizer(fin_outer, item)
        state.addfinalizer(fin_inner, item)
        with pytest.raises(Exception) as err:
            state._callfinalizers(item)
        assert err.value.args == ('oops2',)
class BaseFunctionalTests:
def test_passfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
pass
""")
rep = reports[1]
assert rep.passed
assert not rep.failed
assert rep.outcome == "passed"
assert not rep.longrepr
def test_failfunction(self, testdir):
reports = testdir.runitem("""
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.passed
assert not rep.skipped
assert rep.failed
assert rep.when == "call"
assert rep.outcome == "failed"
#assert isinstance(rep.longrepr, ReprExceptionInfo)
def test_skipfunction(self, testdir):
reports = testdir.runitem("""
import pytest
def test_func():
pytest.skip("hello")
""")
rep = reports[1]
assert not rep.failed
assert not rep.passed
assert rep.skipped
assert rep.outcome == "skipped"
#assert rep.skipped.when == "call"
#assert rep.skipped.when == "call"
#assert rep.skipped == "%sreason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.path
#assert not rep.skipped.failurerepr
def test_skip_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
pytest.skip("hello")
def test_func():
pass
""")
print(reports)
rep = reports[0]
assert not rep.failed
assert not rep.passed
assert rep.skipped
#assert rep.skipped.reason == "hello"
#assert rep.skipped.location.lineno == 3
#assert rep.skipped.location.lineno == 3
assert len(reports) == 2
assert reports[1].passed # teardown
def test_failure_in_setup_function(self, testdir):
reports = testdir.runitem("""
import pytest
def setup_function(func):
raise ValueError(42)
def test_func():
pass
""")
rep = reports[0]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "setup"
assert len(reports) == 2
def test_failure_in_teardown_function(self, testdir):
reports = testdir.runitem("""
import pytest
def teardown_function(func):
raise ValueError(42)
def test_func():
pass
""")
print(reports)
assert len(reports) == 3
rep = reports[2]
assert not rep.skipped
assert not rep.passed
assert rep.failed
assert rep.when == "teardown"
#assert rep.longrepr.reprcrash.lineno == 3
#assert rep.longrepr.reprtraceback.reprentries
def test_custom_failure_repr(self, testdir):
testdir.makepyfile(conftest="""
import pytest
class Function(pytest.Function):
def repr_failure(self, excinfo):
return "hello"
""")
reports = testdir.runitem("""
import pytest
def test_func():
assert 0
""")
rep = reports[1]
assert not rep.skipped
assert not rep.passed
assert rep.failed
#assert rep.outcome.when == "call"
#assert rep.failed.where.lineno == 3
#assert rep.failed.where.path.basename == "test_func.py"
#assert rep.failed.failurerepr == "hello"
def test_teardown_final_returncode(self, testdir):
rec = testdir.inline_runsource("""
def test_func():
pass
def teardown_function(func):
raise ValueError(42)
""")
assert rec.ret == 1
    def test_exact_teardown_issue90(self, testdir):
        """Teardown errors must not leak exception state into subsequent
        tests (issue #90): the second test verifies sys.exc_info() is clean
        even though the first test's class teardown raised."""
        rec = testdir.inline_runsource("""
            import pytest
            class TestClass:
                def test_method(self):
                    pass
                def teardown_class(cls):
                    raise Exception()
            def test_func():
                import sys
                # on python2 exc_info is keept till a function exits
                # so we would end up calling test functions while
                # sys.exc_info would return the indexerror
                # from guessing the lastitem
                excinfo = sys.exc_info()
                import traceback
                assert excinfo[0] is None, \
                    traceback.format_exception(*excinfo)
            def teardown_function(func):
                raise ValueError(42)
        """)
        reps = rec.getreports("pytest_runtest_logreport")
        print (reps)
        # reports 0-1: test_method setup/call pass; report 2: its teardown
        # (teardown_class) fails
        for i in range(2):
            assert reps[i].nodeid.endswith("test_method")
            assert reps[i].passed
        assert reps[2].when == "teardown"
        assert reps[2].failed
        assert len(reps) == 6
        # reports 3-4: test_func setup/call pass; report 5: its teardown
        # (teardown_function) fails
        for i in range(3,5):
            assert reps[i].nodeid.endswith("test_func")
            assert reps[i].passed
        assert reps[5].when == "teardown"
        assert reps[5].nodeid.endswith("test_func")
        assert reps[5].failed
    def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
        """A setup failure must not go through the item's custom
        repr_failure (the conftest one would assert if it were called)."""
        testdir.makepyfile(conftest="""
            import pytest
            class Function(pytest.Function):
                def repr_failure(self, excinfo):
                    assert 0
        """)
        reports = testdir.runitem("""
            def setup_function(func):
                raise ValueError(42)
            def test_func():
                pass
        """)
        assert len(reports) == 2
        rep = reports[0]
        print(rep)
        assert not rep.skipped
        assert not rep.passed
        assert rep.failed
        #assert rep.outcome.when == "setup"
        #assert rep.outcome.where.lineno == 3
        #assert rep.outcome.where.path.basename == "test_func.py"
        #assert instanace(rep.failed.failurerepr, PythonFailureRepr)
def test_systemexit_does_not_bail_out(self, testdir):
try:
reports = testdir.runitem("""
def test_func():
raise SystemExit(42)
""")
except SystemExit:
pytest.fail("runner did not catch SystemExit")
rep = reports[1]
assert rep.failed
assert rep.when == "call"
def test_exit_propagates(self, testdir):
try:
testdir.runitem("""
import pytest
def test_func():
raise pytest.exit.Exception()
""")
except pytest.exit.Exception:
pass
else:
pytest.fail("did not raise")
class TestExecutionNonForked(BaseFunctionalTests):
    """Run the shared BaseFunctionalTests suite with the in-process runner."""
    def getrunner(self):
        def f(item):
            # log=False: return the reports instead of emitting log hooks
            return runner.runtestprotocol(item, log=False)
        return f

    def test_keyboardinterrupt_propagates(self, testdir):
        # KeyboardInterrupt must escape the runner (unlike SystemExit)
        try:
            testdir.runitem("""
                def test_func():
                    raise KeyboardInterrupt("fake")
            """)
        except KeyboardInterrupt:
            pass
        else:
            pytest.fail("did not raise")
class TestExecutionForked(BaseFunctionalTests):
    """Run the shared suite with each test boxed into a forked subprocess
    (requires os.fork and the xdist 'boxed' plugin)."""
    pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")

    def getrunner(self):
        # XXX re-arrange this test to live in pytest-xdist
        boxed = pytest.importorskip("xdist.boxed")
        return boxed.forked_run_report

    def test_suicide(self, testdir):
        # a test killing its own process (SIGTERM) is reported as failed;
        # the boxed runner uses "???" as the when-value in that case
        reports = testdir.runitem("""
            def test_func():
                import os
                os.kill(os.getpid(), 15)
        """)
        rep = reports[0]
        assert rep.failed
        assert rep.when == "???"
class TestSessionReports:
    """Reports produced while collecting (not running) test items."""

    def test_collect_result(self, testdir):
        """Collecting a module yields a passed CollectReport whose result
        lists the top-level items in definition order."""
        col = testdir.getmodulecol("""
            def test_func1():
                pass
            class TestClass:
                pass
        """)
        rep = runner.collect_one_node(col)
        assert not rep.failed
        assert not rep.skipped
        assert rep.passed
        locinfo = rep.location
        assert locinfo[0] == col.fspath.basename
        assert not locinfo[1]
        assert locinfo[2] == col.fspath.basename
        res = rep.result
        assert len(res) == 2
        assert res[0].name == "test_func1"
        assert res[1].name == "TestClass"

    def test_skip_at_module_scope(self, testdir):
        """Module-level pytest.skip() results in a skipped collect report."""
        col = testdir.getmodulecol("""
            import pytest
            pytest.skip("hello")
            def test_func():
                pass
        """)
        # consistency fix: use runner.collect_one_node as in
        # test_collect_result above (was main.collect_one_node)
        rep = runner.collect_one_node(col)
        assert not rep.failed
        assert not rep.passed
        assert rep.skipped
# Report classes whose constructors must tolerate (and store) unknown
# keyword arguments.
reporttypes = [
    runner.BaseReport,
    runner.TestReport,
    runner.TeardownErrorReport,
    runner.CollectReport,
]

@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
    """Extra keyword arguments become plain attributes on the report."""
    # build a minimal kwargs dict from the constructor signature;
    # inspect.signature when available (py3), getargspec otherwise (py2)
    if hasattr(py.std.inspect, 'signature'):
        args = list(py.std.inspect.signature(reporttype.__init__).parameters.keys())[1:]
    else:
        args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
    basekw = dict.fromkeys(args, [])
    report = reporttype(newthing=1, **basekw)
    assert report.newthing == 1
def test_callinfo():
    """CallInfo captures either the call's result or its exception info."""
    ok_info = runner.CallInfo(lambda: 0, '123')
    assert ok_info.when == "123"
    assert ok_info.result == 0
    assert "result" in repr(ok_info)
    fail_info = runner.CallInfo(lambda: 0/0, '123')
    assert fail_info.when == "123"
    assert not hasattr(fail_info, 'result')
    assert fail_info.excinfo
    assert "exc" in repr(fail_info)
# design question: do we want general hooks in python files?
# then something like the following functional tests makes sense
@pytest.mark.xfail
def test_runtest_in_module_ordering(testdir):
    """Module-level and class-level pytest_runtest_* hooks should cooperate
    in the order asserted inside the generated test (marked xfail:
    in-module hooks are an open design question)."""
    p1 = testdir.makepyfile("""
        def pytest_runtest_setup(item): # runs after class-level!
            item.function.mylist.append("module")
        class TestClass:
            def pytest_runtest_setup(self, item):
                assert not hasattr(item.function, 'mylist')
                item.function.mylist = ['class']
            def pytest_funcarg__mylist(self, request):
                return request.function.mylist
            def pytest_runtest_call(self, item, __multicall__):
                try:
                    __multicall__.execute()
                except ValueError:
                    pass
            def test_hello1(self, mylist):
                assert mylist == ['class', 'module'], mylist
                raise ValueError()
            def test_hello2(self, mylist):
                assert mylist == ['class', 'module'], mylist
        def pytest_runtest_teardown(item):
            del item.function.mylist
    """)
    result = testdir.runpytest(p1)
    result.stdout.fnmatch_lines([
        "*2 passed*"
    ])
def test_outcomeexception_exceptionattributes():
    """The message given to OutcomeException is exposed both as .msg and
    via the standard exception .args tuple."""
    exc = runner.OutcomeException('test')
    assert exc.msg == exc.args[0]
def test_pytest_exit():
    """pytest.exit() raises its dedicated Exception, which derives from
    KeyboardInterrupt so that it aborts the whole run."""
    try:
        pytest.exit("hello")
    except pytest.exit.Exception:
        excinfo = _pytest._code.ExceptionInfo()
        assert excinfo.errisinstance(KeyboardInterrupt)
    else:
        # bugfix: previously the test passed vacuously if exit() did not raise
        pytest.fail("pytest.exit() did not raise")
def test_pytest_fail():
    """pytest.fail() raises Failed, whose short repr starts with 'Failed'."""
    try:
        pytest.fail("hello")
    except pytest.fail.Exception:
        excinfo = _pytest._code.ExceptionInfo()
        s = excinfo.exconly(tryshort=True)
        assert s.startswith("Failed")
    else:
        # bugfix: previously the test passed vacuously if fail() did not raise
        pytest.fail("pytest.fail() did not raise")
def test_pytest_fail_notrace(testdir):
    """pytrace=False suppresses the traceback output of pytest.fail()."""
    testdir.makepyfile("""
        import pytest
        def test_hello():
            pytest.fail("hello", pytrace=False)
        def teardown_function(function):
            pytest.fail("world", pytrace=False)
    """)
    result = testdir.runpytest()
    # both messages appear (teardown first), but no source/traceback lines
    result.stdout.fnmatch_lines(["world", "hello"])
    assert 'def teardown_function' not in result.stdout.str()
@pytest.mark.parametrize('str_prefix', ['u', ''])
def test_pytest_fail_notrace_non_ascii(testdir, str_prefix):
    """Fix pytest.fail with pytrace=False with non-ascii characters (#1178).

    This tests with native and unicode strings containing non-ascii chars.
    """
    testdir.makepyfile(u"""
        # coding: utf-8
        import pytest
        def test_hello():
            pytest.fail(%s'oh oh: ☺', pytrace=False)
    """ % str_prefix)
    result = testdir.runpytest()
    if sys.version_info[0] >= 3:
        result.stdout.fnmatch_lines(['*test_hello*', "oh oh: ☺"])
    else:
        # on py2 the terminal encoding may mangle the smiley; match loosely
        result.stdout.fnmatch_lines(['*test_hello*', "oh oh: *"])
    assert 'def test_hello' not in result.stdout.str()
def test_pytest_no_tests_collected_exit_status(testdir):
    """Exit status is EXIT_NOTESTSCOLLECTED when nothing is collected or
    everything is deselected, and EXIT_OK once a test actually ran."""
    # empty directory: nothing collected
    result = testdir.runpytest()
    result.stdout.fnmatch_lines('*collected 0 items*')
    assert result.ret == main.EXIT_NOTESTSCOLLECTED
    # one passing test: normal exit
    testdir.makepyfile(test_foo="""
        def test_foo():
            assert 1
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines('*collected 1 items*')
    result.stdout.fnmatch_lines('*1 passed*')
    assert result.ret == main.EXIT_OK
    # everything deselected via -k: treated like no tests collected
    result = testdir.runpytest('-k nonmatch')
    result.stdout.fnmatch_lines('*collected 1 items*')
    result.stdout.fnmatch_lines('*1 deselected*')
    assert result.ret == main.EXIT_NOTESTSCOLLECTED
def test_exception_printing_skip():
    """The short repr of a Skipped exception starts with 'Skipped'."""
    try:
        pytest.skip("hello")
    except pytest.skip.Exception:
        excinfo = _pytest._code.ExceptionInfo()
        s = excinfo.exconly(tryshort=True)
        assert s.startswith("Skipped")
    else:
        # bugfix: previously the test passed vacuously if skip() did not raise
        pytest.fail("pytest.skip() did not raise")
def test_importorskip(monkeypatch):
    """importorskip returns the module when importable, raises
    skip.Exception otherwise, and honors the minversion argument."""
    importorskip = pytest.importorskip
    def f():
        importorskip("asdlkj")
    try:
        sys = importorskip("sys")  # noqa
        assert sys == py.std.sys
        #path = pytest.importorskip("os.path")
        #assert path == py.std.os.path
        excinfo = pytest.raises(pytest.skip.Exception, f)
        path = py.path.local(excinfo.getrepr().reprcrash.path)
        # check that importorskip reports the actual call
        # in this test the test_runner.py file
        assert path.purebasename == "test_runner"
        # invalid module names raise SyntaxError instead of skipping
        pytest.raises(SyntaxError, "pytest.importorskip('x y z')")
        pytest.raises(SyntaxError, "pytest.importorskip('x=y')")
        mod = py.std.types.ModuleType("hello123")
        mod.__version__ = "1.3"
        monkeypatch.setitem(sys.modules, "hello123", mod)
        # installed version older than minversion -> skip
        pytest.raises(pytest.skip.Exception, """
            pytest.importorskip("hello123", minversion="1.3.1")
        """)
        mod2 = pytest.importorskip("hello123", minversion="1.3")
        assert mod2 == mod
    except pytest.skip.Exception:
        # none of the calls above is expected to skip this test itself
        print(_pytest._code.ExceptionInfo())
        pytest.fail("spurious skip")
def test_importorskip_imports_last_module_part():
    """importorskip("os.path") must return the submodule, not the package."""
    imported = pytest.importorskip("os.path")
    assert imported is os.path
def test_importorskip_dev_module(monkeypatch):
    """The minversion comparison must cope with dev version strings."""
    try:
        mod = py.std.types.ModuleType("mockmodule")
        mod.__version__ = '0.13.0.dev-43290'
        monkeypatch.setitem(sys.modules, 'mockmodule', mod)
        mod2 = pytest.importorskip('mockmodule', minversion='0.12.0')
        assert mod2 == mod
        # requiring a newer version than the dev release -> skip
        pytest.raises(pytest.skip.Exception, """
            pytest.importorskip('mockmodule1', minversion='0.14.0')""")
    except pytest.skip.Exception:
        print(_pytest._code.ExceptionInfo())
        pytest.fail("spurious skip")
def test_pytest_cmdline_main(testdir):
    """A module invoking pytest.cmdline.main on itself exits with 0."""
    p = testdir.makepyfile("""
        import pytest
        def test_hello():
            assert 1
        if __name__ == '__main__':
           pytest.cmdline.main([__file__])
    """)
    import subprocess
    proc = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
    proc.communicate()
    assert proc.wait() == 0
def test_unicode_in_longrepr(testdir):
    """A non-ascii unicode longrepr must not break terminal reporting."""
    testdir.makeconftest("""
        import py
        def pytest_runtest_makereport(__multicall__):
            rep = __multicall__.execute()
            if rep.when == "call":
                rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8")
            return rep
    """)
    testdir.makepyfile("""
        def test_out():
            assert 0
    """)
    result = testdir.runpytest()
    assert result.ret == 1
    # the failure must be reported without an encoding crash
    assert "UnicodeEncodeError" not in result.stderr.str()
def test_failure_in_setup(testdir):
    """--tb=line must not print source lines for setup failures."""
    testdir.makepyfile("""
        def setup_module():
            0/0
        def test_func():
            pass
    """)
    output = testdir.runpytest("--tb=line").stdout.str()
    assert "def setup_module" not in output
def test_makereport_getsource(testdir):
    """The failure report must show the failing source line even when the
    assertion sits in an `else:` branch."""
    testdir.makepyfile("""
        def test_foo():
            if False: pass
            else: assert False
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*else: assert False*'])
    assert 'INTERNALERROR' not in result.stdout.str()
def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
    """Test that exception in dynamically generated code doesn't break getting the source line."""
    import inspect
    original_findsource = inspect.findsource
    def findsource(obj, *args, **kwargs):
        # Can be triggered by dynamically created functions
        if obj.__name__ == 'foo':
            raise IndexError()
        return original_findsource(obj, *args, **kwargs)
    monkeypatch.setattr(inspect, 'findsource', findsource)
    testdir.makepyfile("""
        import pytest
        @pytest.fixture
        def foo(missing):
            pass
        def test_fix(foo):
            assert False
    """)
    result = testdir.runpytest('-vv')
    # the fixture error must be reported cleanly despite findsource failing
    assert 'INTERNALERROR' not in result.stdout.str()
    result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
# NOTE: "eror" typo is kept in the name: renaming would change the test id.
def test_store_except_info_on_eror():
    """ Test that upon test failure, the exception info is stored on
    sys.last_traceback and friends.
    """
    # Simulate item that raises a specific exception
    class ItemThatRaises:
        def runtest(self):
            raise IndexError('TEST')
    try:
        runner.pytest_runtest_call(ItemThatRaises())
    except IndexError:
        pass
    # Check that exception info is stored on sys
    assert sys.last_type is IndexError
    assert sys.last_value.args[0] == 'TEST'
    assert sys.last_traceback
| mit |
tirkarthi/guake | fiximports.py | 4 | 6458 | #!/usr/bin/env python
'''Check and sort import statement from a python file '''
import re
import sys
class FixImports(object):

    '''
    I can be used to check and sort import statement of a python file

    Please use sortImportGroups() method
    '''

    _regexImport = re.compile(r"^import\s+(.*)")
    _regexFromImport = re.compile(r"^from\s+([a-zA-Z0-9\._]+)\s+import\s+(.*)$")
    _regexFromFutureImport = re.compile(r"^from\s+__future__\s+import\s+(.*)$")

    def printErrorMsg(self, filename, lineNb, errorMessage):
        ''' I print the error message following pylint convention'''
        print("%(filename)s:%(line_nb)s: %(error_msg)s" %
              dict(filename=filename,
                   line_nb=lineNb,
                   error_msg=errorMessage))

    def isImportLine(self, line):
        '''I return True is the given line is an import statement, False otherwize'''
        return self._regexImport.match(line) or self._regexFromImport.match(line)

    def isBadLineFixable(self, line):
        '''I return True is the given line is an import line than I know how to split'''
        if self.isImportLine(line) and '(' not in line:
            return True
        return False

    def analyzeLine(self, filename, line, lineNb):
        '''I look at the line and print all error I find'''
        res = True
        if self.isImportLine(line):
            if ',' in line:
                self.printErrorMsg(filename, lineNb,
                                   "multiple modules imported on one line - will fix")
                res = False
            if '\\' in line:
                self.printErrorMsg(filename, lineNb,
                                   "line-continuation character found - will fix.")
                res = False
            # these two don't occur in the Buildbot codebase, so we don't try to
            # fix them
            if ';' in line:
                self.printErrorMsg(filename, lineNb,
                                   "multiple import statement on one line. "
                                   "Put each import on its own line.")
                res = False
            if '(' in line:
                self.printErrorMsg(filename, lineNb,
                                   "parenthesis character found. "
                                   "Please import each module on a single line")
                res = False
        return res

    def importOrder(self, line):
        '''
        I define how import lines should be sorted
        return a tuple of order criterias sorted be importance
        '''
        ret = ("__future__" not in line,  # always put __future__ import first
               self._regexFromImport.match(line) is not None,  # import before from import
               line,  # then lexicographic order
               )
        return ret

    def sortImportGroups(self, filename, data=None):
        '''
        I perform the analysis of the given file, print the errors I find
        and try to split and sort the import statements.
        I return a tuple (success, new_content).
        '''
        lines = data.split("\n")
        res = True
        for cur_line_nb, line in enumerate(lines):
            if not self.analyzeLine(filename, line, cur_line_nb):
                if not self.isBadLineFixable(line):
                    res = False
        if not res:
            return False, data

        # First pass: split fixable import lines (one module per line) and
        # record [start, end) ranges of consecutive import lines ("groups").
        newlines = []
        # group state is kept on self because this code must stay Python 2
        # compatible (no 'nonlocal' for the nested closure below)
        self.groups = []
        self.group_start = None

        def maybeEndGroup():
            # close the currently open group of import lines, if any
            if self.group_start is not None:
                self.groups.append((self.group_start, len(newlines)))
                self.group_start = None

        # use the next() builtin (py2.6+/py3) rather than the py2-only
        # .next() method, and don't shadow the iter() builtin
        line_iter = iter(lines)
        while True:
            try:
                line = next(line_iter)
            except StopIteration:
                break
            if self.isImportLine(line):
                # join any continuation lines (\)
                while line.endswith('\\'):
                    line = line[:-1] + next(line_iter)
                if self.group_start is None:
                    self.group_start = len(newlines)
                if self.isBadLineFixable(line):
                    from_match = self._regexFromImport.match(line)
                    if from_match:
                        module = from_match.group(1)
                        for imp in from_match.group(2).split(","):
                            newlines.append("from %s import %s" % (module, imp.strip()))
                        continue
                    import_match = self._regexImport.match(line)
                    if import_match and ',' in line:
                        # bugfix: plain "import a, b" lines were reported as
                        # fixable but never actually split before
                        for imp in import_match.group(1).split(","):
                            newlines.append("import %s" % imp.strip())
                        continue
            else:
                maybeEndGroup()
            newlines.append(line)
        maybeEndGroup()
        lines = newlines

        # sort each group of import lines
        for start, end in self.groups:
            lines[start:end] = sorted(lines[start:end], key=self.importOrder)

        # Second pass: insert a blank line wherever a group switches between
        # plain "import" lines and "from ... import" lines.
        splitted_groups_lines = []
        prev_import_line_type = ""
        for line in lines:
            if not line.strip() or not self.isImportLine(line):
                splitted_groups_lines.append(line)
                prev_import_line_type = ""
            else:
                if self._regexImport.match(line) is not None:
                    current_line_type = "import"
                else:
                    current_line_type = "from"
                if prev_import_line_type and current_line_type != prev_import_line_type:
                    splitted_groups_lines.append("")
                prev_import_line_type = current_line_type
                splitted_groups_lines.append(line)

        return True, "\n".join(splitted_groups_lines)
def main():
    '''I am the main method'''
    # NOTE: this entry point is Python 2 only (print statements below);
    # the FixImports class itself is version-neutral
    if len(sys.argv) != 2:
        print "usage: %s <python file>" % (sys.argv[0],)
        sys.exit(1)
    filename = sys.argv[1]
    with open(filename, 'r') as filedesc:
        data = filedesc.read()
    res, content = FixImports().sortImportGroups(filename, data)
    if not res:
        # unfixable import problems were reported; leave the file untouched
        sys.exit(1)
    with open(filename, 'w') as filedesc:
        filedesc.write(content)
    if data != content:
        print "import successfully reordered for file: %s" % (filename)
    sys.exit(0)
# script entry point
if __name__ == "__main__":
    main()
| gpl-2.0 |
yasserglez/tagfs | packages/tagfs/contrib/django/contrib/databrowse/plugins/fieldchoices.py | 88 | 3876 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.text import capfirst
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
import urllib
class FieldChoicePlugin(DatabrowsePlugin):
    """Databrowse plugin that lets users browse model instances grouped by
    the distinct values of selected non-relational fields.

    NOTE(review): this module is Python 2 / legacy Django code
    (dict.has_key, cmp-style sort, urllib.quote).
    """
    def __init__(self, field_filter=None):
        # If field_filter is given, it should be a callable that takes a
        # Django database Field instance and returns True if that field should
        # be included. If field_filter is None, then all fields will be used.
        self.field_filter = field_filter

    def field_dict(self, model):
        """
        Helper function that returns a dictionary of all fields in the given
        model. If self.field_filter is set, it only includes the fields that
        match the filter.
        """
        if self.field_filter:
            return dict([(f.name, f) for f in model._meta.fields if self.field_filter(f)])
        else:
            # default filter: skip relations, primary keys, unique fields and
            # auto/text columns, which make poor "browse by" candidates
            return dict([(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, (models.AutoField, models.TextField))])

    def model_index_html(self, request, model, site):
        # renders the "View by: field1, field2, ..." links on the model page
        fields = self.field_dict(model)
        if not fields:
            return u''
        return mark_safe(u'<p class="filter"><strong>View by:</strong> %s</p>' % \
            u', '.join(['<a href="fields/%s/">%s</a>' % (f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values()]))

    def urls(self, plugin_name, easy_instance_field):
        # per-value URL for a field handled by this plugin; the raw value is
        # percent-encoded so it is safe inside a path segment
        if easy_instance_field.field in self.field_dict(easy_instance_field.model.model).values():
            field_value = smart_str(easy_instance_field.raw_value)
            return [mark_safe(u'%s%s/%s/%s/' % (
                easy_instance_field.model.url(),
                plugin_name, easy_instance_field.field.name,
                urllib.quote(field_value, safe='')))]

    def model_view(self, request, model_databrowse, url):
        # dispatch: no url -> homepage; "<field>/..." -> field_view
        self.model, self.site = model_databrowse.model, model_databrowse.site
        self.fields = self.field_dict(self.model)
        # If the model has no fields with choices, there's no point in going
        # further.
        if not self.fields:
            raise http.Http404('The requested model has no fields.')
        if url is None:
            return self.homepage_view(request)
        url_bits = url.split('/', 1)
        if self.fields.has_key(url_bits[0]):
            return self.field_view(request, self.fields[url_bits[0]], *url_bits[1:])
        raise http.Http404('The requested page does not exist.')

    def homepage_view(self, request):
        # list all browsable fields, ordered by verbose name
        easy_model = EasyModel(self.site, self.model)
        field_list = self.fields.values()
        field_list.sort(lambda x, y: cmp(x.verbose_name, y.verbose_name))
        return render_to_response('databrowse/fieldchoice_homepage.html', {'root_url': self.site.root_url, 'model': easy_model, 'field_list': field_list})

    def field_view(self, request, field, value=None):
        # with a value: list matching objects; without: list distinct values
        easy_model = EasyModel(self.site, self.model)
        easy_field = easy_model.field(field.name)
        if value is not None:
            obj_list = easy_model.objects(**{field.name: value})
            return render_to_response('databrowse/fieldchoice_detail.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'value': value, 'object_list': obj_list})
        obj_list = [v[field.name] for v in self.model._default_manager.distinct().order_by(field.name).values(field.name)]
        return render_to_response('databrowse/fieldchoice_list.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'object_list': obj_list})
| mit |
Asana/mypipe | avro/lang/py/src/avro/ipc.py | 22 | 17211 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for inter-process calls.
"""
import httplib
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from avro import io
from avro import protocol
from avro import schema
#
# Constants
#

# Handshake schema is pulled in during build
# (the @...@ tokens are substituted with the real JSON schemas)
HANDSHAKE_REQUEST_SCHEMA = schema.parse("""
@HANDSHAKE_REQUEST_SCHEMA@
""")
HANDSHAKE_RESPONSE_SCHEMA = schema.parse("""
@HANDSHAKE_RESPONSE_SCHEMA@
""")
# pre-built writers/readers for the handshake records
HANDSHAKE_REQUESTOR_WRITER = io.DatumWriter(HANDSHAKE_REQUEST_SCHEMA)
HANDSHAKE_REQUESTOR_READER = io.DatumReader(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_WRITER = io.DatumWriter(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_READER = io.DatumReader(HANDSHAKE_REQUEST_SCHEMA)
# per-call metadata is a map<string, bytes>
META_SCHEMA = schema.parse('{"type": "map", "values": "bytes"}')
META_WRITER = io.DatumWriter(META_SCHEMA)
META_READER = io.DatumReader(META_SCHEMA)
# schema used to serialize unexpected (system) errors
SYSTEM_ERROR_SCHEMA = schema.parse('["string"]')
# protocol cache, keyed by remote endpoint name / protocol hash
REMOTE_HASHES = {}
REMOTE_PROTOCOLS = {}
# framing: each buffer is prefixed by a 4-byte big-endian length
BIG_ENDIAN_INT_STRUCT = io.struct_class('!I')
BUFFER_HEADER_LENGTH = 4
BUFFER_SIZE = 8192
#
# Exceptions
#
class AvroRemoteException(schema.AvroException):
  """
  Raised when an error message is sent by an Avro requestor or responder.
  """
  def __init__(self, fail_msg=None):
    # explicit base-class call (works whether or not the base is old-style)
    schema.AvroException.__init__(self, fail_msg)
class ConnectionClosedException(schema.AvroException):
  """Raised when a framed read gets zero bytes (peer closed connection)."""
  pass
#
# Base IPC Classes (Requestor/Responder)
#
class BaseRequestor(object):
  """Base class for the client side of a protocol interaction."""
  def __init__(self, local_protocol, transceiver):
    self._local_protocol = local_protocol
    self._transceiver = transceiver
    self._remote_protocol = None
    self._remote_hash = None
    self._send_protocol = None

  # read-only properties
  local_protocol = property(lambda self: self._local_protocol)
  transceiver = property(lambda self: self._transceiver)

  # read/write properties
  def set_remote_protocol(self, new_remote_protocol):
    self._remote_protocol = new_remote_protocol
    # cache per remote endpoint so later requestors can skip the handshake
    REMOTE_PROTOCOLS[self.transceiver.remote_name] = self.remote_protocol
  remote_protocol = property(lambda self: self._remote_protocol,
                             set_remote_protocol)

  def set_remote_hash(self, new_remote_hash):
    self._remote_hash = new_remote_hash
    REMOTE_HASHES[self.transceiver.remote_name] = self.remote_hash
  remote_hash = property(lambda self: self._remote_hash, set_remote_hash)

  def set_send_protocol(self, new_send_protocol):
    self._send_protocol = new_send_protocol
  send_protocol = property(lambda self: self._send_protocol, set_send_protocol)

  def request(self, message_name, request_datum):
    """
    Writes a request message and reads a response or error message.
    """
    # build handshake and call request
    buffer_writer = StringIO()
    buffer_encoder = io.BinaryEncoder(buffer_writer)
    self.write_handshake_request(buffer_encoder)
    self.write_call_request(message_name, request_datum, buffer_encoder)
    # send the handshake and call request; block until call response
    call_request = buffer_writer.getvalue()
    return self.issue_request(call_request, message_name, request_datum)

  def write_handshake_request(self, encoder):
    local_hash = self.local_protocol.md5
    remote_name = self.transceiver.remote_name
    remote_hash = REMOTE_HASHES.get(remote_name)
    if remote_hash is None:
      # first contact: optimistically assume the server speaks our protocol
      remote_hash = local_hash
      self.remote_protocol = self.local_protocol
    request_datum = {}
    request_datum['clientHash'] = local_hash
    request_datum['serverHash'] = remote_hash
    if self.send_protocol:
      # a previous handshake told us the server does not know our protocol
      request_datum['clientProtocol'] = str(self.local_protocol)
    HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)

  def write_call_request(self, message_name, request_datum, encoder):
    """
    The format of a call request is:
      * request metadata, a map with values of type bytes
      * the message name, an Avro string, followed by
      * the message parameters. Parameters are serialized according to
        the message's request declaration.
    """
    # request metadata (not yet implemented)
    request_metadata = {}
    META_WRITER.write(request_metadata, encoder)
    # message name
    message = self.local_protocol.messages.get(message_name)
    if message is None:
      raise schema.AvroException('Unknown message: %s' % message_name)
    encoder.write_utf8(message.name)
    # message parameters
    self.write_request(message.request, request_datum, encoder)

  def write_request(self, request_schema, request_datum, encoder):
    datum_writer = io.DatumWriter(request_schema)
    datum_writer.write(request_datum, encoder)

  def read_handshake_response(self, decoder):
    # Returns True when the call response follows in the same payload,
    # False when the request must be re-sent (server needed our protocol).
    handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
    match = handshake_response.get('match')
    if match == 'BOTH':
      self.send_protocol = False
      return True
    elif match == 'CLIENT':
      # server knew us but we guessed its protocol wrong; adopt its protocol
      if self.send_protocol:
        raise schema.AvroException('Handshake failure.')
      self.remote_protocol = protocol.parse(
          handshake_response.get('serverProtocol'))
      self.remote_hash = handshake_response.get('serverHash')
      self.send_protocol = False
      return True
    elif match == 'NONE':
      # server does not know our protocol; re-send including it
      if self.send_protocol:
        raise schema.AvroException('Handshake failure.')
      self.remote_protocol = protocol.parse(
          handshake_response.get('serverProtocol'))
      self.remote_hash = handshake_response.get('serverHash')
      self.send_protocol = True
      return False
    else:
      raise schema.AvroException('Unexpected match: %s' % match)

  def read_call_response(self, message_name, decoder):
    """
    The format of a call response is:
      * response metadata, a map with values of type bytes
      * a one-byte error flag boolean, followed by either:
        o if the error flag is false,
          the message response, serialized per the message's response schema.
        o if the error flag is true,
          the error, serialized per the message's error union schema.
    """
    # response metadata (must be consumed from the stream even if unused)
    response_metadata = META_READER.read(decoder)
    # remote response schema
    remote_message_schema = self.remote_protocol.messages.get(message_name)
    if remote_message_schema is None:
      raise schema.AvroException('Unknown remote message: %s' % message_name)
    # local response schema
    local_message_schema = self.local_protocol.messages.get(message_name)
    if local_message_schema is None:
      raise schema.AvroException('Unknown local message: %s' % message_name)
    # error flag
    if not decoder.read_boolean():
      writers_schema = remote_message_schema.response
      readers_schema = local_message_schema.response
      return self.read_response(writers_schema, readers_schema, decoder)
    else:
      writers_schema = remote_message_schema.errors
      readers_schema = local_message_schema.errors
      raise self.read_error(writers_schema, readers_schema, decoder)

  def read_response(self, writers_schema, readers_schema, decoder):
    datum_reader = io.DatumReader(writers_schema, readers_schema)
    result = datum_reader.read(decoder)
    return result

  def read_error(self, writers_schema, readers_schema, decoder):
    # builds (does not raise) the remote exception; the caller raises it
    datum_reader = io.DatumReader(writers_schema, readers_schema)
    return AvroRemoteException(datum_reader.read(decoder))
class Requestor(BaseRequestor):
  """Synchronous requestor: one blocking transceive per request."""
  def issue_request(self, call_request, message_name, request_datum):
    call_response = self.transceiver.transceive(call_request)

    # process the handshake and call response
    buffer_decoder = io.BinaryDecoder(StringIO(call_response))
    call_response_exists = self.read_handshake_response(buffer_decoder)
    if call_response_exists:
      return self.read_call_response(message_name, buffer_decoder)
    else:
      # handshake mismatch: retry, this time sending our full protocol
      return self.request(message_name, request_datum)
class Responder(object):
  """Base class for the server side of a protocol interaction."""
  def __init__(self, local_protocol):
    self._local_protocol = local_protocol
    self._local_hash = self.local_protocol.md5
    self._protocol_cache = {}
    self.set_protocol_cache(self.local_hash, self.local_protocol)

  # read-only properties
  local_protocol = property(lambda self: self._local_protocol)
  local_hash = property(lambda self: self._local_hash)
  protocol_cache = property(lambda self: self._protocol_cache)

  # utility functions to manipulate protocol cache
  def get_protocol_cache(self, hash):
    return self.protocol_cache.get(hash)
  def set_protocol_cache(self, hash, protocol):
    self.protocol_cache[hash] = protocol

  def respond(self, call_request):
    """
    Called by a server to deserialize a request, compute and serialize
    a response or error. Compare to 'handle()' in Thrift.
    """
    buffer_reader = StringIO(call_request)
    buffer_decoder = io.BinaryDecoder(buffer_reader)
    buffer_writer = StringIO()
    buffer_encoder = io.BinaryEncoder(buffer_writer)
    error = None
    response_metadata = {}

    try:
      remote_protocol = self.process_handshake(buffer_decoder, buffer_encoder)
      # handshake failure
      if remote_protocol is None:
        return buffer_writer.getvalue()

      # read request using remote protocol; the metadata map must be
      # consumed from the stream even though it is not used yet
      request_metadata = META_READER.read(buffer_decoder)
      remote_message_name = buffer_decoder.read_utf8()

      # get remote and local request schemas so we can do
      # schema resolution (one fine day)
      remote_message = remote_protocol.messages.get(remote_message_name)
      if remote_message is None:
        fail_msg = 'Unknown remote message: %s' % remote_message_name
        raise schema.AvroException(fail_msg)
      local_message = self.local_protocol.messages.get(remote_message_name)
      if local_message is None:
        fail_msg = 'Unknown local message: %s' % remote_message_name
        raise schema.AvroException(fail_msg)
      writers_schema = remote_message.request
      readers_schema = local_message.request
      request = self.read_request(writers_schema, readers_schema,
                                  buffer_decoder)

      # perform server logic; declared errors pass through, anything else
      # is wrapped so it can be serialized for the client
      try:
        response = self.invoke(local_message, request)
      except AvroRemoteException as e:
        error = e
      except Exception as e:
        error = AvroRemoteException(str(e))

      # write response using local protocol
      META_WRITER.write(response_metadata, buffer_encoder)
      buffer_encoder.write_boolean(error is not None)
      if error is None:
        writers_schema = local_message.response
        self.write_response(writers_schema, response, buffer_encoder)
      else:
        writers_schema = local_message.errors
        self.write_error(writers_schema, error, buffer_encoder)
    except schema.AvroException as e:
      # System error: respond with a fresh buffer containing only the error.
      # BUGFIX: the error must be encoded into the buffer that is actually
      # returned below; previously it was written into a throwaway
      # StringIO, so the client received a truncated response.
      error = AvroRemoteException(str(e))
      buffer_writer = StringIO()
      buffer_encoder = io.BinaryEncoder(buffer_writer)
      META_WRITER.write(response_metadata, buffer_encoder)
      buffer_encoder.write_boolean(True)
      self.write_error(SYSTEM_ERROR_SCHEMA, error, buffer_encoder)
    return buffer_writer.getvalue()

  def process_handshake(self, decoder, encoder):
    """Read the handshake request and write the handshake response.

    Returns the negotiated remote protocol, or None when it could not be
    determined (the client must re-send including its protocol).
    """
    handshake_request = HANDSHAKE_RESPONDER_READER.read(decoder)
    handshake_response = {}

    # determine the remote protocol (from cache, or parse the one sent)
    client_hash = handshake_request.get('clientHash')
    client_protocol = handshake_request.get('clientProtocol')
    remote_protocol = self.get_protocol_cache(client_hash)
    if remote_protocol is None and client_protocol is not None:
      remote_protocol = protocol.parse(client_protocol)
      self.set_protocol_cache(client_hash, remote_protocol)

    # evaluate remote's guess of the local protocol
    server_hash = handshake_request.get('serverHash')
    if self.local_hash == server_hash:
      if remote_protocol is None:
        handshake_response['match'] = 'NONE'
      else:
        handshake_response['match'] = 'BOTH'
    else:
      if remote_protocol is None:
        handshake_response['match'] = 'NONE'
      else:
        handshake_response['match'] = 'CLIENT'

    # unless both sides already agree, send back our protocol and hash
    if handshake_response['match'] != 'BOTH':
      handshake_response['serverProtocol'] = str(self.local_protocol)
      handshake_response['serverHash'] = self.local_hash

    HANDSHAKE_RESPONDER_WRITER.write(handshake_response, encoder)
    return remote_protocol

  def invoke(self, local_message, request):
    """
    Actual work done by server: cf. handler in thrift.
    """
    pass

  def read_request(self, writers_schema, readers_schema, decoder):
    datum_reader = io.DatumReader(writers_schema, readers_schema)
    return datum_reader.read(decoder)

  def write_response(self, writers_schema, response_datum, encoder):
    datum_writer = io.DatumWriter(writers_schema)
    datum_writer.write(response_datum, encoder)

  def write_error(self, writers_schema, error_exception, encoder):
    # errors are serialized as their string representation
    datum_writer = io.DatumWriter(writers_schema)
    datum_writer.write(str(error_exception), encoder)
#
# Utility classes
#
class FramedReader(object):
    """Wrapper around a file-like object to read framed data.

    A framed message is a sequence of buffers, each prefixed with its
    4-byte big-endian length; a zero-length buffer terminates the message.
    """

    def __init__(self, reader):
        self._reader = reader

    # read-only properties
    reader = property(lambda self: self._reader)

    def read_framed_message(self):
        """Read one complete message and return it as a single string."""
        fragments = []
        while True:
            remaining = self._read_buffer_length()
            if remaining == 0:
                # Zero-length buffer marks the end of the message.
                return ''.join(fragments)
            while remaining > 0:
                chunk = self.reader.read(remaining)
                if chunk == '':
                    raise ConnectionClosedException("Reader read 0 bytes.")
                fragments.append(chunk)
                remaining -= len(chunk)

    def _read_buffer_length(self):
        """Read and decode one 4-byte big-endian buffer-length prefix."""
        header = self.reader.read(BUFFER_HEADER_LENGTH)
        if header == '':
            raise ConnectionClosedException("Reader read 0 bytes.")
        return BIG_ENDIAN_INT_STRUCT.unpack(header)[0]
class FramedWriter(object):
    """Wrapper around a file-like object to write framed data.

    Messages are emitted as length-prefixed buffers of at most
    BUFFER_SIZE bytes, terminated by a zero-length buffer.
    """

    def __init__(self, writer):
        self._writer = writer

    # read-only properties
    writer = property(lambda self: self._writer)

    def write_framed_message(self, message):
        """Write ``message`` as a sequence of length-prefixed buffers."""
        offset = 0
        size = len(message)
        while offset < size:
            chunk_size = min(BUFFER_SIZE, size - offset)
            self.write_buffer(message[offset:offset + chunk_size])
            offset += chunk_size
        # A message is always terminated by a zero-length buffer.
        self.write_buffer_length(0)

    def write_buffer(self, chunk):
        """Write one buffer: its length prefix followed by its payload."""
        self.write_buffer_length(len(chunk))
        self.writer.write(chunk)

    def write_buffer_length(self, n):
        """Write a 4-byte big-endian buffer-length prefix."""
        self.writer.write(BIG_ENDIAN_INT_STRUCT.pack(n))
#
# Transceiver Implementations
#
class HTTPTransceiver(object):
    """
    A simple HTTP-based transceiver implementation.
    Useful for clients but not for servers
    """

    def __init__(self, host, port, req_resource='/'):
        # Instance attribute shadows the class-level default defined below.
        self.req_resource = req_resource
        # Assignment goes through the `conn` property setter -> self._conn.
        self.conn = httplib.HTTPConnection(host, port)
        self.conn.connect()

    # read-only properties
    sock = property(lambda self: self.conn.sock)
    # NOTE(review): getsockname() returns the *local* socket address;
    # for a "remote" name getpeername() looks intended — confirm with callers.
    remote_name = property(lambda self: self.sock.getsockname())

    # read/write properties
    def set_conn(self, new_conn):
        self._conn = new_conn
    conn = property(lambda self: self._conn, set_conn)

    # Class-level default; always shadowed by the instance attribute set
    # in __init__.
    req_resource = '/'

    def transceive(self, request):
        """Send a framed request and block until the framed response arrives."""
        self.write_framed_message(request)
        result = self.read_framed_message()
        return result

    def read_framed_message(self):
        """Read one framed message from the pending HTTP response."""
        response = self.conn.getresponse()
        response_reader = FramedReader(response)
        framed_message = response_reader.read_framed_message()
        response.read()  # ensure we're ready for subsequent requests
        return framed_message

    def write_framed_message(self, message):
        """POST ``message`` as an avro/binary framed HTTP request body."""
        req_method = 'POST'
        req_headers = {'Content-Type': 'avro/binary'}

        # Frame the message into an in-memory buffer first, then send it
        # as a single request body.
        req_body_buffer = FramedWriter(StringIO())
        req_body_buffer.write_framed_message(message)
        req_body = req_body_buffer.writer.getvalue()

        self.conn.request(req_method, self.req_resource, req_body, req_headers)

    def close(self):
        """Close the underlying HTTP connection."""
        self.conn.close()
#
# Server Implementations (none yet)
#
| apache-2.0 |
t794104/ansible | lib/ansible/modules/storage/netapp/na_ontap_autosupport.py | 17 | 10134 | #!/usr/bin/python
"""
create Autosupport module to enable, disable or modify
"""
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- "Enable/Disable Autosupport"
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_autosupport
options:
state:
description:
- Specifies whether the AutoSupport daemon is present or absent.
- When this setting is absent, delivery of all AutoSupport messages is turned off.
choices: ['present', 'absent']
default: present
node_name:
description:
- The name of the filer that owns the AutoSupport Configuration.
required: true
transport:
description:
- The name of the transport protocol used to deliver AutoSupport messages
choices: ['http', 'https', 'smtp']
noteto:
description:
- Specifies up to five recipients of short AutoSupport e-mail messages.
post_url:
description:
- The URL used to deliver AutoSupport messages via HTTP POST
mail_hosts:
description:
- List of mail server(s) used to deliver AutoSupport messages via SMTP.
- Both host names and IP addresses may be used as valid input.
support:
description:
- Specifies whether AutoSupport notification to technical support is enabled.
type: bool
from_address:
description:
- specify the e-mail address from which the node sends AutoSupport messages
version_added: 2.8
partner_addresses:
description:
- Specifies up to five partner vendor recipients of full AutoSupport e-mail messages.
version_added: 2.8
to_addresses:
description:
- Specifies up to five recipients of full AutoSupport e-mail messages.
version_added: 2.8
proxy_url:
description:
- specify an HTTP or HTTPS proxy if the 'transport' parameter is set to HTTP or HTTPS and your organization uses a proxy
version_added: 2.8
hostname_in_subject:
description:
- Specify whether the hostname of the node is included in the subject line of the AutoSupport message.
type: bool
version_added: 2.8
short_description: NetApp ONTAP Autosupport
version_added: "2.7"
"""
EXAMPLES = """
- name: Enable autosupport
na_ontap_autosupport:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
state: present
node_name: test
transport: https
noteto: abc@def.com,def@ghi.com
mail_hosts: 1.2.3.4,5.6.7.8
support: False
post_url: url/1.0/post
- name: Disable autosupport
na_ontap_autosupport:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
state: absent
node_name: test
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPasup(object):
    """Class with autosupport methods.

    Enables, disables, or modifies the AutoSupport configuration of an
    ONTAP node over ZAPI. Module state maps onto the AutoSupport service
    state: 'present' -> 'started', 'absent' -> 'stopped'.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            node_name=dict(required=True, type='str'),
            transport=dict(required=False, type='str', choices=['smtp', 'http', 'https']),
            noteto=dict(required=False, type='list'),
            # BUG FIX: was 'reuired=False' — the typo injected a bogus
            # 'reuired' key into the option spec instead of marking the
            # option as not required.
            post_url=dict(required=False, type='str'),
            support=dict(required=False, type='bool'),
            mail_hosts=dict(required=False, type='list'),
            from_address=dict(required=False, type='str'),
            partner_addresses=dict(required=False, type='list'),
            to_addresses=dict(required=False, type='list'),
            proxy_url=dict(required=False, type='str'),
            hostname_in_subject=dict(required=False, type='bool'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=False
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # present or absent requires modifying state to enabled or disabled
        self.parameters['service_state'] = 'started' if self.parameters['state'] == 'present' else 'stopped'
        self.set_playbook_zapi_key_map()

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def set_playbook_zapi_key_map(self):
        """Map playbook option names onto their ZAPI element names."""
        self.na_helper.zapi_string_keys = {
            'node_name': 'node-name',
            'transport': 'transport',
            'post_url': 'post-url',
            'from_address': 'from',
            'proxy_url': 'proxy-url'
        }
        # list-valued options map to (zapi parent element, zapi child element)
        self.na_helper.zapi_list_keys = {
            'noteto': ('noteto', 'mail-address'),
            'mail_hosts': ('mail-hosts', 'string'),
            'partner_addresses': ('partner-address', 'mail-address'),
            'to_addresses': ('to', 'mail-address'),
        }
        self.na_helper.zapi_bool_keys = {
            'support': 'is-support-enabled',
            'hostname_in_subject': 'is-node-in-subject'
        }

    def get_autosupport_config(self):
        """
        Invoke zapi - get current autosupport details
        :return: dict() of current settings, keyed by playbook option name
        """
        asup_details = netapp_utils.zapi.NaElement('autosupport-config-get')
        asup_details.add_new_child('node-name', self.parameters['node_name'])
        asup_info = dict()
        try:
            result = self.server.invoke_successfully(asup_details, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='%s' % to_native(error),
                                  exception=traceback.format_exc())
        # zapi invoke successful: translate the attribute element back into
        # playbook-shaped values using the key maps.
        asup_attr_info = result.get_child_by_name('attributes').get_child_by_name('autosupport-config-info')
        asup_info['service_state'] = 'started' if asup_attr_info['is-enabled'] == 'true' else 'stopped'
        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
            asup_info[item_key] = asup_attr_info[zapi_key]
        for item_key, zapi_key in self.na_helper.zapi_bool_keys.items():
            asup_info[item_key] = self.na_helper.get_value_for_bool(from_zapi=True,
                                                                    value=asup_attr_info[zapi_key])
        for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
            parent, dummy = zapi_key
            asup_info[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
                                                                    zapi_parent=asup_attr_info.get_child_by_name(parent)
                                                                    )
        return asup_info

    def modify_autosupport_config(self, modify):
        """
        Invoke zapi - modify autosupport config
        @return: NaElement object / FAILURE with an error_message
        """
        asup_details = {'node-name': self.parameters['node_name']}
        if modify.get('service_state'):
            asup_details['is-enabled'] = 'true' if modify.get('service_state') == 'started' else 'false'
        asup_config = netapp_utils.zapi.NaElement('autosupport-config-modify')
        for item_key in modify:
            if item_key in self.na_helper.zapi_string_keys:
                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
                asup_details[zapi_key] = modify[item_key]
            elif item_key in self.na_helper.zapi_bool_keys:
                zapi_key = self.na_helper.zapi_bool_keys.get(item_key)
                asup_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
                                                                           value=modify[item_key])
            elif item_key in self.na_helper.zapi_list_keys:
                # List values become their own child elements rather than
                # simple attributes.
                parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
                asup_config.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
                                                                             zapi_parent=parent_key,
                                                                             zapi_child=child_key,
                                                                             data=modify.get(item_key)))
        asup_config.translate_struct(asup_details)
        try:
            return self.server.invoke_successfully(asup_config, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='%s' % to_native(error), exception=traceback.format_exc())

    def autosupport_log(self):
        """Emit an EMS log event recording this module's invocation."""
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_autosupport", cserver)

    def apply(self):
        """
        Apply action to autosupport: diff current config against the
        requested parameters and push a modify when anything changed.
        """
        current = self.get_autosupport_config()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                self.modify_autosupport_config(modify)
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Execute action: build the module object and apply the state."""
    NetAppONTAPasup().apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
altairpearl/scikit-learn | sklearn/cluster/bicluster.py | 66 | 19850 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix together with the row and column
    scaling factors that were applied.
    """
    X = make_nonnegative(X)

    def inverse_sqrt_sums(axis):
        # 1 / sqrt(marginal sums); NaNs (e.g. from invalid sums) become 0.
        scale = np.asarray(1.0 / np.sqrt(X.sum(axis=axis))).squeeze()
        return np.where(np.isnan(scale), 0, scale)

    row_diag = inverse_sqrt_sums(1)
    col_diag = inverse_sqrt_sums(0)

    if issparse(X):
        n_rows, n_cols = X.shape
        row_mat = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        col_mat = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        normalized = row_mat * X * col_mat
    else:
        normalized = row_diag[:, np.newaxis] * X * col_diag
    return normalized, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    Iterates ``_scale_normalize`` until two successive iterates differ by
    less than ``tol`` (in norm), or ``max_iter`` iterations have run.
    """
    # According to paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    dist = None
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            # BUG FIX: previously compared X_scaled.data against X.data —
            # the distance to the *original* matrix, which never converges.
            # The convergence test must compare successive iterates, like
            # the dense branch below does.
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist is not None and dist < tol:
            break
    return X_scaled
def _log_normalize(X):
    """Normalize ``X`` according to Kluger's log-interactions scheme.

    Takes log(X) and double-centers it: subtract row and column means,
    then add back the grand mean.
    """
    X = make_nonnegative(X, min_value=1)
    if issparse(X):
        raise ValueError("Cannot compute log of a sparse matrix,"
                         " because log(x) diverges to -infinity as x"
                         " goes to 0.")
    log_X = np.log(X)
    grand_mean = log_X.mean()
    centered = log_X - log_X.mean(axis=1)[:, np.newaxis] - log_X.mean(axis=0)
    return centered + grand_mean
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
                                      BiclusterMixin)):
    """Base class for spectral biclustering.

    Provides parameter validation, the SVD step (with an ARPACK fallback
    for NaN singular vectors), and the shared k-means helper; subclasses
    implement ``_fit``.
    """

    @abstractmethod
    def __init__(self, n_clusters=3, svd_method="randomized",
                 n_svd_vecs=None, mini_batch=False, init="k-means++",
                 n_init=10, n_jobs=1, random_state=None):
        self.n_clusters = n_clusters
        self.svd_method = svd_method
        self.n_svd_vecs = n_svd_vecs
        self.mini_batch = mini_batch
        self.init = init
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.random_state = random_state

    def _check_parameters(self):
        # Subclasses extend this with their own checks.
        legal_svd_methods = ('randomized', 'arpack')
        if self.svd_method not in legal_svd_methods:
            raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
                             " one of {1}.".format(self.svd_method,
                                                   legal_svd_methods))

    def fit(self, X):
        """Creates a biclustering for X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        self._check_parameters()
        self._fit(X)

    def _svd(self, array, n_components, n_discard):
        """Returns first `n_components` left and right singular
        vectors u and v, discarding the first `n_discard`.
        """
        if self.svd_method == 'randomized':
            kwargs = {}
            if self.n_svd_vecs is not None:
                kwargs['n_oversamples'] = self.n_svd_vecs
            u, _, vt = randomized_svd(array, n_components,
                                      random_state=self.random_state,
                                      **kwargs)

        elif self.svd_method == 'arpack':
            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
            if np.any(np.isnan(vt)):
                # some eigenvalues of A * A.T are negative, causing
                # sqrt() to be np.nan. This causes some vectors in vt
                # to be np.nan. Recompute the right singular vectors as
                # eigenvectors of A.T * A instead.
                A = safe_sparse_dot(array.T, array)
                random_state = check_random_state(self.random_state)
                # initialize with [-1,1] as in ARPACK
                v0 = random_state.uniform(-1, 1, A.shape[0])
                _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
                vt = v.T
            if np.any(np.isnan(u)):
                # Same fallback for the left singular vectors, via A * A.T.
                A = safe_sparse_dot(array, array.T)
                random_state = check_random_state(self.random_state)
                # initialize with [-1,1] as in ARPACK
                v0 = random_state.uniform(-1, 1, A.shape[0])
                _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)

        assert_all_finite(u)
        assert_all_finite(vt)
        # Drop the first `n_discard` singular vectors.
        u = u[:, n_discard:]
        vt = vt[n_discard:]
        return u, vt.T

    def _k_means(self, data, n_clusters):
        # Shared k-means step; mini-batch variant trades accuracy for speed.
        if self.mini_batch:
            model = MiniBatchKMeans(n_clusters,
                                    init=self.init,
                                    n_init=self.n_init,
                                    random_state=self.random_state)
        else:
            model = KMeans(n_clusters, init=self.init,
                           n_init=self.n_init, n_jobs=self.n_jobs,
                           random_state=self.random_state)
        model.fit(data)
        centroid = model.cluster_centers_
        labels = model.labels_
        return centroid, labels
class SpectralCoclustering(BaseSpectral):
    """Spectral Co-Clustering algorithm (Dhillon, 2001).

    Clusters rows and columns of an array `X` to solve the relaxed
    normalized cut of the bipartite graph created from `X` as follows:
    the edge between row vertex `i` and column vertex `j` has weight
    `X[i, j]`.

    The resulting bicluster structure is block-diagonal, since each
    row and each column belongs to exactly one bicluster.

    Supports sparse matrices, as long as they are nonnegative.

    Read more in the :ref:`User Guide <spectral_coclustering>`.

    Parameters
    ----------
    n_clusters : integer, optional, default: 3
        The number of biclusters to find.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', use
        :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', use
        :func:`sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        The bicluster label of each row.

    column_labels_ : array-like, shape (n_cols,)
        The bicluster label of each column.

    References
    ----------
    * Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
      bipartite spectral graph partitioning
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.

    """
    def __init__(self, n_clusters=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralCoclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)

    def _fit(self, X):
        # Normalize, take singular vectors (discarding the trivial first
        # pair), and cluster rows and columns jointly in the embedding.
        normalized_data, row_diag, col_diag = _scale_normalize(X)
        n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
        u, v = self._svd(normalized_data, n_sv, n_discard=1)
        z = np.vstack((row_diag[:, np.newaxis] * u,
                       col_diag[:, np.newaxis] * v))

        _, labels = self._k_means(z, self.n_clusters)

        n_rows = X.shape[0]
        self.row_labels_ = labels[:n_rows]
        self.column_labels_ = labels[n_rows:]

        # BUG FIX: pass lists to np.vstack — stacking a bare generator is
        # deprecated in NumPy and raises on recent releases.
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
    """Spectral biclustering (Kluger, 2003).

    Partitions rows and columns under the assumption that the data has
    an underlying checkerboard structure. For instance, if there are
    two row partitions and three column partitions, each row will
    belong to three biclusters, and each column will belong to two
    biclusters. The outer product of the corresponding row and column
    label vectors gives this checkerboard structure.

    Read more in the :ref:`User Guide <spectral_biclustering>`.

    Parameters
    ----------
    n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
        The number of row and column clusters in the checkerboard
        structure.

    method : string, optional, default: 'bistochastic'
        Method of normalizing and converting singular vectors into
        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
        The authors recommend using 'log'. If the data is sparse,
        however, log normalization will not work, which is why the
        default is 'bistochastic'. CAUTION: if `method='log'`, the
        data must not be sparse.

    n_components : integer, optional, default: 6
        Number of singular vectors to check.

    n_best : integer, optional, default: 3
        Number of best singular vectors to which to project the data
        for clustering.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', uses
        `sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', uses
        `sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        Row partition labels.

    column_labels_ : array-like, shape (n_cols,)
        Column partition labels.

    References
    ----------
    * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
      data: coclustering genes and conditions
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.

    """
    def __init__(self, n_clusters=3, method='bistochastic',
                 n_components=6, n_best=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralBiclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)
        self.method = method
        self.n_components = n_components
        self.n_best = n_best

    def _check_parameters(self):
        super(SpectralBiclustering, self)._check_parameters()
        legal_methods = ('bistochastic', 'scale', 'log')
        if self.method not in legal_methods:
            raise ValueError("Unknown method: '{0}'. method must be"
                             " one of {1}.".format(self.method, legal_methods))
        try:
            int(self.n_clusters)
        except TypeError:
            try:
                r, c = self.n_clusters
                int(r)
                int(c)
            except (ValueError, TypeError):
                # BUG FIX: the message contained a '{}' placeholder that was
                # never filled; add the missing .format call.
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
        if self.n_components < 1:
            raise ValueError("Parameter n_components must be greater than 0,"
                             " but its value is {}".format(self.n_components))
        if self.n_best < 1:
            raise ValueError("Parameter n_best must be greater than 0,"
                             " but its value is {}".format(self.n_best))
        if self.n_best > self.n_components:
            raise ValueError("n_best cannot be larger than"
                             " n_components, but {} > {}"
                             "".format(self.n_best, self.n_components))

    def _fit(self, X):
        n_sv = self.n_components
        if self.method == 'bistochastic':
            normalized_data = _bistochastic_normalize(X)
            n_sv += 1
        elif self.method == 'scale':
            normalized_data, _, _ = _scale_normalize(X)
            n_sv += 1
        elif self.method == 'log':
            normalized_data = _log_normalize(X)
        # 'log' keeps the first singular vector pair; the others discard it.
        n_discard = 0 if self.method == 'log' else 1
        u, v = self._svd(normalized_data, n_sv, n_discard)
        ut = u.T
        vt = v.T

        try:
            n_row_clusters, n_col_clusters = self.n_clusters
        except TypeError:
            n_row_clusters = n_col_clusters = self.n_clusters

        best_ut = self._fit_best_piecewise(ut, self.n_best,
                                           n_row_clusters)

        best_vt = self._fit_best_piecewise(vt, self.n_best,
                                           n_col_clusters)

        self.row_labels_ = self._project_and_cluster(X, best_vt.T,
                                                     n_row_clusters)

        self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
                                                        n_col_clusters)

        # BUG FIX: pass lists to np.vstack — stacking a bare generator is
        # deprecated in NumPy and raises on recent releases.
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])

    def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.

        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.
        """
        def make_piecewise(v):
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise,
                                                axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1,
                                    arr=(vectors - piecewise_vectors))
        result = vectors[np.argsort(dists)[:n_best]]
        return result

    def _project_and_cluster(self, data, vectors, n_clusters):
        """Project ``data`` to ``vectors`` and cluster the result."""
        projected = safe_sparse_dot(data, vectors)
        _, labels = self._k_means(projected, n_clusters)
        return labels
| bsd-3-clause |
longmen21/edx-platform | pavelib/paver_tests/test_eslint.py | 16 | 1694 | """
Tests for paver quality tasks
"""
import unittest
from mock import patch
import pavelib.quality
from paver.easy import BuildFailure
class TestPaverESLint(unittest.TestCase):
    """
    For testing run_eslint
    """

    def setUp(self):
        super(TestPaverESLint, self).setUp()

        # Mock the paver @needs decorator so run_eslint's prerequisite
        # tasks are not actually executed.
        self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start()
        self._mock_paver_needs.return_value = 0

        # Mock shell commands
        patcher = patch('pavelib.quality.sh')
        self._mock_paver_sh = patcher.start()

        # Cleanup mocks
        self.addCleanup(patcher.stop)
        self.addCleanup(self._mock_paver_needs.stop)

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_count_from_last_line')
    def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric):  # pylint: disable=unused-argument
        """
        run_eslint encounters an error parsing the eslint output log
        """
        # A None count means the violation total could not be parsed from
        # the log; run_eslint reports this by raising BuildFailure.
        mock_count.return_value = None
        with self.assertRaises(BuildFailure):
            pavelib.quality.run_eslint("")

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_count_from_last_line')
    def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric):  # pylint: disable=unused-argument
        """
        eslint finds violations, but a limit was not set
        """
        # With no violation limit configured, a nonzero count must not fail.
        mock_count.return_value = 1
        pavelib.quality.run_eslint("")
| agpl-3.0 |
zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/admin/volumes/volume_types/extras/views.py | 7 | 3624 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volumes.volume_types.extras \
import forms as project_forms
from openstack_dashboard.dashboards.admin.volumes.volume_types.extras \
import tables as project_tables
class ExtraSpecMixin(object):
    """Add the volume type (and, when routed, the extra-spec key) to the
    template context shared by the extras views."""

    def get_context_data(self, **kwargs):
        context = super(ExtraSpecMixin, self).get_context_data(**kwargs)
        try:
            vol_type = api.cinder.volume_type_get(self.request,
                                                  self.kwargs['type_id'])
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve volume type details."))
        else:
            context['vol_type'] = vol_type
        if 'key' in self.kwargs:
            context['key'] = self.kwargs['key']
        return context
class IndexView(ExtraSpecMixin, forms.ModalFormMixin, tables.DataTableView):
    """Table of all extra specs defined on a volume type."""
    table_class = project_tables.ExtraSpecsTable
    template_name = 'admin/volumes/volume_types/extras/index.html'

    def get_data(self):
        """Return the volume type's extra specs, sorted by key; empty on error."""
        try:
            specs = api.cinder.volume_type_extra_get(self.request,
                                                     self.kwargs['type_id'])
            specs.sort(key=lambda spec: (spec.key,))
        except Exception:
            specs = []
            exceptions.handle(self.request,
                              _('Unable to retrieve extra spec list.'))
        return specs
class CreateView(ExtraSpecMixin, forms.ModalFormView):
    """Modal view for adding an extra spec to a volume type."""
    form_class = project_forms.CreateExtraSpec
    template_name = 'admin/volumes/volume_types/extras/create.html'

    def get_initial(self):
        """Seed the form with the volume type being edited."""
        return {'type_id': self.kwargs['type_id']}

    def get_success_url(self):
        # Resolve the named route instead of hand-building the path string,
        # for consistency with EditView and robustness to URLconf changes.
        return reverse('horizon:admin:volumes:volume_types:extras:index',
                       args=(self.kwargs['type_id'],))
class EditView(ExtraSpecMixin, forms.ModalFormView):
    """Modal view for editing a single extra spec of a volume type."""
    form_class = project_forms.EditExtraSpec
    template_name = 'admin/volumes/volume_types/extras/edit.html'
    success_url = 'horizon:admin:volumes:volume_types:extras:index'

    def get_success_url(self):
        # Return to the extra-spec index for this volume type.
        return reverse(self.success_url,
                       args=(self.kwargs['type_id'],))

    def get_initial(self):
        """Seed the form with the current value of the spec being edited."""
        type_id = self.kwargs['type_id']
        key = self.kwargs['key']
        try:
            # raw=True returns the specs as a plain dict rather than objects.
            extra_specs = api.cinder.volume_type_extra_get(self.request,
                                                           type_id,
                                                           raw=True)
        except Exception:
            extra_specs = {}
            exceptions.handle(self.request,
                              _('Unable to retrieve volume type extra spec '
                                'details.'))
        return {'type_id': type_id,
                'key': key,
                'value': extra_specs.get(key, '')}
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/ad_group_error.py | 1 | 1782 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.errors',
marshal='google.ads.googleads.v8',
manifest={
'AdGroupErrorEnum',
},
)
# NOTE: generated code (googleapis-gen) — the numeric values mirror the
# AdGroupError enum of the google.ads.googleads.v8 proto definition and
# must not be renumbered by hand.
class AdGroupErrorEnum(proto.Message):
    r"""Container for enum describing possible ad group errors. """
    class AdGroupError(proto.Enum):
        r"""Enum describing possible ad group errors."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        DUPLICATE_ADGROUP_NAME = 2
        INVALID_ADGROUP_NAME = 3
        # value 4 is unused/reserved in the proto definition
        ADVERTISER_NOT_ON_CONTENT_NETWORK = 5
        BID_TOO_BIG = 6
        BID_TYPE_AND_BIDDING_STRATEGY_MISMATCH = 7
        MISSING_ADGROUP_NAME = 8
        ADGROUP_LABEL_DOES_NOT_EXIST = 9
        ADGROUP_LABEL_ALREADY_EXISTS = 10
        INVALID_CONTENT_BID_CRITERION_TYPE_GROUP = 11
        AD_GROUP_TYPE_NOT_VALID_FOR_ADVERTISING_CHANNEL_TYPE = 12
        ADGROUP_TYPE_NOT_SUPPORTED_FOR_CAMPAIGN_SALES_COUNTRY = 13
        CANNOT_ADD_ADGROUP_OF_TYPE_DSA_TO_CAMPAIGN_WITHOUT_DSA_SETTING = 14
        PROMOTED_HOTEL_AD_GROUPS_NOT_AVAILABLE_FOR_CUSTOMER = 15
        INVALID_EXCLUDED_PARENT_ASSET_FIELD_TYPE = 16
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
markkolich/FrameworkBenchmarks | frameworks/Python/django/hello/hello/wsgi.py | 75 | 1132 | """
WSGI config for hello project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# setdefault: an externally supplied DJANGO_SETTINGS_MODULE wins over this
# fallback, so deployments can point at a different settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Tools/freeze/makefreeze.py | 46 | 2710 | import marshal
import bkfile
# Write a file containing frozen code for the modules in the dictionary.
header = """
#include "Python.h"
static struct _frozen _PyImport_FrozenModules[] = {
"""
trailer = """\
{0, 0, 0} /* sentinel */
};
"""
# if __debug__ == 0 (i.e. -O option given), set Py_OptimizeFlag in frozen app.
default_entry_point = """
int
main(int argc, char **argv)
{
extern int Py_FrozenMain(int, char **);
""" + ((not __debug__ and """
Py_OptimizeFlag++;
""") or "") + """
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(argc, argv);
}
"""
def makefreeze(base, dict, debug=0, entry_point=None, fail_import=()):
    """Write the C sources for a frozen application (Python 2 code).

    base: directory/prefix for the generated files.
    dict: mapping of module name -> module object (from modulefinder).
    Returns the list of generated per-module file names; also writes
    'frozen.c' containing the _PyImport_FrozenModules table and main().
    """
    if entry_point is None: entry_point = default_entry_point
    done = []
    files = []
    mods = dict.keys()
    mods.sort()
    for mod in mods:
        m = dict[mod]
        # '.' is not valid in a C identifier; mangle package paths with '__'.
        mangled = "__".join(mod.split("."))
        if m.__code__:
            file = 'M_' + mangled + '.c'
            outfp = bkfile.open(base + file, 'w')
            files.append(file)
            if debug:
                print "freezing", mod, "..."
            str = marshal.dumps(m.__code__)
            size = len(str)
            if m.__path__:
                # Indicate package by negative size
                size = -size
            done.append((mod, mangled, size))
            writecode(outfp, mangled, str)
            outfp.close()
    if debug:
        print "generating table of frozen modules"
    outfp = bkfile.open(base + 'frozen.c', 'w')
    for mod, mangled, size in done:
        outfp.write('extern unsigned char M_%s[];\n' % mangled)
    outfp.write(header)
    for mod, mangled, size in done:
        outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
    outfp.write('\n')
    # The following modules have a NULL code pointer, indicating
    # that the frozen program should not search for them on the host
    # system. Importing them will *always* raise an ImportError.
    # The zero value size is never used.
    for mod in fail_import:
        outfp.write('\t{"%s", NULL, 0},\n' % (mod,))
    outfp.write(trailer)
    outfp.write(entry_point)
    outfp.close()
    return files
# Write a C initializer for a module containing the frozen python code.
# The array is called M_<mod>.
def writecode(outfp, mod, str):
    """Emit a C array 'unsigned char M_<mod>[]' holding the bytes of *str*.

    Values are written as decimal byte literals, 16 per source line, each
    line indented with a tab, matching the original frozen-module format.
    """
    outfp.write('unsigned char M_%s[] = {' % mod)
    for start in range(0, len(str), 16):
        outfp.write('\n\t')
        row = str[start:start + 16]
        outfp.write(''.join('%d,' % ord(ch) for ch in row))
    outfp.write('\n};\n')
## def writecode(outfp, mod, str):
## outfp.write('unsigned char M_%s[%d] = "%s";\n' % (mod, len(str),
## '\\"'.join(map(lambda s: repr(s)[1:-1], str.split('"')))))
| apache-2.0 |
Faiz7412/or-tools | examples/python/young_tableaux.py | 32 | 4055 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Young tableaux in Google CP Solver.
See
http://mathworld.wolfram.com/YoungTableau.html
and
http://en.wikipedia.org/wiki/Young_tableau
'''
The partitions of 4 are
{4}, {3,1}, {2,2}, {2,1,1}, {1,1,1,1}
And the corresponding standard Young tableaux are:
1. 1 2 3 4
2. 1 2 3 1 2 4 1 3 4
4 3 2
3. 1 2 1 3
3 4 2 4
4 1 2 1 3 1 4
3 2 2
4 4 3
5. 1
2
3
4
'''
Thanks to Laurent Perron for improving this model.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/young_tableaux.mzn
* Choco : http://www.hakank.org/choco/YoungTableuax.java
* JaCoP : http://www.hakank.org/JaCoP/YoungTableuax.java
* Comet : http://www.hakank.org/comet/young_tableaux.co
* Gecode : http://www.hakank.org/gecode/young_tableaux.cpp
* ECLiPSe : http://www.hakank.org/eclipse/young_tableaux.ecl
* Tailor/Essence' : http://www.hakank.org/tailor/young_tableaux.eprime
* SICStus: http://hakank.org/sicstus/young_tableaux.pl
* Zinc: http://hakank.org/minizinc/young_tableaux.zinc
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
import string
from ortools.constraint_solver import pywrapcp
def main(n=5):
  """Build and solve the standard Young tableaux model for partition size n.

  Python 2 script (print statements); prints every (tableau, partition)
  solution plus solver statistics.  x[i,j] == n+1 marks an unused cell.
  """
  # Create the solver.
  solver = pywrapcp.Solver("Problem")
  #
  # data
  #
  print "n:", n
  #
  # declare variables
  #
  x = {}
  for i in range(n):
    for j in range(n):
      x[(i, j)] = solver.IntVar(1, n + 1, "x(%i,%i)" % (i, j))
  x_flat = [x[(i, j)] for i in range(n) for j in range(n)]
  # partition structure
  p = [solver.IntVar(0, n + 1, "p%i" % i) for i in range(n)]
  #
  # constraints
  #
  # 1..n is used exactly once
  for i in range(1, n + 1):
    solver.Add(solver.Count(x_flat, i, 1))
  solver.Add(x[(0, 0)] == 1)
  # row wise
  for i in range(n):
    for j in range(1, n):
      solver.Add(x[(i, j)] >= x[(i, j - 1)])
  # column wise
  for j in range(n):
    for i in range(1, n):
      solver.Add(x[(i, j)] >= x[(i - 1, j)])
  # calculate the structure (the partition)
  for i in range(n):
    # MiniZinc/Zinc version:
    # p[i] == sum(j in 1..n) (bool2int(x[i,j] <= n))
    b = [solver.IsLessOrEqualCstVar(x[(i, j)], n) for j in range(n)]
    solver.Add(p[i] == solver.Sum(b))
  solver.Add(solver.Sum(p) == n)
  # partition rows are non-increasing
  for i in range(1, n):
    solver.Add(p[i - 1] >= p[i])
  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add(x_flat)
  solution.Add(p)
  # db: DecisionBuilder
  db = solver.Phase(x_flat + p,
                    solver.CHOOSE_FIRST_UNBOUND,
                    solver.ASSIGN_MIN_VALUE)
  solver.NewSearch(db)
  num_solutions = 0
  while solver.NextSolution():
    print "p:", [p[i].Value() for i in range(n)]
    print "x:"
    for i in range(n):
      for j in range(n):
        val = x_flat[i * n + j].Value()
        # only print cells that are actually part of the tableau
        if val <= n:
          print val,
      if p[i].Value() > 0:
        print
    print
    num_solutions += 1
  solver.EndSearch()
  print
  print "num_solutions:", num_solutions
  print "failures:", solver.Failures()
  print "branches:", solver.Branches()
  print "WallTime:", solver.WallTime()
# Default problem size; overridable from the command line.
n = 5
if __name__ == "__main__":
  if len(sys.argv) > 1:
    # string.atoi is Python 2-only (int() is the modern equivalent);
    # kept as-is since this is a py2 script.
    n = string.atoi(sys.argv[1])
  main(n)
| apache-2.0 |
losfair/MiracleKernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a defaultdict that creates nested autodicts on demand."""
    return defaultdict(autodict)
# Registries of per-event, per-field metadata, filled in by the define_*
# helpers below and consumed by flag_str()/symbol_str().
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Record the delimiter used when joining flag names for this field."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Map one bit value of a flag field to its printable name."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Map one exact value of a symbolic field to its printable name."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a bitmask *value* as delimiter-joined flag names.

    Uses the names registered via define_flag_value(); a 0 value maps to
    the name registered for 0 (if any).  Python 2 code: .keys() returns a
    list which is sorted in place.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            # Special-case value 0: print its name and stop.
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # clear the bits we just printed
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Render *value* as the symbolic name registered for the exact match.

    Returns "" when no name was registered for the value.  Python 2 code:
    .keys() returns a list which is sorted in place.
    """
    string = ""
    if symbolic_fields[event_name][field_name]:
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            # Special-case value 0: use the name registered for 0.
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit names for the common trace flags word; consumed by trace_flag_str().
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }
def trace_flag_str(value):
    """Render a trace-flags bitmask as a ' | '-joined list of names."""
    string = ""
    print_delim = 0
    keys = trace_flags.keys()
    for idx in keys:
        # A 0 value maps to the "NONE" entry.
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | ";
            string += trace_flags[idx]
            print_delim = 1
            # clear the bits we just printed
            value &= ~idx
    return string
def taskState(state):
    """Translate a numeric scheduler state into its one-letter perf name.

    Any value outside the known set maps to "Unknown".
    """
    return {0: "R", 1: "S", 2: "D", 64: "DEAD"}.get(state, "Unknown")
class EventHeaders:
    """Bundle of the common_* fields attached to every traced event."""
    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        # Keep the attribute names expected by existing callers.
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs
    def ts_format(self):
        """Timestamp formatted as '<seconds>.<microseconds>'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
shiquanwang/pylearn2 | pylearn2/datasets/tests/test_norb.py | 4 | 1679 | """
Unit tests for ./norb.py
"""
import unittest
import numpy
from pylearn2.datasets.norb import SmallNORB
from pylearn2.datasets.norb_small import FoveatedNORB
from pylearn2.utils import safe_zip
from pylearn2.testing.skip import skip_if_no_data
class TestNORB(unittest.TestCase):
    """Unit tests for the SmallNORB/FoveatedNORB dataset wrappers."""
    def setUp(self):
        # Skip the whole class when the NORB data files are not installed.
        skip_if_no_data()
    def test_foveated_norb(self):
        # Test that the FoveatedNORB class can be instantiated
        # (the instance itself is intentionally unused).
        norb_train = FoveatedNORB(which_set="train",
                                  scale=1, restrict_instances=[4, 6, 7, 8],
                                  one_hot=1)
    def test_get_topological_view(self):
        """Check both single-tensor and paired-tensor topological views."""
        # This is just to lower the memory usage. Otherwise, the
        # buildbot use close to 10G of ram.
        norb = SmallNORB('train', stop=1000)
        # Get a topological view as a single "(b, s, 0 1, c)" tensor.
        topo_tensor = norb.get_topological_view(single_tensor=True)
        shape = (norb.X.shape[0], 2) + SmallNORB.original_image_shape + (1, )
        expected_topo_tensor = norb.X.reshape(shape)
        # We loop to lower the peak memory usage
        for i in range(topo_tensor.shape[0]):
            assert numpy.all(topo_tensor[i] == expected_topo_tensor[i])
        # Get a topological view as two "(b, 0, 1, c)" tensors
        topo_tensors = norb.get_topological_view(single_tensor=False)
        expected_topo_tensors = tuple(expected_topo_tensor[:, i, ...]
                                      for i in range(2))
        for topo_tensor, expected_topo_tensor in safe_zip(
                topo_tensors, expected_topo_tensors):
            assert numpy.all(topo_tensor == expected_topo_tensor)
| bsd-3-clause |
dkubiak789/OpenUpgrade | openerp/report/render/render.py | 457 | 2524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Why doing some multi-thread instead of using OSE capabilities ?
# For progress bar.
#
# Add a transparant multi-thread layer to all report rendering layers
#
# TODO: method to stock on the disk
class render(object):
    """Represents a report job being rendered.

    @param bin_datas a dictionary of name:<binary content> of images etc.
    @param path a string (or list of strings) pointing at directories in
           which binary components (images) of the report can be found.
    Reporting classes must subclass this class and redefine the __init__
    and _render methods (not the other methods).
    """
    def __init__(self, bin_datas=None, path='.'):
        self.done = False
        # Avoid a shared mutable default: each instance gets its own dict.
        self.bin_datas = {} if bin_datas is None else bin_datas
        self.path = path
    def _render(self):
        # Subclasses override this to produce the actual report payload.
        return None
    def render(self):
        self.done = False
        self._result = self._render()
        self.done = True
        return True
    def is_done(self):
        return self.done
    def get(self):
        # Only expose a result once rendering has completed.
        return self._result if self.is_done() else None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
janusnic/21v-python | unit_05/alive/test.py | 2 | 5183 | import main
def test_get_board():
    """get_board builds an n x n grid with 1s at the alive coordinates."""
    alive_cons = [(1, 1),
                  (2, 2),
                  (3, 1),
                  (3, 3),
                  (4, 0)]
    board = [[0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0],
             [0, 0, 1, 0, 0],
             [0, 1, 0, 1, 0],
             [1, 0, 0, 0, 0]]
    assert main.get_board(5, alive_cons) == board
class TestGetNeighbors(object):
    """get_neighbors returns the 8 surrounding cells (bounds not clipped)."""
    def test_zero_positive(self):
        con = (0, 2)
        neighbors = [(-1, 1),
                     (-1, 2),
                     (-1, 3),
                     (0, 1),
                     (0, 3),
                     (1, 1),
                     (1, 2),
                     (1, 3)]
        # Order matters in this case; the others compare as sets.
        assert main.get_neighbors(con) == neighbors
    def test_zero_zero(self):
        con = (0, 0)
        neighbors = [(-1, -1),
                     (-1, 0),
                     (-1, 1),
                     (0, -1),
                     (0, 1),
                     (1, -1),
                     (1, 0),
                     (1, 1)]
        assert set(main.get_neighbors(con)) == set(neighbors)
    def test_positive_result(self):
        con = (5, 5)
        neighbors = [(4, 4),
                     (4, 5),
                     (4, 6),
                     (5, 4),
                     (5, 6),
                     (6, 4),
                     (6, 5),
                     (6, 6)]
        assert set(main.get_neighbors(con)) == set(neighbors)
def test_calculate_alive_neighbors():
    """Counts how many of a cell's 8 neighbors appear in alive_cons."""
    con = (0, 2)
    alive_cons = [(0, 0),
                  (1, 1),
                  (1, 2),
                  (2, 4),
                  (3, 5),
                  (0, 3)]
    assert main.calculate_alive_neighbors(con, alive_cons) == 3
class TestIsAliveCon(object):
    """Conway rules: birth with 3 neighbors, survival with 2 or 3."""
    def test_new_con(self):
        # Dead cell with exactly 3 alive neighbors becomes alive.
        alive_cons = [(1, 1),
                      (2, 0),
                      (2, 2)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is True
    def test_alive_con_alive_3_neighbors(self):
        alive_cons = [(1, 1),
                      (2, 0),
                      (2, 1),
                      (2, 2)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is True
    def test_alive_con_alive_2_neighbors(self):
        alive_cons = [(1, 1),
                      (2, 0),
                      (2, 1)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is True
    def tests_dead_con_few_neighbor(self):
        # Underpopulation: fewer than 2 neighbors.
        alive_cons = [(1, 1),
                      (2, 0)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is False
    def test_alive_con_few_neighbors(self):
        alive_cons = [(1, 1),
                      (2, 1)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is False
    def test_many_neighbors(self):
        # Overpopulation: more than 3 neighbors.
        alive_cons = [(1, 0),
                      (1, 1),
                      (2, 0),
                      (3, 1)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is False
class TestIsCorrectCon(object):
    """is_correct_con(size, con): both coordinates must be in [0, size)."""
    def test_zero_zero(self):
        assert main.is_correct_con(5, (0, 0)) is True
    def test_zero_positive(self):
        assert main.is_correct_con(5, (0, 4)) is True
        assert main.is_correct_con(5, (4, 0)) is True
    def test_zero_negative(self):
        assert main.is_correct_con(5, (0, -1)) is False
        assert main.is_correct_con(5, (-1, 0)) is False
    def test_negative(self):
        assert main.is_correct_con(5, (-1, -6)) is False
        assert main.is_correct_con(5, (-6, -1)) is False
        assert main.is_correct_con(5, (-6, -6)) is False
        assert main.is_correct_con(5, (-1, -1)) is False
    def test_posirive(self):
        assert main.is_correct_con(5, (1, 1)) is True
    def test_over_size(self):
        assert main.is_correct_con(5, (1, 5)) is False
        assert main.is_correct_con(5, (5, 1)) is False
        assert main.is_correct_con(5, (1, 6)) is False
        assert main.is_correct_con(5, (6, 6)) is False
    def test_negative_over_size(self):
        assert main.is_correct_con(5, (-6, 6)) is False
        assert main.is_correct_con(5, (6, -6)) is False
        assert main.is_correct_con(5, (-1, 6)) is False
        assert main.is_correct_con(5, (-1, 6)) is False
def test_correct_cons():
    """correct_cons filters out every out-of-bounds coordinate."""
    cons = [(1, 1),
            (-1, 2),
            (1, -1),
            (1, 10)]
    assert main.correct_cons(5, cons) == [(1, 1)]
class TestNewStep(object):
    """new_step advances the set of alive cells by one generation."""
    def test_glader(self):
        # Classic glider pattern: the shape translates diagonally.
        alive_cons = [(1, 2),
                      (2, 3),
                      (3, 1),
                      (3, 2),
                      (3, 3)]
        new_alive_cons = [(2, 1),
                          (2, 3),
                          (3, 2),
                          (3, 3),
                          (4, 2)]
        assert set(main.new_step(alive_cons)) == set(new_alive_cons)
def test_flasher(self):
alive_cons = [(0, 1),
(1, 1),
(2, 1)]
new_alive_cons = [(1, 0),
(1, 1),
(1, 2)]
assert set(main.new_step(alive_cons)) == set(new_alive_cons) | mit |
N6UDP/cslbot | cslbot/commands/threads.py | 2 | 2008 | # Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import threading
import re
from ..helpers.command import Command
@Command('threads')
def cmd(send, *_):
    """Enumerate threads.

    Syntax: {command}

    """
    # Collected as (numeric thread id, description) so the final listing
    # can be sorted by id.
    thread_names = []
    for x in sorted(threading.enumerate(), key=lambda k: k.name):
        # Only names of the exact form 'Thread-<digits>' match here.
        res = re.match(r'Thread-(\d+$)', x.name)
        if res:
            tid = int(res.group(1))
            # Handle the main server thread (permanently listed as _worker)
            if x._target.__name__ == '_worker':
                thread_names.append((tid, "%s running server thread" % x.name))
            # Handle the multiprocessing pool worker threads (they don't have names beyond Thread-x)
            elif x._target.__module__ == 'multiprocessing.pool':
                thread_names.append((tid, "%s running multiprocessing pool worker thread" % x.name))
            # Handle everything else including MainThread and deferred threads
            else:
                # NOTE(review): this branch is nested inside `if res:`, so
                # threads whose names do not match 'Thread-<digits>' (e.g.
                # MainThread) are never listed, contradicting the comment
                # above — confirm intended behavior.
                # NOTE(review): the re.match below repeats the outer match
                # (minus the '$'), so it always succeeds here and the
                # `tid = 0` default is effectively dead.
                res = re.match(r'Thread-(\d+)', x.name)
                tid = 0
                if res:
                    tid = int(res.group(1))
                thread_names.append((tid, x.name))
    for x in sorted(thread_names, key=lambda k: k[0]):
        send(x[1])
| gpl-2.0 |
LubosD/darling | src/xar/test/integrity.py | 3 | 2962 | #!/usr/bin/env python
from __future__ import print_function
import os
import os.path
import subprocess
import util
#
# Utility Functions
#
def _test_truncation(filename, path_to_be_archived, bytes_to_chop, *args):
    """Create an archive, then repeatedly shrink it and assert xar fails.

    The file is truncated by *bytes_to_chop* per iteration until empty;
    extraction of a truncated archive must always return non-zero.
    NOTE(review): *args and last_size are currently unused — presumably
    *args was meant to be forwarded to archive creation; confirm.
    """
    with util.archive_created(filename, path_to_be_archived) as path:
        with open("/dev/null", "w") as bitbucket:
            size = os.stat(path).st_size
            while size > 0:
                last_size = size
                size = max(0, size - bytes_to_chop)
                # NOTE(review): mode "w+" zero-truncates the file on open
                # before f.truncate(size) runs, so from the first iteration
                # on the "archive" is all zero bytes — extraction still
                # fails either way, but "r+" looks like the intended mode.
                with open(path, "w+") as f:
                    f.truncate(size)
                with util.directory_created("scratch") as directory:
                    returncode = subprocess.call(["xar", "-x", "-f", path, "-C", directory], stderr=bitbucket)
                    assert returncode != 0, "xar claimed to succeed when extracting a truncated archive"
#
# Test Cases
#
# Each case truncates a large archive (man pages) by 1 MiB steps, once per
# supported compression codec; codec-specific cases skip when the local xar
# build lacks that codec.
def large_uncompressed(filename):
    _test_truncation(filename, "/usr/share/man/man1", 1024 * 1024, "--compression=none")
def large_default_compression(filename):
    _test_truncation(filename, "/usr/share/man/man1", 1024 * 1024)
def large_gzip_compressed(filename):
    util.skip_if_no_compression_support("gzip")
    _test_truncation(filename, "/usr/share/man/man1", 1024 * 1024, "--compression=gzip")
def large_bzip2_compressed(filename):
    util.skip_if_no_compression_support("bzip2")
    _test_truncation(filename, "/usr/share/man/man1", 1024 * 1024, "--compression=bzip2")
def large_lzma_compressed(filename):
    util.skip_if_no_compression_support("lzma")
    _test_truncation(filename, "/usr/share/man/man1", 1024 * 1024, "--compression=lzma")
# "small" variants use a non-base-2 size to try to catch issues that occur on uneven boundaries
def small_uncompressed(filename):
    _test_truncation(filename, "/bin", 43651, "--compression=none")
def small_default_compression(filename):
    _test_truncation(filename, "/bin", 43651)
def small_gzip_compressed(filename):
    util.skip_if_no_compression_support("gzip")
    _test_truncation(filename, "/bin", 43651, "--compression=gzip")
def small_bzip2_compressed(filename):
    util.skip_if_no_compression_support("bzip2")
    _test_truncation(filename, "/bin", 43651, "--compression=bzip2")
def small_lzma_compressed(filename):
    util.skip_if_no_compression_support("lzma")
    _test_truncation(filename, "/bin", 43651, "--compression=lzma")
# Registry consumed by the __main__ runner below.
TEST_CASES = (large_uncompressed, large_default_compression,
              large_gzip_compressed, large_bzip2_compressed, large_lzma_compressed,
              small_uncompressed, small_default_compression,
              small_gzip_compressed, small_bzip2_compressed, small_lzma_compressed)
if __name__ == "__main__":
for case in TEST_CASES:
try:
case("{f}.xar".format(f=case.func_name))
print("PASSED: {f}".format(f=case.func_name))
except (AssertionError, IOError, subprocess.CalledProcessError):
import sys, os
print("FAILED: {f}".format(f=case.func_name))
sys.excepthook(*sys.exc_info())
print("")
except util.TestCaseSkipError, e:
print("SKIPPED: {f}: {m}".format(f=case.func_name, m=e.message))
| gpl-3.0 |
osstech-jp/samba | third_party/waf/wafadmin/py3kfixes.py | 32 | 3876 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2009 (ita)
"""
Fixes for py3k go here
"""
import os
# Registry of filename -> list of fixer functions, filled by @subst and
# consumed by fixdir().
all_modifs = {}
def modif(dir, name, fun):
    """Apply the text transformation *fun* to the file *name* under *dir*.

    The special name '*' expands to every .py file in the '.', 'Tools'
    and '3rdparty' subdirectories and recurses once per file.
    """
    if name == '*':
        lst = []
        for y in '. Tools 3rdparty'.split():
            for x in os.listdir(os.path.join(dir, y)):
                if x.endswith('.py'):
                    lst.append(y + os.sep + x)
        #lst = [y + os.sep + x for x in os.listdir(os.path.join(dir, y)) for y in '. Tools 3rdparty'.split() if x.endswith('.py')]
        for x in lst:
            modif(dir, x, fun)
        return
    filename = os.path.join(dir, name)
    f = open(filename, 'r')
    txt = f.read()
    f.close()
    # Rewrite the file in place with the transformed text.
    txt = fun(txt)
    f = open(filename, 'w')
    f.write(txt)
    f.close()
def subst(filename):
    """Decorator factory: register the decorated fixer for *filename*."""
    def do_subst(fun):
        global all_modifs
        try:
            # BUGFIX: was `all_modifs[filename] += fun`, which attempts to
            # concatenate a function object onto a list and raises TypeError
            # (not the KeyError being caught) whenever a filename is
            # registered more than once.
            all_modifs[filename].append(fun)
        except KeyError:
            all_modifs[filename] = [fun]
        return fun
    return do_subst
# Per-file py3k fixers.  Each performs literal string substitutions on the
# file's source text; the replacement payloads must stay byte-exact.
@subst('Constants.py')
def r1(code):
    # bytes literal + ABI bump for the py3 variant
    code = code.replace("'iluvcuteoverload'", "b'iluvcuteoverload'")
    code = code.replace("ABI=7", "ABI=37")
    return code
@subst('Tools/ccroot.py')
def r2(code):
    # pipes carry bytes in py3; decode subprocess output
    code = code.replace("p.stdin.write('\\n')", "p.stdin.write(b'\\n')")
    code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")')
    return code
@subst('Utils.py')
def r3(code):
    # hashlib update() needs bytes in py3
    code = code.replace("m.update(str(lst))", "m.update(str(lst).encode())")
    code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")')
    return code
@subst('ansiterm.py')
def r33(code):
    code = code.replace('unicode', 'str')
    return code
@subst('Task.py')
def r4(code):
    # hash updates need bytes; metaclass syntax changed in py3
    code = code.replace("up(self.__class__.__name__)", "up(self.__class__.__name__.encode())")
    code = code.replace("up(self.env.variant())", "up(self.env.variant().encode())")
    code = code.replace("up(x.parent.abspath())", "up(x.parent.abspath().encode())")
    code = code.replace("up(x.name)", "up(x.name.encode())")
    code = code.replace('class TaskBase(object):\n\t__metaclass__=store_task_type', 'import binascii\n\nclass TaskBase(object, metaclass=store_task_type):')
    code = code.replace('keys=self.cstr_groups.keys()', 'keys=list(self.cstr_groups.keys())')
    code = code.replace("sig.encode('hex')", 'binascii.hexlify(sig)')
    code = code.replace("os.path.join(Options.cache_global,ssig)", "os.path.join(Options.cache_global,ssig.decode())")
    return code
@subst('Build.py')
def r5(code):
    code = code.replace("cPickle.dump(data,file,-1)", "cPickle.dump(data,file)")
    code = code.replace('for node in src_dir_node.childs.values():', 'for node in list(src_dir_node.childs.values()):')
    return code
@subst('*')
def r6(code):
    # generic py2 -> py3 renames applied to every file
    code = code.replace('xrange', 'range')
    code = code.replace('iteritems', 'items')
    code = code.replace('maxint', 'maxsize')
    code = code.replace('iterkeys', 'keys')
    code = code.replace('Error,e:', 'Error as e:')
    code = code.replace('Exception,e:', 'Exception as e:')
    return code
@subst('TaskGen.py')
def r7(code):
    code = code.replace('class task_gen(object):\n\t__metaclass__=register_obj', 'class task_gen(object, metaclass=register_obj):')
    return code
@subst('Tools/python.py')
def r8(code):
    code = code.replace('proc.communicate()[0]', 'proc.communicate()[0].decode("utf-8")')
    return code
@subst('Tools/glib2.py')
def r9(code):
    code = code.replace('f.write(c)', 'f.write(c.encode("utf-8"))')
    return code
@subst('Tools/config_c.py')
def r10(code):
    # the replacement deliberately injects tab-indented source lines
    code = code.replace("key=kw['success']", "key=kw['success']\n\t\t\t\ttry:\n\t\t\t\t\tkey=key.decode('utf-8')\n\t\t\t\texcept:\n\t\t\t\t\tpass")
    code = code.replace('out=str(out)','out=out.decode("utf-8")')
    code = code.replace('err=str(err)','err=err.decode("utf-8")')
    return code
@subst('Tools/d.py')
def r11(code):
    code = code.replace('ret.strip()', 'ret.strip().decode("utf-8")')
    return code
def fixdir(dir):
    """Run every registered fixer against the wafadmin tree under *dir*."""
    # `global` is not strictly needed for read-only access; kept as-is.
    global all_modifs
    for k in all_modifs:
        for v in all_modifs[k]:
            modif(os.path.join(dir, 'wafadmin'), k, v)
| gpl-3.0 |
SexualHealthInnovations/callisto-core | callisto_core/tests/test_base.py | 2 | 9682 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.test import TestCase
from django.urls import reverse
from callisto_core.accounts.models import Account
from callisto_core.delivery import models
from callisto_core.notification.models import EmailNotification
User = get_user_model()
class ReportAssertionHelper(object):
    """Mixin with assertions about persisted Report state."""
    def assert_report_exists(self):
        # Truthy iff self.report's row is still present in the database.
        matching = models.Report.objects.filter(pk=self.report.pk)
        return bool(matching.count())
class ReportPostHelper(object):
    """Mixin of Django test-client request helpers for the report flows.

    Each client_* method performs one GET/POST against a named URL, asserts
    the response status is acceptable, and returns the response.  Relies on
    attributes supplied by the mixing-in TestCase: self.client,
    self.passphrase, self.school_email, assertIn/assertEqual.
    """
    # Redirect codes are accepted because most flows respond with redirects.
    valid_statuses = [200, 301, 302]
    username = "demo"
    password = "demo"
    def client_post_login(self):
        """Create a user (+Account) and log in via the login view."""
        self.user = User.objects.create_user(
            username=self.username, password=self.password
        )
        url = reverse("login")
        if (
            "callisto_core.accounts.auth.EncryptedBackend"
            in settings.AUTHENTICATION_BACKENDS
        ):
            from hashlib import sha256
            import bcrypt
            from callisto_core.accounts.auth import index
            # Mirror the EncryptedBackend storage scheme: hash the username,
            # bcrypt the hash, and log in with the hash instead of the name.
            userhash = sha256(self.username.lower().encode("utf-8")).hexdigest()
            usercrypt = bcrypt.hashpw(userhash.encode("utf-8"), bcrypt.gensalt())
            userindex = index(userhash)
            self.userhash = userhash
            Account.objects.create(
                user=self.user,
                site_id=1,
                encrypted_username=usercrypt.decode(),
                username_index=userindex,
            )
            data = {"username": self.userhash, "password": self.password}
        else:
            data = {"username": self.username, "password": self.password}
            Account.objects.create(user=self.user, site_id=1)
        response = self.client.post(url, data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_get_report_creation(self):
        url = reverse("report_new")
        response = self.client.get(url)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_report_creation(self):
        """Create a report; stores it on self.report for later steps."""
        self.client_get_report_creation()
        url = reverse("report_new")
        data = {"key": self.passphrase, "key_confirmation": self.passphrase}
        response = self.client.post(url, data, follow=True)
        self.report = response.context["report"]
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_get_report_delete(self):
        url = reverse("report_delete", kwargs={"uuid": self.report.uuid})
        response = self.client.get(url)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_report_delete(self):
        self.client_get_report_delete()
        url = reverse("report_delete", kwargs={"uuid": self.report.uuid})
        data = {"key": self.passphrase}
        response = self.client.post(url, data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_report_pdf_view(self, skip_assertions=False):
        # skip_assertions lets callers exercise failure paths themselves.
        url = reverse("report_pdf_view", kwargs={"uuid": self.report.uuid})
        data = {"key": self.passphrase}
        response = self.client.post(url, data, follow=True)
        if not skip_assertions:
            self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_get_review(self):
        url = reverse(
            "report_update", kwargs={"uuid": self.report.uuid, "step": "done"}
        )
        response = self.client.get(url)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_answer_question(self):
        """Answer a wizard question and verify it round-trips encrypted."""
        url = reverse("report_update", kwargs={"uuid": self.report.uuid, "step": "0"})
        self.data = {"question_3": "blanket ipsum pillowfight"}
        response = self.client.post(url, self.data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        self.report.refresh_from_db()
        self.assertEqual(
            self.decrypted_report["data"]["question_3"], self.data["question_3"]
        )
        return response
    def client_post_answer_second_page_question(self):
        url = reverse("report_update", kwargs={"uuid": self.report.uuid, "step": "1"})
        self.data = {"question_2": "cupcake ipsum catsmeow"}
        response = self.client.post(url, self.data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        self.report.refresh_from_db()
        self.assertEqual(
            self.decrypted_report["data"]["question_2"], self.data["question_2"]
        )
        return response
    def client_post_report_access(self, url):
        # Unlock an arbitrary report-scoped URL with the passphrase.
        response = self.client.post(url, data={"key": self.passphrase}, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_report_prep(self):
        """Submit contact info on the reporting prep step."""
        self.report_contact_email = self.school_email
        self.report_contact_phone = "555-555-5555"
        response = self.client.post(
            reverse("reporting_prep", kwargs={"uuid": self.report.uuid}),
            data={
                "contact_email": self.report_contact_email,
                "contact_phone": self.report_contact_phone,
            },
            follow=True,
        )
        self.report.refresh_from_db()
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_get_matching_enter_empty(self):
        url = reverse("reporting_matching_enter", kwargs={"uuid": self.report.uuid})
        response = self.client.get(url)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_matching_enter_empty(self):
        # Submitting an empty identifier opts out of matching.
        self.client_get_matching_enter_empty()
        url = reverse("reporting_matching_enter", kwargs={"uuid": self.report.uuid})
        data = {"facebook_identifier": ""}
        response = self.client.post(url, data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_get_matching_enter(self):
        url = reverse("matching_enter", kwargs={"uuid": self.report.uuid})
        response = self.client.get(url)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_matching_withdraw(self):
        url = reverse("matching_withdraw", kwargs={"uuid": self.report.uuid})
        data = {"key": self.passphrase}
        response = self.client.post(url, data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_matching_enter(
        self, identifier="https://www.facebook.com/callistoorg"
    ):
        url = reverse("matching_enter", kwargs={"uuid": self.report.uuid})
        data = {"facebook_identifier": identifier}
        response = self.client.post(url, data, follow=True)
        self.assertIn(response.status_code, self.valid_statuses)
        return response
    def client_post_reporting_end_step(self):
        """Confirm and submit the final reporting step."""
        response = self.client.post(
            reverse("reporting_end_step", kwargs={"uuid": self.report.uuid}),
            data={"confirmation": True, "key": self.passphrase},
            follow=True,
        )
        self.assertIn(response.status_code, self.valid_statuses)
        return response
class ReportFlowHelper(TestCase, ReportPostHelper, ReportAssertionHelper):
    """TestCase base that wires up a site, a logged-in user and
    passphrase/session helpers for exercising the report flow."""
    # Passphrase used to encrypt and decrypt the test report.
    passphrase = "super secret"
    school_email = "HUMAN_STUDENT_TOTALLY_NOT_A_WOLF@example.edu"
    fixtures = ["wizard_builder_data", "callisto_core_notification_data"]

    @property
    def decrypted_report(self):
        """The report record decrypted with the test passphrase."""
        return self.report.decrypt_record(self.passphrase)

    def setUp(self):
        self._setup_sites()
        self._setup_user()

    def _setup_user(self):
        # Create a user plus Account; when the encrypted auth backend is
        # configured, the Account stores a bcrypt-encrypted username and a
        # lookup index, and login uses the sha256 username hash instead.
        username = "testing_122"
        self.user = User.objects.create_user(username=username, password="testing_12")
        if (
            "callisto_core.accounts.auth.EncryptedBackend"
            in settings.AUTHENTICATION_BACKENDS
        ):
            from hashlib import sha256

            import bcrypt

            from callisto_core.accounts.auth import index

            userhash = sha256(username.lower().encode("utf-8")).hexdigest()
            usercrypt = bcrypt.hashpw(userhash.encode("utf-8"), bcrypt.gensalt())
            userindex = index(userhash)
            Account.objects.create(
                user=self.user,
                site_id=1,
                school_email=self.school_email,
                encrypted_username=usercrypt.decode(),
                username_index=userindex,
            )
            # The encrypted backend authenticates against the hashed username.
            self.client.login(username=userhash, password="testing_12")
            self.userhash = userhash
        else:
            Account.objects.create(
                user=self.user, site_id=1, school_email=self.school_email
            )
            self.client.login(username=username, password="testing_12")

    def _setup_sites(self):
        # Point the default Site at the hostname Django's test client uses.
        self.site = Site.objects.get(id=1)
        self.site.domain = "testserver"
        self.site.save()

    def client_clear_passphrase(self):
        """Remove all stored passphrases from the test client's session."""
        session = self.client.session
        session["passphrases"] = {}
        session.save()
        self.assertEqual(self.client.session.get("passphrases"), {})

    def client_set_passphrase(self):
        """Store this report's passphrase in the session, as the UI would."""
        session = self.client.session
        passphrases = session.get("passphrases", {})
        passphrases[str(self.report.uuid)] = self.passphrase
        session["passphrases"] = passphrases
        session.save()
| agpl-3.0 |
m4nh/roars | scripts/nodes/examples/tf_obj_detector_example.py | 1 | 2868 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from roars.rosutils.rosnode import RosNode
from roars.vision.cameras import CameraRGB
from roars.detections.prediction import prediction
from roars.detections.tensorflow_detector_wrapper import tensorflow_detector_wrapper
from roars.gui import cv_show_detection
import cv2
import numpy as np
import argparse
from random import randint
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
if __name__=='__main__':
    # CLI: a frozen inference graph (.pb), a label map (.pbtxt), and an
    # optional on-screen visualization switch for debugging.
    parser = argparse.ArgumentParser(description="Load an inference graph and use it on the input images")
    parser.add_argument('-g','--graph',help="path to the pb file with the graph and weight definition",required=True)
    parser.add_argument('-l','--labelMap',help="path to the pbtxt containing the label definition",required=True)
    parser.add_argument('-v','--visualization',help="flag to enable visualization (only for debug)",action='store_true')
    args = parser.parse_args()

    #create tensorflow wrapper class
    detector = tensorflow_detector_wrapper(args.graph,args.labelMap)
    if args.visualization:
        # Color map is only needed when drawing detections on screen.
        classes = detector.getClassDictionary()
        c_map = cv_show_detection.getColorMap(classes)

    #node creation
    node = RosNode("tf_obj_detector")

    #⬢⬢⬢⬢⬢➤ Sets HZ from parameters
    node.setHz(node.setupParameter("hz", 60))

    #create publisher for the detection matrices
    prediction_topic = node.setupParameter(
        "prediction_topic",
        "/detections"
    )
    publisher = node.createPublisher(prediction_topic,numpy_msg(Floats))

    #⬢⬢⬢⬢⬢➤ Creates Camera Proxy
    camera_topic = node.setupParameter(
        "camera_topic",
        "/camera/rgb/image_raw/compressed"
    )
    camera_file = node.getFileInPackage(
        'roars',
        'data/camera_calibrations/asus_xtion.yml'
    )
    camera = CameraRGB(
        configuration_file=camera_file,
        rgb_topic=camera_topic,
        compressed_image="compressed" in camera_topic
    )

    #⬢⬢⬢⬢⬢➤ NODE
    def image_callback(frame):
        """Per-frame handler: run the detector on the frame and publish results."""
        #grab image from frame
        img = frame.rgb_image.copy()
        #convert to rgb (camera frames arrive BGR — OpenCV convention)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        #detection
        predictions = detector.detect(img_rgb)
        #convert predictions in a matrix with each row representing a different detection
        msg = prediction.toMatrix(predictions)
        #publish the detection
        #TODO: add timestamp to stay in sincro with frames?
        publisher.publish(msg)
        if args.visualization:
            immy = cv_show_detection.draw_prediction(img_rgb,predictions,c_map)
            cv2.imshow('detections',immy)
            cv2.waitKey(1)

    camera.registerUserCallabck(image_callback)

    #⬢⬢⬢⬢⬢➤ Main Loop — spins the node at the configured rate
    while node.isActive():
        node.tick()
| gpl-3.0 |
Hasky0911/Maoyan | node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Returns a (projects, dependencies) pair: projects maps a project name to
  [path, clsid, original path]; dependencies maps a project name to the
  sorted list of names of the projects it depends on.
  """
  # All projects, their clsid and paths.
  projects = dict()

  # A list of dependencies associated with a project.
  dependencies = dict()

  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  solution = open(solution_file)
  # Line-by-line state machine: `current_project` tracks the project whose
  # section we are inside; `in_deps` whether we are inside its dependency list.
  for line in solution:
    results = begin_project.search(line)
    if results:
      # Hack to remove icu because the diff is too different.
      if results.group(1).find('icu') != -1:
        continue
      # We remove "_gyp" from the names because it helps to diff them.
      current_project = results.group(1).replace('_gyp', '')
      projects[current_project] = [results.group(2).replace('_gyp', ''),
                                   results.group(3),
                                   results.group(2)]
      dependencies[current_project] = []
      continue

    results = end_project.search(line)
    if results:
      current_project = None
      continue

    results = begin_dep.search(line)
    if results:
      in_deps = True
      continue

    results = end_dep.search(line)
    if results:
      in_deps = False
      continue

    results = dep_line.search(line)
    if results and in_deps and current_project:
      dependencies[current_project].append(results.group(1))
      continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this cldis
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
  """Pretty-print every project's .vcproj file via the pretty_vcproj module."""

  for project in projects:
    print "-------------------------------------"
    print "-------------------------------------"
    print project
    print project
    print project
    print "-------------------------------------"
    print "-------------------------------------"

    # Resolve the project's path relative to the solution file (argv[1]).
    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    # Forward any extra command-line arguments (past argv[2]) to pretty_vcproj.
    argv.extend(sys.argv[3:])
    pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
  # Exit status: 0 on success, 1 on usage error.
  sys.exit(main())
| mit |
le9i0nx/ansible | contrib/inventory/gce.py | 47 | 18346 | #!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
Version: 0.0.3
'''
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
    """JSON file cache for cloud inventory data, with mtime-based expiry."""

    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        directory = os.path.expanduser(cache_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        self.cache_path_cache = os.path.join(directory, cache_name)
        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        ''' Determines if the cache files have expired, or if it is still valid '''
        max_age = self.cache_max_age if max_age is None else max_age
        if not os.path.isfile(self.cache_path_cache):
            return False
        # Valid while the file's mtime is within max_age seconds of now.
        expiry = os.path.getmtime(self.cache_path_cache) + max_age
        return expiry > time()

    def get_all_data_from_cache(self, filename=''):
        ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
        source = filename or self.cache_path_cache
        with open(source, 'r') as cache_file:
            raw = cache_file.read()
        return json.loads(raw)

    def write_to_cache(self, data, filename=''):
        ''' Writes data to file as JSON. Returns True. '''
        target = filename or self.cache_path_cache
        with open(target, 'w') as cache_file:
            cache_file.write(json.dumps(data))
        return True
class GceInventory(object):
    """Builds and prints an Ansible dynamic inventory of GCE instances,
    using libcloud for the API calls and a JSON file cache."""

    def __init__(self):
        # Cache object
        self.cache = None
        # dictionary containing inventory read from disk
        self.inventory = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.config = self.get_config()
        self.driver = self.get_gce_driver()
        self.ip_type = self.get_inventory_options()
        if self.ip_type:
            self.ip_type = self.ip_type.lower()

        # Cache management
        start_inventory_time = time()
        cache_used = False
        if self.args.refresh_cache or not self.cache.is_valid():
            self.do_api_calls_update_cache()
        else:
            self.load_inventory_from_cache()
            cache_used = True
            # NOTE(review): this assignment is unconditionally overwritten by
            # the one just below, so the 'use_cache' key is never visible.
            self.inventory['_meta']['stats'] = {'use_cache': True}
        self.inventory['_meta']['stats'] = {
            'inventory_load_time': time() - start_inventory_time,
            'cache_used': cache_used
        }

        # Just display data for specific host
        if self.args.host:
            print(self.json_format_dict(
                self.inventory['_meta']['hostvars'][self.args.host],
                pretty=self.args.pretty))
        else:
            # Otherwise, assume user wants all instances grouped
            # NOTE(review): `zones` is computed here but never used; zone
            # filtering actually happens in group_instances() at refresh time.
            zones = self.parse_env_zones()
            print(self.json_format_dict(self.inventory,
                                        pretty=self.args.pretty))
        sys.exit(0)

    def get_config(self):
        """
        Reads the settings from the gce.ini file.

        Populates a SafeConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the current
        working directory.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = configparser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'gce_zone': '',
            'libcloud_secrets': '',
            'inventory_ip_type': '',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': '300'
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        if 'inventory' not in config.sections():
            config.add_section('inventory')
        if 'cache' not in config.sections():
            config.add_section('cache')

        config.read(gce_ini_path)

        #########
        # Section added for processing ini settings
        #########

        # Set the instance_states filter based on config file options
        self.instance_states = []
        if config.has_option('gce', 'instance_states'):
            states = config.get('gce', 'instance_states')
            # Ignore if instance_states is an empty string.
            if states:
                self.instance_states = states.split(',')

        # Caching: instantiate the file cache used by __init__.
        cache_path = config.get('cache', 'cache_path')
        cache_max_age = config.getint('cache', 'cache_max_age')
        # TOOD(supertom): support project-specific caches
        cache_name = 'ansible-gce.cache'
        self.cache = CloudInventoryCache(cache_path=cache_path,
                                         cache_max_age=cache_max_age,
                                         cache_name=cache_name)
        return config

    def get_inventory_options(self):
        """Determine inventory options. Environment variables always
        take precedence over configuration files."""
        ip_type = self.config.get('inventory', 'inventory_ip_type')
        # If the appropriate environment variables are set, they override
        # other configuration
        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
        return ip_type

    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
        """
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = self.config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            # First try a secrets.py already on sys.path.
            import secrets
            args = list(secrets.GCE_PARAMS)
            kwargs = secrets.GCE_KEYWORD_PARAMS
            secrets_found = True
        except:
            pass

        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                sys.exit(err)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            # Fall back to the credentials given in the ini file.
            args = [
                self.config.get('gce', 'gce_service_account_email_address'),
                self.config.get('gce', 'gce_service_account_pem_file_path')
            ]
            kwargs = {'project': self.config.get('gce', 'gce_project_id'),
                      'datacenter': self.config.get('gce', 'gce_zone')}

        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
        kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])

        # Retrieve and return the GCE driver.
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        return gce

    def parse_env_zones(self):
        '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
        If provided, this will be used to filter the results of the grouped_instances call'''
        import csv
        # csv handles quoting/whitespace in the comma-separated value.
        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
        zones = [r for r in reader]
        return [z for z in zones[0]]

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on GCE')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty format (default: False)')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            help='Force refresh of cache by making API requests (default: False - use cache files)')
        self.args = parser.parse_args()

    def node_to_dict(self, inst):
        """Flatten a libcloud Node into the gce_* hostvars dictionary."""
        md = {}

        if inst is None:
            return {}

        if 'items' in inst.extra['metadata']:
            for entry in inst.extra['metadata']['items']:
                md[entry['key']] = entry['value']

        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        subnet = None
        if 'subnetwork' in inst.extra['networkInterfaces'][0]:
            subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
        # default to external IP unless user has specified they prefer internal
        if self.ip_type == 'internal':
            ssh_host = inst.private_ips[0]
        else:
            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]

        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,
            'gce_image': inst.image,
            'gce_machine_type': inst.size,
            'gce_private_ip': inst.private_ips[0],
            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
            'gce_name': inst.name,
            'gce_description': inst.extra['description'],
            'gce_status': inst.extra['status'],
            'gce_zone': inst.extra['zone'].name,
            'gce_tags': inst.extra['tags'],
            'gce_metadata': md,
            'gce_network': net,
            'gce_subnetwork': subnet,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': ssh_host
        }

    def load_inventory_from_cache(self):
        ''' Loads inventory from JSON on disk. '''
        try:
            self.inventory = self.cache.get_all_data_from_cache()
            # Accessing _meta/hostvars probes the cache's shape; a malformed
            # file raises here and is reported below.
            hosts = self.inventory['_meta']['hostvars']
        except Exception as e:
            print(
                "Invalid inventory file %s. Please rebuild with -refresh-cache option."
                % (self.cache.cache_path_cache))
            raise

    def do_api_calls_update_cache(self):
        ''' Do API calls and save data in cache. '''
        zones = self.parse_env_zones()
        data = self.group_instances(zones)
        self.cache.write_to_cache(data)
        self.inventory = data

    def list_nodes(self):
        """Fetch all nodes, following GCE API pagination (500 per page)."""
        all_nodes = []
        params, more_results = {'maxResults': 500}, True
        while more_results:
            # The driver updates gce_params in place; a 'pageToken' key is
            # present while more pages remain.
            self.driver.connection.gce_params = params
            all_nodes.extend(self.driver.list_nodes())
            more_results = 'pageToken' in params
        return all_nodes

    def group_instances(self, zones=None):
        '''Group all instances'''
        groups = {}
        meta = {}
        meta["hostvars"] = {}

        for node in self.list_nodes():

            # This check filters on the desired instance states defined in the
            # config file with the instance_states config option.
            #
            # If the instance_states list is _empty_ then _ALL_ states are returned.
            #
            # If the instance_states list is _populated_ then check the current
            # state against the instance_states list
            if self.instance_states and not node.extra['status'] in self.instance_states:
                continue

            name = node.name

            meta["hostvars"][name] = self.node_to_dict(node)

            zone = node.extra['zone'].name

            # To avoid making multiple requests per zone
            # we list all nodes and then filter the results
            if zones and zone not in zones:
                continue

            # Group by zone.
            if zone in groups:
                groups[zone].append(name)
            else:
                groups[zone] = [name]

            # Group by tag: 'group-X' tags become group 'X', others 'tag_X'.
            tags = node.extra['tags']
            for t in tags:
                if t.startswith('group-'):
                    tag = t[6:]
                else:
                    tag = 'tag_%s' % t
                if tag in groups:
                    groups[tag].append(name)
                else:
                    groups[tag] = [name]

            # Group by network name.
            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
            net = 'network_%s' % net
            if net in groups:
                groups[net].append(name)
            else:
                groups[net] = [name]

            # Group by machine type.
            machine_type = node.size
            if machine_type in groups:
                groups[machine_type].append(name)
            else:
                groups[machine_type] = [name]

            # Group by image (persistent root disks expose no image name).
            image = node.image and node.image or 'persistent_disk'
            if image in groups:
                groups[image].append(name)
            else:
                groups[image] = [name]

            # Group by running status.
            status = node.extra['status']
            stat = 'status_%s' % status.lower()
            if stat in groups:
                groups[stat].append(name)
            else:
                groups[stat] = [name]

            # Single-member groups keyed by each IP address.
            for private_ip in node.private_ips:
                groups[private_ip] = [name]

            if len(node.public_ips) >= 1:
                for public_ip in node.public_ips:
                    groups[public_ip] = [name]

        groups["_meta"] = meta

        return groups

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script: the constructor does all the work and calls sys.exit().
if __name__ == '__main__':
    GceInventory()
| gpl-3.0 |
evernym/plenum | plenum/server/router.py | 2 | 3859 | from collections import deque, OrderedDict
from inspect import isawaitable
from typing import Callable, Any, NamedTuple, Union, Iterable
from typing import Tuple
from stp_core.common.log import getlogger
logger = getlogger()

# A route pairs a message type (class or NamedTuple) with its handler callable.
Route = Tuple[Union[type, NamedTuple], Callable]
class Router:
    """
    A simple router.

    Dispatches messages to handler callables based on the message's type.
    Construct it from (type, handler) pairs; lookup respects insertion order.
    """

    def __init__(self, *routes: Route):
        """
        Create a new router with a list of routes

        :param routes: each route is a tuple of a type and a callable, so that
        the router knows which callable to invoke when presented with an object
        of a particular type.
        """
        self.routes = OrderedDict(routes)

    def add(self, route: Route):
        msg_type, handler = route
        self.routes[msg_type] = handler

    def extend(self, routes: Iterable[Route]):
        for route in routes:
            self.add(route)

    def remove(self, routes: Iterable[Route]):
        for msg_type in routes:
            self.routes.pop(msg_type, None)

    def getFunc(self, o: Any) -> Callable:
        """
        Return the first registered handler whose type matches *o*,
        raising RuntimeError (after logging the known types) if none does.
        """
        matched = next(
            (handler for typ, handler in self.routes.items()
             if isinstance(o, typ)),
            None,
        )
        if matched is not None:
            return matched
        logger.error("Unhandled msg {}, available handlers are:".format(o))
        for typ in self.routes.keys():
            logger.error(" {}".format(typ))
        raise RuntimeError("unhandled msg: {}".format(o))

    # noinspection PyCallingNonCallable
    def handleSync(self, msg: Any) -> Any:
        """
        Dispatch *msg* to its handler. A plain 2-tuple (not a NamedTuple) is
        routed on its first element and unpacked as the handler's arguments.
        """
        # TODO: non-obvious tuple, re-factor!
        is_plain_pair = (
            isinstance(msg, tuple)
            and len(msg) == 2
            and not hasattr(msg, '_field_types')
        )
        if is_plain_pair:
            return self.getFunc(msg[0])(*msg)
        return self.getFunc(msg)(msg)

    async def handle(self, msg: Any) -> Any:
        """
        Dispatch *msg*, awaiting the handler's result if it is awaitable.

        :param msg: a message
        :return: the result of execution of the function corresponding to this message's type
        """
        outcome = self.handleSync(msg)
        return (await outcome) if isawaitable(outcome) else outcome

    async def handleAll(self, deq: deque, limit=None) -> int:
        """
        Handle items from the left of *deq* (possibly asynchronously),
        stopping at *limit* items if given.

        :return: the number of items handled
        """
        handled = 0
        while deq:
            if limit and handled >= limit:
                break
            handled += 1
            await self.handle(deq.popleft())
        return handled

    def handleAllSync(self, deq: deque, limit=None) -> int:
        """
        Synchronously handle items from the left of *deq*,
        stopping at *limit* items if given.

        :return: the number of items handled
        """
        handled = 0
        while deq:
            if limit and handled >= limit:
                break
            handled += 1
            self.handleSync(deq.popleft())
        return handled
| apache-2.0 |
amcat/amcat | amcat/tools/association.py | 1 | 8749 | ##########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
An association represents a table of conditional probabilities for a set
of SearchQuery objects.
"""
import datetime
from collections import namedtuple, defaultdict
from functools import partial
from itertools import product
from amcat.tools import amcates
from amcat.tools.caching import cached
from . import dot
# One ES hit: (article id, matched query, interval bucket, weight).
ArticleScore = namedtuple("ArticleScore", ["id", "query", "interval", "score"])
# Conditional probability P(of | given) within one interval bucket.
ArticleAssociation = namedtuple("ArticleAssociation", ["interval", "probability", "of", "given"])
def format_func(f):
    """Wrap formatter *f* so that string placeholders (e.g. "-") pass through
    unchanged while numbers are formatted."""
    def _format(value):
        if isinstance(value, str):
            return value
        return f(value)
    return _format
# Preset probability renderers, keyed by the label offered to the user.
FORMATS = {
    "0.12": format_func("{:1.2f}".format),
    "0.123": format_func("{:1.3f}".format),
    "12%": format_func(lambda p: "{:1.0f}%".format(p*100)),
    "12.3%": format_func(lambda p: "{:1.1f}%".format(p*100))
}

# Valid values for the Association ``interval`` argument (None = no bucketing).
INTERVALS = {None, "year", "month", "week"}
def get_trivial_weight(result):
    """Constant weight: every matching article counts fully, regardless of score."""
    return 1.0
def get_asymptotic_weight(result):
    """
    Asymptotic quadratic weight function: approaches 1.0 as the hit's
    score grows, 0.0 for a zero score.

    @type result: SearchResult
    """
    return 1.0 - 0.5 ** result.score
def get_node(queries, query):
    """Build a dot.Node for *query*, keyed on the query count and its label."""
    node_id = "node_{}_{}".format(len(queries), query.label)
    return dot.Node(node_id, query.label)
class Intervals(object):
    """Namespace of functions that bucket a date into an interval label."""

    @classmethod
    def trivial(cls, date):
        # No bucketing: everything falls into the single ``None`` bucket.
        return None

    @classmethod
    def year(cls, date):
        return "{date.year}-01-01".format(date=date)

    @classmethod
    def month(cls, date):
        return "{date.year}-{date.month:02}-01".format(date=date)

    @classmethod
    def quarter(cls, date):
        # Floor division: months 1-3 -> quarter 1, ..., 10-12 -> quarter 4.
        # (Was "/", which under Python 3 yields a float and garbles the
        # label, e.g. "2015-2.3333333333333335".)
        quarter = 1 + (date.month - 1) // 3
        return "{date.year}-{quarter}".format(date=date, quarter=quarter)

    @classmethod
    def week(cls, date):
        # Shift back to the Monday of the ISO week containing ``date``.
        date = date + datetime.timedelta(days=-date.weekday())
        return "{date.year}-{date.month:02}-{date.day:02}".format(date=date)

    @classmethod
    def get(cls, interval):
        """Return the bucketing function for *interval*.

        @raise ValueError: if *interval* is not in INTERVALS.
        """
        if interval not in INTERVALS:
            error_msg = "{} is not a valid interval. Choose from: {}"
            raise ValueError(error_msg.format(interval, INTERVALS))
        if interval is None:
            return cls.trivial
        return getattr(cls, interval)
class Association(object):
"""
"""
    def __init__(self, queries, filters, interval=None, weighted=False):
        """
        @type queries: [SearchQuery]
        @param filters: elastic filters applied to every query
        @type interval: str (one of INTERVALS, or None for no bucketing)
        @type weighted: bool (True = asymptotic score weights, False = count hits)
        """
        self.interval = interval
        self.weighted = weighted
        self.queries = queries
        self.filters = filters
        # The date field is only needed when hits must be bucketed by interval.
        self.fields = ["date"] if interval else []
        self.elastic_api = amcates.ES()
        self.score_func = get_trivial_weight
        self.interval_func = Intervals.get(interval)
        if weighted:
            self.score_func = get_asymptotic_weight
def _get_query_arguments(self, query):
return {
"score": self.weighted,
"_source": self.fields,
"filters": self.filters,
"query": query.query
}
def _get_query(self, query):
return self.elastic_api.query_all(**self._get_query_arguments(query))
def _get_scores(self):
# Ideally, we would like to use elastic aggregations for the
# intervals, but we need scores simultaneously so we can't.
for query in self.queries:
for a in self._get_query(query):
interval = self.interval_func(getattr(a, "date", None))
yield ArticleScore(a.id, query, interval, self.score_func(a))
    @cached
    def get_scores(self):
        """All ArticleScores, materialized once per instance (see _get_scores)."""
        return tuple(self._get_scores())
@cached
def get_intervals(self):
return sorted(set(s.interval for s in self.get_scores()))
@cached
def get_queries(self):
return tuple(sorted(self.queries, key=lambda q: q.label))
    def _get_conditional_probabilities(self):
        """Yield P(query2 | query1) per interval for every ordered query pair.

        @return: [ArticleAssociation]
        """
        # probs[interval][query][article_id] -> score
        probs = defaultdict(partial(defaultdict, dict))
        for aid, query, interval, score in self.get_scores():
            probs[interval][query][aid] = score

        for interval, queries in probs.items():
            for query1, query2 in product(queries, queries):
                sumprob1 = sum(queries[query1].values())

                # A query is trivially associated with itself.
                if query1 == query2:
                    yield ArticleAssociation(interval, 1.0, query1, query2)
                    continue

                # No mass for query1 in this interval: probability undefined,
                # represented by the "-" placeholder (see format_func).
                if sumprob1 == 0:
                    yield ArticleAssociation(interval, "-", query1, query2)
                    continue

                # Joint mass: sum score products over articles hit by both.
                sumprob2 = 0
                for aid, p1 in queries[query1].items():
                    try:
                        sumprob2 += p1 * queries[query2][aid]
                    except KeyError:
                        continue

                # probability of given
                yield ArticleAssociation(interval, sumprob2 / sumprob1, query1, query2)
@cached
def get_conditional_probabilities(self):
return tuple(self._get_conditional_probabilities())
def _get_table(self, format):
# Get conditional probabilities and sort on interval / query labels
probs = self.get_conditional_probabilities()
probs = sorted(probs, key=lambda aa: (aa.interval, aa.of.label, aa.given.label))
for interval, probability, of, given in probs:
if of == given:
continue
yield interval, of, given, format(probability)
def get_table(self, format=str):
"""Render associations as table.
@param format: function which renders probabilities. Should take a float and return a string
@return: (headers, rows)"""
return ["Interval", "From", "To", "Association"], self._get_table(format)
def _get_crosstable(self, probs, format):
yield ("",) + self.get_queries()
for q1 in self.get_queries():
yield (q1,) + tuple(format(probs[q1][q2]) for q2 in self.get_queries())
def get_crosstables(self, format=str):
probabilities = defaultdict(partial(defaultdict, partial(defaultdict, lambda: "-")))
for aa in self.get_conditional_probabilities():
probabilities[aa.interval][aa.of][aa.given] = aa.probability
for interval, probs in sorted(probabilities.items()):
yield (interval, self._get_crosstable(probs, format))
def _get_graph(self, format, threshold, include_labels, associations):
graph = dot.Graph()
queries = self.get_queries()
nodes = {query: get_node(queries, query) for query in queries}
for _, p, of, given in associations:
if isinstance(p, str) or p <= threshold or of == given or p == 0.0:
continue
label = format(p) if include_labels else ""
graph.addEdge(nodes[of], nodes[given], weight=1+10*p, label=label)
graph.normalizeWeights()
return graph
def get_graphs(self, format=str, threshold=-1, include_labels=False):
probs = defaultdict(list)
for aa in self.get_conditional_probabilities():
probs[aa.interval].append(aa)
for interval, aas in sorted(probs.items()):
yield interval, self._get_graph(format, threshold, include_labels, aas)
| agpl-3.0 |
cbertinato/pandas | pandas/core/reshape/pivot.py | 1 | 21644 | import numpy as np
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_scalar
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.core.common as com
from pandas.core.frame import _shared_docs
from pandas.core.groupby import Grouper
from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
from pandas.core.reshape.concat import concat
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
# Note: We need to make sure `frame` is imported before `pivot`, otherwise
# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot_table'], indents=1)
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
                fill_value=None, margins=False, dropna=True,
                margins_name='All', observed=False):
    # No docstring here: the @Appender decorator attaches the shared
    # pivot_table documentation.
    index = _convert_by(index)
    columns = _convert_by(columns)

    if isinstance(aggfunc, list):
        # Multiple aggfuncs: build one pivot per function and concat them
        # side by side, keyed by the function names as the top column level.
        pieces = []
        keys = []
        for func in aggfunc:
            table = pivot_table(data, values=values, index=index,
                                columns=columns,
                                fill_value=fill_value, aggfunc=func,
                                margins=margins, dropna=dropna,
                                margins_name=margins_name,
                                observed=observed)
            pieces.append(table)
            keys.append(getattr(func, '__name__', func))
        return concat(pieces, keys=keys, axis=1)

    keys = index + columns

    values_passed = values is not None
    if values_passed:
        if is_list_like(values):
            values_multi = True
            values = list(values)
        else:
            values_multi = False
            values = [values]

        # GH14938 Make sure value labels are in data
        for i in values:
            if i not in data:
                raise KeyError(i)

        # Restrict data to the columns actually used (grouping keys + values);
        # unhashable entries (TypeError on `in data`) are simply skipped.
        to_filter = []
        for x in keys + values:
            if isinstance(x, Grouper):
                x = x.key
            try:
                if x in data:
                    to_filter.append(x)
            except TypeError:
                pass
        if len(to_filter) < len(data.columns):
            data = data[to_filter]

    else:
        # No values given: aggregate every column that is not a grouping key.
        values = data.columns
        for key in keys:
            try:
                values = values.drop(key)
            except (TypeError, ValueError, KeyError):
                pass
        values = list(values)

    grouped = data.groupby(keys, observed=observed)
    agged = grouped.agg(aggfunc)
    if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
        agged = agged.dropna(how='all')

        # gh-21133
        # we want to down cast if
        # the original values are ints
        # as we grouped with a NaN value
        # and then dropped, coercing to floats
        for v in values:
            if (v in data and is_integer_dtype(data[v]) and
                    v in agged and not is_integer_dtype(agged[v])):
                agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)

    table = agged
    if table.index.nlevels > 1:
        # Related GH #17123
        # If index_names are integers, determine whether the integers refer
        # to the level position or name.
        index_names = agged.index.names[:len(index)]
        to_unstack = []
        for i in range(len(index), len(keys)):
            name = agged.index.names[i]
            if name is None or name in index_names:
                to_unstack.append(i)
            else:
                to_unstack.append(name)
        # Move the `columns` groupers from the row index into the columns.
        table = agged.unstack(to_unstack)

    if not dropna:
        # Reindex against the full cartesian product of the levels so that
        # empty group combinations still appear (as NaN rows/columns).
        from pandas import MultiIndex
        if table.index.nlevels > 1:
            m = MultiIndex.from_arrays(cartesian_product(table.index.levels),
                                       names=table.index.names)
            table = table.reindex(m, axis=0)

        if table.columns.nlevels > 1:
            m = MultiIndex.from_arrays(cartesian_product(table.columns.levels),
                                       names=table.columns.names)
            table = table.reindex(m, axis=1)

    if isinstance(table, ABCDataFrame):
        table = table.sort_index(axis=1)

    if fill_value is not None:
        table = table.fillna(value=fill_value, downcast='infer')

    if margins:
        if dropna:
            # Margins are computed over rows with no missing values only.
            data = data[data.notna().all(axis=1)]
        table = _add_margins(table, data, values, rows=index,
                             cols=columns, aggfunc=aggfunc,
                             observed=dropna,
                             margins_name=margins_name, fill_value=fill_value)

    # discard the top level
    if (values_passed and not values_multi and not table.empty and
            (table.columns.nlevels > 1)):
        table = table[values[0]]

    if len(index) == 0 and len(columns) > 0:
        table = table.T

    # GH 15193 Make sure empty columns are removed if dropna=True
    if isinstance(table, ABCDataFrame) and dropna:
        table = table.dropna(how='all', axis=1)

    return table
def _add_margins(table, data, values, rows, cols, aggfunc,
                 observed=None, margins_name='All', fill_value=None):
    """Append margin (subtotal) rows/columns named *margins_name* to a pivot.

    Raises ValueError if *margins_name* is not a string or collides with an
    existing label on the table's index or columns.
    """
    if not isinstance(margins_name, str):
        raise ValueError('margins_name argument must be a string')

    msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
    for level in table.index.names:
        if margins_name in table.index.get_level_values(level):
            raise ValueError(msg)

    grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)

    # could be passed a Series object with no 'columns'
    if hasattr(table, 'columns'):
        for level in table.columns.names[1:]:
            if margins_name in table.columns.get_level_values(level):
                raise ValueError(msg)

    # Key for the margin row: pad with '' to match a MultiIndex depth.
    if len(rows) > 1:
        key = (margins_name,) + ('',) * (len(rows) - 1)
    else:
        key = margins_name

    if not values and isinstance(table, ABCSeries):
        # If there are no values and the table is a series, then there is only
        # one column in the data. Compute grand margin and return it.
        return table.append(Series({key: grand_margin[margins_name]}))

    if values:
        marginal_result_set = _generate_marginal_results(table, data, values,
                                                         rows, cols, aggfunc,
                                                         observed,
                                                         grand_margin,
                                                         margins_name)
        # The helpers return a non-tuple when the result is already complete.
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    else:
        marginal_result_set = _generate_marginal_results_without_values(
            table, data, rows, cols, aggfunc, observed, margins_name)
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set

    row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
    # populate grand margin
    for k in margin_keys:
        if isinstance(k, str):
            row_margin[k] = grand_margin[k]
        else:
            row_margin[k] = grand_margin[k[0]]

    from pandas import DataFrame
    margin_dummy = DataFrame(row_margin, columns=[key]).T

    row_names = result.index.names
    try:
        # Preserve each column block's dtype when appending the margin row.
        for dtype in set(result.dtypes):
            cols = result.select_dtypes([dtype]).columns
            margin_dummy[cols] = margin_dummy[cols].astype(dtype)
        result = result.append(margin_dummy)
    except TypeError:

        # we cannot reshape, so coerce the axis
        result.index = result.index._to_safe_for_reshape()
        result = result.append(margin_dummy)
    result.index.names = row_names

    return result
def _compute_grand_margin(data, values, aggfunc,
margins_name='All'):
if values:
grand_margin = {}
for k, v in data[values].iteritems():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], str):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {margins_name: aggfunc(data.index)}
def _generate_marginal_results(table, data, values, rows, cols, aggfunc,
                               observed,
                               grand_margin,
                               margins_name='All'):
    """Build the column margins for a pivot with value columns.

    Returns either a finished result (when there is nothing left to margin)
    or a (result, margin_keys, row_margin) tuple for _add_margins.
    """
    if len(cols) > 0:
        # need to "interleave" the margins
        table_pieces = []
        margin_keys = []

        def _all_key(key):
            # Column key for a margin: (value, margins_name, '', ...).
            return (key, margins_name) + ('',) * (len(cols) - 1)

        if len(rows) > 0:
            margin = data[rows + values].groupby(
                rows, observed=observed).agg(aggfunc)
            cat_axis = 1

            for key, piece in table.groupby(level=0,
                                            axis=cat_axis,
                                            observed=observed):
                all_key = _all_key(key)

                # we are going to mutate this, so need to copy!
                piece = piece.copy()
                try:
                    piece[all_key] = margin[key]
                except TypeError:

                    # we cannot reshape, so coerce the axis
                    piece.set_axis(piece._get_axis(
                        cat_axis)._to_safe_for_reshape(),
                        axis=cat_axis, inplace=True)
                    piece[all_key] = margin[key]

                table_pieces.append(piece)
                margin_keys.append(all_key)
        else:
            # No row groupers: the margin is simply the grand margin,
            # appended after each top-level block.
            margin = grand_margin
            cat_axis = 0
            for key, piece in table.groupby(level=0,
                                            axis=cat_axis,
                                            observed=observed):
                all_key = _all_key(key)
                table_pieces.append(piece)
                table_pieces.append(Series(margin[key], index=[all_key]))
                margin_keys.append(all_key)

        result = concat(table_pieces, axis=cat_axis)

        if len(rows) == 0:
            return result
    else:
        result = table
        margin_keys = table.columns

    if len(cols) > 0:
        # Margin row: aggregate over the column groupers, stacked so its
        # index lines up with the result's columns.
        row_margin = data[cols + values].groupby(
            cols, observed=observed).agg(aggfunc)
        row_margin = row_margin.stack()

        # slight hack
        new_order = [len(cols)] + list(range(len(cols)))
        row_margin.index = row_margin.index.reorder_levels(new_order)
    else:
        row_margin = Series(np.nan, index=result.columns)

    return result, margin_keys, row_margin
def _generate_marginal_results_without_values(
        table, data, rows, cols, aggfunc,
        observed, margins_name='All'):
    """Build margins for a pivot with no explicit value columns.

    Returns either a finished result or a (result, margin_keys, row_margin)
    tuple for _add_margins (mirrors _generate_marginal_results).
    """
    if len(cols) > 0:
        # need to "interleave" the margins
        margin_keys = []

        def _all_key():
            # Margin column key: plain name for one level, padded tuple
            # for a MultiIndex.
            if len(cols) == 1:
                return margins_name
            return (margins_name, ) + ('', ) * (len(cols) - 1)

        if len(rows) > 0:
            margin = data[rows].groupby(rows,
                                        observed=observed).apply(aggfunc)
            all_key = _all_key()
            table[all_key] = margin
            result = table
            margin_keys.append(all_key)

        else:
            margin = data.groupby(level=0,
                                  axis=0,
                                  observed=observed).apply(aggfunc)
            all_key = _all_key()
            table[all_key] = margin
            result = table
            margin_keys.append(all_key)
            # No row groupers: nothing else to margin, result is complete.
            return result
    else:
        result = table
        margin_keys = table.columns

    if len(cols):
        row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc)
    else:
        row_margin = Series(np.nan, index=result.columns)

    return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (is_scalar(by) or
isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) or
hasattr(by, '__call__')):
by = [by]
else:
by = list(by)
return by
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot'], indents=1)
def pivot(data, index=None, columns=None, values=None):
    # No docstring here: the @Appender decorator attaches the shared
    # pivot documentation.
    if values is None:
        # No values given: index by (index, columns) and unstack, keeping
        # every remaining column as a value column.
        cols = [columns] if index is None else [index, columns]
        append = index is None
        indexed = data.set_index(cols, append=append)
    else:
        if index is None:
            index = data.index
        else:
            index = data[index]
        index = MultiIndex.from_arrays([index, data[columns]])

        if is_list_like(values) and not isinstance(values, tuple):
            # Exclude tuple because it is seen as a single column name
            indexed = data._constructor(data[values].values, index=index,
                                        columns=values)
        else:
            # Single value column -> build a Series to unstack.
            indexed = data._constructor_sliced(data[values].values,
                                               index=index)
    return indexed.unstack(columns)
def crosstab(index, columns, values=None, rownames=None, colnames=None,
             aggfunc=None, margins=False, margins_name='All', dropna=True,
             normalize=False):
    """
    Compute a simple cross tabulation of two (or more) factors. By default
    computes a frequency table of the factors unless an array of values and an
    aggregation function are passed.

    Parameters
    ----------
    index : array-like, Series, or list of arrays/Series
        Values to group by in the rows.
    columns : array-like, Series, or list of arrays/Series
        Values to group by in the columns.
    values : array-like, optional
        Array of values to aggregate according to the factors.
        Requires `aggfunc` be specified.
    rownames : sequence, default None
        If passed, must match number of row arrays passed.
    colnames : sequence, default None
        If passed, must match number of column arrays passed.
    aggfunc : function, optional
        If specified, requires `values` be specified as well.
    margins : bool, default False
        Add row/column margins (subtotals).
    margins_name : str, default 'All'
        Name of the row/column that will contain the totals
        when margins is True.

        .. versionadded:: 0.21.0

    dropna : bool, default True
        Do not include columns whose entries are all NaN.
    normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
        Normalize by dividing all values by the sum of values.

        - If passed 'all' or `True`, will normalize over all values.
        - If passed 'index' will normalize over each row.
        - If passed 'columns' will normalize over each column.
        - If margins is `True`, will also normalize margin values.

        .. versionadded:: 0.18.1

    Returns
    -------
    DataFrame
        Cross tabulation of the data.

    See Also
    --------
    DataFrame.pivot : Reshape data based on column values.
    pivot_table : Create a pivot table as a DataFrame.

    Notes
    -----
    Any Series passed will have their name attributes used unless row or column
    names for the cross-tabulation are specified.

    Any input passed containing Categorical data will have **all** of its
    categories included in the cross-tabulation, even if the actual data does
    not contain any instances of a particular category.

    In the event that there aren't overlapping indexes an empty DataFrame will
    be returned.

    Examples
    --------
    >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
    ...               "bar", "bar", "foo", "foo", "foo"], dtype=object)
    >>> b = np.array(["one", "one", "one", "two", "one", "one",
    ...               "one", "two", "two", "two", "one"], dtype=object)
    >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
    ...               "shiny", "dull", "shiny", "shiny", "shiny"],
    ...              dtype=object)
    >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
    b   one        two
    c   dull shiny dull shiny
    a
    bar    1     2    1     0
    foo    2     2    1     2

    Here 'c' and 'f' are not represented in the data and will not be
    shown in the output because dropna is True by default. Set
    dropna=False to preserve categories with no data.

    >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
    >>> pd.crosstab(foo, bar)
    col_0  d  e
    row_0
    a      1  0
    b      0  1
    >>> pd.crosstab(foo, bar, dropna=False)
    col_0  d  e  f
    row_0
    a      1  0  0
    b      0  1  0
    c      0  0  0
    """
    index = com.maybe_make_list(index)
    columns = com.maybe_make_list(columns)

    rownames = _get_names(index, rownames, prefix='row')
    colnames = _get_names(columns, colnames, prefix='col')

    # Only rows present in *all* inputs participate (intersection of indexes).
    common_idx = _get_objs_combined_axis(index + columns, intersect=True,
                                         sort=False)

    data = {}
    data.update(zip(rownames, index))
    data.update(zip(colnames, columns))

    if values is None and aggfunc is not None:
        raise ValueError("aggfunc cannot be used without values.")

    if values is not None and aggfunc is None:
        raise ValueError("values cannot be used without an aggfunc.")

    from pandas import DataFrame
    df = DataFrame(data, index=common_idx)
    # Delegate to pivot_table over a synthetic '__dummy__' value column:
    # counting (len) for a frequency table, or the caller's aggfunc.
    if values is None:
        df['__dummy__'] = 0
        kwargs = {'aggfunc': len, 'fill_value': 0}
    else:
        df['__dummy__'] = values
        kwargs = {'aggfunc': aggfunc}

    table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
                           margins=margins, margins_name=margins_name,
                           dropna=dropna, **kwargs)

    # Post-process
    if normalize is not False:
        table = _normalize(table, normalize=normalize, margins=margins,
                           margins_name=margins_name)

    return table
def _normalize(table, normalize, margins, margins_name='All'):
    """Normalize a crosstab result over all values, rows or columns.

    *normalize* may be True/'all', 'index', 'columns', or the axis numbers
    0/1 (mapped to 'index'/'columns'). When *margins* is True the margin
    row/column is stripped, the core is normalized, and the margins are
    re-attached normalized themselves.
    """
    if not isinstance(normalize, (bool, str)):
        axis_subs = {0: 'index', 1: 'columns'}
        try:
            normalize = axis_subs[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")

    if margins is False:

        # Actual Normalizations
        normalizers = {
            'all': lambda x: x / x.sum(axis=1).sum(axis=0),
            'columns': lambda x: x / x.sum(),
            'index': lambda x: x.div(x.sum(axis=1), axis=0)
        }

        normalizers[True] = normalizers['all']

        try:
            f = normalizers[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")

        table = f(table)
        # 0/0 divisions become NaN; report them as 0.
        table = table.fillna(0)

    elif margins is True:

        # Split off the margin row/column, normalize the core, then put
        # normalized margins back.
        column_margin = table.loc[:, margins_name].drop(margins_name)
        index_margin = table.loc[margins_name, :].drop(margins_name)
        table = table.drop(margins_name, axis=1).drop(margins_name)
        # to keep index and columns names
        table_index_names = table.index.names
        table_columns_names = table.columns.names

        # Normalize core
        table = _normalize(table, normalize=normalize, margins=False)

        # Fix Margins
        if normalize == 'columns':
            column_margin = column_margin / column_margin.sum()
            table = concat([table, column_margin], axis=1)
            table = table.fillna(0)

        elif normalize == 'index':
            index_margin = index_margin / index_margin.sum()
            table = table.append(index_margin)
            table = table.fillna(0)

        elif normalize == "all" or normalize is True:
            column_margin = column_margin / column_margin.sum()
            index_margin = index_margin / index_margin.sum()
            # The grand-total cell of the margin row is 1 by construction.
            index_margin.loc[margins_name] = 1
            table = concat([table, column_margin], axis=1)
            table = table.append(index_margin)

            table = table.fillna(0)

        else:
            raise ValueError("Not a valid normalize argument")

        table.index.names = table_index_names
        table.columns.names = table_columns_names

    else:
        raise ValueError("Not a valid margins argument")

    return table
def _get_names(arrs, names, prefix='row'):
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
names.append('{prefix}_{i}'.format(prefix=prefix, i=i))
else:
if len(names) != len(arrs):
raise AssertionError('arrays and names must have the same length')
if not isinstance(names, list):
names = list(names)
return names
| bsd-3-clause |
servo/servo | tests/wpt/web-platform-tests/tools/third_party/py/py/_code/assertion.py | 60 | 3174 | import sys
import py
BuiltinAssertionError = py.builtin.builtins.AssertionError
_reprcompare = None # if set, will be called by assert reinterp for comparison ops
def _format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
raw_lines = (explanation or '').split('\n')
# escape newlines not followed by {, } and ~
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = 'and '
else:
s = 'where '
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
result.append(' '*len(stack) + line[1:])
assert len(stack) == 1
return '\n'.join(result)
class AssertionError(BuiltinAssertionError):
    """AssertionError that, when raised without arguments, reinterprets the
    failing assert statement from its source to build an informative msg."""

    def __init__(self, *args):
        BuiltinAssertionError.__init__(self, *args)
        if args:
            # Explicit message: use its str(), guarding against a broken
            # __str__/__repr__ on the message object.
            try:
                self.msg = str(args[0])
            except py.builtin._sysex:
                raise
            except:
                self.msg = "<[broken __repr__] %s at %0xd>" %(
                    args[0].__class__, id(args[0]))
        else:
            # No message: look at the caller's frame, extract the assert
            # statement's source and reinterpret it to explain the failure.
            f = py.code.Frame(sys._getframe(1))
            try:
                source = f.code.fullsource
                if source is not None:
                    try:
                        source = source.getstatement(f.lineno, assertion=True)
                    except IndexError:
                        source = None
                    else:
                        source = str(source.deindent()).strip()
            except py.error.ENOENT:
                source = None
                # this can also occur during reinterpretation, when the
                # co_filename is set to "<run>".
            if source:
                self.msg = reinterpret(source, f, should_fail=True)
            else:
                self.msg = "<could not determine information>"
            if not self.args:
                self.args = (self.msg,)
if sys.version_info > (3, 0):
AssertionError.__module__ = "builtins"
reinterpret_old = "old reinterpretation not available for py3"
else:
from py._code._assertionold import interpret as reinterpret_old
from py._code._assertionnew import interpret as reinterpret
| mpl-2.0 |
michaeljohn32/odoomrp-wip | purchase_requisition_full_bid_order_generator/__openerp__.py | 25 | 1510 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: pure metadata evaluated (not imported) by the
# module loader to register the addon, its dependencies and its data files.
{
    "name": "Purchase Requisition Full Bid Order Generator",
    "version": "1.0",
    "author": "AvanzOSC, "
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "website": "www.avanzosc.es",
    "category": "Purchase Management",
    "contributors": ["Esther Martín <esthermartin@avanzosc.es>",
                     "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
                     "Ana Juaristi <anajuaristi@avanzosc.es>",
                     "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>"],
    "depends": ["base", "purchase", "purchase_requisition"],
    "data": [
        "views/bids_ext_view.xml"],
    "installable": True
}
| agpl-3.0 |
marcelocure/django | tests/m2o_recursive/models.py | 282 | 1047 | """
Relating an object to itself, many-to-one
To define a many-to-one relationship between a model and itself, use
``ForeignKey('self', ...)``.
In this example, a ``Category`` is related to itself. That is, each
``Category`` has a parent ``Category``.
Set ``related_name`` to designate what the reverse relationship is called.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
    """Self-referential category: each instance may point at a parent
    Category; children are reachable via the 'child_set' reverse name."""
    name = models.CharField(max_length=20)
    # SET_NULL on delete, so removing a parent orphans (not deletes) children.
    parent = models.ForeignKey('self', models.SET_NULL, blank=True, null=True, related_name='child_set')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Person(models.Model):
    """Person with two self-referential links (mother/father), each with a
    distinct related_name so both reverse relations stay addressable."""
    full_name = models.CharField(max_length=20)
    mother = models.ForeignKey('self', models.SET_NULL, null=True, related_name='mothers_child_set')
    father = models.ForeignKey('self', models.SET_NULL, null=True, related_name='fathers_child_set')

    def __str__(self):
        return self.full_name
scristopher/paintown | scons/helpers.py | 4 | 1237 | def read_cmake_list(name):
"""
Read a cmake files list and return a dictionary with each cmake variable
matched to a list of filenames.
This makes it easy to add/remove files, as only the cmake list needs to be
modified and scons will automatically pick up the changes.
"""
lists = {}
current = []
reading = False
for line in open(name):
if line.startswith("set("):
current = []
name = line[4:].strip()
lists[name] = current
reading = True
else:
if reading:
# Stop reading files once we hit a line like 'file.cpp)'
if line.strip("\t\r\n").endswith(")"):
reading = False
path = line.strip("( )\t\r\n")
if path:
current.append(path)
return lists
def findFile(name):
    # Files and directories are located the same way: walk up the directory
    # tree until a path with the given name exists.
    return findDirectory(name)
def findDirectory(name):
    """Search upward from the current directory for *name*.

    Checks the current directory and up to five parent directories (six
    levels in total) and returns the relative path '<where>/<name>' of the
    first match.

    Raises Exception if nothing is found.
    """
    import os.path
    where = '.'
    # Try the current directory plus five parents (six levels in total).
    # range() instead of xrange() keeps this working on Python 2 and 3.
    for _ in range(6):
        candidate = "%s/%s" % (where, name)
        if os.path.exists(candidate):
            return candidate
        where = os.path.join(where, '..')
    raise Exception("Could not find the %s directory" % name)
abdhaleegit/avocado-misc-tests | generic/cxl.py | 4 | 2138 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: sudeesh john <sudeesh@linux.vnet.ibm.com>
import os
from avocado import Test
from avocado.utils import build, git, process
from avocado.utils.software_manager import SoftwareManager
class Cxl(Test):
    """
    This tests the CAPI functionality in IBM Power machines.
    This wrapper uses the testcases from
    https://github.com/ibm-capi/cxl-tests.git
    """

    def setUp(self):
        """
        Prepare the machine for the cxl test: verify a CAPI accelerator is
        present, install the toolchain, clone cxl-tests and build it.
        """
        self.script = self.params.get('script', default='memcpy_afu_ctx')
        self.args = self.params.get('args', default='')
        lspci_out = process.system_output("lspci")
        # system_output returns bytes; decode before the substring check.
        if "accelerators" not in lspci_out.decode():
            self.cancel("No capi card preset. Unable to initialte the test")
        smngr = SoftwareManager()
        for pkgs in ['gcc', 'make', 'automake', 'autoconf']:
            if not smngr.check_installed(pkgs) and not smngr.install(pkgs):
                self.cancel('%s is needed for the test to be run' % pkgs)
        git.get_repo('https://github.com/ibm-capi/cxl-tests.git',
                     destination_dir=self.teststmpdir)
        os.chdir(self.teststmpdir)
        # Only build if the default binary is missing.
        if not os.path.isfile('memcpy_afu_ctx'):
            build.make(".")

    def test(self):
        """
        Run the selected cxl test binary and fail on known error markers.
        """
        cmd = "./%s %s" % (self.script, self.args)
        result = process.run(cmd, ignore_status=True)
        # NOTE(review): lspci output above required .decode(), so
        # result.stderr/stdout are presumably bytes too; 'str in bytes'
        # raises TypeError on Python 3 -- confirm whether
        # result.stderr_text/stdout_text should be used here.
        if "Unable to open cxl device" in result.stderr:
            self.fail("%s is failed" % cmd)
        elif "failed" in result.stdout:
            self.fail("%s is failed" % cmd)
eLBati/odoo | addons/l10n_multilang/__openerp__.py | 91 | 1681 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi Language Chart of Accounts',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
* Multi language support for Chart of Accounts, Taxes, Tax Codes, Journals,
Accounting Templates, Analytic Chart of Accounts and Analytic Journals.
* Setup wizard changes
- Copy translations for COA, Tax, Tax Code and Fiscal Position from
templates to target objects.
""",
'website': 'http://www.openerp.com',
'depends' : ['account_accountant'],
'data': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
florentx/OpenUpgrade | addons/account/wizard/account_report_partner_balance.py | 364 | 2199 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_partner_balance(osv.osv_memory):
    """
    This wizard will provide the partner balance report by periods, between any two dates.
    """
    _inherit = 'account.common.partner.report'
    _name = 'account.partner.balance'
    _description = 'Print Account Partner Balance'
    # Wizard fields: which partners to display and which journals to include.
    _columns = {
        'display_partner': fields.selection([('non-zero_balance', 'With balance is not equal to 0'), ('all', 'All Partners')]
                                    ,'Display Partners'),
        'journal_ids': fields.many2many('account.journal', 'account_partner_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'display_partner': 'non-zero_balance',
    }

    def _print_report(self, cr, uid, ids, data, context=None):
        # Collect the common report options, add this wizard's own field and
        # hand off to the report engine.
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['display_partner'])[0])
        return self.pool['report'].get_action(cr, uid, [], 'account.report_partnerbalance', data=data, context=context)
| agpl-3.0 |
sigma-random/asuswrt-merlin | release/src/router/samba36/source4/dsdb/tests/python/dsdb_schema_info.py | 31 | 6654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Usage:
# export DC_SERVER=target_dc_or_local_samdb_url
# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
# PYTHONPATH="$PYTHONPATH:$samba4srcdir/lib/ldb/tests/python" $SUBUNITRUN dsdb_schema_info -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
#
import sys
import time
import random
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
from ldb import SCOPE_BASE, LdbError
import samba.tests
import samba.dcerpc.drsuapi
from samba.dcerpc.drsblobs import schemaInfoBlob
from samba.ndr import ndr_unpack
from samba.dcerpc.misc import GUID
class SchemaInfoTestCase(samba.tests.TestCase):
    """Verify that the schemaInfo attribute on the schema NC is updated
    (revision incremented, invocation_id stamped) whenever the schema is
    modified.  Python 2 / samba test; requires DC_SERVER in the environment.
    """

    # static SamDB connection, shared by all test methods of this class
    sam_db = None

    def setUp(self):
        super(SchemaInfoTestCase, self).setUp()
        # connect SamDB if we haven't yet
        if self.sam_db is None:
            ldb_url = samba.tests.env_get_var_value("DC_SERVER")
            SchemaInfoTestCase.sam_db = samba.tests.connect_samdb(ldb_url)
        # fetch rootDSE to learn the naming contexts and forest level
        res = self.sam_db.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
        self.assertEquals(len(res), 1)
        self.schema_dn = res[0]["schemaNamingContext"][0]
        self.base_dn = res[0]["defaultNamingContext"][0]
        self.forest_level = int(res[0]["forestFunctionality"][0])
        # get DC invocation_id; schemaInfo updates must carry this id
        self.invocation_id = GUID(self.sam_db.get_invocation_id())

    def tearDown(self):
        super(SchemaInfoTestCase, self).tearDown()

    def _getSchemaInfo(self):
        """Return the current schemaInfo blob from the schema NC.

        Falls back to a default (revision 0) blob when the attribute has
        not been created yet.
        """
        try:
            schema_info_data = self.sam_db.searchone(attribute="schemaInfo",
                                                     basedn=self.schema_dn,
                                                     expression="(objectClass=*)",
                                                     scope=SCOPE_BASE)
            # schemaInfo is a fixed-size 21-byte blob
            self.assertEqual(len(schema_info_data), 21)
            schema_info = ndr_unpack(schemaInfoBlob, schema_info_data)
            self.assertEqual(schema_info.marker, 0xFF)
        except KeyError:
            # create default schemaInfo if
            # attribute value is not created yet
            schema_info = schemaInfoBlob()
            schema_info.revision = 0
            schema_info.invocation_id = self.invocation_id
        return schema_info

    def _checkSchemaInfo(self, schi_before, schi_after):
        # A schema change must bump the revision by exactly one and must
        # stamp this DC's invocation_id.
        self.assertEqual(schi_before.revision + 1, schi_after.revision)
        self.assertEqual(schi_before.invocation_id, schi_after.invocation_id)
        self.assertEqual(schi_after.invocation_id, self.invocation_id)

    def _ldap_schemaUpdateNow(self):
        """Force the server to reload the schema (schemaUpdateNow on rootDSE)."""
        ldif = """
dn:
changetype: modify
add: schemaUpdateNow
schemaUpdateNow: 1
"""
        self.sam_db.modify_ldif(ldif)

    def _make_obj_names(self, prefix):
        """Create a unique name, LDAP-safe name and DN for a schema object."""
        obj_name = prefix + time.strftime("%s", time.gmtime())
        obj_ldap_name = obj_name.replace("-", "")
        obj_dn = "CN=%s,%s" % (obj_name, self.schema_dn)
        return (obj_name, obj_ldap_name, obj_dn)

    def _make_attr_ldif(self, attr_name, attr_dn):
        """Return LDIF adding a minimal attributeSchema object named *attr_name*."""
        ldif = """
dn: """ + attr_dn + """
objectClass: top
objectClass: attributeSchema
adminDescription: """ + attr_name + """
adminDisplayName: """ + attr_name + """
cn: """ + attr_name + """
attributeId: 1.2.840.""" + str(random.randint(1,100000)) + """.1.5.9940
attributeSyntax: 2.5.5.12
omSyntax: 64
instanceType: 4
isSingleValued: TRUE
systemOnly: FALSE
"""
        return ldif

    def test_AddModifyAttribute(self):
        # get initial schemaInfo
        schi_before = self._getSchemaInfo()
        # create names for an attribute to add
        (attr_name, attr_ldap_name, attr_dn) = self._make_obj_names("schemaInfo-Attr-")
        ldif = self._make_attr_ldif(attr_name, attr_dn)
        # add the new attribute
        self.sam_db.add_ldif(ldif)
        self._ldap_schemaUpdateNow()
        # compare resulting schemaInfo
        schi_after = self._getSchemaInfo()
        self._checkSchemaInfo(schi_before, schi_after)
        # rename the Attribute; this is a schema change too, so schemaInfo
        # must be bumped once more (checked against the previous 'after')
        attr_dn_new = attr_dn.replace(attr_name, attr_name + "-NEW")
        try:
            self.sam_db.rename(attr_dn, attr_dn_new)
        except LdbError, (num, _):
            self.fail("failed to change lDAPDisplayName for %s: %s" % (attr_name, _))
        # compare resulting schemaInfo
        schi_after = self._getSchemaInfo()
        self._checkSchemaInfo(schi_before, schi_after)
        pass

    def _make_class_ldif(self, class_name, class_dn):
        """Return LDIF adding a minimal classSchema object named *class_name*."""
        ldif = """
dn: """ + class_dn + """
objectClass: top
objectClass: classSchema
adminDescription: """ + class_name + """
adminDisplayName: """ + class_name + """
cn: """ + class_name + """
governsId: 1.2.840.""" + str(random.randint(1,100000)) + """.1.5.9939
instanceType: 4
objectClassCategory: 1
subClassOf: organizationalPerson
rDNAttID: cn
systemMustContain: cn
systemOnly: FALSE
"""
        return ldif

    def test_AddModifyClass(self):
        # get initial schemaInfo
        schi_before = self._getSchemaInfo()
        # create names for a Class to add
        (class_name, class_ldap_name, class_dn) = self._make_obj_names("schemaInfo-Class-")
        ldif = self._make_class_ldif(class_name, class_dn)
        # add the new Class
        self.sam_db.add_ldif(ldif)
        self._ldap_schemaUpdateNow()
        # compare resulting schemaInfo
        schi_after = self._getSchemaInfo()
        self._checkSchemaInfo(schi_before, schi_after)
        # rename the Class
        class_dn_new = class_dn.replace(class_name, class_name + "-NEW")
        try:
            self.sam_db.rename(class_dn, class_dn_new)
        except LdbError, (num, _):
            self.fail("failed to change lDAPDisplayName for %s: %s" % (class_name, _))
        # compare resulting schemaInfo
        schi_after = self._getSchemaInfo()
        self._checkSchemaInfo(schi_before, schi_after)
| gpl-2.0 |
piotrek-golda/CivilHubIndependantCopy | locations/links.py | 3 | 1119 | # -*- coding: utf-8 -*-
#
# We map sidebar links for various views. I presume that sidebars will vary
# depending on the module but in each module we will have the same set of links
# for each sub-view (edition, deletion etc.).
#
# Link bundles shared by several sub-views; composed below so the common
# sets are written once.  Tuple order is the order links are rendered in.
_INVITE_ONLY = ('invite',)
_CONTENT_LINKS = ('new_discussion', 'new_poll', 'new_idea', 'add_news', 'invite')

LINKS_MAP = {
    'summary': _INVITE_ONLY,
    'news': _CONTENT_LINKS + ('news_category',),
    'discussions': (
        'new_discussion',
        'new_poll',
        'new_location',
        'new_idea',
        'upload',
        'invite',
        'discussion_category',
    ),
    'ideas': _CONTENT_LINKS + ('idea_category',),
    'polls': _CONTENT_LINKS,
    'gallery': _CONTENT_LINKS,
    'followers': _INVITE_ONLY,
    'sublocations': _INVITE_ONLY,
}
| gpl-3.0 |
Teino1978-Corp/google-belay | tests/py/testrunner.py | 4 | 1651 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import sys
# Install the Python unittest2 package before you run this script.
import unittest2
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
def main(sdk_path, test_path):
    """Discover and run the App Engine unit tests under *test_path*.

    *sdk_path* is prepended to sys.path so the SDK's dev_appserver can be
    imported; the application's 'station' package dir is appended as well.
    """
    # Make the SDK and the application importable before touching the SDK.
    sys.path.insert(0, sdk_path)
    sys.path.append(test_path + '/station')
    import dev_appserver
    dev_appserver.fix_sys_path()
    # Minimal server environment expected by the App Engine runtime.
    os.environ.update({
        'SERVER_NAME': 'testrunner.example.com',
        'SERVER_PORT': '80',
        'APPENGINE_RUNTIME': 'python27',
    })
    loader = unittest2.loader.TestLoader()
    runner = unittest2.TextTestRunner(verbosity=2)
    runner.run(loader.discover(test_path))
if __name__ == '__main__':
    # Python 2 script: expects exactly two positional arguments,
    # SDK_PATH and TEST_PATH (see USAGE above).
    parser = optparse.OptionParser(USAGE)
    options, args = parser.parse_args()
    if len(args) != 2:
        print 'Error: Exactly 2 arguments required.'
        parser.print_help()
        sys.exit(1)
    SDK_PATH = args[0]
    TEST_PATH = args[1]
    main(SDK_PATH, TEST_PATH)
nirmeshk/oh-mainline | vendor/packages/Django/tests/modeltests/validators/tests.py | 45 | 9225 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import types
from datetime import datetime, timedelta
from django.core.exceptions import ValidationError
from django.core.validators import *
from django.test.utils import str_prefix
from django.utils.unittest import TestCase
# Fixed "now" so the min/max date comparisons below are stable for one run.
NOW = datetime.now()

TEST_DATA = (
    # (validator, value, expected),
    # -- validate_integer ----------------------------------------------
    (validate_integer, '42', None),
    (validate_integer, '-42', None),
    (validate_integer, -42, None),
    (validate_integer, -42.5, None),
    (validate_integer, None, ValidationError),
    (validate_integer, 'a', ValidationError),
    # -- validate_email ------------------------------------------------
    (validate_email, 'email@here.com', None),
    (validate_email, 'weirder-email@here.and.there.com', None),
    (validate_email, 'email@[127.0.0.1]', None),
    (validate_email, 'example@valid-----hyphens.com', None),
    (validate_email, 'example@valid-with-hyphens.com', None),
    (validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
    (validate_email, None, ValidationError),
    (validate_email, '', ValidationError),
    (validate_email, 'abc', ValidationError),
    (validate_email, 'abc@', ValidationError),
    (validate_email, 'abc@bar', ValidationError),
    (validate_email, 'a @x.cz', ValidationError),
    (validate_email, 'abc@.com', ValidationError),
    (validate_email, 'something@@somewhere.com', ValidationError),
    (validate_email, 'email@127.0.0.1', ValidationError),
    (validate_email, 'example@invalid-.com', ValidationError),
    (validate_email, 'example@-invalid.com', ValidationError),
    (validate_email, 'example@inv-.alid-.com', ValidationError),
    (validate_email, 'example@inv-.-alid.com', ValidationError),
    # Quoted-string format (CR not allowed)
    (validate_email, '"\\\011"@here.com', None),
    (validate_email, '"\\\012"@here.com', ValidationError),
    # -- validate_slug -------------------------------------------------
    (validate_slug, 'slug-ok', None),
    (validate_slug, 'longer-slug-still-ok', None),
    (validate_slug, '--------', None),
    (validate_slug, 'nohyphensoranything', None),
    (validate_slug, '', ValidationError),
    (validate_slug, ' text ', ValidationError),
    (validate_slug, ' ', ValidationError),
    (validate_slug, 'some@mail.com', ValidationError),
    (validate_slug, '你好', ValidationError),
    (validate_slug, '\n', ValidationError),
    # -- IP address validators -----------------------------------------
    (validate_ipv4_address, '1.1.1.1', None),
    (validate_ipv4_address, '255.0.0.0', None),
    (validate_ipv4_address, '0.0.0.0', None),
    (validate_ipv4_address, '256.1.1.1', ValidationError),
    (validate_ipv4_address, '25.1.1.', ValidationError),
    (validate_ipv4_address, '25,1,1,1', ValidationError),
    (validate_ipv4_address, '25.1 .1.1', ValidationError),
    # validate_ipv6_address uses django.utils.ipv6, which
    # is tested in much greater detail in its own testcase
    (validate_ipv6_address, 'fe80::1', None),
    (validate_ipv6_address, '::1', None),
    (validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
    (validate_ipv6_address, '1:2', ValidationError),
    (validate_ipv6_address, '::zzz', ValidationError),
    (validate_ipv6_address, '12345::', ValidationError),
    (validate_ipv46_address, '1.1.1.1', None),
    (validate_ipv46_address, '255.0.0.0', None),
    (validate_ipv46_address, '0.0.0.0', None),
    (validate_ipv46_address, 'fe80::1', None),
    (validate_ipv46_address, '::1', None),
    (validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
    (validate_ipv46_address, '256.1.1.1', ValidationError),
    (validate_ipv46_address, '25.1.1.', ValidationError),
    (validate_ipv46_address, '25,1,1,1', ValidationError),
    (validate_ipv46_address, '25.1 .1.1', ValidationError),
    (validate_ipv46_address, '1:2', ValidationError),
    (validate_ipv46_address, '::zzz', ValidationError),
    (validate_ipv46_address, '12345::', ValidationError),
    # -- comma-separated integer lists ---------------------------------
    (validate_comma_separated_integer_list, '1', None),
    (validate_comma_separated_integer_list, '1,2,3', None),
    (validate_comma_separated_integer_list, '1,2,3,', None),
    (validate_comma_separated_integer_list, '', ValidationError),
    (validate_comma_separated_integer_list, 'a,b,c', ValidationError),
    (validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
    # -- value/length bound validators ---------------------------------
    (MaxValueValidator(10), 10, None),
    (MaxValueValidator(10), -10, None),
    (MaxValueValidator(10), 0, None),
    (MaxValueValidator(NOW), NOW, None),
    (MaxValueValidator(NOW), NOW - timedelta(days=1), None),
    (MaxValueValidator(0), 1, ValidationError),
    (MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
    (MinValueValidator(-10), -10, None),
    (MinValueValidator(-10), 10, None),
    (MinValueValidator(-10), 0, None),
    (MinValueValidator(NOW), NOW, None),
    (MinValueValidator(NOW), NOW + timedelta(days=1), None),
    (MinValueValidator(0), -1, ValidationError),
    (MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
    (MaxLengthValidator(10), '', None),
    (MaxLengthValidator(10), 10*'x', None),
    (MaxLengthValidator(10), 15*'x', ValidationError),
    (MinLengthValidator(10), 15*'x', None),
    (MinLengthValidator(10), 10*'x', None),
    (MinLengthValidator(10), '', ValidationError),
    # -- URLValidator --------------------------------------------------
    (URLValidator(), 'http://www.djangoproject.com/', None),
    (URLValidator(), 'http://localhost/', None),
    (URLValidator(), 'http://example.com/', None),
    (URLValidator(), 'http://www.example.com/', None),
    (URLValidator(), 'http://www.example.com:8000/test', None),
    (URLValidator(), 'http://valid-with-hyphens.com/', None),
    (URLValidator(), 'http://subdomain.example.com/', None),
    (URLValidator(), 'http://200.8.9.10/', None),
    (URLValidator(), 'http://200.8.9.10:8000/test', None),
    (URLValidator(), 'http://valid-----hyphens.com/', None),
    (URLValidator(), 'http://example.com?something=value', None),
    (URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
    (URLValidator(), 'foo', ValidationError),
    (URLValidator(), 'http://', ValidationError),
    (URLValidator(), 'http://example', ValidationError),
    (URLValidator(), 'http://example.', ValidationError),
    (URLValidator(), 'http://.com', ValidationError),
    (URLValidator(), 'http://invalid-.com', ValidationError),
    (URLValidator(), 'http://-invalid.com', ValidationError),
    (URLValidator(), 'http://inv-.alid-.com', ValidationError),
    (URLValidator(), 'http://inv-.-alid.com', ValidationError),
    # -- BaseValidator / RegexValidator --------------------------------
    (BaseValidator(True), True, None),
    (BaseValidator(True), False, ValidationError),
    (RegexValidator(), '', None),
    (RegexValidator(), 'x1x2', None),
    (RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
    (RegexValidator('[0-9]+'), '1234', None),
    (RegexValidator(re.compile('[0-9]+')), '1234', None),
    (RegexValidator('.*'), '', None),
    (RegexValidator(re.compile('.*')), '', None),
    (RegexValidator('.*'), 'xxxxx', None),
    (RegexValidator('x'), 'y', ValidationError),
    (RegexValidator(re.compile('x')), 'y', ValidationError),
)
def create_simple_test_method(validator, expected, value, num):
    """Return a ``(name, function)`` pair implementing one table-driven case.

    When *expected* is an exception class, the generated method asserts that
    validating *value* raises it; otherwise it asserts that the validator
    returns *expected* without raising ValidationError.  *num* keeps the
    generated method names unique.
    """
    raises = expected is not None and issubclass(expected, Exception)
    if raises:
        test_mask = 'test_%s_raises_error_%d'

        def test_func(self):
            # assertRaises not used, so as to be able to produce an error message
            # containing the tested value
            try:
                validator(value)
            except expected:
                pass
            else:
                self.fail("%s not raised when validating '%s'" % (
                    expected.__name__, value))
    else:
        test_mask = 'test_%s_%d'

        def test_func(self):
            try:
                self.assertEqual(expected, validator(value))
            except ValidationError as e:
                self.fail("Validation of '%s' failed. Error message was: %s" % (
                    value, str(e)))

    # Plain functions are named after themselves; validator instances
    # (e.g. RegexValidator()) are named after their class.
    if isinstance(validator, types.FunctionType):
        validator_label = validator.__name__
    else:
        validator_label = validator.__class__.__name__
    return test_mask % (validator_label, num), test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(TestCase):
    """Checks ValidationError's str()/repr() for the three message shapes
    (single string, list of strings, dict of field -> message)."""

    def test_single_message(self):
        err = ValidationError('Not Valid')
        self.assertEqual(str(err), str_prefix("[%(_)s'Not Valid']"))
        self.assertEqual(repr(err), str_prefix("ValidationError([%(_)s'Not Valid'])"))

    def test_message_list(self):
        err = ValidationError(['First Problem', 'Second Problem'])
        self.assertEqual(str(err), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
        self.assertEqual(repr(err), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))

    def test_message_dict(self):
        err = ValidationError({'first': 'First Problem'})
        self.assertEqual(str(err), str_prefix("{%(_)s'first': %(_)s'First Problem'}"))
        self.assertEqual(repr(err), str_prefix("ValidationError({%(_)s'first': %(_)s'First Problem'})"))
# Attach one generated test method per TEST_DATA row to the class above;
# the counter keeps the generated names unique per validator.
test_counter = 0
for validator, value, expected in TEST_DATA:
    name, method = create_simple_test_method(validator, expected, value, test_counter)
    setattr(TestSimpleValidators, name, method)
    test_counter += 1
| agpl-3.0 |
kyasui/store | saleor/dashboard/order/forms.py | 2 | 4228 | from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from satchless.item import InsufficientStock
from ...cart.forms import QuantityField
from ...order.models import DeliveryGroup, OrderedItem, OrderNote
from ...product.models import Product
class OrderNoteForm(forms.ModelForm):
    """Single-field form for attaching a free-text note to an order."""

    class Meta:
        model = OrderNote
        fields = ['content']
        widgets = {
            'content': forms.Textarea({'rows': 5, 'placeholder': _('Note')}),
        }

    def __init__(self, *args, **kwargs):
        super(OrderNoteForm, self).__init__(*args, **kwargs)
        # The placeholder already says what the field is; hide the label.
        self.fields['content'].label = ''
class ManagePaymentForm(forms.Form):
    """Validates an amount and applies a capture/refund/release action to
    the payment passed in via the ``payment`` keyword argument."""

    amount = forms.DecimalField(min_value=0, decimal_places=2, required=False)

    def __init__(self, *args, **kwargs):
        self.payment = kwargs.pop('payment')
        super(ManagePaymentForm, self).__init__(*args, **kwargs)

    def handle_action(self, action, user):
        """Dispatch *action* against the payment.

        Raises ValueError when the action is not allowed in the payment's
        current status (capture/release need 'preauth', refund 'confirmed').
        """
        amount = self.cleaned_data['amount']
        status = self.payment.status
        if action == 'capture' and status == 'preauth':
            self.payment.capture(amount)
        elif action == 'refund' and status == 'confirmed':
            self.payment.refund(amount)
        elif action == 'release' and status == 'preauth':
            self.payment.release()
        else:
            raise ValueError(_('Invalid payment action'))
class MoveItemsForm(forms.Form):
    """Moves a number of units of an ordered item into another delivery
    group of the same order, or into a brand new group.
    """

    how_many = QuantityField()
    groups = forms.ChoiceField()

    def __init__(self, *args, **kwargs):
        self.item = kwargs.pop('item')
        super(MoveItemsForm, self).__init__(*args, **kwargs)
        # Cap the quantity widget at what the item actually holds.
        self.fields['how_many'].widget.attrs.update({
            'max': self.item.quantity, 'min': 1})
        self.fields['groups'].choices = self.get_delivery_group_choices()

    def get_delivery_group_choices(self):
        """Return ('new', ...) plus every other non-cancelled delivery group
        of the same order as (value, label) choice pairs."""
        group = self.item.delivery_group
        groups = group.order.groups.exclude(pk=group.pk).exclude(
            status='cancelled')
        choices = [('new', _('New'))]
        choices.extend([(g.pk, str(g)) for g in groups])
        return choices

    def move_items(self):
        """Perform the move and return the target DeliveryGroup."""
        how_many = self.cleaned_data['how_many']
        choice = self.cleaned_data['groups']
        old_group = self.item.delivery_group
        if choice == 'new':
            # Duplicate the source group's shipping info to host the items.
            target_group = DeliveryGroup.objects.duplicate_group(old_group)
        else:
            target_group = DeliveryGroup.objects.get(pk=choice)
        OrderedItem.objects.move_to_group(self.item, target_group, how_many)
        return target_group
class ChangeQuantityForm(forms.ModelForm):
    """Edits the quantity of an ordered item, validated against the stock
    of the matching product variant.
    """

    class Meta:
        model = OrderedItem
        fields = ['quantity']

    def __init__(self, *args, **kwargs):
        super(ChangeQuantityForm, self).__init__(*args, **kwargs)
        # Current quantity is both the upper bound and the initial value.
        self.fields['quantity'].widget.attrs.update({
            'max': self.instance.quantity, 'min': 1})
        self.fields['quantity'].initial = self.instance.quantity

    def get_variant(self):
        # Resolve the concrete product subclass first, then match the
        # variant by the SKU stored on the ordered item.
        p = Product.objects.get_subclass(pk=self.instance.product.pk)
        return p.variants.get(sku=self.instance.product_sku)

    def clean_quantity(self):
        """Reject quantities exceeding the variant's available stock."""
        quantity = self.cleaned_data['quantity']
        variant = self.get_variant()
        try:
            variant.check_quantity(quantity)
        except InsufficientStock as e:
            raise forms.ValidationError(
                _('Only %(remaining)d remaining in stock.') % {
                    'remaining': e.item.stock})
        return quantity

    def save(self):
        quantity = self.cleaned_data['quantity']
        self.instance.change_quantity(quantity)
class ShipGroupForm(forms.ModelForm):
    """Marks a delivery group as shipped; once no group of the order is
    'new' anymore and at least one is shipped, the whole order follows."""

    class Meta:
        model = DeliveryGroup
        fields = []

    def clean(self):
        # Only freshly created groups may be shipped.
        if self.instance.status != 'new':
            raise forms.ValidationError(
                _('Cannot ship this group'), code='invalid')

    def save(self):
        order = self.instance.order
        self.instance.change_status('shipped')
        group_statuses = {group.status for group in order.groups.all()}
        if 'shipped' in group_statuses and 'new' not in group_statuses:
            order.change_status('shipped')
| bsd-3-clause |
tgl-dogg/BCC-2s14-PI4-SteampunkSpider | src/steampunk_spider/database/top10paises.py | 1 | 1585 | import mysql.connector
from mysql.connector import errorcode
DB_NAME = 'steampunk'
# Get database connection
db_conn = mysql.connector.connect(user='root', password='vaporAranha', host='localhost', database='steampunk')
# Get cursor to perform operations on our database
cursor = db_conn.cursor()
request = ("SELECT nationality.name AS country, COUNT(player.id_player) AS players, "
"SUM(software.price) AS totalPrice, SUM(player.level) AS totalLv FROM "
"((nationality INNER JOIN player ON nationality.id_nationality = player.fk_nationality) "
"INNER JOIN rel_player_software ON player.id_player = rel_player_software.fk_player) "
"INNER JOIN software ON software.id_software = rel_player_software.fk_software "
"WHERE nationality.name NOT LIKE '00'"
"GROUP BY nationality.name "
"ORDER BY players DESC "
"LIMIT 10")
cursor.execute(request)
for country, players, totalLv, totalPrice in cursor:
print country, players, totalLv, totalPrice, "\n"
request2 = ("SELECT COUNT(bcc.id_player) AS players, SUM(bcc.level) AS totalLv, SUM(software.price) AS totalPrice "
"FROM (SELECT player.id_player, player.level, rel_player_software.fk_software FROM player "
"INNER JOIN rel_player_software ON player.id_player = rel_player_software.fk_player "
"WHERE player.bcc = 1 "
"GROUP BY player.id_player) AS bcc "
"INNER JOIN software ON software.id_software = bcc.fk_software")
cursor.execute(request2)
for players, totalLv, totalPrice in cursor:
print "BCC", players, totalLv, totalPrice
cursor.close()
db_conn.close()
print ("connection ended") | mit |
mhostetter/gnuradio | gr-wxgui/python/wxgui/waterfall_window.py | 47 | 10668 | #
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1`301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 100
AVG_ALPHA_MIN_EXP, AVG_ALPHA_MAX_EXP = -3, 0
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'waterfall_rate', 30)
DEFAULT_COLOR_MODE = gr.prefs().get_string('wxgui', 'waterfall_color', 'rgb1')
DEFAULT_WIN_SIZE = (600, 300)
DIV_LEVELS = (1, 2, 5, 10, 20)
MIN_DYNAMIC_RANGE, MAX_DYNAMIC_RANGE = 10, 200
DYNAMIC_RANGE_STEP = 10.
COLOR_MODES = (
('RGB1', 'rgb1'),
('RGB2', 'rgb2'),
('RGB3', 'rgb3'),
('Gray', 'gray'),
)
##################################################
# Waterfall window control panel
##################################################
class control_panel(wx.Panel):
    """
    A control panel with wx widgets to control the plotter and fft block chain.
    """

    def __init__(self, parent):
        """
        Create a new control panel.

        Args:
            parent: the wx parent window (a waterfall_window, also a pubsub)
        """
        self.parent = parent
        wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
        parent[SHOW_CONTROL_PANEL_KEY] = True
        parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
        control_box = wx.BoxSizer(wx.VERTICAL)
        control_box.AddStretchSpacer()
        options_box = forms.static_box_sizer(
            parent=self, sizer=control_box, label='Options',
            bold=True, orient=wx.VERTICAL,
        )
        #average
        forms.check_box(
            sizer=options_box, parent=self, label='Average',
            ps=parent, key=AVERAGE_KEY,
        )
        avg_alpha_text = forms.static_text(
            sizer=options_box, parent=self, label='Avg Alpha',
            converter=forms.float_converter(lambda x: '%.4f'%x),
            ps=parent, key=AVG_ALPHA_KEY, width=50,
        )
        avg_alpha_slider = forms.log_slider(
            sizer=options_box, parent=self,
            min_exp=AVG_ALPHA_MIN_EXP,
            max_exp=AVG_ALPHA_MAX_EXP,
            num_steps=SLIDER_STEPS,
            ps=parent, key=AVG_ALPHA_KEY,
        )
        # alpha controls are only meaningful while averaging is enabled
        for widget in (avg_alpha_text, avg_alpha_slider):
            parent.subscribe(AVERAGE_KEY, widget.Enable)
            widget.Enable(parent[AVERAGE_KEY])
        #begin axes box
        control_box.AddStretchSpacer()
        axes_box = forms.static_box_sizer(
            parent=self, sizer=control_box, label='Axes Options',
            bold=True, orient=wx.VERTICAL,
        )
        #num lines buttons
        forms.incr_decr_buttons(
            parent=self, sizer=axes_box, label='Time Scale',
            on_incr=self._on_incr_time_scale, on_decr=self._on_decr_time_scale,
        )
        #dynamic range buttons
        forms.incr_decr_buttons(
            parent=self, sizer=axes_box, label='Dyn Range',
            on_incr=self._on_incr_dynamic_range, on_decr=self._on_decr_dynamic_range,
        )
        #ref lvl buttons
        forms.incr_decr_buttons(
            parent=self, sizer=axes_box, label='Ref Level',
            on_incr=self._on_incr_ref_level, on_decr=self._on_decr_ref_level,
        )
        #color mode
        forms.drop_down(
            parent=self, sizer=axes_box, width=100,
            ps=parent, key=COLOR_MODE_KEY, label='Color',
            choices=map(lambda x: x[1], COLOR_MODES),
            labels=map(lambda x: x[0], COLOR_MODES),
        )
        #autoscale
        forms.single_button(
            parent=self, sizer=axes_box, label='Autoscale',
            callback=self.parent.autoscale,
        )
        #clear
        control_box.AddStretchSpacer()
        forms.single_button(
            parent=self, sizer=control_box, label='Clear',
            callback=self._on_clear_button,
        )
        #run/stop
        forms.toggle_button(
            sizer=control_box, parent=self,
            true_label='Stop', false_label='Run',
            ps=parent, key=RUNNING_KEY,
        )
        #set sizer
        self.SetSizerAndFit(control_box)

    ##################################################
    # Event handlers
    ##################################################
    def _on_clear_button(self, event):
        # re-assigning NUM_LINES_KEY forces the plotter to rebuild (and so
        # clear) its waterfall bitmap
        self.parent[NUM_LINES_KEY] = self.parent[NUM_LINES_KEY]

    def _on_incr_dynamic_range(self, event):
        self.parent[DYNAMIC_RANGE_KEY] = min(MAX_DYNAMIC_RANGE, common.get_clean_incr(self.parent[DYNAMIC_RANGE_KEY]))

    def _on_decr_dynamic_range(self, event):
        self.parent[DYNAMIC_RANGE_KEY] = max(MIN_DYNAMIC_RANGE, common.get_clean_decr(self.parent[DYNAMIC_RANGE_KEY]))

    def _on_incr_ref_level(self, event):
        self.parent[REF_LEVEL_KEY] = self.parent[REF_LEVEL_KEY] + self.parent[DYNAMIC_RANGE_KEY]/DYNAMIC_RANGE_STEP

    def _on_decr_ref_level(self, event):
        self.parent[REF_LEVEL_KEY] = self.parent[REF_LEVEL_KEY] - self.parent[DYNAMIC_RANGE_KEY]/DYNAMIC_RANGE_STEP

    def _on_incr_time_scale(self, event):
        # slower frame rate -> more time shown per screen; once clamped at
        # 1 fps, increase the decimation instead
        old_rate = self.parent[FRAME_RATE_KEY]
        self.parent[FRAME_RATE_KEY] *= 0.75
        if self.parent[FRAME_RATE_KEY] < 1.0:
            self.parent[FRAME_RATE_KEY] = 1.0
        if self.parent[FRAME_RATE_KEY] == old_rate:
            self.parent[DECIMATION_KEY] += 1

    def _on_decr_time_scale(self, event):
        old_rate = self.parent[FRAME_RATE_KEY]
        self.parent[FRAME_RATE_KEY] *= 1.25
        if self.parent[FRAME_RATE_KEY] == old_rate:
            self.parent[DECIMATION_KEY] -= 1
##################################################
# Waterfall window with plotter and control panel
##################################################
class waterfall_window(wx.Panel, pubsub.pubsub):
    """Waterfall plot window: a waterfall plotter plus its control panel,
    fed by FFT frames arriving on a message queue via the controller."""

    def __init__(
        self,
        parent,
        controller,
        size,
        title,
        real,
        fft_size,
        num_lines,
        decimation_key,
        baseband_freq,
        sample_rate_key,
        frame_rate_key,
        dynamic_range,
        ref_level,
        average_key,
        avg_alpha_key,
        msg_key,
    ):
        pubsub.pubsub.__init__(self)
        #setup
        self.samples = list()
        self.real = real
        self.fft_size = fft_size
        # proxy the keys so controller state and local pubsub keys track
        # each other
        self.proxy(MSG_KEY, controller, msg_key)
        self.proxy(DECIMATION_KEY, controller, decimation_key)
        self.proxy(FRAME_RATE_KEY, controller, frame_rate_key)
        self.proxy(AVERAGE_KEY, controller, average_key)
        self.proxy(AVG_ALPHA_KEY, controller, avg_alpha_key)
        self.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)
        #init panel and plot
        wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
        self.plotter = plotter.waterfall_plotter(self)
        self.plotter.SetSize(wx.Size(*size))
        self.plotter.SetSizeHints(*size)
        self.plotter.set_title(title)
        self.plotter.enable_point_label(True)
        self.plotter.enable_grid_lines(False)
        #plotter listeners
        self.subscribe(COLOR_MODE_KEY, self.plotter.set_color_mode)
        self.subscribe(NUM_LINES_KEY, self.plotter.set_num_lines)
        #initialize values
        self[DYNAMIC_RANGE_KEY] = dynamic_range
        self[NUM_LINES_KEY] = num_lines
        self[Y_DIVS_KEY] = 8
        self[X_DIVS_KEY] = 8 #approximate
        self[REF_LEVEL_KEY] = ref_level
        self[BASEBAND_FREQ_KEY] = baseband_freq
        # set a valid mode first, then the configured default
        self[COLOR_MODE_KEY] = COLOR_MODES[0][1]
        self[COLOR_MODE_KEY] = DEFAULT_COLOR_MODE
        self[RUNNING_KEY] = True
        #setup the box with plot and controls
        self.control_panel = control_panel(self)
        main_box = wx.BoxSizer(wx.HORIZONTAL)
        main_box.Add(self.plotter, 1, wx.EXPAND)
        main_box.Add(self.control_panel, 0, wx.EXPAND)
        self.SetSizerAndFit(main_box)
        #register events
        self.subscribe(MSG_KEY, self.handle_msg)
        # any of these keys invalidates the grid labels
        for key in (
            DECIMATION_KEY, SAMPLE_RATE_KEY, FRAME_RATE_KEY,
            BASEBAND_FREQ_KEY, X_DIVS_KEY, Y_DIVS_KEY, NUM_LINES_KEY,
        ): self.subscribe(key, self.update_grid)
        #initial update
        self.update_grid()

    def set_callback(self,callb):
        # forward a click/point callback to the underlying plotter
        self.plotter.set_callback(callb)

    def autoscale(self, *args):
        """
        Autoscale the waterfall plot to the last frame.
        Set the dynamic range and reference level.
        Does not affect the current data in the waterfall.
        """
        if not len(self.samples): return
        min_level, max_level = common.get_min_max_fft(self.samples)
        #set the range and level
        self[DYNAMIC_RANGE_KEY] = common.get_clean_num(max_level - min_level)
        self[REF_LEVEL_KEY] = DYNAMIC_RANGE_STEP*round(.5+max_level/DYNAMIC_RANGE_STEP)

    def handle_msg(self, msg):
        """
        Handle the message from the fft sink message queue.
        If complex, reorder the fft samples so the negative bins come first.
        If real, keep only the positive bins.
        Send the data to the plotter.

        Args:
            msg: the fft array as a character array
        """
        if not self[RUNNING_KEY]: return
        #convert to floating point numbers
        self.samples = samples = numpy.fromstring(msg, numpy.float32)[:self.fft_size] #only take first frame
        num_samps = len(samples)
        #reorder fft
        if self.real: samples = samples[:(num_samps+1)/2]
        else: samples = numpy.concatenate((samples[num_samps/2+1:], samples[:(num_samps+1)/2]))
        #plot the fft
        self.plotter.set_samples(
            samples=samples,
            minimum=self[REF_LEVEL_KEY] - self[DYNAMIC_RANGE_KEY],
            maximum=self[REF_LEVEL_KEY],
        )
        #update the plotter
        self.plotter.update()

    def update_grid(self, *args):
        """
        Update the plotter grid.
        This update method is dependent on the variables below.
        Determine the x and y axis grid parameters.
        The x axis depends on sample rate, baseband freq, and x divs.
        The y axis depends on y per div, y divs, and ref level.
        """
        #grid parameters
        sample_rate = self[SAMPLE_RATE_KEY]
        frame_rate = self[FRAME_RATE_KEY]
        if frame_rate < 1.0 :
            frame_rate = 1.0
        baseband_freq = self[BASEBAND_FREQ_KEY]
        num_lines = self[NUM_LINES_KEY]
        y_divs = self[Y_DIVS_KEY]
        x_divs = self[X_DIVS_KEY]
        #determine best fitting x_per_div
        if self.real: x_width = sample_rate/2.0
        else: x_width = sample_rate/1.0
        x_per_div = common.get_clean_num(x_width/x_divs)
        #update the x grid
        if self.real:
            self.plotter.set_x_grid(
                baseband_freq,
                baseband_freq + sample_rate/2.0,
                x_per_div, True,
            )
        else:
            self.plotter.set_x_grid(
                baseband_freq - sample_rate/2.0,
                baseband_freq + sample_rate/2.0,
                x_per_div, True,
            )
        #update x units
        self.plotter.set_x_label('Frequency', 'Hz')
        #update y grid; the y axis is elapsed time over the visible lines
        duration = float(num_lines)/frame_rate
        y_per_div = common.get_clean_num(duration/y_divs)
        self.plotter.set_y_grid(0, duration, y_per_div, True)
        #update y units
        self.plotter.set_y_label('Time', 's')
        #update plotter
        self.plotter.update()
| gpl-3.0 |
studio666/gnuradio | gr-filter/python/filter/qa_filterbank.py | 47 | 4546 | #!/usr/bin/env python
#
# Copyright 2012,2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
import random
import math
from gnuradio import gr, gr_unittest, filter, blocks
def convolution(A, B):
    """
    Return the 'valid-mode' convolution of the vectors A and B.

    Only the positions where B fully overlaps A are computed, so the
    result has length len(A) - len(B) + 1.  (The previous docstring
    understated the length by one: it said len(A)-len(B).)

    Works for any element types supporting * and + (floats, complex).
    """
    # For each alignment i, accumulate A[i-j] * B[j] over all taps.
    return [
        sum(A[i - j] * b for j, b in enumerate(B))
        for i in range(len(B) - 1, len(A))
    ]
class test_filterbank_vcvcf(gr_unittest.TestCase):
    """QA test for the filterbank_vcvcf block: vector-complex in/out,
    float taps, one filter per vector element."""
    def setUp(self):
        # Fresh flowgraph for each test.
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test_000(self):
        """
        Generates nfilts sets of random complex data.
        Generates two sets of random taps for each filter.
        Applies one set of the random taps, gets some output,
        applies the second set of random taps, gets some more output,
        The output is then compared with a python-implemented
        convolution.
        """
        # Seeded RNG so the test is deterministic across runs.
        myrand = random.Random(123).random
        nfilts = 10
        ntaps = 5
        # Sets some of the taps to be all zeros.
        zero_filts1 = (3, 7)
        zero_filts2 = (1, 6, 9)
        ndatapoints = 100
        # Generate some random sets of data
        data_sets = []
        for i in range(0, nfilts):
            data_sets.append([(myrand()-0.5) + (myrand()-0.5)*(0+1j)
                              for k in range(0, ndatapoints)])
        # Join them together to pass to vector_source block
        # (interleaved: one sample from each stream per vector).
        data = []
        for dp in zip(*data_sets):
            data += dp
        # Generate some random taps.
        taps1 = []
        taps2 = []
        for i in range(0, nfilts):
            if i in zero_filts1:
                taps1.append([0]*ntaps)
            else:
                taps1.append([myrand()-0.5 for k in range(0, ntaps)])
            if i in zero_filts2:
                taps2.append([0]*ntaps)
            else:
                taps2.append([myrand()-0.5 for k in range(0, ntaps)])
        # Calculate results with a python-implemented convolution.
        # The input is prefixed with its own tail because the source
        # repeats, so the block sees circularly wrapped history.
        results = []
        results2 = []
        for ds, ts, ts2 in zip(data_sets, taps1, taps2):
            results.append(convolution(ds[-len(ts):]+ds[:-1], ts))
            results2.append(convolution(ds[-len(ts):]+ds[:-1], ts2))
        # Convert results from 2D arrays to 1D arrays for ease of comparison.
        comb_results = []
        for rs in zip(*results):
            comb_results += rs
        comb_results2 = []
        for rs in zip(*results2):
            comb_results2 += rs
        # Construct the signal-processing chain.
        src = blocks.vector_source_c(data, True, nfilts)
        fb = filter.filterbank_vcvcf(taps1)
        v2s = blocks.vector_to_stream(gr.sizeof_gr_complex, nfilts)
        s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, nfilts*ndatapoints)
        snk = blocks.probe_signal_vc(nfilts*ndatapoints)
        self.tb.connect(src, fb, v2s, s2v, snk)
        # Run the signal-processing chain.
        self.tb.start()
        all_zero = True  # NOTE(review): unused; left for byte-compatibility.
        outdata = None
        waittime = 0.001
        # Wait until we have some data (busy-poll the probe sink).
        while (not outdata) or outdata[0]==0:
            time.sleep(waittime)
            outdata = snk.level()
        # Apply the second set of taps.
        fb.set_taps(taps2)
        outdata2 = None
        # Wait until we have new data (first sample changes once the
        # new taps have propagated through the flowgraph).
        while (not outdata2) or outdata[0] == outdata2[0]:
            time.sleep(waittime)
            outdata2 = snk.level()
        self.tb.stop()
        # Compare the datasets.
        self.assertComplexTuplesAlmostEqual(comb_results, outdata, 6)
        self.assertComplexTuplesAlmostEqual(comb_results2, outdata2, 6)
| gpl-3.0 |
CiscoSystems/vespa | neutron/plugins/nicira/vshield/edge_loadbalancer_driver.py | 5 | 13306 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Leon Cui, VMware
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.nicira.dbexts import vcns_db
from neutron.plugins.nicira.vshield.common import (
constants as vcns_const)
from neutron.plugins.nicira.vshield.common import (
exceptions as vcns_exc)
from neutron.services.loadbalancer import constants as lb_constants
LOG = logging.getLogger(__name__)
# Map Neutron LBaaS balancing methods to vShield Edge scheduler names.
BALANCE_MAP = {
    lb_constants.LB_METHOD_ROUND_ROBIN: 'round-robin',
    lb_constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
    lb_constants.LB_METHOD_SOURCE_IP: 'source'
}
# Map Neutron LBaaS protocols to Edge protocols.  HTTPS maps to plain
# 'tcp' -- presumably the Edge passes TLS through untouched; confirm
# against the vShield API docs before changing.
PROTOCOL_MAP = {
    lb_constants.PROTOCOL_TCP: 'tcp',
    lb_constants.PROTOCOL_HTTP: 'http',
    lb_constants.PROTOCOL_HTTPS: 'tcp'
}
class EdgeLbDriver():
    """Implementation of driver APIs for
    Edge Loadbalancer feature configuration

    NOTE(review): methods use ``self.vcns`` (the vShield API client) but
    it is never assigned in this class -- presumably this class is mixed
    into / inherited by a plugin that provides it.  Confirm against the
    composing plugin.
    """
    def _convert_lb_vip(self, context, edge_id, vip, app_profileid):
        """Map a Neutron vip dict to a vShield virtualServer request body."""
        pool_id = vip.get('pool_id')
        # Translate the Neutron pool id into the Edge-side pool id.
        poolid_map = vcns_db.get_vcns_edge_pool_binding(
            context.session, pool_id, edge_id)
        pool_vseid = poolid_map['pool_vseid']
        return {
            'name': vip.get('name'),
            'ipAddress': vip.get('address'),
            'protocol': vip.get('protocol'),
            'port': vip.get('protocol_port'),
            'defaultPoolId': pool_vseid,
            'applicationProfileId': app_profileid
        }
    def _restore_lb_vip(self, context, edge_id, vip_vse):
        """Map a vShield virtualServer body back to a Neutron vip dict."""
        pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid(
            context.session,
            edge_id,
            vip_vse['defaultPoolId'])
        return {
            'name': vip_vse['name'],
            'address': vip_vse['ipAddress'],
            'protocol': vip_vse['protocol'],
            'protocol_port': vip_vse['port'],
            'pool_id': pool_binding['pool_id']
        }
    def _convert_lb_pool(self, context, edge_id, pool, members):
        """Map a Neutron pool (+ members) to a vShield pool request body."""
        vsepool = {
            'name': pool.get('name'),
            # Fall back to round-robin for unknown lb_methods.
            'algorithm': BALANCE_MAP.get(
                pool.get('lb_method'),
                'round-robin'),
            'member': [],
            'monitorId': []
        }
        for member in members:
            vsepool['member'].append({
                'ipAddress': member['address'],
                'port': member['protocol_port']
            })
        ##TODO(linb) right now, vse only accept at most one monitor per pool
        monitors = pool.get('health_monitors')
        if not monitors:
            return vsepool
        monitorid_map = vcns_db.get_vcns_edge_monitor_binding(
            context.session,
            monitors[0],
            edge_id)
        vsepool['monitorId'].append(monitorid_map['monitor_vseid'])
        return vsepool
    def _restore_lb_pool(self, context, edge_id, pool_vse):
        """Map a vShield pool body back to a (minimal) Neutron pool dict."""
        #TODO(linb): Get more useful info
        return {
            'name': pool_vse['name'],
        }
    def _convert_lb_monitor(self, context, monitor):
        """Map a Neutron health monitor to a vShield monitor request body."""
        return {
            'type': PROTOCOL_MAP.get(
                monitor.get('type'), 'http'),
            'interval': monitor.get('delay'),
            'timeout': monitor.get('timeout'),
            'maxRetries': monitor.get('max_retries'),
            # The Neutron monitor id doubles as the Edge-side name so the
            # object can be correlated later.
            'name': monitor.get('id')
        }
    def _restore_lb_monitor(self, context, edge_id, monitor_vse):
        """Map a vShield monitor body back to a Neutron monitor dict."""
        return {
            'delay': monitor_vse['interval'],
            'timeout': monitor_vse['timeout'],
            'max_retries': monitor_vse['maxRetries'],
            'id': monitor_vse['name']
        }
    def _convert_app_profile(self, name, app_profile):
        """Build the application profile body attached to a new vip."""
        #TODO(linb): convert the session_persistence to
        #corresponding app_profile
        # NOTE(review): `app_profile` (the session_persistence dict) is
        # currently ignored; a fixed HTTP/sourceip profile is returned.
        return {
            "insertXForwardedFor": False,
            "name": name,
            "persistence": {
                "method": "sourceip"
            },
            "serverSslEnabled": False,
            "sslPassthrough": False,
            "template": "HTTP"
        }
    def create_vip(self, context, edge_id, vip):
        """Create an app profile + virtualServer on the Edge and record
        the id mapping in the vcns DB."""
        app_profile = self._convert_app_profile(
            vip['name'], vip.get('session_persistence'))
        try:
            header, response = self.vcns.create_app_profile(
                edge_id, app_profile)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create app profile on edge: %s"),
                              edge_id)
        # The new object's id is the last segment of the Location header.
        objuri = header['location']
        app_profileid = objuri[objuri.rfind("/") + 1:]
        vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
        try:
            header, response = self.vcns.create_vip(
                edge_id, vip_new)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create vip on vshield edge: %s"),
                              edge_id)
        objuri = header['location']
        vip_vseid = objuri[objuri.rfind("/") + 1:]
        # Add the vip mapping
        map_info = {
            "vip_id": vip['id'],
            "vip_vseid": vip_vseid,
            "edge_id": edge_id,
            "app_profileid": app_profileid
        }
        vcns_db.add_vcns_edge_vip_binding(context.session, map_info)
    def get_vip(self, context, id):
        """Fetch a vip from the Edge and return it in Neutron form."""
        vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id)
        edge_id = vip_binding[vcns_const.EDGE_ID]
        vip_vseid = vip_binding['vip_vseid']
        try:
            response = self.vcns.get_vip(edge_id, vip_vseid)[1]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to get vip on edge"))
        return self._restore_lb_vip(context, edge_id, response)
    def update_vip(self, context, vip):
        """Push an updated vip definition to the Edge."""
        vip_binding = vcns_db.get_vcns_edge_vip_binding(
            context.session, vip['id'])
        edge_id = vip_binding[vcns_const.EDGE_ID]
        vip_vseid = vip_binding.get('vip_vseid')
        app_profileid = vip_binding.get('app_profileid')
        vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
        try:
            self.vcns.update_vip(edge_id, vip_vseid, vip_new)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update vip on edge: %s"), edge_id)
    def delete_vip(self, context, id):
        """Delete the vip and its app profile from the Edge, then drop
        the DB binding."""
        vip_binding = vcns_db.get_vcns_edge_vip_binding(
            context.session, id)
        edge_id = vip_binding[vcns_const.EDGE_ID]
        vip_vseid = vip_binding['vip_vseid']
        app_profileid = vip_binding['app_profileid']
        try:
            self.vcns.delete_vip(edge_id, vip_vseid)
            self.vcns.delete_app_profile(edge_id, app_profileid)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to delete vip on edge: %s"), edge_id)
        vcns_db.delete_vcns_edge_vip_binding(context.session, id)
    def create_pool(self, context, edge_id, pool, members):
        """Create a pool on the Edge and record the id mapping."""
        pool_new = self._convert_lb_pool(context, edge_id, pool, members)
        try:
            header = self.vcns.create_pool(edge_id, pool_new)[0]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create pool"))
        objuri = header['location']
        pool_vseid = objuri[objuri.rfind("/") + 1:]
        # update the pool mapping table
        map_info = {
            "pool_id": pool['id'],
            "pool_vseid": pool_vseid,
            "edge_id": edge_id
        }
        vcns_db.add_vcns_edge_pool_binding(context.session, map_info)
    def get_pool(self, context, id, edge_id):
        """Fetch a pool from the Edge and return it in Neutron form."""
        pool_binding = vcns_db.get_vcns_edge_pool_binding(
            context.session, id, edge_id)
        if not pool_binding:
            msg = (_("pool_binding not found with id: %(id)s "
                     "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id})
            LOG.error(msg)
            raise vcns_exc.VcnsNotFound(
                resource='router_service_binding', msg=msg)
        pool_vseid = pool_binding['pool_vseid']
        try:
            response = self.vcns.get_pool(edge_id, pool_vseid)[1]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to get pool on edge"))
        return self._restore_lb_pool(context, edge_id, response)
    def update_pool(self, context, edge_id, pool, members):
        """Push an updated pool definition (incl. members) to the Edge."""
        pool_binding = vcns_db.get_vcns_edge_pool_binding(
            context.session, pool['id'], edge_id)
        pool_vseid = pool_binding['pool_vseid']
        pool_new = self._convert_lb_pool(context, edge_id, pool, members)
        try:
            self.vcns.update_pool(edge_id, pool_vseid, pool_new)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update pool"))
    def delete_pool(self, context, id, edge_id):
        """Delete a pool from the Edge, then drop the DB binding."""
        pool_binding = vcns_db.get_vcns_edge_pool_binding(
            context.session, id, edge_id)
        pool_vseid = pool_binding['pool_vseid']
        try:
            self.vcns.delete_pool(edge_id, pool_vseid)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to delete pool"))
        vcns_db.delete_vcns_edge_pool_binding(
            context.session, id, edge_id)
    def create_health_monitor(self, context, edge_id, health_monitor):
        """Create a health monitor on the Edge and record the mapping."""
        monitor_new = self._convert_lb_monitor(context, health_monitor)
        try:
            header = self.vcns.create_health_monitor(edge_id, monitor_new)[0]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create monitor on edge: %s"),
                              edge_id)
        objuri = header['location']
        monitor_vseid = objuri[objuri.rfind("/") + 1:]
        # update the health_monitor mapping table
        map_info = {
            "monitor_id": health_monitor['id'],
            "monitor_vseid": monitor_vseid,
            "edge_id": edge_id
        }
        vcns_db.add_vcns_edge_monitor_binding(context.session, map_info)
    def get_health_monitor(self, context, id, edge_id):
        """Fetch a monitor from the Edge and return it in Neutron form."""
        monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
            context.session, id, edge_id)
        if not monitor_binding:
            msg = (_("monitor_binding not found with id: %(id)s "
                     "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id})
            LOG.error(msg)
            raise vcns_exc.VcnsNotFound(
                resource='router_service_binding', msg=msg)
        monitor_vseid = monitor_binding['monitor_vseid']
        try:
            response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1]
        except vcns_exc.VcnsApiException as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to get monitor on edge: %s"),
                              e.response)
        return self._restore_lb_monitor(context, edge_id, response)
    def update_health_monitor(self, context, edge_id,
                              old_health_monitor, health_monitor):
        """Replace a monitor definition on the Edge (looked up by the
        *old* monitor's id)."""
        monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
            context.session,
            old_health_monitor['id'], edge_id)
        monitor_vseid = monitor_binding['monitor_vseid']
        monitor_new = self._convert_lb_monitor(
            context, health_monitor)
        try:
            self.vcns.update_health_monitor(
                edge_id, monitor_vseid, monitor_new)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update monitor on edge: %s"),
                              edge_id)
    def delete_health_monitor(self, context, id, edge_id):
        """Delete a monitor from the Edge, then drop the DB binding."""
        monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
            context.session, id, edge_id)
        monitor_vseid = monitor_binding['monitor_vseid']
        try:
            self.vcns.delete_health_monitor(edge_id, monitor_vseid)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to delete monitor"))
        vcns_db.delete_vcns_edge_monitor_binding(
            context.session, id, edge_id)
| apache-2.0 |
jeremiahmarks/sl4a | python/src/Lib/plat-irix6/FILE.py | 66 | 11296 | # Generated by h2py from /usr/include/sys/file.h
# NOTE(review): this module is machine-generated (by h2py from the IRIX
# <sys/file.h> header).  Repeated assignments to the same name (e.g. the
# CTX_* register indexes, NULL, FNDELAY) come from flattened #ifdef
# branches in the C header -- the last assignment wins.  Regenerate from
# the header rather than hand-editing values.
from warnings import warnpy3k
warnpy3k("the FILE module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Included from standards.h
# Included from sys/types.h
# Included from sgidefs.h
_MIPS_ISA_MIPS1 = 1
_MIPS_ISA_MIPS2 = 2
_MIPS_ISA_MIPS3 = 3
_MIPS_ISA_MIPS4 = 4
_MIPS_SIM_ABI32 = 1
_MIPS_SIM_NABI32 = 2
_MIPS_SIM_ABI64 = 3
# Included from sys/pthread.h
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
# NOTE(review): these def one-liners reference names (dev_t, __makedev,
# __major, ...) that are not defined in this module -- h2py translates C
# macros verbatim; they are not callable as-is.
def IS_STRING_SPEC_DEV(x): return ((dev_t)(x)==__makedev(MKDEV_VER, 0, 0))

def major(dev): return __major(MKDEV_VER, dev)

def minor(dev): return __minor(MKDEV_VER, dev)

# Included from sys/select.h
FD_SETSIZE = 1024
__NBBY = 8
# Included from string.h
NULL = 0L
NBBY = 8
# Included from sys/cpumask.h
MAXCPU = 128
def CPUMASK_INDEX(bit): return ((bit) >> 6)

def CPUMASK_SHFT(bit): return ((bit) & 0x3f)

def CPUMASK_IS_ZERO(p): return ((p) == 0)

def CPUMASK_IS_NONZERO(p): return ((p) != 0)

# Included from sys/nodemask.h
def CNODEMASK_IS_ZERO(p): return ((p) == 0)

def CNODEMASK_IS_NONZERO(p): return ((p) != 0)

# Included from sys/sema.h
# Included from sys/timespec.h
# Included from sys/param.h
# Included from sys/signal.h
SIGHUP = 1
SIGINT = 2
SIGQUIT = 3
SIGILL = 4
SIGTRAP = 5
SIGIOT = 6
SIGABRT = 6
SIGEMT = 7
SIGFPE = 8
SIGKILL = 9
SIGBUS = 10
SIGSEGV = 11
SIGSYS = 12
SIGPIPE = 13
SIGALRM = 14
SIGTERM = 15
SIGUSR1 = 16
SIGUSR2 = 17
SIGCLD = 18
SIGCHLD = 18
SIGPWR = 19
SIGWINCH = 20
SIGURG = 21
SIGPOLL = 22
SIGIO = 22
SIGSTOP = 23
SIGTSTP = 24
SIGCONT = 25
SIGTTIN = 26
SIGTTOU = 27
SIGVTALRM = 28
SIGPROF = 29
SIGXCPU = 30
SIGXFSZ = 31
SIGK32 = 32
SIGCKPT = 33
SIGRESTART = 34
SIGUME = 35
SIGPTINTR = 47
SIGPTRESCHED = 48
SIGRTMIN = 49
SIGRTMAX = 64
__sigargs = int
# Included from sys/sigevent.h
SIGEV_NONE = 128
SIGEV_SIGNAL = 129
SIGEV_CALLBACK = 130
SIGEV_THREAD = 131
# Included from sys/siginfo.h
SI_MAXSZ = 128
SI_USER = 0
SI_KILL = SI_USER
SI_QUEUE = -1
SI_ASYNCIO = -2
SI_TIMER = -3
SI_MESGQ = -4
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
NSIGTRAP = 2
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
UME_ECCERR = 1
NSIGUME = 1
SIG_NOP = 0
SIG_BLOCK = 1
SIG_UNBLOCK = 2
SIG_SETMASK = 3
SIG_SETMASK32 = 256
SA_ONSTACK = 0x00000001
SA_RESETHAND = 0x00000002
SA_RESTART = 0x00000004
SA_SIGINFO = 0x00000008
SA_NODEFER = 0x00000010
SA_NOCLDWAIT = 0x00010000
SA_NOCLDSTOP = 0x00020000
_SA_BSDCALL = 0x10000000
MINSIGSTKSZ = 512
SIGSTKSZ = 8192
SS_ONSTACK = 0x00000001
SS_DISABLE = 0x00000002
# Included from sys/ucontext.h
NGREG = 36
NGREG = 37
GETCONTEXT = 0
SETCONTEXT = 1
UC_SIGMASK = 001
UC_STACK = 002
UC_CPU = 004
UC_MAU = 010
UC_MCONTEXT = (UC_CPU|UC_MAU)
UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
CTX_R0 = 0
CTX_AT = 1
CTX_V0 = 2
CTX_V1 = 3
CTX_A0 = 4
CTX_A1 = 5
CTX_A2 = 6
CTX_A3 = 7
CTX_T0 = 8
CTX_T1 = 9
CTX_T2 = 10
CTX_T3 = 11
CTX_T4 = 12
CTX_T5 = 13
CTX_T6 = 14
CTX_T7 = 15
CTX_A4 = 8
CTX_A5 = 9
CTX_A6 = 10
CTX_A7 = 11
CTX_T0 = 12
CTX_T1 = 13
CTX_T2 = 14
CTX_T3 = 15
CTX_S0 = 16
CTX_S1 = 17
CTX_S2 = 18
CTX_S3 = 19
CTX_S4 = 20
CTX_S5 = 21
CTX_S6 = 22
CTX_S7 = 23
CTX_T8 = 24
CTX_T9 = 25
CTX_K0 = 26
CTX_K1 = 27
CTX_GP = 28
CTX_SP = 29
CTX_S8 = 30
CTX_RA = 31
CTX_MDLO = 32
CTX_MDHI = 33
CTX_CAUSE = 34
CTX_EPC = 35
CTX_SR = 36
CXT_R0 = CTX_R0
CXT_AT = CTX_AT
CXT_V0 = CTX_V0
CXT_V1 = CTX_V1
CXT_A0 = CTX_A0
CXT_A1 = CTX_A1
CXT_A2 = CTX_A2
CXT_A3 = CTX_A3
CXT_T0 = CTX_T0
CXT_T1 = CTX_T1
CXT_T2 = CTX_T2
CXT_T3 = CTX_T3
CXT_T4 = CTX_T4
CXT_T5 = CTX_T5
CXT_T6 = CTX_T6
CXT_T7 = CTX_T7
CXT_S0 = CTX_S0
CXT_S1 = CTX_S1
CXT_S2 = CTX_S2
CXT_S3 = CTX_S3
CXT_S4 = CTX_S4
CXT_S5 = CTX_S5
CXT_S6 = CTX_S6
CXT_S7 = CTX_S7
CXT_T8 = CTX_T8
CXT_T9 = CTX_T9
CXT_K0 = CTX_K0
CXT_K1 = CTX_K1
CXT_GP = CTX_GP
CXT_SP = CTX_SP
CXT_S8 = CTX_S8
CXT_RA = CTX_RA
CXT_MDLO = CTX_MDLO
CXT_MDHI = CTX_MDHI
CXT_CAUSE = CTX_CAUSE
CXT_EPC = CTX_EPC
CXT_SR = CTX_SR
CTX_FV0 = 0
CTX_FV1 = 2
CTX_FA0 = 12
CTX_FA1 = 13
CTX_FA2 = 14
CTX_FA3 = 15
CTX_FA4 = 16
CTX_FA5 = 17
CTX_FA6 = 18
CTX_FA7 = 19
CTX_FT0 = 4
CTX_FT1 = 5
CTX_FT2 = 6
CTX_FT3 = 7
CTX_FT4 = 8
CTX_FT5 = 9
CTX_FT6 = 10
CTX_FT7 = 11
CTX_FT8 = 20
CTX_FT9 = 21
CTX_FT10 = 22
CTX_FT11 = 23
CTX_FT12 = 1
CTX_FT13 = 3
CTX_FS0 = 24
CTX_FS1 = 25
CTX_FS2 = 26
CTX_FS3 = 27
CTX_FS4 = 28
CTX_FS5 = 29
CTX_FS6 = 30
CTX_FS7 = 31
CTX_FT8 = 21
CTX_FT9 = 23
CTX_FT10 = 25
CTX_FT11 = 27
CTX_FT12 = 29
CTX_FT13 = 31
CTX_FT14 = 1
CTX_FT15 = 3
CTX_FS0 = 20
CTX_FS1 = 22
CTX_FS2 = 24
CTX_FS3 = 26
CTX_FS4 = 28
CTX_FS5 = 30
SV_ONSTACK = 0x0001
SV_INTERRUPT = 0x0002
NUMBSDSIGS = (32)
def sigmask(sig): return (1L << ((sig)-1))

def sigmask(sig): return (1L << ((sig)-1))

SIG_ERR = (-1)
SIG_IGN = (1)
SIG_HOLD = (2)
SIG_DFL = (0)
NSIG = 65
MAXSIG = (NSIG-1)
NUMSIGS = (NSIG-1)
BRK_USERBP = 0
BRK_KERNELBP = 1
BRK_ABORT = 2
BRK_BD_TAKEN = 3
BRK_BD_NOTTAKEN = 4
BRK_SSTEPBP = 5
BRK_OVERFLOW = 6
BRK_DIVZERO = 7
BRK_RANGE = 8
BRK_PSEUDO_OP_BIT = 0x80
BRK_PSEUDO_OP_MAX = 0x3
BRK_CACHE_SYNC = 0x80
BRK_MULOVF = 1023
_POSIX_VERSION = 199506L
_POSIX_VERSION = 199506
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAXPID = 0x7ffffff0
MAXUID = 0x7fffffff
MAXLINK = 30000
SSIZE = 1
SINCR = 1
KSTKSIZE = 1
EXTKSTKSIZE = 1
KSTKIDX = 0
KSTEIDX = 1
EXTKSTKSIZE = 0
KSTKIDX = 0
CANBSIZ = 256
HZ = 100
TICK = 10000000
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS = 16
PMASK = 0177
PCATCH = 0400
PLTWAIT = 01000
PRECALC = 01000
PSWP = 0
PINOD = 10
PSNDD = PINOD
PRIBIO = 20
PZERO = 25
PMEM = 0
NZERO = 20
PPIPE = 26
PVFS = 27
PWAIT = 30
PSLEP = 39
PUSER = 60
PBATCH_CRITICAL = -1
PTIME_SHARE = -2
PTIME_SHARE_OVER = -3
PBATCH = -4
PWEIGHTLESS = -5
IO_NBPC = 4096
IO_BPCSHIFT = 12
MIN_NBPC = 4096
MIN_BPCSHIFT = 12
MIN_CPSSHIFT = 10
BPCSHIFT = 12
CPSSHIFT = 10
BPCSHIFT = 14
CPSSHIFT = 12
CPSSHIFT = 11
BPSSHIFT = (BPCSHIFT+CPSSHIFT)
NULL = 0L
CMASK = 022
NODEV = (-1)
NOPAGE = (-1)
NBPSCTR = 512
SCTRSHFT = 9
def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK0)

def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK)

def USERMODE(psw): return (((psw) & SR_KSU_MSK) == SR_KSU_USR)

MAXPATHLEN = 1024
MAXSYMLINKS = 30
MAXNAMELEN = 256
PIPE_BUF = 10240
PIPE_MAX = 10240
NBBY = 8
BBSHIFT = 9
BBSIZE = (1<<BBSHIFT)
BBMASK = (BBSIZE-1)
def BBTOB(bbs): return ((bbs) << BBSHIFT)

def OFFTOBB(bytes): return (((__uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)

def OFFTOBBT(bytes): return ((off_t)(bytes) >> BBSHIFT)

def BBTOOFF(bbs): return ((off_t)(bbs) << BBSHIFT)

SEEKLIMIT32 = 0x7fffffff
MAXBSIZE = 8192
DEV_BSIZE = BBSIZE
DEV_BSHIFT = BBSHIFT
# NOTE(review): h2py left the two macro translations below with dangling
# backslash continuations (the C macro bodies were multi-line); they are
# artifacts -- confirm before relying on them.
def btodb(bytes): return \
def dbtob(db): return \
BLKDEV_IOSHIFT = BPCSHIFT
BLKDEV_IOSIZE = (1<<BLKDEV_IOSHIFT)
def BLKDEV_OFF(off): return ((off) & (BLKDEV_IOSIZE - 1))

def BLKDEV_LBN(off): return ((off) >> BLKDEV_IOSHIFT)

def BLKDEV_LTOP(bn): return ((bn) * BLKDEV_BB)

MAXHOSTNAMELEN = 256
def DELAY(n): return us_delay(n)

def DELAYBUS(n): return us_delaybus(n)

TIMEPOKE_NOW = -100L
MUTEX_DEFAULT = 0x0
METER_NAMSZ = 16
METER_NO_SEQ = -1
def mutex_spinlock(l): return splhi()

def mutex_spintrylock(l): return splhi()

def spinlock_initialized(l): return 1

SV_FIFO = 0x0
SV_LIFO = 0x2
SV_PRIO = 0x4
SV_KEYED = 0x6
SV_DEFAULT = SV_FIFO
SEMA_NOHIST = 0x0001
SEMA_LOCK = 0x0004
NSCHEDCLASS = (-(PWEIGHTLESS)+1)
MR_ACCESS = 1
MR_UPDATE = 2
MRLOCK_BARRIER = 0x1
MRLOCK_BEHAVIOR = 0x2
MRLOCK_DBLTRIPPABLE = 0x4
MRLOCK_ALLOW_EQUAL_PRI = 0x8
MRLOCK_DEFAULT = MRLOCK_BARRIER
def mraccess(mrp): return mraccessf(mrp, 0)

def mrupdate(mrp): return mrupdatef(mrp, 0)

def mp_mutex_unlock(m): return mutex_unlock(m)

def mp_mutex_trylock(m): return mutex_trylock(m)

def mp_mutex_spinlock(m): return mutex_spinlock(m)

# Included from sys/mon.h
MON_LOCKED = 0x01
MON_WAITING = 0x02
MON_TIMEOUT = 0x04
MON_DOSRV = 0x08
MON_RUN = 0x10
MR_READER_BUCKETS = 13
def initlock(l): return spinlock_init(l,0)

def ownlock(x): return 1

def mutex_enter(m): return mutex_lock(m, PZERO)

def mutex_tryenter(m): return mutex_trylock(m)

def mutex_exit(m): return mutex_unlock(m)

def cv_signal(cv): return sv_signal(cv)

def cv_broadcast(cv): return sv_broadcast(cv)

def cv_destroy(cv): return sv_destroy(cv)

RW_READER = MR_ACCESS
RW_WRITER = MR_UPDATE
def rw_exit(r): return mrunlock(r)

def rw_tryupgrade(r): return mrtrypromote(r)

def rw_downgrade(r): return mrdemote(r)

def rw_destroy(r): return mrfree(r)

def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)

def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)

MS_FREE = 0
MS_UPD = 1
MS_ACC = 2
MS_WAITERS = 4
# Included from sys/fcntl.h
FNDELAY = 0x04
FAPPEND = 0x08
FSYNC = 0x10
FDSYNC = 0x20
FRSYNC = 0x40
FNONBLOCK = 0x80
FASYNC = 0x1000
FLARGEFILE = 0x2000
FNONBLK = FNONBLOCK
FDIRECT = 0x8000
FBULK = 0x10000
FDIRENT64 = 0x8000
FCREAT = 0x0100
FTRUNC = 0x0200
FEXCL = 0x0400
FNOCTTY = 0x0800
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_NDELAY = 0x04
O_APPEND = 0x08
O_SYNC = 0x10
O_DSYNC = 0x20
O_RSYNC = 0x40
O_NONBLOCK = 0x80
O_LARGEFILE = 0x2000
O_DIRECT = 0x8000
O_BULK = 0x10000
O_CREAT = 0x100
O_TRUNC = 0x200
O_EXCL = 0x400
O_NOCTTY = 0x800
F_DUPFD = 0
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
F_SETLK = 6
F_SETLKW = 7
F_CHKFL = 8
F_ALLOCSP = 10
F_FREESP = 11
F_SETBSDLK = 12
F_SETBSDLKW = 13
F_GETLK = 14
F_CHKLK = 15
F_CHKLKW = 16
F_CLNLK = 17
F_RSETLK = 20
F_RGETLK = 21
F_RSETLKW = 22
F_GETOWN = 23
F_SETOWN = 24
F_DIOINFO = 30
F_FSGETXATTR = 31
F_FSSETXATTR = 32
F_GETLK64 = 33
F_SETLK64 = 34
F_SETLKW64 = 35
F_ALLOCSP64 = 36
F_FREESP64 = 37
F_GETBMAP = 38
F_FSSETDM = 39
F_RESVSP = 40
F_UNRESVSP = 41
F_RESVSP64 = 42
F_UNRESVSP64 = 43
F_GETBMAPA = 44
F_FSGETXATTRA = 45
F_SETBIOSIZE = 46
F_GETBIOSIZE = 47
F_GETOPS = 50
F_DMAPI = 51
F_FSYNC = 52
F_FSYNC64 = 53
F_GETBDSATTR = 54
F_SETBDSATTR = 55
F_GETBMAPX = 56
F_SETPRIO = 57
F_GETPRIO = 58
F_RDLCK = 01
F_WRLCK = 02
F_UNLCK = 03
O_ACCMODE = 3
FD_CLOEXEC = 1
FD_NODUP_FORK = 4
BMV_IF_ATTRFORK = 0x1
BMV_IF_NO_DMAPI_READ = 0x2
BMV_IF_PREALLOC = 0x4
BMV_IF_VALID = (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC)
BMV_OF_PREALLOC = 0x1
BMV_IF_EXTENDED = 0x40000000
FMASK = 0x190FF
FOPEN = 0xFFFFFFFF
FREAD = 0x01
FWRITE = 0x02
FNDELAY = 0x04
FAPPEND = 0x08
FSYNC = 0x10
FDSYNC = 0x20
FRSYNC = 0x40
FNONBLOCK = 0x80
FASYNC = 0x1000
FNONBLK = FNONBLOCK
FLARGEFILE = 0x2000
FDIRECT = 0x8000
FBULK = 0x10000
FCREAT = 0x0100
FTRUNC = 0x0200
FEXCL = 0x0400
FNOCTTY = 0x0800
FINVIS = 0x0100
FSOCKET = 0x0200
FINPROGRESS = 0x0400
FPRIORITY = 0x0800
FPRIO = 0x4000
FDIRENT64 = 0x8000
FCLOSEXEC = 0x01
LOCK_SH = 1
LOCK_EX = 2
LOCK_NB = 4
LOCK_UN = 8
L_SET = 0
L_INCR = 1
L_XTND = 2
F_OK = 0
X_OK = 1
W_OK = 2
R_OK = 4
Shrhawk/edx-platform | pavelib/quality.py | 26 | 15940 | """
Check code quality using pep8, pylint, and diff_quality.
"""
from paver.easy import sh, task, cmdopts, needs, BuildFailure
import os
import re
from .utils.envs import Env
ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib'
def top_python_dirs(dirname):
    """
    Find the directories to start from in order to find all the Python files in `dirname`.

    `dirname` itself is included when it is a package (has an
    `__init__.py`); for the conventional `djangoapps`/`lib` containers
    that are *not* packages, each contained directory is included by
    name instead.
    """
    found = []
    if os.path.exists(os.path.join(dirname, "__init__.py")):
        found.append(dirname)
    for child in ('djangoapps', 'lib'):
        child_path = os.path.join(dirname, child)
        child_init = os.path.join(child_path, "__init__.py")
        if os.path.exists(child_path) and not os.path.exists(child_init):
            found.extend(
                entry for entry in os.listdir(child_path)
                if os.path.isdir(os.path.join(child_path, entry))
            )
    return found
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
("system=", "s", "System to act on"),
])
def find_fixme(options):
"""
Run pylint on system code, only looking for fixme items.
"""
num_fixme = 0
systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
for system in systems:
# Directory to put the pylint report in.
# This makes the folder if it doesn't already exist.
report_dir = (Env.REPORT_DIR / system).makedirs_p()
apps_list = ' '.join(top_python_dirs(system))
pythonpath_prefix = (
"PYTHONPATH={system}:{system}/lib"
"common/djangoapps:common/lib".format(
system=system
)
)
sh(
"{pythonpath_prefix} pylint --disable R,C,W,E --enable=fixme "
"--msg-template={msg_template} {apps} "
"| tee {report_dir}/pylint_fixme.report".format(
pythonpath_prefix=pythonpath_prefix,
msg_template='"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"',
apps=apps_list,
report_dir=report_dir
)
)
num_fixme += _count_pylint_violations(
"{report_dir}/pylint_fixme.report".format(report_dir=report_dir))
print "Number of pylint fixmes: " + str(num_fixme)
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
    ("system=", "s", "System to act on"),
    ("errors", "e", "Check for errors only"),
    ("limit=", "l", "limit for number of acceptable violations"),
])
def run_pylint(options):
    """
    Run pylint on system code. When violations limit is passed in,
    fail the task if too many violations are found.

    Writes a per-system report to reports/<system>/pylint.report and
    the overall count to the metrics directory.  A limit of -1 (the
    default) never fails the task.
    """
    num_violations = 0
    violations_limit = int(getattr(options, 'limit', -1))
    errors = getattr(options, 'errors', False)
    systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
    # Make sure the metrics subdirectory exists
    Env.METRICS_DIR.makedirs_p()
    for system in systems:
        # Directory to put the pylint report in.
        # This makes the folder if it doesn't already exist.
        report_dir = (Env.REPORT_DIR / system).makedirs_p()
        flags = []
        if errors:
            # Restrict pylint to error-class messages only.
            flags.append("--errors-only")
        apps_list = ' '.join(top_python_dirs(system))
        pythonpath_prefix = (
            "PYTHONPATH={system}:{system}/djangoapps:{system}/"
            "lib:common/djangoapps:common/lib".format(
                system=system
            )
        )
        # Run pylint and tee the full report to disk for later counting.
        sh(
            "{pythonpath_prefix} pylint {flags} --msg-template={msg_template} {apps} | "
            "tee {report_dir}/pylint.report".format(
                pythonpath_prefix=pythonpath_prefix,
                flags=" ".join(flags),
                msg_template='"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"',
                apps=apps_list,
                report_dir=report_dir
            )
        )
        num_violations += _count_pylint_violations(
            "{report_dir}/pylint.report".format(report_dir=report_dir))
    # Print number of violations to log
    violations_count_str = "Number of pylint violations: " + str(num_violations)
    print violations_count_str
    # Also write the number of violations to a file
    with open(Env.METRICS_DIR / "pylint", "w") as f:
        f.write(violations_count_str)
    # Fail number of violations is greater than the limit
    if num_violations > violations_limit > -1:
        raise Exception("Failed. Too many pylint violations. "
                        "The limit is {violations_limit}.".format(violations_limit=violations_limit))
def _count_pylint_violations(report_file):
"""
Parses a pylint report line-by-line and determines the number of violations reported
"""
num_violations_report = 0
# An example string:
# common/lib/xmodule/xmodule/tests/test_conditional.py:21: [C0111(missing-docstring), DummySystem] Missing docstring
# More examples can be found in the unit tests for this method
pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
for line in open(report_file):
violation_list_for_line = pylint_pattern.split(line)
# If the string is parsed into four parts, then we've found a violation. Example of split parts:
# test file, line number, violation name, violation details
if len(violation_list_for_line) == 4:
num_violations_report += 1
return num_violations_report
def _get_pep8_violations():
    """
    Runs pep8. Returns a tuple of (number_of_violations, violations_string)
    where violations_string is a string of all pep8 violations found, separated
    by new lines.
    """
    report_dir = (Env.REPORT_DIR / 'pep8')
    # Start from an empty report directory so stale results never leak in.
    report_dir.rmtree(ignore_errors=True)
    report_dir.makedirs_p()
    # Make sure the metrics subdirectory exists
    Env.METRICS_DIR.makedirs_p()
    # `tee -a` appends, which is safe here because the directory was
    # just recreated above.
    sh('pep8 . | tee {report_dir}/pep8.report -a'.format(report_dir=report_dir))
    count, violations_list = _pep8_violations(
        "{report_dir}/pep8.report".format(report_dir=report_dir)
    )
    return (count, violations_list)
def _pep8_violations(report_file):
"""
Returns a tuple of (num_violations, violations_list) for all
pep8 violations in the given report_file.
"""
with open(report_file) as f:
violations_list = f.readlines()
num_lines = len(violations_list)
return num_lines, violations_list
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
    ("system=", "s", "System to act on"),
])
def run_pep8(options):  # pylint: disable=unused-argument
    """
    Run pep8 on system code.
    Fail the task if any violations are found.

    NOTE(review): the `system` cmdopt is accepted but ignored -- pep8 is
    always run over the whole tree ('pep8 .').
    """
    (count, violations_list) = _get_pep8_violations()
    violations_list = ''.join(violations_list)
    # Print number of violations to log
    violations_count_str = "Number of pep8 violations: {count}".format(count=count)
    print violations_count_str
    print violations_list
    # Also write the number of violations to a file
    with open(Env.METRICS_DIR / "pep8", "w") as f:
        f.write(violations_count_str + '\n\n')
        f.write(violations_list)
    # Fail if any violations are found
    if count:
        failure_string = "Too many pep8 violations. " + violations_count_str
        failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
        raise Exception(failure_string)
@task
@needs('pavelib.prereqs.install_python_prereqs')
def run_complexity():
    """
    Uses radon to examine cyclomatic complexity.
    For additional details on radon, see http://radon.readthedocs.org/

    Best-effort: a radon failure is reported but does not fail the task.
    """
    system_string = 'cms/ lms/ common/ openedx/'
    print "--> Calculating cyclomatic complexity of files..."
    try:
        sh(
            "radon cc {system_string} --total-average".format(
                system_string=system_string
            )
        )
    except BuildFailure:
        # Deliberately swallowed: complexity is informational only.
        print "ERROR: Unable to calculate python-only code-complexity."
@task
@needs('pavelib.prereqs.install_node_prereqs')
@cmdopts([
    ("limit=", "l", "limit for number of acceptable violations"),
])
def run_jshint(options):
    """
    Runs jshint on static asset directories

    Writes the report to reports/jshint/jshint.report, records the
    violation count in the metrics directory, and fails when the count
    exceeds the (non-negative) limit.
    """
    violations_limit = int(getattr(options, 'limit', -1))
    jshint_report_dir = (Env.REPORT_DIR / "jshint")
    jshint_report = jshint_report_dir / "jshint.report"
    _prepare_report_dir(jshint_report_dir)
    jshint_directories = ["common/static/js", "cms/static/js", "lms/static/js"]
    # jshint exits non-zero when it finds violations; ignore that so the
    # report can still be parsed below.
    sh(
        "jshint {list} --config .jshintrc >> {jshint_report}".format(
            list=(" ".join(jshint_directories)), jshint_report=jshint_report
        ),
        ignore_error=True
    )
    # jshint's last output line contains the total ("N errors").
    num_violations = _get_count_from_last_line(jshint_report)
    if not num_violations:
        raise BuildFailure("Error in calculating total number of violations.")
    # Record the metric
    _write_metric(str(num_violations), (Env.METRICS_DIR / "jshint"))
    # Fail if number of violations is greater than the limit
    if num_violations > violations_limit > -1:
        raise Exception(
            "JSHint Failed. Too many violations ({count}).\nThe limit is {violations_limit}.".format(
                count=num_violations, violations_limit=violations_limit
            )
        )
def _write_metric(metric, filename):
"""
Write a given metric to a given file
Used for things like reports/metrics/jshint, which will simply tell you the number of
jshint violations found
"""
with open(filename, "w") as metric_file:
metric_file.write(metric)
def _prepare_report_dir(dir_name):
    """
    Sets a given directory to a created, but empty state
    """
    # `dir_name` is presumably a path.py Path object: rmtree_p / mkdir_p are
    # its "no error if missing / already exists" variants -- TODO confirm
    # against the callers' Env.REPORT_DIR type.
    dir_name.rmtree_p()
    dir_name.mkdir_p()
def _get_last_report_line(filename):
"""
Returns the last line of a given file. Used for getting output from quality output files.
"""
with open(filename, 'r') as report_file:
lines = report_file.readlines()
return lines[len(lines) - 1]
def _get_count_from_last_line(filename):
    """
    Extract the leading integer from the last line of ``filename``.

    Handles summary lines such as "3000 errors found"; returns ``None``
    when the last line does not start with digits.
    """
    summary_line = _get_last_report_line(filename)
    match = re.search(r'^\d+', summary_line)
    if match is None:
        return None
    return int(match.group(0))
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
    ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
    ("percentage=", "p", "fail if diff-quality is below this percentage"),
])
def run_quality(options):
    """
    Build the html diff quality reports, and print the reports to the console.
    :param: b, the branch to compare against, defaults to origin/master
    :param: p, diff-quality will fail if the quality percentage calculated is
    below this percentage. For example, if p is set to 80, and diff-quality finds
    quality of the branch vs the compare branch is less than 80%, then this task will fail.
    This threshold would be applied to both pep8 and pylint.
    """
    # Directory to put the diff reports in.
    # This makes the folder if it doesn't already exist.
    dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()
    # Accumulates failures from both the pep8 and pylint passes so both run
    # before the task aborts.
    diff_quality_percentage_failure = False
    def _pep8_output(count, violations_list, is_html=False):
        """
        Given a count & list of pep8 violations, pretty-print the pep8 output.
        If `is_html`, will print out with HTML markup.
        """
        if is_html:
            lines = ['<body>\n']
            sep = '-------------<br/>\n'
            title = "<h1>Quality Report: pep8</h1>\n"
            violations_bullets = ''.join(
                ['<li>{violation}</li><br/>\n'.format(violation=violation) for violation in violations_list]
            )
            violations_str = '<ul>\n{bullets}</ul>\n'.format(bullets=violations_bullets)
            violations_count_str = "<b>Violations</b>: {count}<br/>\n"
            fail_line = "<b>FAILURE</b>: pep8 count should be 0<br/>\n"
        else:
            lines = []
            sep = '-------------\n'
            title = "Quality Report: pep8\n"
            violations_str = ''.join(violations_list)
            violations_count_str = "Violations: {count}\n"
            fail_line = "FAILURE: pep8 count should be 0\n"
        violations_count_str = violations_count_str.format(count=count)
        lines.extend([sep, title, sep, violations_str, sep, violations_count_str])
        if count > 0:
            lines.append(fail_line)
        lines.append(sep + '\n')
        if is_html:
            lines.append('</body>')
        return ''.join(lines)
    # Run pep8 directly since we have 0 violations on master
    (count, violations_list) = _get_pep8_violations()
    # Print number of violations to log
    print _pep8_output(count, violations_list)
    # Also write the number of violations to a file
    with open(dquality_dir / "diff_quality_pep8.html", "w") as f:
        f.write(_pep8_output(count, violations_list, is_html=True))
    if count > 0:
        diff_quality_percentage_failure = True
    # ----- Set up for diff-quality pylint call -----
    # Set the string, if needed, to be used for the diff-quality --compare-branch switch.
    compare_branch = getattr(options, 'compare_branch', None)
    compare_branch_string = ''
    if compare_branch:
        compare_branch_string = '--compare-branch={0}'.format(compare_branch)
    # Set the string, if needed, to be used for the diff-quality --fail-under switch.
    diff_threshold = int(getattr(options, 'percentage', -1))
    percentage_string = ''
    if diff_threshold > -1:
        percentage_string = '--fail-under={0}'.format(diff_threshold)
    # Generate diff-quality html report for pylint, and print to console
    # If pylint reports exist, use those
    # Otherwise, `diff-quality` will call pylint itself
    pylint_files = get_violations_reports("pylint")
    pylint_reports = u' '.join(pylint_files)
    # diff-quality shells out to pylint, which needs the edx-platform app
    # directories on PYTHONPATH to resolve imports.
    pythonpath_prefix = (
        "PYTHONPATH=$PYTHONPATH:lms:lms/djangoapps:lms/lib:cms:cms/djangoapps:cms/lib:"
        "common:common/djangoapps:common/lib"
    )
    try:
        sh(
            "{pythonpath_prefix} diff-quality --violations=pylint "
            "{pylint_reports} {percentage_string} {compare_branch_string} "
            "--html-report {dquality_dir}/diff_quality_pylint.html ".format(
                pythonpath_prefix=pythonpath_prefix,
                pylint_reports=pylint_reports,
                percentage_string=percentage_string,
                compare_branch_string=compare_branch_string,
                dquality_dir=dquality_dir,
            )
        )
    # Python 2 syntax; a threshold failure (exit code 1) is recorded, any
    # other BuildFailure is re-raised as a genuine error.
    except BuildFailure, error_message:
        if is_percentage_failure(error_message):
            diff_quality_percentage_failure = True
        else:
            raise BuildFailure(error_message)
    # If one of the diff-quality runs fails, then paver exits with an error when it is finished
    if diff_quality_percentage_failure:
        raise BuildFailure("Diff-quality failure(s).")
def is_percentage_failure(error_message):
    """
    Tell whether a diff-quality BuildFailure was caused by dropping below the
    threshold percentage rather than by a genuine error.

    When diff-quality is run with a threshold percentage, it ends with an exit
    code of 1, which bubbles up to paver as a subprocess return code error.
    Any other return code indicates a real failure, and the caller should
    re-raise it.
    """
    # The original's inverted if/else over a membership test collapses to the
    # membership test itself.
    return "Subprocess return code: 1" in error_message
def get_violations_reports(violations_type):
    """
    Find violations report files by naming convention (e.g., every
    "pep8.report" file anywhere under the report directory).
    """
    target_name = "{violations_type}.report".format(violations_type=violations_type)
    violations_files = []
    for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):
        violations_files.extend(
            os.path.join(subdir, candidate)
            for candidate in files
            if candidate == target_name
        )
    return violations_files
| agpl-3.0 |
grlee77/numpy | tools/cythonize.py | 2 | 7355 | #!/usr/bin/env python3
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'numpy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'numpy'
VENDOR = 'NumPy'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """
    Run Cython on ``fromfile`` to produce ``tofile``.

    Emits C++ (--cplus) when the target extension is .cxx.  Raises OSError
    when Cython is not importable and RuntimeError when it is too old.
    """
    flags = ['-3', '--fast-fail']
    if tofile.endswith('.cxx'):
        flags.append('--cplus')
    try:
        # try the cython in the installed python first (somewhat related to scipy/scipy#2397)
        from Cython.Compiler.Version import version as cython_version
    except ImportError:
        # The `cython` command need not point to the version installed in the
        # Python running this script, so raise an error to avoid the chance of
        # using the wrong version of Cython.
        raise OSError('Cython needs to be installed in Python as a module')
    else:
        # check the version, and invoke through python
        from distutils.version import LooseVersion
        # Cython 0.29.21 is required for Python 3.9 and there are
        # other fixes in the 0.29 series that are needed even for earlier
        # Python versions.
        # Note: keep in sync with that in pyproject.toml
        required_version = LooseVersion('0.29.21')
        if LooseVersion(cython_version) < required_version:
            raise RuntimeError(f'Building {VENDOR} requires Cython >= {required_version}')
        # Invoke via `python -m cython` so the checked module is the one used.
        subprocess.check_call(
            [sys.executable, '-m', 'cython'] + flags + ["-o", tofile, fromfile])
def process_tempita_pyx(fromfile, tofile):
    """
    Expand a Tempita-templated ``.pyx.in`` into a sibling ``.pyx`` file,
    then cythonize that intermediate file into ``tofile``.
    """
    import npy_tempita as tempita

    assert fromfile.endswith('.pyx.in')
    with open(fromfile, "r") as template_file:
        template_text = template_file.read()
    rendered = tempita.sub(template_text)
    intermediate = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(intermediate, "w") as pyx_file:
        pyx_file.write(rendered)
    process_pyx(intermediate, tofile)
def process_tempita_pyd(fromfile, tofile):
    """
    Expand a Tempita-templated ``.pyd.in`` file directly into ``tofile``.
    """
    import npy_tempita as tempita

    # Bug fix: the original asserted '.pxd.in'/'.pxd', contradicting the
    # '.pyd.in' -> '.pyd' rule this processor is registered for in `rules`,
    # so any real .pyd.in input died with an AssertionError.
    assert fromfile.endswith('.pyd.in')
    assert tofile.endswith('.pyd')
    with open(fromfile, "r") as f:
        tmpl = f.read()
    pyxcontent = tempita.sub(tmpl)
    with open(tofile, "w") as f:
        f.write(pyxcontent)
def process_tempita_pxi(fromfile, tofile):
    """Expand a Tempita-templated ``.pxi.in`` file directly into ``tofile``."""
    import npy_tempita as tempita

    assert fromfile.endswith('.pxi.in')
    assert tofile.endswith('.pxi')
    with open(fromfile, "r") as template_file:
        template_text = template_file.read()
    rendered = tempita.sub(template_text)
    with open(tofile, "w") as output_file:
        output_file.write(rendered)
def process_tempita_pxd(fromfile, tofile):
    """Expand a Tempita-templated ``.pxd.in`` file directly into ``tofile``."""
    import npy_tempita as tempita

    assert fromfile.endswith('.pxd.in')
    assert tofile.endswith('.pxd')
    with open(fromfile, "r") as template_file:
        template_text = template_file.read()
    rendered = tempita.sub(template_text)
    with open(tofile, "w") as output_file:
        output_file.write(rendered)
# Maps a source-file extension to (processor function, output extension).
# find_process_files() consults this table for every file it visits.
# NOTE(review): process_tempita_pyd asserts '.pxd.in'/'.pxd' internally,
# which conflicts with the '.pyd.in' -> '.pyd' mapping here -- verify.
rules = {
    # fromext : function, toext
    '.pyx' : (process_pyx, '.c'),
    '.pyx.in' : (process_tempita_pyx, '.c'),
    '.pxi.in' : (process_tempita_pxi, '.pxi'),
    '.pxd.in' : (process_tempita_pxd, '.pxd'),
    '.pyd.in' : (process_tempita_pyd, '.pyd'),
}
#
# Hash db
#
def load_hashes(filename):
    """
    Load the hash database.

    Returns ``{source_path: (input_sha1, output_sha1)}``; an empty dict when
    the database file does not exist yet.
    """
    hashes = {}
    if os.path.isfile(filename):
        with open(filename, 'r') as db_file:
            for line in db_file:
                path, inhash, outhash = line.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write the hash db as '<path> <in_hash> <out_hash>' lines, sorted by path."""
    lines = [
        "%s %s %s\n" % (path, pair[0], pair[1])
        for path, pair in sorted(hash_db.items())
    ]
    with open(filename, 'w') as db_file:
        db_file.writelines(lines)
def sha1_of_file(filename):
    """Return the hex SHA-1 digest of the file's raw bytes."""
    with open(filename, "rb") as f:
        digest = hashlib.sha1(f.read())
    return digest.hexdigest()
#
# Main program
#
def normpath(path):
    """Normalize to forward slashes and strip a leading './' if present."""
    path = path.replace(os.sep, '/')
    return path[2:] if path.startswith('./') else path
def get_hash(frompath, topath):
    """Return ``(sha1(frompath), sha1(topath) or None if topath is missing)``."""
    to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
    from_hash = sha1_of_file(frompath)
    return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
    """
    Run ``processor_function`` on fromfile -> tofile (paths relative to
    ``path``) unless the hash db says nothing has changed, then record the
    new hashes in ``hash_db`` (mutated in place; caller persists it).
    """
    fullfrompath = os.path.join(path, fromfile)
    fulltopath = os.path.join(path, tofile)
    current_hash = get_hash(fullfrompath, fulltopath)
    if current_hash == hash_db.get(normpath(fullfrompath), None):
        print(f'{fullfrompath} has not changed')
        return
    # Processors expect to run from inside `path` (relative file names);
    # always restore the original cwd, even on failure.
    orig_cwd = os.getcwd()
    try:
        os.chdir(path)
        print(f'Processing {fullfrompath}')
        processor_function(fromfile, tofile)
    finally:
        os.chdir(orig_cwd)
    # changed target file, recompute hash
    current_hash = get_hash(fullfrompath, fulltopath)
    # store hash in db
    hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
    """
    Apply the matching `rules` processor to every file directly inside
    ``root_dir``, using the hash database to skip unchanged files.

    ``.pxi``/``.pxi.in``/``.pxd.in`` files are processed first because the
    ``.pyx`` files typically include them.
    """
    hash_db = load_hashes(HASH_FILE)
    # Bug fix: the original filtered with os.path.isdir(x), which tests `x`
    # relative to the *current working directory*, not root_dir, so
    # subdirectories of root_dir were never actually excluded.
    files = [x for x in os.listdir(root_dir)
             if not os.path.isdir(os.path.join(root_dir, x))]
    # .pxi or .pxi.in files are most likely dependencies for
    # .pyx files, so we need to process them first
    files.sort(key=lambda name: (name.endswith('.pxi') or
                                 name.endswith('.pxi.in') or
                                 name.endswith('.pxd.in')),
               reverse=True)
    for filename in files:
        for fromext, value in rules.items():
            if filename.endswith(fromext):
                if not value:
                    break
                function, toext = value
                if toext == '.c':
                    # A "# distutils: language = c++" directive switches the
                    # generated file to C++.
                    with open(os.path.join(root_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                        if m:
                            toext = ".cxx"
                fromfile = filename
                tofile = filename[:-len(fromext)] + toext
                process(root_dir, fromfile, tofile, function, hash_db)
                # Persist after every file so an interrupted run keeps its progress.
                save_hashes(hash_db, HASH_FILE)
                break
def main():
    """Entry point: cythonize the directory named on the command line (default DEFAULT_ROOT)."""
    root_dir = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_ROOT
    find_process_files(root_dir)


if __name__ == '__main__':
    main()
| bsd-3-clause |
tchernomax/ansible | lib/ansible/modules/network/f5/bigip_monitor_external.py | 8 | 22421 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_monitor_external
short_description: Manages external LTM monitors on a BIG-IP
description:
- Manages external LTM monitors on a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the monitor.
required: True
description:
description:
- The description of the monitor.
version_added: 2.7
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(http)
parent on the C(Common) partition.
default: /Common/external
arguments:
description:
- Specifies any command-line arguments that the script requires.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified.
external_program:
description:
- Specifies the name of the file for the monitor to use. In order to reference
a file, you must first import it using options on the System > File Management > External
Monitor Program File List > Import screen. The BIG-IP system automatically
places the file in the proper location on the file system.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request.
- If the target responds within the set time period, it is considered up.
- If the target does not respond within the set time period, it is considered
down.
- You can change this number to any number you want, however, it should be
3 times the interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then the
default value will be C(16).
variables:
description:
- Specifies any variables that the script requires.
- Note that double quotes in values will be suppressed.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an external monitor
bigip_monitor_external:
name: foo
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Create an external monitor with variables
bigip_monitor_external:
name: foo
timeout: 10
variables:
var1: foo
var2: bar
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Add a variable to an existing set
bigip_monitor_external:
name: foo
timeout: 10
variables:
var1: foo
var2: bar
cat: dog
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: external
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import compare_dictionary
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import compare_dictionary
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    """
    Shared parameter adapter for the external monitor module.

    Maps between Ansible option names and BIG-IP REST attribute names and
    normalizes/validates the individual values.
    """
    # REST attribute name -> module option name.
    api_map = {
        'defaultsFrom': 'parent',
        'apiRawValues': 'variables',
        'run': 'external_program',
        'args': 'arguments',
    }
    # Attributes sent to the REST API.
    api_attributes = [
        'defaultsFrom', 'interval', 'timeout', 'destination', 'run', 'args',
        'description',
    ]
    # Values reported back to the user.
    returnables = [
        'parent', 'ip', 'port', 'interval', 'timeout', 'variables', 'external_program',
        'arguments', 'description',
    ]
    # Values considered when diffing desired vs. current state.
    updatables = [
        'destination', 'interval', 'timeout', 'variables', 'external_program',
        'arguments', 'description',
    ]

    @property
    def destination(self):
        """Combined 'ip:port' form used by the BIG-IP API; None when unset."""
        if self.ip is None and self.port is None:
            return None
        destination = '{0}:{1}'.format(self.ip, self.port)
        return destination

    @destination.setter
    def destination(self, value):
        ip, port = value.split(':')
        self._values['ip'] = ip
        self._values['port'] = port

    @property
    def interval(self):
        """Monitor check interval in seconds; must be within 1..86400."""
        if self._values['interval'] is None:
            return None
        interval = int(self._values['interval'])
        # Per BZ617284, the BIG-IP UI does not raise a warning about this.
        # So I do.
        # Bug fix: the original guard was `1 > interval > 86400`, a chained
        # comparison that can never be true, so out-of-range values were
        # silently accepted.
        if interval < 1 or interval > 86400:
            raise F5ModuleError(
                "Interval value must be between 1 and 86400"
            )
        return interval

    @property
    def timeout(self):
        if self._values['timeout'] is None:
            return None
        return int(self._values['timeout'])

    @property
    def ip(self):
        """IP part of the destination; '*' is the wildcard form."""
        if self._values['ip'] is None:
            return None
        if self._values['ip'] in ['*', '0.0.0.0']:
            return '*'
        elif is_valid_ip(self._values['ip']):
            return self._values['ip']
        else:
            raise F5ModuleError(
                "The provided 'ip' parameter is not an IP address."
            )

    @property
    def port(self):
        if self._values['port'] is None:
            return None
        elif self._values['port'] == '*':
            return '*'
        return int(self._values['port'])

    @property
    def parent(self):
        """Fully-qualified (partition-prefixed) parent template name."""
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result

    @property
    def type(self):
        # Monitor type is fixed for this module.
        return 'external'
class ApiParameters(Parameters):
    """Parameters as returned by the BIG-IP REST API."""

    @property
    def variables(self):
        """
        Strip the 'userDefined ' prefix the API puts in front of each
        user-defined variable name, returning a plain name -> value dict.
        """
        if self._values['variables'] is None:
            return None
        pattern = r'^userDefined\s(?P<key>.*)'
        result = {}
        for raw_name, value in iteritems(self._values['variables']):
            matches = re.match(pattern, raw_name)
            if not matches:
                raise F5ModuleError(
                    "Unable to find the variable 'key' in the API payload."
                )
            result[matches.group('key')] = value
        return result
class ModuleParameters(Parameters):
    """Parameters as supplied by the Ansible task."""

    @property
    def variables(self):
        # Double quotes are suppressed (as documented for the option) because
        # they would break the tmsh command used to push variables.
        if self._values['variables'] is None:
            return None
        return {
            name: str(value).replace('"', '')
            for name, value in iteritems(self._values['variables'])
        }

    @property
    def external_program(self):
        # Fully qualify the program file name with the partition.
        if self._values['external_program'] is None:
            return None
        return fq_name(self.partition, self._values['external_program'])
class Changes(Parameters):
    def to_return(self):
        """
        Build a dict of the returnable parameters for module output.

        The broad try/except is deliberate best-effort behaviour: if any
        property raises while being read, the partially-built result is
        returned instead of failing the whole module run.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    # Changes in the representation consumed by the BIG-IP API.
    pass
class ReportableChanges(Changes):
    # Changes in the representation reported back to the Ansible user.
    pass
class Difference(object):
    """
    Computes, attribute by attribute, the delta between the desired state
    (`want`) and the device's current state (`have`).  Attributes with a
    property defined here get custom comparison; everything else falls back
    to simple inequality via __default.
    """
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        """Return the changed value for `param`, or None if unchanged."""
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        # Plain comparison; returns None implicitly when the values match.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # `have` has no such attribute yet; treat the wanted value as new.
            return attr1
    @property
    def parent(self):
        # The parent template is immutable on BIG-IP; changing it is an error.
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent monitor cannot be changed"
            )
    @property
    def destination(self):
        # Fill in whichever half of ip:port was omitted from the current
        # state before comparing the combined destination strings.
        if self.want.ip is None and self.want.port is None:
            return None
        if self.want.port is None:
            self.want.update({'port': self.have.port})
        if self.want.ip is None:
            self.want.update({'ip': self.have.ip})
        if self.want.port in [None, '*'] and self.want.ip != '*':
            raise F5ModuleError(
                "Specifying an IP address requires that a port number be specified"
            )
        if self.want.destination != self.have.destination:
            return self.want.destination
    @property
    def interval(self):
        # Validate interval < timeout using whichever side supplies each
        # value (wanted value wins over current).
        if self.want.timeout is not None and self.want.interval is not None:
            if self.want.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.timeout is not None:
            if self.have.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.interval is not None:
            if self.want.interval >= self.have.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        if self.want.interval != self.have.interval:
            return self.want.interval
    @property
    def variables(self):
        # Merge semantics: wanted variables are added/updated; variables
        # present only on the device are set to "none", which presumably
        # deletes them via tmsh -- TODO confirm against BIG-IP behaviour.
        if self.want.variables is None:
            return None
        if self.have.variables is None:
            return dict(
                variables=self.want.variables
            )
        result = dict()
        different = compare_dictionary(self.want.variables, self.have.variables)
        if not different:
            return None
        for k, v in iteritems(self.want.variables):
            if k in self.have.variables and v != self.have.variables[k]:
                result[k] = v
            elif k not in self.have.variables:
                result[k] = v
        for k, v in iteritems(self.have.variables):
            if k not in self.want.variables:
                result[k] = "none"
        if result:
            result = dict(
                variables=result
            )
            return result
class ModuleManager(object):
    """
    Drives the module workflow: read current state from the device, diff it
    against the desired state, and create/update/delete the external monitor
    through the iControl REST API.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create: every supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # On update: collect only the attributes that actually differ.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point called from main(); returns the module result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            # NOTE(review): other F5 modules call self.module.deprecate here;
            # confirm that F5RestClient actually exposes a `.module` attribute.
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        """Return True when the monitor already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # In check mode, report "would change" without touching the device.
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        self._set_default_creation_values()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def _set_default_creation_values(self):
        # Defaults documented in DOCUMENTATION, applied only on create.
        if self.want.timeout is None:
            self.want.update({'timeout': 16})
        if self.want.interval is None:
            self.want.update({'interval': 5})
        if self.want.ip is None:
            self.want.update({'ip': '*'})
        if self.want.port is None:
            self.want.update({'port': '*'})
    def create_on_device(self):
        """POST the new monitor, then push any user-defined variables."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if self.want.variables:
            self.set_variable_on_device(self.want.variables)
    def set_variable_on_device(self, commands):
        """
        Set user-defined variables via a tmsh command run through the
        /mgmt/tm/util/bash endpoint; the REST monitor resource does not
        accept them directly.
        """
        command = ' '.join(['user-defined {0} \\\"{1}\\\"'.format(k, v) for k, v in iteritems(commands)])
        command = 'tmsh modify ltm monitor external {0} {1}'.format(self.want.name, command)
        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        args = dict(
            command='run',
            utilCmdArgs='-c "{0}"'.format(command)
        )
        resp = self.client.api.post(uri, json=args)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def update_on_device(self):
        """PATCH changed attributes, then push changed variables (if any)."""
        params = self.changes.api_params()
        if params:
            uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.name)
            )
            resp = self.client.api.patch(uri, json=params)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
        if self.changes.variables:
            self.set_variable_on_device(self.changes.variables)
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        # Returns None (not False) on non-200; remove() re-checks existence.
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True
    def read_current_from_device(self):
        """GET the monitor's current attributes and wrap them as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """
    Builds the AnsibleModule argument spec: the common F5 connection options
    (f5_argument_spec) merged with this module's own options.
    """
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            parent=dict(default='/Common/external'),
            description=dict(),
            arguments=dict(),
            ip=dict(),
            port=dict(type='int'),
            external_program=dict(),
            interval=dict(type='int'),
            timeout=dict(type='int'),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            variables=dict(type='dict'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Module-specific options take precedence over the shared F5 ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """
    Module entry point: build the argument spec, run the manager, and exit
    via Ansible's JSON protocol (tokens are cleaned up on both paths).
    """
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Still revoke auth tokens before reporting the failure.
        cleanup_tokens(client)
        fail_json(module, ex, client)
if __name__ == '__main__':
    main()
| gpl-3.0 |
sebastien-forestier/NIPS2017 | ros/nips2017/src/nips2017/torso/torso.py | 1 | 2749 | import rospy
import json
from os import system
from nips2017.srv import *
from threading import RLock
from rospkg import RosPack
from os.path import join
from .services import TorsoServices
class Torso(object):
    """
    ROS node controlling the Poppy torso: moves it to start/rest poses and
    exposes a 'torso/reset' service.  Configuration comes from
    config/torso.json in the nips2017 package.
    """
    def __init__(self):
        self.rospack = RosPack()
        with open(join(self.rospack.get_path('nips2017'), 'config', 'torso.json')) as f:
            self.params = json.load(f)
        self.publish_rate = rospy.Rate(self.params['publish_rate'])
        self.primitive_head = None
        self.primitive_right = None
        # Protected resources
        self.in_rest_pose = False
        self.robot_lock = RLock()
        # Used services
        self.torso = TorsoServices(self.params['robot_name'])
        # Proposed services
        self.reset_srv_name = 'torso/reset'
        self.reset_srv = None
    def go_to_start(self, duration=5):
        """Stiffen the robot and move every joint to the neutral start pose."""
        # Joint targets in degrees; 0 everywhere except a slight left arm twist.
        d = {"abs_z": 0,
             "bust_y": 0,
             "bust_x": 0,
             "head_z": 0,
             "head_y": -0,
             "l_shoulder_y": 0,
             "l_shoulder_x": 0,
             "l_arm_z": 20,
             "l_elbow_y": 0,
             "r_shoulder_y": 0,
             "r_shoulder_x": 0,
             "r_arm_z": 0,
             "r_elbow_y": 0}
        self.torso.set_compliant(False)
        self.torso.reach(d, duration)
        # Block until the motion has (presumably) completed.
        rospy.sleep(duration)
    def go_to_rest(self, slow):
        """
        Move the left arm to the rest pose in two stages, holding the robot
        lock for the whole motion.  `slow` doubles each stage's duration.
        """
        with self.robot_lock:
            duration = 2 if slow else 0.5
            # Temporarily limit torque during the motion (values from config).
            self.torso.set_torque_max(self.params['torques'])
            self.torso.reach({'l_elbow_y': -35, 'l_shoulder_x': 30}, duration)
            rospy.sleep(duration)
            self.torso.reach({'l_shoulder_y': -25, 'l_shoulder_x': 40, 'l_arm_z': 30, 'l_elbow_y': 0}, duration)
            rospy.sleep(duration)
            rospy.sleep(0.5)
            self.in_rest_pose = True
            # Restore the default torque limits.
            self.torso.set_torque_max()
    def run(self):
        """Advertise the reset service, start idle motions, and spin until shutdown."""
        self.reset_srv = rospy.Service(self.reset_srv_name, Reset, self._cb_reset)
        self.go_to_start()
        self.torso.start_idle_motion('head')
        self.torso.start_idle_motion('right_arm')
        rospy.spin()
        self.torso.stop_idle_motion('head')
        self.torso.stop_idle_motion('right_arm')
    def _cb_reset(self, request):
        """Service callback: stiffen the left arm and return to the rest pose."""
        rospy.loginfo("Resetting Torso{}...".format(" in slow mode" if request.slow else ""))
        if request.slow:
            with self.robot_lock:
                self.torso.set_arm_compliant(False, 'left')
                # Audible warning before the slow reset motion starts.
                system('beep')
                rospy.sleep(1)
            self.go_to_rest(True)
        else:
            with self.robot_lock:
                self.torso.set_arm_compliant(False, 'left')
            self.go_to_rest(False)
        return ResetResponse()
| gpl-3.0 |
ahhda/sympy | sympy/solvers/inequalities.py | 2 | 18294 | """Tools for solving inequalities and systems of inequalities. """
from __future__ import print_function, division
from sympy.core import Symbol, Dummy, sympify
from sympy.core.compatibility import iterable, reduce
from sympy.sets import Interval
from sympy.core.relational import Relational, Eq, Ge, Lt
from sympy.sets.sets import FiniteSet, Union
from sympy.core.singleton import S
from sympy.functions import Abs
from sympy.logic import And
from sympy.polys import Poly, PolynomialError, parallel_poly_from_expr
from sympy.polys.polyutils import _nsort
from sympy.utilities.misc import filldedent
def solve_poly_inequality(poly, rel):
    """Solve a polynomial inequality with rational coefficients.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> from sympy.solvers.inequalities import solve_poly_inequality

    >>> solve_poly_inequality(Poly(x, x, domain='ZZ'), '==')
    [{0}]

    >>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '!=')
    [(-oo, -1), (-1, 1), (1, oo)]

    >>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '==')
    [{-1}, {1}]

    See Also
    ========
    solve_poly_inequalities
    """
    if not isinstance(poly, Poly):
        raise ValueError(
            'For efficiency reasons, `poly` should be a Poly instance')
    if poly.is_number:
        # Constant polynomial: the relation is identically true, identically
        # false, or cannot be decided.
        t = Relational(poly.as_expr(), 0, rel)
        if t is S.true:
            return [S.Reals]
        elif t is S.false:
            return [S.EmptySet]
        else:
            raise NotImplementedError(
                "could not determine truth value of %s" % t)

    reals, intervals = poly.real_roots(multiple=False), []

    if rel == '==':
        # Equality holds exactly at each real root.
        for root, _ in reals:
            interval = Interval(root, root)
            intervals.append(interval)
    elif rel == '!=':
        # Complement of the roots: open intervals between consecutive roots.
        left = S.NegativeInfinity

        for right, _ in reals + [(S.Infinity, 1)]:
            interval = Interval(left, right, True, True)
            intervals.append(interval)
            left = right
    else:
        # Order relations: sweep the roots from the right, tracking the sign
        # of the polynomial on each interval.  The initial sign is the sign
        # of the leading coefficient (i.e. the sign at +oo).
        if poly.LC() > 0:
            sign = +1
        else:
            sign = -1

        # eq_sign: the sign that satisfies the relation; equal: whether the
        # boundary roots are included (non-strict relations).
        eq_sign, equal = None, False

        if rel == '>':
            eq_sign = +1
        elif rel == '<':
            eq_sign = -1
        elif rel == '>=':
            eq_sign, equal = +1, True
        elif rel == '<=':
            eq_sign, equal = -1, True
        else:
            raise ValueError("'%s' is not a valid relation" % rel)

        right, right_open = S.Infinity, True

        for left, multiplicity in reversed(reals):
            if multiplicity % 2:
                # Odd multiplicity: the polynomial changes sign at this root.
                if sign == eq_sign:
                    intervals.insert(
                        0, Interval(left, right, not equal, right_open))

                sign, right, right_open = -sign, left, not equal
            else:
                # Even multiplicity: sign is preserved; the root itself may
                # need to be cut out (strict) or added back (non-strict).
                if sign == eq_sign and not equal:
                    intervals.insert(
                        0, Interval(left, right, True, right_open))
                    right, right_open = left, True
                elif sign != eq_sign and equal:
                    intervals.insert(0, Interval(left, left))

        if sign == eq_sign:
            intervals.insert(
                0, Interval(S.NegativeInfinity, right, True, right_open))

    return intervals
def solve_poly_inequalities(polys):
    """Solve polynomial inequalities with rational coefficients.

    Examples
    ========

    >>> from sympy.solvers.inequalities import solve_poly_inequalities
    >>> from sympy.polys import Poly
    >>> from sympy.abc import x
    >>> solve_poly_inequalities(((
    ... Poly(x**2 - 3), ">"), (
    ... Poly(-x**2 + 1), ">")))
    (-oo, -sqrt(3)) U (-1, 1) U (sqrt(3), oo)
    """
    from sympy import Union

    # The solution of the system is the union of the individual solutions.
    solutions = (solve_poly_inequality(*pair) for pair in polys)
    return Union(*solutions)
def solve_rational_inequalities(eqs):
    """Solve a system of rational inequalities with rational coefficients.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy import Poly
    >>> from sympy.solvers.inequalities import solve_rational_inequalities

    >>> solve_rational_inequalities([[
    ... ((Poly(-x + 1), Poly(1, x)), '>='),
    ... ((Poly(-x + 1), Poly(1, x)), '<=')]])
    {1}

    >>> solve_rational_inequalities([[
    ... ((Poly(x), Poly(1, x)), '!='),
    ... ((Poly(-x + 1), Poly(1, x)), '>=')]])
    (-oo, 0) U (0, 1]

    See Also
    ========
    solve_poly_inequality
    """
    result = S.EmptySet

    for _eqs in eqs:
        if not _eqs:
            continue

        # Running intersection of the solutions of all relations in this
        # conjunction; starts as the whole real line.
        global_intervals = [Interval(S.NegativeInfinity, S.Infinity)]

        for (numer, denom), rel in _eqs:
            # numer/denom and numer*denom have the same sign wherever
            # denom != 0, so solve the polynomial relation first.
            numer_intervals = solve_poly_inequality(numer*denom, rel)
            denom_intervals = solve_poly_inequality(denom, '==')

            intervals = []

            for numer_interval in numer_intervals:
                for global_interval in global_intervals:
                    interval = numer_interval.intersect(global_interval)

                    if interval is not S.EmptySet:
                        intervals.append(interval)

            global_intervals = intervals

            intervals = []

            # Cut out the zeros of the denominator, where the quotient is
            # undefined.
            for global_interval in global_intervals:
                for denom_interval in denom_intervals:
                    global_interval -= denom_interval

                if global_interval is not S.EmptySet:
                    intervals.append(global_interval)

            global_intervals = intervals

            if not global_intervals:
                break

        for interval in global_intervals:
            result = result.union(interval)

    return result
def reduce_rational_inequalities(exprs, gen, relational=True):
    """Reduce a system of rational inequalities with rational coefficients.

    Examples
    ========

    >>> from sympy import Poly, Symbol
    >>> from sympy.solvers.inequalities import reduce_rational_inequalities

    >>> x = Symbol('x', real=True)

    >>> reduce_rational_inequalities([[x**2 <= 0]], x)
    Eq(x, 0)

    >>> reduce_rational_inequalities([[x + 2 > 0]], x)
    And(-2 < x, x < oo)
    >>> reduce_rational_inequalities([[(x + 2, ">")]], x)
    And(-2 < x, x < oo)
    >>> reduce_rational_inequalities([[x + 2]], x)
    Eq(x, -2)
    """
    exact = True
    eqs = []
    solution = S.EmptySet
    for _exprs in exprs:
        _eqs = []

        for expr in _exprs:
            # Each entry may be an (expr, rel) pair, a Relational, or a bare
            # expression (implicitly expr == 0).
            if isinstance(expr, tuple):
                expr, rel = expr
            else:
                if expr.is_Relational:
                    expr, rel = expr.lhs - expr.rhs, expr.rel_op
                else:
                    expr, rel = expr, '=='

            if expr is S.true:
                numer, denom, rel = S.Zero, S.One, '=='
            elif expr is S.false:
                numer, denom, rel = S.One, S.One, '=='
            else:
                numer, denom = expr.together().as_numer_denom()

            try:
                (numer, denom), opt = parallel_poly_from_expr(
                    (numer, denom), gen)
            except PolynomialError:
                raise PolynomialError(filldedent('''
                    only polynomials and
                    rational functions are supported in this context'''))

            if not opt.domain.is_Exact:
                # Floating-point coefficients: solve exactly, evalf at the end.
                numer, denom, exact = numer.to_exact(), denom.to_exact(), False

            domain = opt.domain.get_exact()

            if not (domain.is_ZZ or domain.is_QQ):
                # Non-rational coefficients: fall back to the generic
                # univariate solver on the reassembled relation.
                expr = numer/denom
                expr = Relational(expr, 0, rel)
                solution = Union(solution, solve_univariate_inequality(expr, gen, relational=False))
            else:
                _eqs.append(((numer, denom), rel))

        eqs.append(_eqs)

    solution = Union(solution, solve_rational_inequalities(eqs))

    if not exact:
        solution = solution.evalf()

    if relational:
        solution = solution.as_relational(gen)

    return solution
def reduce_abs_inequality(expr, rel, gen):
    """Reduce an inequality with nested absolute values.

    Examples
    ========

    >>> from sympy import Abs, Symbol
    >>> from sympy.solvers.inequalities import reduce_abs_inequality
    >>> x = Symbol('x', real=True)

    >>> reduce_abs_inequality(Abs(x - 5) - 3, '<', x)
    And(2 < x, x < 8)

    >>> reduce_abs_inequality(Abs(x + 2)*3 - 13, '<', x)
    And(-19/3 < x, x < 7/3)

    See Also
    ========

    reduce_abs_inequalities
    """
    if gen.is_real is False:
        raise TypeError(filldedent('''
            can't solve inequalities with absolute
            values containing non-real variables'''))

    def _bottom_up_scan(expr):
        # Returns a list of (expr, conds) pairs: each is a version of `expr`
        # with every Abs resolved to a definite sign, valid under the side
        # conditions in `conds`.
        exprs = []

        if expr.is_Add or expr.is_Mul:
            op = expr.func

            for arg in expr.args:
                _exprs = _bottom_up_scan(arg)

                if not exprs:
                    exprs = _exprs
                else:
                    # Cross-combine the case splits of all arguments.
                    args = []

                    for expr, conds in exprs:
                        for _expr, _conds in _exprs:
                            args.append((op(expr, _expr), conds + _conds))

                    exprs = args
        elif expr.is_Pow:
            n = expr.exp

            if not n.is_Integer or n < 0:
                raise ValueError(
                    "only non-negative integer powers are allowed")

            _exprs = _bottom_up_scan(expr.base)

            for expr, conds in _exprs:
                exprs.append((expr**n, conds))
        elif isinstance(expr, Abs):
            _exprs = _bottom_up_scan(expr.args[0])

            for expr, conds in _exprs:
                # Split each case on the sign of the argument of Abs.
                exprs.append(( expr, conds + [Ge(expr, 0)]))
                exprs.append((-expr, conds + [Lt(expr, 0)]))
        else:
            exprs = [(expr, [])]

        return exprs

    exprs = _bottom_up_scan(expr)

    # Normalize '<'/'<=' to '>'/'>=' by negating the expression so the
    # rational solver only sees one orientation.
    mapping = {'<': '>', '<=': '>='}
    inequalities = []

    for expr, conds in exprs:
        if rel not in mapping.keys():
            expr = Relational( expr, 0, rel)
        else:
            expr = Relational(-expr, 0, mapping[rel])

        inequalities.append([expr] + conds)

    return reduce_rational_inequalities(inequalities, gen)
def reduce_abs_inequalities(exprs, gen):
    """Reduce a system of inequalities with nested absolute values.

    Examples
    ========

    >>> from sympy import Abs, Symbol
    >>> from sympy.abc import x
    >>> from sympy.solvers.inequalities import reduce_abs_inequalities
    >>> x = Symbol('x', real=True)

    >>> reduce_abs_inequalities([(Abs(3*x - 5) - 7, '<'),
    ... (Abs(x + 25) - 13, '>')], x)
    And(-2/3 < x, Or(And(-12 < x, x < oo), And(-oo < x, x < -38)), x < 4)

    >>> reduce_abs_inequalities([(Abs(x - 4) + Abs(3*x - 5) - 7, '<')], x)
    And(1/2 < x, x < 4)

    See Also
    ========

    reduce_abs_inequality
    """
    # Reduce each inequality on its own, then conjoin the results.
    reduced = []
    for expr, rel in exprs:
        reduced.append(reduce_abs_inequality(expr, rel, gen))
    return And(*reduced)
def solve_univariate_inequality(expr, gen, relational=True):
    """Solves a real univariate inequality.

    Examples
    ========

    >>> from sympy.solvers.inequalities import solve_univariate_inequality
    >>> from sympy.core.symbol import Symbol
    >>> x = Symbol('x')

    >>> solve_univariate_inequality(x**2 >= 4, x)
    Or(And(-oo < x, x <= -2), And(2 <= x, x < oo))

    >>> solve_univariate_inequality(x**2 >= 4, x, relational=False)
    (-oo, -2] U [2, oo)

    """
    from sympy.solvers.solvers import solve, denoms

    # This keeps the function independent of the assumptions about `gen`.
    # `solveset` makes sure this function is called only when the domain is
    # real.
    d = Dummy(real=True)
    expr = expr.subs(gen, d)
    _gen = gen
    gen = d

    if expr is S.true:
        rv = S.Reals
    elif expr is S.false:
        rv = S.EmptySet
    else:
        e = expr.lhs - expr.rhs
        # NOTE: `d` is deliberately rebound below (denominator, loop var);
        # `gen` already holds the real Dummy, so this is safe.
        parts = n, d = e.as_numer_denom()
        if all(i.is_polynomial(gen) for i in parts):
            solns = solve(n, gen, check=False)
            singularities = solve(d, gen, check=False)
        else:
            solns = solve(e, gen, check=False)
            singularities = []
            for d in denoms(e):
                singularities.extend(solve(d, gen))

        # Whether boundary points satisfy the relation (True for >=, <=, ==).
        include_x = expr.func(0, 0)

        def valid(x):
            # Decide whether the inequality holds at sample point `x`,
            # falling back to a 2-digit numeric evaluation when the exact
            # comparison cannot be resolved.
            v = e.subs(gen, x)
            try:
                r = expr.func(v, 0)
            except TypeError:
                r = S.false
            if r in (S.true, S.false):
                return r
            if v.is_real is False:
                return S.false
            else:
                v = v.n(2)
                if v.is_comparable:
                    return expr.func(v, 0)
                return S.false

        start = S.NegativeInfinity
        sol_sets = [S.EmptySet]
        try:
            reals = _nsort(set(solns + singularities), separated=True)[0]
        except NotImplementedError:
            raise NotImplementedError('sorting of these roots is not supported')
        # Sweep the candidate boundary points left to right, testing one
        # sample point inside each interval between them.
        for x in reals:
            end = x

            if end in [S.NegativeInfinity, S.Infinity]:
                if valid(S(0)):
                    sol_sets.append(Interval(start, S.Infinity, True, True))
                break

            if valid((start + end)/2 if start != S.NegativeInfinity else end - 1):
                sol_sets.append(Interval(start, end, True, True))

            if x in singularities:
                singularities.remove(x)
            elif include_x:
                sol_sets.append(FiniteSet(x))

            start = end

        end = S.Infinity

        # in case start == -oo then there were no solutions so we just
        # check a point between -oo and oo (e.g. 0) else pick a point
        # past the last solution (which is start after the end of the
        # for-loop above
        if valid(start + 1 if start is not S.NegativeInfinity else 0):
            sol_sets.append(Interval(start, end, True, True))

        rv = Union(*sol_sets).subs(gen, _gen)

    return rv if not relational else rv.as_relational(_gen)
def _solve_inequality(ie, s):
    """ A hacky replacement for solve, since the latter only works for
    univariate inequalities. """
    expr = ie.lhs - ie.rhs
    try:
        poly = Poly(expr, s)
        if poly.degree() != 1:
            # Only the linear case is handled directly.
            raise NotImplementedError
    except (PolynomialError, NotImplementedError):
        # Non-polynomial or non-linear: delegate to the general machinery.
        try:
            return reduce_rational_inequalities([[ie]], s)
        except PolynomialError:
            return solve_univariate_inequality(ie, s)
    a, b = poly.all_coeffs()
    # a*s + b REL 0  =>  s REL -b/a, flipping the relation when a < 0.
    if a.is_positive or ie.rel_op in ('!=', '=='):
        return ie.func(s, -b/a)
    if a.is_negative:
        return ie.reversed.func(s, -b/a)
    raise NotImplementedError
def _reduce_inequalities(inequalities, symbols):
    # helper for reduce_inequalities
    #
    # Buckets each inequality by its generator into: polynomial systems,
    # Abs-only systems, and everything else, then solves each bucket with
    # the appropriate specialized reducer and conjoins the results.

    poly_part, abs_part = {}, {}
    other = []

    for inequality in inequalities:

        expr, rel = inequality.lhs, inequality.rel_op  # rhs is 0

        # check for gens using atoms which is more strict than free_symbols to
        # guard against EX domain which won't be handled by
        # reduce_rational_inequalities
        gens = expr.atoms(Symbol)

        if len(gens) == 1:
            gen = gens.pop()
        else:
            common = expr.free_symbols & symbols
            if len(common) == 1:
                gen = common.pop()
                other.append(_solve_inequality(Relational(expr, 0, rel), gen))
                continue
            else:
                raise NotImplementedError(filldedent('''
                    inequality has more than one
                    symbol of interest'''))

        if expr.is_polynomial(gen):
            poly_part.setdefault(gen, []).append((expr, rel))
        else:
            # Only Abs components may go to the abs solver; other functions
            # or non-integer powers fall through to the generic solver.
            components = expr.find(lambda u:
                u.has(gen) and (
                u.is_Function or u.is_Pow and not u.exp.is_Integer))
            if components and all(isinstance(i, Abs) for i in components):
                abs_part.setdefault(gen, []).append((expr, rel))
            else:
                other.append(_solve_inequality(Relational(expr, 0, rel), gen))

    poly_reduced = []
    abs_reduced = []

    for gen, exprs in poly_part.items():
        poly_reduced.append(reduce_rational_inequalities([exprs], gen))

    for gen, exprs in abs_part.items():
        abs_reduced.append(reduce_abs_inequalities(exprs, gen))

    return And(*(poly_reduced + abs_reduced + other))
def reduce_inequalities(inequalities, symbols=[]):
    """Reduce a system of inequalities with rational coefficients.

    Examples
    ========

    >>> from sympy import sympify as S, Symbol
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.inequalities import reduce_inequalities

    >>> reduce_inequalities(0 <= x + 3, [])
    And(-3 <= x, x < oo)

    >>> reduce_inequalities(0 <= x + y*2 - 1, [x])
    x >= -2*y + 1
    """
    if not iterable(inequalities):
        inequalities = [inequalities]
    inequalities = [sympify(i) for i in inequalities]

    gens = set().union(*[i.free_symbols for i in inequalities])

    if not iterable(symbols):
        symbols = [symbols]
    # NOTE: the mutable default [] is safe here -- `symbols` is rebound on
    # the next line, never mutated in place.
    symbols = (set(symbols) or gens) & gens
    if any(i.is_real is False for i in symbols):
        raise TypeError(filldedent('''
            inequalities cannot contain symbols that are not real.'''))

    # make vanilla symbol real
    recast = dict([(i, Dummy(i.name, real=True))
        for i in gens if i.is_real is None])
    inequalities = [i.xreplace(recast) for i in inequalities]
    symbols = set([i.xreplace(recast) for i in symbols])

    # prefilter
    keep = []
    for i in inequalities:
        if isinstance(i, Relational):
            # Normalize to (lhs - rhs) REL 0.
            i = i.func(i.lhs.as_expr() - i.rhs.as_expr(), 0)
        elif i not in (True, False):
            i = Eq(i, 0)
        if i == True:
            continue
        elif i == False:
            return S.false
        if i.lhs.is_number:
            raise NotImplementedError(
                "could not determine truth value of %s" % i)
        keep.append(i)
    inequalities = keep
    del keep

    # solve system
    rv = _reduce_inequalities(inequalities, symbols)

    # restore original symbols and return
    return rv.xreplace(dict([(v, k) for k, v in recast.items()]))
| bsd-3-clause |
renyi533/tensorflow | tensorflow/python/estimator/canned/metric_keys.py | 41 | 1303 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""metric_keys python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.canned import metric_keys
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
# Re-export every attribute of the tensorflow_estimator implementation
# (public and single-underscore) so this legacy import path keeps working.
metric_keys.__all__ = [s for s in dir(metric_keys) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.canned.metric_keys import *
| apache-2.0 |
chromium/chromium | third_party/abseil-cpp/roll_abseil.py | 9 | 4289 | #!/usr/bin/env python
# NOTE: This script requires python 3.
"""Script to do the first step of Abseil roll into chromium.
"""
import logging
import os
import re
import subprocess
import tempfile
from datetime import datetime
ABSL_URI = 'https://github.com/abseil/abseil-cpp.git'
def _PullAbseil(abseil_dir):
  """Clone the upstream abseil-cpp repository into |abseil_dir|."""
  logging.info('Updating abseil...')
  clone_cmd = ['git', 'clone', ABSL_URI]
  subprocess.check_call(clone_cmd, cwd=abseil_dir)
def _SyncChromium(chromium_dir):
  """Bring the chromium checkout's main branch and deps up to date."""
  logging.info('Updating chromium...')
  for cmd in (['git', 'checkout', 'main'],
              ['git', 'pull', '--rebase'],
              ['gclient', 'sync']):
    subprocess.check_call(cmd, cwd=chromium_dir)
def _UpdateChromiumReadme(readme_filename, abseil_dir):
  """Overwrite the 'Revision:' field of README.chromium in place.

  Returns an 'old..new' short-hash range for the commit message.
  NOTE: relies on both revisions being exactly 40 characters so the
  in-place overwrite leaves the rest of the file untouched.
  """
  logging.info('Updating ' + readme_filename)

  # HEAD commit of the freshly cloned abseil checkout.
  stdout = subprocess.check_output(['git', 'log', '-n1', '--pretty=short'],
                                   cwd=abseil_dir)
  new_revision = re.search('commit\\s(.{40})', str(stdout)).group(1)

  with open(readme_filename, 'r+') as f:
    content = f.read()
    prefix = 'Revision: '
    pos = content.find(prefix)
    assert(pos > 0)
    pos = pos + len(prefix)
    old_revision = content[pos:pos+40]

    f.seek(pos)
    # Same length as old_revision, so no other bytes move.
    f.write(new_revision)

  logging.info('Abseil old revision is ' + old_revision)
  logging.info('Abseil new revision is ' + new_revision)
  return old_revision[0:10] + '..' + new_revision[0:10]
def _UpdateAbseilInChromium(abseil_dir, chromium_dir):
  """rsync the fresh abseil checkout over third_party/, preserving
  chromium-only files via --exclude patterns."""
  logging.info('Syncing abseil in chromium/src/third_party...')
  # Files that exist only in the chromium copy and must survive --delete.
  exclude = [
      '*BUILD.gn',
      'DIR_METADATA',
      'README.chromium',
      'OWNERS',
      '.gitignore',
      '.git',
      '*.gni',
      '*clang-format',
      'patches/*',
      'patches',
      'absl_hardening_test.cc',
      'roll_abseil.py',
      'generate_def_files.py',
      '*.def',
  ]
  destination = os.path.join(chromium_dir, 'third_party')
  params = ['rsync', '-aP', abseil_dir, destination, '--delete']
  params.extend('--exclude={}'.format(e) for e in exclude)
  subprocess.check_call(params, cwd=chromium_dir)
def _PatchAbseil(abseil_in_chromium_dir):
  """Apply chromium-local patches and drop headers chromium replaces."""
  logging.info('Patching abseil...')
  patches_dir = os.path.join(abseil_in_chromium_dir, 'patches')
  for patch in os.listdir(patches_dir):
    subprocess.check_call(
        ['patch', '--strip', '1', '-i', os.path.join(patches_dir, patch)])
  base_internal = os.path.join(abseil_in_chromium_dir, 'absl', 'base', 'internal')
  os.remove(os.path.join(base_internal, 'thread_annotations.h'))
  os.remove(os.path.join(base_internal, 'dynamic_annotations.h'))
def _Commit(chromium_dir, hash_diff):
  """Create the roll commit and upload it for review.

  Args:
    chromium_dir: chromium/src checkout root.
    hash_diff: 'old..new' revision range used in the commit message.
  """
  logging.info('Commit...')
  desc="""Roll abseil_revision {0}
Change Log:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+log/{0}
Full diff:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+/{0}
Bug: None""".format(hash_diff)

  subprocess.check_call(['git', 'add', 'third_party/abseil-cpp'], cwd=chromium_dir)
  subprocess.check_call(['git', 'commit', '-m', desc], cwd=chromium_dir)
  logging.info('Upload...')
  # --bypass-hooks: presubmit checks are skipped for the automated upload.
  subprocess.check_call(['git', 'cl', 'upload', '-m', desc, '--bypass-hooks'], cwd=chromium_dir)
def _Roll():
  """Top-level roll: sync chromium, create a dated branch, import a fresh
  abseil checkout, patch it, and commit/upload the result."""
  chromium_dir = os.getcwd()
  abseil_in_chromium_dir = os.path.join(chromium_dir, 'third_party', 'abseil-cpp')
  _SyncChromium(chromium_dir)

  branch_name = datetime.today().strftime('rolling-absl-%Y%m%d')
  logging.info('Creating branch ' + branch_name + ' for the roll...')
  subprocess.check_call(['git', 'checkout', '-b', branch_name], cwd=chromium_dir)

  # Clone upstream into a temp dir that is deleted once the sync/patch is done.
  with tempfile.TemporaryDirectory() as abseil_root:
    _PullAbseil(abseil_root)
    abseil_dir = os.path.join(abseil_root, 'abseil-cpp')
    _UpdateAbseilInChromium(abseil_dir, chromium_dir)
    hash_diff = _UpdateChromiumReadme(os.path.join(abseil_in_chromium_dir, 'README.chromium'),
                                      abseil_dir)
    _PatchAbseil(abseil_in_chromium_dir)

  _Commit(chromium_dir, hash_diff)
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  # Refuse to run unless invoked from a chromium/src checkout root.
  if os.getcwd().endswith('src') and os.path.exists('chrome/browser'):
    _Roll()
    logging.info("Next step is manual: Fix BUILD.gn files to match BUILD.bazel changes.")
    logging.info("After that run generate_def_files.py. ")
  else:
    logging.error('Run this script from a chromium/src/ directory.')
| bsd-3-clause |
bakkou-badri/dataminingproject | env/lib/python2.7/site-packages/pip/_vendor/requests/auth.py | 294 | 6173 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import logging
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        # Subclasses must mutate and return the prepared request ``r``.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Stamp the Authorization header onto the outgoing request.
        header_value = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = header_value
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same credentials as Basic auth, but on the proxy header.
        credentials = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = credentials
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Challenge/response state for the RFC 2617 handshake.
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}
        self.pos = None

    def build_digest_header(self, method, url):
        """Build the ``Authorization: Digest ...`` header value from the
        challenge previously parsed into ``self.chal`` (RFC 2617)."""
        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8
        # NOTE(review): for any other algorithm value, hash_utf8 is never
        # bound, so the `is None` check below raises NameError instead of
        # returning None -- confirm against upstream requests.
        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        # Reusing the server nonce bumps the nonce count; a new nonce resets it.
        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count

        # Client nonce: hash of count, server nonce, wall time and 8 random bytes.
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if qop is None:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""
        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self.pos)
        num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')

        # Retry at most once per request to avoid an auth loop.
        if 'digest' in s_auth.lower() and num_401_calls < 2:

            setattr(self, 'num_401_calls', num_401_calls + 1)
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        setattr(self, 'num_401_calls', 1)
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            # Remember the body position so handle_401 can rewind and resend.
            self.pos = r.body.tell()
        except AttributeError:
            pass
        r.register_hook('response', self.handle_401)
        return r
| gpl-2.0 |
chkamil/ardupilot | Tools/LogAnalyzer/tests/TestVibration.py | 261 | 3069 | from LogAnalyzer import Test,TestResult
import DataflashLog
import numpy
class TestVibration(Test):
    '''test for accelerometer vibration (accX/accY/accZ) within recommendations'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Vibration"

    def run(self, logdata, verbose):
        """Grade accelerometer noise over a stable LOITER segment as
        GOOD/WARN/FAIL; N/A for non-copter logs."""
        self.result = TestResult()

        # Vibration thresholds only apply to multirotors.
        if logdata.vehicleType != "ArduCopter":
            self.result.status = TestResult.StatusType.NA
            return

        # constants
        gravity = -9.81
        aimRangeWarnXY = 1.5
        aimRangeFailXY = 3.0
        aimRangeWarnZ = 2.0 # gravity +/- aim range
        aimRangeFailZ = 5.0 # gravity +/- aim range

        if not "IMU" in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No IMU log data"
            return

        # find some stable LOITER data to analyze, at least 10 seconds
        chunks = DataflashLog.DataflashLogHelper.findLoiterChunks(logdata, minLengthSeconds=10, noRCInputs=True)
        if not chunks:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No stable LOITER log data found"
            return

        # for now we'll just use the first (largest) chunk of LOITER data
        # TODO: ignore the first couple of secs to avoid bad data during transition - or can we check more analytically that we're stable?
        # TODO: accumulate all LOITER chunks over min size, or just use the largest one?
        startLine = chunks[0][0]
        endLine = chunks[0][1]
        #print "TestVibration using LOITER chunk from lines %s to %s" % (`startLine`, `endLine`)

        def getStdDevIMU(logdata, channelName, startLine,endLine):
            # Standard deviation of one accel channel over the segment.
            # NOTE(review): .values() is assumed to return a list
            # (Python 2 era code); on Python 3 this would need list().
            loiterData = logdata.channels["IMU"][channelName].getSegment(startLine,endLine)
            numpyData = numpy.array(loiterData.dictData.values())
            return numpy.std(numpyData)

        # use 2x standard deviations as the metric, so if 95% of samples lie within the aim range we're good
        stdDevX = abs(2 * getStdDevIMU(logdata,"AccX",startLine,endLine))
        stdDevY = abs(2 * getStdDevIMU(logdata,"AccY",startLine,endLine))
        stdDevZ = abs(2 * getStdDevIMU(logdata,"AccZ",startLine,endLine))
        if (stdDevX > aimRangeFailXY) or (stdDevY > aimRangeFailXY) or (stdDevZ > aimRangeFailZ):
            self.result.status = TestResult.StatusType.FAIL
            self.result.statusMessage = "Vibration too high (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX,stdDevY,stdDevZ)
        elif (stdDevX > aimRangeWarnXY) or (stdDevY > aimRangeWarnXY) or (stdDevZ > aimRangeWarnZ):
            self.result.status = TestResult.StatusType.WARN
            self.result.statusMessage = "Vibration slightly high (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX,stdDevY,stdDevZ)
        else:
            self.result.status = TestResult.StatusType.GOOD
            self.result.statusMessage = "Good vibration values (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX,stdDevY,stdDevZ)
| gpl-3.0 |
rbharath/deepchem | deepchem/models/tensorflow_models/lr.py | 2 | 9592 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 08 14:10:02 2016
@author: Zhenqin Wu
"""
import tensorflow as tf
import numpy as np
import os
import time
from deepchem.metrics import from_one_hot
from deepchem.models.tensorflow_models import TensorflowGraph
from deepchem.models.tensorflow_models import TensorflowGraphModel
from deepchem.nn import model_ops
from deepchem.utils.save import log
from deepchem.data import pad_features
from deepchem.metrics import to_one_hot
def weight_decay(penalty_type, penalty):
  """Return an L1 or L2 penalty over all non-bias trainable variables.

  Args:
    penalty_type: 'l1' or 'l2'.
    penalty: scalar multiplier applied to the summed penalty.

  Raises:
    NotImplementedError: for any other penalty_type.
  """
  # due to the different shape of weight(ndims=2) and bias(ndims=1),
  # will using this version for logreg
  variables = []
  # exclude bias variables
  # NOTE(review): biases are identified purely by first dimension == 1, so
  # a genuine weight with a leading dimension of 1 would also be skipped.
  for v in tf.trainable_variables():
    if v.get_shape().as_list()[0] > 1:
      variables.append(v)

  with tf.name_scope('weight_decay'):
    if penalty_type == 'l1':
      cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])
    elif penalty_type == 'l2':
      cost = tf.add_n([tf.nn.l2_loss(v) for v in variables])
    else:
      raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)
    cost *= penalty
    tf.summary.scalar('Weight Decay Cost', cost)
  return cost
class TensorflowLogisticRegression(TensorflowGraphModel):
""" A simple tensorflow based logistic regression model. """
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture of model: n_tasks * sigmoid nodes.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.

    Returns a (logits_per_task, labels, weights) tuple.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    n_features = self.n_features
    with graph.as_default():
      with placeholder_scope:
        mol_features = tf.placeholder(
            tf.float32, shape=[None, n_features], name='mol_features')

      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      lg_list = []

      label_placeholders = self.add_label_placeholders(graph, name_scopes)
      weight_placeholders = self.add_example_weight_placeholders(graph,
                                                                 name_scopes)
      if training:
        # During training, features/labels/weights flow through a small FIFO
        # queue so input feeding can overlap with compute; the dequeued slots
        # are [features, labels..., weights...] in enqueue order.
        graph.queue = tf.FIFOQueue(
            capacity=5,
            dtypes=[tf.float32] *
            (len(label_placeholders) + len(weight_placeholders) + 1))
        graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
                                            + weight_placeholders)
        queue_outputs = graph.queue.dequeue()
        labels = queue_outputs[1:len(label_placeholders) + 1]
        weights = queue_outputs[len(label_placeholders) + 1:]
        prev_layer = queue_outputs[0]
      else:
        # At inference time read straight from the placeholders.
        labels = label_placeholders
        weights = weight_placeholders
        prev_layer = mol_features

      for task in range(self.n_tasks):
        #setting up n_tasks nodes(output nodes)
        lg = model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=1,
            weight_init=tf.truncated_normal(
                shape=[self.n_features, 1], stddev=weight_init_stddevs[0]),
            bias_init=tf.constant(value=bias_init_consts[0], shape=[1]))
        lg_list.append(lg)
    return (lg_list, labels, weights)
def add_label_placeholders(self, graph, name_scopes):
#label placeholders with size batch_size * 1
labels = []
placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
name_scopes)
with placeholder_scope:
for task in range(self.n_tasks):
labels.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[None, 1], name='labels_%d' % task)))
return labels
def add_training_cost(self, graph, name_scopes, output, labels, weights):
  """Adds the weighted sigmoid cross-entropy training loss to the graph.

  graph: tf.Graph the cost ops are added to.
  name_scopes: name-scope bookkeeping passed to TensorflowGraph helpers.
  output: list of per-task logit tensors (as returned by build()).
  labels: list of per-task label tensors.
  weights: list of per-task example-weight tensors.
  Returns the scalar loss tensor: sum over tasks of the per-task mean cost,
  plus an optional weight-decay penalty.
  """
  with graph.as_default():
    epsilon = 1e-3  # small float to avoid dividing by zero
    # NOTE(review): epsilon is unused in this method — candidate for removal.
    weighted_costs = []  # weighted costs for each example
    gradient_costs = []  # costs used for gradient calculation
    with TensorflowGraph.shared_name_scope('costs', graph, name_scopes):
      for task in range(self.n_tasks):
        # zero-pad the task index so scope names sort lexicographically
        task_str = str(task).zfill(len(str(self.n_tasks)))
        with TensorflowGraph.shared_name_scope('cost_{}'.format(task_str),
                                               graph, name_scopes):
          with tf.name_scope('weighted'):
            weighted_cost = self.cost(output[task], labels[task],
                                      weights[task])
            weighted_costs.append(weighted_cost)
          with tf.name_scope('gradient'):
            # Note that we divide by the batch size and not the number of
            # non-zero weight examples in the batch. Also, instead of using
            # tf.reduce_mean (which can put ops on the CPU) we explicitly
            # calculate with div/sum so it stays on the GPU.
            gradient_cost = tf.div(
                tf.reduce_sum(weighted_cost), self.batch_size)
            gradient_costs.append(gradient_cost)
      # aggregated costs
      with TensorflowGraph.shared_name_scope('aggregated', graph,
                                             name_scopes):
        with tf.name_scope('gradient'):
          loss = tf.add_n(gradient_costs)
          # weight decay
          if self.penalty != 0.0:
            # using self-defined regularization
            penalty = weight_decay(self.penalty_type, self.penalty)
            loss += penalty
    return loss
def cost(self, logits, labels, weights):
  """Per-example cost: sigmoid cross-entropy of logits vs. labels,
  multiplied elementwise by the example weights."""
  return tf.multiply(
      tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels),
      weights)
def add_output_ops(self, graph, output):
  """Wrap each task's logits in a sigmoid node for inference.

  graph: tf.Graph the inference ops are added to.
  output: list of per-task logit tensors.
  Returns the list of sigmoid output tensors, one per task.
  """
  with graph.as_default():
    with tf.name_scope('inference'):
      output = [
          tf.nn.sigmoid(task_logits, name='sigmoid_%d' % task_idx)
          for task_idx, task_logits in enumerate(output)
      ]
  return output
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
  """Build the feed dict for one batch.

  X_b: batch feature matrix, fed as 'mol_features'.
  y_b: optional batch labels; when absent, zero dummies of shape
    (batch_size, 1) are fed for every task.
  w_b: optional batch example weights; when absent, all-ones dummies are fed.
  ids_b: unused, accepted for interface compatibility.
  Returns the dict produced by TensorflowGraph.get_feed_dict.
  """
  feed = {"mol_features": X_b}
  for task in range(self.n_tasks):
    if y_b is None:
      # Dummy placeholders
      feed["labels_%d" % task] = np.zeros((self.batch_size, 1))
    else:
      # to_one_hot yields two columns; keep only the positive-class
      # column so the tensor is [?, 1]
      feed["labels_%d" % task] = to_one_hot(y_b[:, task])[:, 1:2]
    if w_b is None:
      # Dummy placeholders
      feed["weights_%d" % task] = np.ones((self.batch_size,))
    else:
      feed["weights_%d" % task] = w_b[:, task]
  return TensorflowGraph.get_feed_dict(feed)
def predict_proba_on_batch(self, X):
  """Return class probabilities for a single batch of features.

  X: 2D feature array for one batch; padded up to batch_size when
    self.pad_batches is set.
  Returns a fresh array ordered batch_size x n_tasks x 2, where the last
  axis holds [P(class 0), P(class 1)] derived from the sigmoid outputs.
  """
  if self.pad_batches:
    X = pad_features(self.batch_size, X)
  if not self._restored_model:
    # lazily restore model weights on first prediction call
    self.restore()
  with self.eval_graph.graph.as_default():
    # run eval data through the model
    n_tasks = self.n_tasks
    with self._get_shared_session(train=False).as_default():
      feed_dict = self.construct_feed_dict(X)
      data = self._get_shared_session(train=False).run(
          self.eval_graph.output, feed_dict=feed_dict)
      batch_outputs = np.asarray(data[:n_tasks], dtype=float)
      # transfer 2D prediction tensor to 2D x n_classes(=2):
      # sigmoid gives P(class 1); its complement is P(class 0)
      complimentary = np.ones(np.shape(batch_outputs))
      complimentary = complimentary - batch_outputs
      batch_outputs = np.concatenate(
          [complimentary, batch_outputs], axis=batch_outputs.ndim - 1)
      # reshape to batch_size x n_tasks x ...
      if batch_outputs.ndim == 3:
        batch_outputs = batch_outputs.transpose((1, 0, 2))
      elif batch_outputs.ndim == 2:
        batch_outputs = batch_outputs.transpose((1, 0))
      else:
        raise ValueError('Unrecognized rank combination for output: %s ' %
                         (batch_outputs.shape,))
      outputs = batch_outputs
  return np.copy(outputs)
def predict_on_batch(self, X):
  """Return hard class predictions for a single batch of features.

  X: 2D feature array for one batch; padded up to batch_size when
    self.pad_batches is set.
  Returns a fresh array of predicted class indices obtained by argmax
  (via from_one_hot) over the two-class probability axis.
  """
  if self.pad_batches:
    X = pad_features(self.batch_size, X)
  if not self._restored_model:
    # lazily restore model weights on first prediction call
    self.restore()
  with self.eval_graph.graph.as_default():
    # run eval data through the model
    n_tasks = self.n_tasks
    output = []
    start = time.time()
    # NOTE(review): `start` is never read afterwards — leftover timing code.
    with self._get_shared_session(train=False).as_default():
      feed_dict = self.construct_feed_dict(X)
      data = self._get_shared_session(train=False).run(
          self.eval_graph.output, feed_dict=feed_dict)
      batch_output = np.asarray(data[:n_tasks], dtype=float)
      # transfer 2D prediction tensor to 2D x n_classes(=2):
      # sigmoid gives P(class 1); its complement is P(class 0)
      complimentary = np.ones(np.shape(batch_output))
      complimentary = complimentary - batch_output
      batch_output = np.concatenate(
          [complimentary, batch_output], axis=batch_output.ndim - 1)
      # reshape to batch_size x n_tasks x ...
      if batch_output.ndim == 3:
        batch_output = batch_output.transpose((1, 0, 2))
      elif batch_output.ndim == 2:
        batch_output = batch_output.transpose((1, 0))
      else:
        raise ValueError('Unrecognized rank combination for output: %s' %
                         (batch_output.shape,))
      output.append(batch_output)
      # collapse the one-hot class axis back to class indices
      outputs = np.array(
          from_one_hot(np.squeeze(np.concatenate(output)), axis=-1))
  return np.copy(outputs)
| mit |
purism/pdak | dak/import_users_from_passwd.py | 1 | 5063 | #!/usr/bin/env python
""" Sync PostgreSQL users with system users """
# Copyright (C) 2001, 2002, 2006 James Troup <james@nocrew.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# <aj> ARRRGGGHHH
# <aj> what's wrong with me!?!?!?
# <aj> i was just nice to some mormon doorknockers!!!
# <Omnic> AJ?!?!
# <aj> i know!!!!!
# <Omnic> I'm gonna have to kick your ass when you come over
# <Culus> aj: GET THE HELL OUT OF THE CABAL! :P
################################################################################
import pwd
import grp
import sys
import re
import apt_pkg
from daklib.config import Config
from daklib.dbconn import *
from daklib import utils
################################################################################
def usage (exit_code=0):
    """Print command-line help and terminate with exit_code."""
    print """Usage: dak import-users-from-passwd [OPTION]...
Sync PostgreSQL's users with system users.
  -h, --help                 show this help and exit
  -n, --no-action            don't do anything
  -q, --quiet                be quiet about what is being done
  -v, --verbose              explain what is being done"""
    sys.exit(exit_code)
################################################################################
def main ():
cnf = Config()
Arguments = [('n', "no-action", "Import-Users-From-Passwd::Options::No-Action"),
('q', "quiet", "Import-Users-From-Passwd::Options::Quiet"),
('v', "verbose", "Import-Users-From-Passwd::Options::Verbose"),
('h', "help", "Import-Users-From-Passwd::Options::Help")]
for i in [ "no-action", "quiet", "verbose", "help" ]:
key = "Import-Users-From-Passwd::Options::%s" % i
if key not in cnf:
cnf[key] = ""
arguments = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
Options = cnf.subtree("Import-Users-From-Passwd::Options")
if Options["Help"]:
usage()
elif arguments:
utils.warn("dak import-users-from-passwd takes no non-option arguments.")
usage(1)
session = DBConn().session()
valid_gid = cnf.get("Import-Users-From-Passwd::ValidGID", "")
if valid_gid:
debiangrp = grp.getgrnam(valid_gid).gr_mem
else:
debiangrp = []
passwd_unames = {}
for entry in pwd.getpwall():
uname = entry[0]
if uname not in debiangrp:
if Options["Verbose"]:
print "Skipping %s (Not in group %s)." % (uname, valid_gid)
continue
passwd_unames[uname] = ""
postgres_unames = {}
q = session.execute("SELECT usename FROM pg_user")
for i in q.fetchall():
uname = i[0]
postgres_unames[uname] = ""
known_postgres_unames = {}
for i in cnf.get("Import-Users-From-Passwd::KnownPostgres","").split(","):
uname = i.strip()
known_postgres_unames[uname] = ""
keys = postgres_unames.keys()
keys.sort()
for uname in keys:
if uname not in passwd_unames and uname not in known_postgres_unames:
print "I: Deleting %s from Postgres, no longer in passwd or list of known Postgres users" % (uname)
q = session.execute('DROP USER "%s"' % (uname))
keys = passwd_unames.keys()
keys.sort()
safe_name = re.compile('^[A-Za-z0-9]+$')
for uname in keys:
if uname not in postgres_unames:
if not Options["Quiet"]:
print "Creating %s user in Postgres." % (uname)
if not Options["No-Action"]:
if safe_name.match(uname):
# NB: I never figured out how to use a bind parameter for this query
# XXX: Fix this as it looks like a potential SQL injection attack to me
# (hence the safe_name match we do)
try:
q = session.execute('CREATE USER "%s"' % (uname))
session.commit()
except Exception as e:
utils.warn("Could not create user %s (%s)" % (uname, str(e)))
session.rollback()
else:
print "NOT CREATING USER %s. Doesn't match safety regex" % uname
session.commit()
#######################################################################################
if __name__ == '__main__':
main()
| gpl-2.0 |
rjaguar3/wesnoth-old | data/tools/wesnoth/libgithub.py | 33 | 24064 | # vim: tabstop=4: shiftwidth=4: expandtab: softtabstop=4: autoindent:
"""
This library provides an interface to github, the interface is build upon
the command line git tool.
"""
import logging
import os
try:
# Externally distributed, usually more up-to-date
import simplejson as json
except ImportError:
# Distributed with python since 2.6
import json
import shutil
import subprocess
import tempfile
import urllib2
#TODO: document and log where missing
class Error(StandardError):
    """Base class for exceptions in this module."""
    # NOTE: StandardError exists only on Python 2 — consistent with the
    # urllib2 import above; this module is Python 2 code.
    pass
class AddonError(Error):
    """Error raised on behalf of a specific add-on."""
    def __init__(self, addon, message):
        # Keep .args consistent with the attributes so repr() and pickling
        # behave like a standard exception.
        self.args = (addon, message)
        self.addon = addon
        self.message = message
    def __str__(self):
        # "%s" applies str() to both operands, matching the old
        # "{0}: {1}".format(str(...), str(...)) output exactly.
        return "%s: %s" % (self.addon, self.message)
class _execresult(object):
"""Store the results of GitHub._execute and Addon._execute"""
def __init__(self, out, err, returncode):
self.out = out
self.err = err
self.returncode = returncode
def __iter__(self):
yield self.out
yield self.err
yield self.returncode
class Addon(object):
    """Represents an add-on from a github directory.
    Each Addon object belongs to GitHub object and should not be created manually.
    """
    def __init__(self, github, name, readonly):
        """Initialize an Addon object.
        Do NOT use this constructor directly.
        github: Parent GitHub object that created this object.
        name: Name of the add-on that this object represents.
        readonly: Whether the add-on has been checked out over git: instead of ssh:
        """
        logging.debug("Addon created with name {0} and version {1}{2}".format(name, github.version, ". It is read-only" if readonly else ""))
        self.github = github
        self.name = name
        self.readonly = readonly
    def update(self):
        """Update this add-on (git pull), repairing the checkout when safe.
        Returns whether anything changed.
        """
        logging.debug("Updating add-on {0}".format(self.name))
        out, err, ret = self._execute(["git", "pull"], check_error=False)
        # git writes informational noise to stderr; filter the known-benign
        # lines and only raise on what is left.
        if len(err):
            real_errs = []
            for line in err.splitlines():
                if line in ["Your configuration specifies to merge with the ref 'master'", "from the remote, but no such ref was fetched."]:
                    # This means the repository has no commits yet
                    pass
                elif "From" in line or "origin/master" in line:
                    # Regular fetch stuff
                    pass
                elif "Checking out files" in line:
                    # Irregular fetch stuff
                    # not being attached to a terminal *should* squelch progress reports
                    pass
                else:
                    real_errs.append(line)
            if real_errs:
                raise AddonError(self.name, "Error pulling:\n{0}".format("\n".join(real_errs)))
        def remove_untracked():
            # Delete every path "git status" reports as untracked ("??").
            untracked = [line.replace("?? ","",1) for line in self._status() if line.startswith("??")]
            for item in untracked:
                try:
                    path = os.path.join(self.get_dir(), item)
                    if item.endswith("/"):
                        shutil.rmtree(path)
                    else:
                        os.remove(path)
                except:
                    logging.error("Failed to remove {0}".format(item))
        # Classify the pull by scraping git's human-readable output.
        if "Already up-to-date." in out:
            return False
        elif "Fast-forward" in out:
            return True
        elif "Merge made by recursive." in out:
            logging.warn("Merge done in add-on {0}.".format(self.name))
            return True
        elif "CONFLICT" in out:
            #This means that a conflicting local commit was done
            #Its author will have to fix it
            logging.error("CONFLICT in add-on {0}. Please merge".format(self.name))
            return False
        elif "local changes" in err:
            logging.error("Found local changes in add-on {0}.".format(self.name))
            # If this is a read-write repo, leave the files be
            # If it's read-only, they're not supposed to be here
            if self.readonly:
                logging.warn("Attempting to fix.")
                # Get rid of local modifications
                self._execute(["git", "reset", "--hard"], check_error=False)
                status = self._status()
                untracked = [line for line in status if "??" in line]
                # I don't want to recursively delete directories
                if len(untracked) > 0:
                    logging.warn("Untracked files found. Attempting to remove...")
                    remove_untracked()
            return False
        elif "Untracked working tree" in err:
            if self.readonly:
                logging.error("Untracked files blocking pull of {0}. Attempting to remove...".format(self.name))
                remove_untracked()
            else:
                logging.error("Untracked files blocking pull of {0}. Please remove.".format(self.name))
            return False
        elif "Your configuration specifies to merge with the ref 'master'" in err:
            logging.info("Pulled from still-empty (not initialized) repository {0}.".format(self.name))
            return False
        else:
            logging.error("Unknown pull result in add-on {0}:\nOut: {1}\nErr: {2}".format(self.name, out, err))
            return False
    def sync_from(self, src, exclude):
        """Synchronises add-on from another directory.
        src: Directory with new add-on version.
        exclude: List of files to ignore.
        Returns whether anything changed.
        Raises libgithub.Error if the checkout is not clean.
        """
        logging.debug("Syncing add-on {0} from add-on server ({1})".format(self.name, src))
        status = self._status()
        if status:
            raise AddonError(self.name, "Checkout is not clean:\n{0}".format("\n".join(status)))
        # Clear the checkout first so files deleted upstream disappear here.
        self._rmtree(".", exclude)
        #actual copying
        self._copytree(src, self.get_dir(), ignore=lambda src,names: [n for n in names if n in exclude])
        self._execute(["git", "add", "."], check_error=True)
        status = self._status()
        return len(status) > 0
    def commit(self, message):
        """Commits and pushes add-on to git repo.
        message: Commit message.
        Raises libgithub.Error if something went wrong
        """
        logging.debug("Committing and pushing add-on {0}".format(self.name))
        # Write the message to a temp file to avoid shell-quoting issues.
        tmpfile = tempfile.NamedTemporaryFile(delete=False)
        tmpfile.write(message)
        tmpfile.close()
        tmpname = tmpfile.name
        self._execute(["git", "commit", "-F", tmpname], check_error=True)
        os.remove(tmpname)
        out, err, ret = self._execute(["git", "push", "-u", "--porcelain", "origin", "master"], check_error=False)
        # --porcelain gives a machine-readable status line per pushed ref;
        # its first character encodes the outcome.
        statusline = [x for x in out.splitlines() if "refs/heads/master" in x]
        if not statusline:
            raise AddonError(self.name, "No statusline produced by git push")
        else:
            status = statusline[0][0]
            refs, summary = statusline[0][1:].split(None, 1)
            if status == " ":
                # Fast forward
                pass
            elif status == "*":
                # Freshly initiated repository
                pass
            elif status == "=":
                # Up to date?
                logging.warn("Commit to add-on {0} with message '{1}' has not made any changes".format(self.name, message))
            elif status == "!":
                raise AddonError(self.name, "Commit with message '{0}' failed for reason {1}".format(message, summary))
            else:
                raise AddonError(self.name, "Commit with message '{0}' has done something unexpected: {1}".format(message, statusline[0]))
    def get_dir(self):
        """Return the directory this add-on's checkout is in.
        """
        return os.path.join(self.github.directory, self.name)
    # Internal functions
    def _rmtree(self, directory, exclude):
        """Recursively `git rm` everything under directory except entries in
        exclude and the .git metadata directory itself."""
        logging.debug("Deleting tree {0}, except for {1}".format(self.name, ",".join(exclude)))
        # Ensure the os calls all happen in the right directory
        # not needed for _execute, as that does the cwd manipulation itself
        # so only the os.chdir and os.path.isdir here need it
        # Another option would be to os.path.join with self.get_dir
        os.chdir(self.get_dir())
        for entry in os.listdir(directory):
            if entry in exclude:
                continue
            if entry == ".git":
                continue
            relpath = os.path.join(directory, entry)
            if os.path.isdir(relpath):
                self._rmtree(relpath, exclude)
                # git rm removes directories that it empties
                if os.path.exists(relpath):
                    self._execute(["rmdir", "--ignore-fail-on-non-empty", relpath])
            else:
                self._execute(["git", "rm", relpath], check_error=True)
    def _copytree(self, src, dst, ignore=None):
        """Recursively copy a directory tree using copy2().
        Based on shutil.copytree
        """
        names = os.listdir(src)
        if ignore is not None:
            ignored_names = ignore(src, names)
        else:
            ignored_names = set()
        if not os.path.exists(dst):
            os.makedirs(dst)
        errors = []
        for name in names:
            if name in ignored_names:
                continue
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            try:
                if os.path.isdir(srcname):
                    self._copytree(srcname, dstname, ignore)
                else:
                    shutil.copy2(srcname, dstname)
                # XXX What about devices, sockets etc.?
            except (IOError, os.error) as why:
                errors.append((srcname, dstname, str(why)))
            # catch the Error from the recursive copytree so that we can
            # continue with other files
            except Error as err:
                errors.extend(err.args[0])
        try:
            shutil.copystat(src, dst)
        except OSError as why:
            if shutil.WindowsError is not None and isinstance(why, shutil.WindowsError):
                # Copying file access times may fail on Windows
                pass
            else:
                errors.extend((src, dst, str(why)))
        if errors:
            raise AddonError(self.name, "Errors attempting to sync:\n{0}".format("\n".join(errors)))
    def _status(self):
        """Return the non-empty lines of `git status --porcelain` output."""
        out, err, ret = self._execute(["git", "status", "--porcelain"])
        if err:
            raise AddonError(self.name, "Status failed with message: {0}".format(err))
        return [line for line in out.split('\n') if len(line)]
    def _execute(self, command, check_error = False):
        """Run command from inside this add-on's checkout directory."""
        return self.github._execute(command, cwd=self.get_dir(), check_error=check_error)
_GITHUB_API_BASE = "https://api.github.com/"
_GITHUB_API_REPOS = "orgs/wescamp/repos"
_GITHUB_API_TEAMS = "orgs/wescamp/teams"
# PUT /teams/:id/repos/:org/:repo
_GITHUB_API_TEAM_REPO = "teams/{0}/repos/wescamp/{1}"
# POST /repos/:user/:repo/hooks
_GITHUB_API_HOOKS = "repos/wescamp/{0}/hooks"
class GitHub(object):
    """Interface to a github checkout directory. Such a directory contains all translatable add-ons for a certain wesnoth version.
    Every GitHub object is specific to a directory and wesnoth version.
    """
    def __init__(self, directory, version, authorization=None):
        """Initializes a GitHub object.
        directory: Directory in which the git repos for this wesnoth branch live.
        version: The version of this wesnoth branch.
        authorization: Optional "user:password" string or API token used for
            authenticated GitHub API calls.
        """
        logging.debug("GitHub created with directory {0} and version {1}, {2} authentication data".format(directory, version, "with" if authorization else "without"))
        self.directory = directory
        self.version = version
        self.authorization = authorization
    def update(self):
        """Update all add-ons.
        Returns whether anything changed.
        """
        logging.debug("Updating in directory {0}".format(self.directory))
        changed = False
        changed |= self._get_new_addons()
        for addon in self._get_local_addons():
            changed |= self.addon(addon).update()
        return changed
    def addon(self, name, readonly=False):
        """Returns an add-on object for the given name.
        name: Name of the add-on.
        readonly: If set, and the add-on needs to be freshly cloned, use a read-only protocol
        Raises libgithub.Error if no such add-on exists.
        """
        logging.debug("Generating add-on object for {0}".format(name))
        if not os.path.isdir(self._absolute_path(name)):
            logging.debug("Add-on {0} not found locally, checking github.".format(name))
            github_list = self._github_repos_list(readonly=readonly)
            matches = [x for x in github_list if x[0] == name]
            if matches:
                repo = matches[0]
                self._clone(repo[0], repo[1])
            else:
                raise AddonError(name, "Add-on not found")
        return Addon(self, name, readonly)
    def create_addon(self, name):
        """Creates a new add-on on github.
        name: Name of the add-on.
        Returns an Addon object for the new add-on.
        """
        logging.debug("Creating new add-on {0}".format(name))
        response = self._github_repos_create(name)
        self._clone(name, response["ssh_url"])
        return self.addon(name)
    def addon_exists(self, name):
        """Checks whether an add-on exists on github..
        name: Name of the add-on.
        Returns a bool representing the existence of the add-on.
        """
        logging.debug("Checking whether add-on {0} exists".format(name))
        github_list = self._github_repos_list()
        return name in [repo[0] for repo in github_list]
    def list_addons(self):
        """Returns a list of valid add-on names.
        Returns a list of names that can be passed to self.addon()
        """
        logging.debug("Generating list of add-on names for version {0}".format(self.version))
        github_list = self._github_repos_list()
        return [repo[0] for repo in github_list]
    def _absolute_path(self, name):
        # Location of the named add-on's checkout inside self.directory.
        return os.path.join(self.directory, name)
    def _clone(self, name, url):
        """Clone url into this directory under name; tolerate empty repos."""
        target = self._absolute_path(name)
        out, err, ret = self._execute(["git", "clone", url, target])
        # Rather hacky
        if len(err):
            errors = [line.strip() for line in err.split('\n') if len(line)]
            got_error = False
            for error in errors:
                if error != "warning: You appear to have cloned an empty repository.":
                    got_error = True
                    break
            if got_error:
                raise AddonError(name, "Error cloning: " + err)
    def _get_new_addons(self):
        """Check out any new add-ons.
        Returns whether anything changed.
        """
        changed = False
        github_list = self._github_repos_list()
        local_list = self._get_local_addons()
        for repo in github_list:
            if repo[0] not in local_list:
                self._clone(repo[0], repo[1])
                changed = True
        return changed
    def _get_local_addons(self):
        """...
        Returns list of local add-ons.
        """
        return os.listdir(self.directory)
    # Cache for _github_repos_list; filled on the first API request.
    _github_repos_memo = None
    def _github_repos_list(self, readonly=False):
        """Get a list of repositories.
        readonly: Should the tuples have ssh urls or readonly urls.
        Returns a list of tuples that contain the add-on name and the url.
        """
        if not self._github_repos_memo:
            url = _GITHUB_API_BASE + _GITHUB_API_REPOS
            self._github_repos_memo = self._github_api_request(url)
        # Repos are named "<addon>-<version>"; strip the suffix to get the name.
        version_suffix = "-{0}".format(self.version)
        return [(repo["name"][:-len(version_suffix)], repo["git_url"] if readonly else repo["ssh_url"])
                for repo in self._github_repos_memo if repo["name"].endswith(version_suffix)]
    def _github_repos_create(self, name):
        """Create a new repository.
        name: The name of the add-on for which the repository will be created.
        Returns the parsed JSON description of the created repository.
        """
        reponame = "{0}-{1}".format(name, self.version)
        # Create the repository
        url = _GITHUB_API_BASE + _GITHUB_API_REPOS
        requestdata = { "name" : reponame }
        repodata = self._github_api_request(url, requestdata, authenticate=True)
        # Request the teams
        url = _GITHUB_API_BASE + _GITHUB_API_TEAMS
        teams = self._github_api_request(url, authenticate=True)
        # Find the right team number
        # This can probably be cleaner
        team_number = [team["id"] for team in teams if team["name"] == "Developers"][0]
        # Add the repository to the team
        # PUT /teams/:id/repos/:org/:repo
        baseurl = _GITHUB_API_BASE + _GITHUB_API_TEAM_REPO
        url = baseurl.format(team_number, reponame)
        # Github requires data for every modifying request, even if there is none
        self._github_api_request(url, data="", method="PUT", authenticate=True)
        # Add commit hook
        baseurl = _GITHUB_API_BASE + _GITHUB_API_HOOKS
        url = baseurl.format(reponame)
        requestdata = { "name" : "web", "events" : ["push"], "active" : True,
                "config" : {
                    "url" : "http://ai0867.net:6660/wescamp",
                    "content_type" : "json"
                }
            }
        self._github_api_request(url, requestdata, authenticate=True)
        return repodata
    def _github_api_request(self, url, data=None, method=None, authenticate=False):
        """Perform one GitHub API call, following pagination Link headers.
        Returns parsed JSON (None on 204 No Content).
        Raises libgithub.Error on network or API failure.
        """
        logging.debug("Making github API request {0}".format(url))
        request = urllib2.Request(url)
        if method:
            request.get_method = lambda: method
        if data == "":
            # Workaround for PUTs requiring data, even if you have nothing to pass
            request.add_data(data)
        elif data:
            request.add_data(json.dumps(data))
        # Manually adding authentication data
        # Basic works in curl, but urllib2 doesn't
        # probably because github's API doesn't send a www-authenticate header
        if authenticate or self._github_have_authorization():
            from base64 import encodestring
            auth = self._github_authorization()
            if ":" in auth:
                # username:password
                base64string = encodestring(auth).replace('\n','')
                request.add_header("Authorization", "Basic {0}".format(base64string))
            else:
                # token
                request.add_header("Authorization", "Bearer {0}".format(auth))
        try:
            response = urllib2.urlopen(request)
        except IOError as e:
            raise Error("GitHub API failure: " + str(e))
        if response.code == 204:
            # 204 = No content
            return None
        json_parsed = json.load(response)
        link_headers = response.info().getallmatchingheaders("Link")
        if link_headers:
            logging.debug("Found a Link header in response, analyzing...")
            link_header = link_headers[0].lstrip("Link:")
            links_raw = link_header.split(",")
            links_split_raw = [link.split(";") for link in links_raw]
            links_split_proc = [(l[1].strip().lstrip('rel="').rstrip('"'), l[0].strip().lstrip("<").rstrip(">")) for l in links_split_raw]
            links_dict = dict((k,v) for (k,v) in links_split_proc)
            if "next" in links_dict:
                logging.debug("Link with rel=\"next\" found, recursing to deal with pagination")
                rest = self._github_api_request(links_dict["next"], data, method, authenticate)
                json_parsed += rest
        return json_parsed
    def _github_have_authorization(self):
        # Whether credentials were supplied at construction time.
        return self.authorization != None
    def _github_authorization(self):
        """Return the stored credentials; raise if none were supplied."""
        if self.authorization:
            return self.authorization
        else:
            raise Error("Authentication required")
    def _execute(self, command, cwd=None, check_error=False):
        #TODO: have an errorcheck that actually checks the returncode?
        """Executes a command.
        command: The command to execute.
        cwd: Directory to execute the command from.
        check_error: Whether to raise an exception if there's stderr output.
        Returns stdout, stderr.
        Raises libgithub.Error if check_error and len(err).
        """
        logging.debug("execute command = '%s'", command)
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=cwd)
        out = ""
        err = ""
        # Drain both pipes while the process runs, then once more after exit.
        while(p.poll() == None):
            out += p.stdout.read()
            err += p.stderr.read()
        out += p.stdout.read()
        err += p.stderr.read()
        logging.debug("===== stdout ====\n%s\n===== stdout ====", out)
        logging.debug("===== stderr ====\n%s\n===== stderr ====", err)
        if check_error and len(err):
            raise Error("Failure executing command '{0}': {1}".format(" ".join(command), err))
        return _execresult(out, err, p.returncode)
def _gen(possible_dirs):
    """Generator that yields one shared build-system Addon forever.

    Locates a valid existing build-system checkout among possible_dirs, or
    clones a fresh read-only one into a temporary directory. Every next()
    yields the same Addon; when the generator is closed, a temporary clone
    is deleted again.
    """
    def _get_build_system(possible_dirs):
        # Returns (addon, fresh): fresh is True when a temp clone was made.
        logging.debug("get_build_system with paths: %s", ";".join(possible_dirs))
        if not isinstance(possible_dirs, list):
            raise Error("Incorrect argument type passed, {0} instead of {1}".format(str(type(possible_dirs)), str(list)))
        def is_good_checkout(addon):
            # A valid checkout has the wescamp/build-system remote configured.
            try:
                out, err, ret = addon._execute(["git", "remote", "-v"], check_error=True)
                test = "wescamp/build-system"
                return test in out
            except:
                return False
        for path in possible_dirs:
            base, rest = os.path.split(path.rstrip(os.sep))
            # Wrap the candidate path in throwaway GitHub/Addon objects so we
            # can reuse Addon's git plumbing to inspect it.
            fake_github = GitHub(base, "system")
            fake_build = Addon(fake_github, rest, True)
            if is_good_checkout(fake_build):
                logging.debug("Found {0} to be valid build-system checkout".format(path))
                return fake_build, False
            else:
                logging.debug("Discarded possible checkout {0}".format(path))
        logging.debug("No candidates left, creating new checkout")
        realish_github = GitHub(tempfile.mkdtemp(),"system")
        build_system = realish_github.addon("build", readonly=True)
        return build_system, True
    try:
        bs, fresh = _get_build_system(possible_dirs)
        bs.update()
    except Error as e:
        # Exception to make sure nobody catches it
        # Use raise ... from syntax in python3
        import sys
        raise Exception(str(e)), None, sys.exc_info()[2]
    # Add references to shutil and os to ensure we're destructed before they are
    stored_shutil = shutil
    stored_os = os
    try:
        while True:
            # Don't make a fresh clone every call
            yield bs
    except GeneratorExit:
        # Clean up our temporary clone
        if fresh:
            stored_shutil.rmtree(bs.get_dir())
            stored_os.rmdir(os.path.dirname(bs.get_dir()))
_g = None
def get_build_system(possible_dirs=[]):
    """Create a special 'add-on', containing the wescamp build system.
    possible_dirs: List of paths to possible existing.
    Returns: The Addon object of the build-system
    """
    # NOTE(review): the mutable default argument is only read, and only on
    # the first call thanks to the _g memo — still worth confirming no caller
    # mutates it.
    global _g
    if _g == None:
        _g = _gen(possible_dirs)
    # .next() is the Python 2 iterator protocol; next(_g) in Python 3.
    return _g.next()
| gpl-2.0 |
fjalir/odie-server | routes/common.py | 1 | 1467 | #! /usr/bin/env python3
import config
from login import get_user
from functools import partial
from marshmallow import fields, Schema
from marshmallow.utils import missing
from marshmallow.validate import OneOf
class IdSchema(Schema):
    """Minimal schema exposing just a required integer ``id`` field."""
    id = fields.Int(required=True)
class DocumentDumpSchema(IdSchema):
    """Serialization schema for document dumps.

    Hides ``submitted_by`` from unauthenticated clients.
    """
    department = fields.Str()
    lectures = fields.List(fields.Nested(IdSchema))
    examinants = fields.List(fields.Nested(IdSchema))
    date = fields.Date()
    number_of_pages = fields.Int()
    solution = fields.Str()
    comment = fields.Str()
    document_type = fields.Str()
    # serialized from the model's has_file attribute
    available = fields.Boolean(attribute='has_file')
    validated = fields.Boolean()
    validation_time = fields.Date()
    # computed by scrub_submitted_by below
    submitted_by = fields.Method('scrub_submitted_by')
    early_document_eligible = fields.Integer()
    deposit_return_eligible = fields.Integer()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # cache DB operation: resolve the session user once per schema instance
        self._authenticated = bool(get_user())

    def scrub_submitted_by(self, obj):
        # `missing` makes marshmallow omit the field entirely for anonymous users.
        return obj.submitted_by if self._authenticated else missing
# Reusable required-string field factories whose values are restricted to the
# cash boxes / printers declared for any office in the deployment config.
CashBoxField = partial(fields.Str, required=True, validate=OneOf([cash_box for office in config.FS_CONFIG['OFFICES'].values() for cash_box in office['cash_boxes']]))
PrinterField = partial(fields.Str, required=True, validate=OneOf([printer for office in config.FS_CONFIG['OFFICES'].values() for printer in office['printers']]))
| mit |
antidotcb/googlemock | scripts/upload.py | 2511 | 51024 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.
  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best effort: an unreadable cache just means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Best effort: failing to cache the address is non-fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  The message is suppressed when the module-level 'verbosity' is 0
  (errors only).

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit the process with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # 'args' is the key=value dict parsed from the ClientLogin error body.
    # NOTE(review): this replaces the standard exception 'args' tuple.
    self.args = args
    # ClientLogin reports the failure kind under the "Error" key
    # (e.g. "BadAuthentication", "CaptchaRequired"); _Authenticate
    # dispatches on this value.
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    # NOTE(review): extra_headers has a mutable default dict shared across
    # instances; safe only as long as nothing mutates it.
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # The opener is supplied by the subclass (see _GetOpener).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request with the Host override and extra headers
    applied."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The response body is newline-separated key=value pairs.
      # NOTE(review): split("=") has no maxsplit here, unlike the error path
      # below; a value containing '=' would be mis-parsed — confirm tokens
      # never contain '='.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # ClientLogin reports authentication failures as 403 with a
        # key=value body describing the error.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener has no redirect handler (see HttpRpcServer._GetOpener),
      # so a successful login surfaces here as the raw 302 response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Give the user up to three attempts at a good password; other
    # ClientLogin failures are reported and abort immediately.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        # Unrecognized ClientLogin error: propagate.
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # The timeout is applied process-wide (socket default) for the duration
    # of this call and restored afterwards.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Stale or missing auth cookie: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # Build the opener by hand (rather than urllib2.build_opener) so that no
    # HTTPRedirectHandler is installed: _GetAuthCookie must observe the raw
    # 302 response from /_ah/login.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file means we may already hold a valid session.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600 (credentials secrecy).
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line option definitions, grouped by concern (logging, review
# server, issue metadata, patch handling).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: Parsed command-line options (server, host, email, save_cookies).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    # The dev_appserver accepts a synthetic login cookie directly; no
    # ClientLogin round-trip is needed.
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server
  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for name, value in fields:
    # Each plain field is a boundary, a disposition header, a blank line,
    # then the value.
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  for name, filename, value in files:
    # File parts additionally carry the filename and a guessed Content-Type.
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
            (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  parts.extend(['--' + BOUNDARY + '--', ''])
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Guess the MIME type from *filename*; default to a binary octet stream."""
  guessed = mimetypes.guess_type(filename)[0]
  if guessed:
    return guessed
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (Elsewhere the argument list is executed directly, without a shell.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line while also accumulating it for the caller.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is drained only after wait(); a child writing a very
  # large amount to stderr could fill the pipe and block — confirm the
  # invoked VCS commands keep stderr small.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Run *command* and return its stdout; abort the program on failure.

  A non-zero exit status always aborts via ErrorExit.  Empty output also
  aborts unless silent_ok is True.
  """
  output, returncode = RunShellWithReturnCode(
      command, print_output, universal_newlines)
  if returncode:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not output and not silent_ok:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      # Anything other than a literal "y" aborts the upload.
      if answer != "y":
        ErrorExit("User aborted")
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Oversized content is replaced by an empty payload plus the
      # "file_too_large" flag so the server still records filename/status.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      # NOTE(review): the 'md5' module is deprecated in favor of hashlib on
      # modern Pythons — confirm the target interpreter still ships it.
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)
    # Invert patch_list (pairs of (file_id_str, filename)) into a
    # filename -> file_id_str map.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # Ids marked "nobase" mean the base must not be uploaded; the numeric
      # id follows the final underscore.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""
  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accepts "REV" or "REV:REV"; rev_end stays None for a single revision.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # A few known hosts get special-cased web-view URL forms.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              # Drop the "/projects" prefix (9 chars), keeping the slash.
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            # Drop the "/repos" prefix (6 chars), keeping the slash.
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
  def GenerateDiff(self, args):
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    # Count per-file sections to reject empty/garbage diffs.
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data
  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date':                ['Date', 'LastChangedDate'],
      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
      'Author':              ['Author', 'LastChangedBy'],
      'HeadURL':             ['HeadURL', 'URL'],
      'Id':                  ['Id'],
      # Aliases
      'LastChangedDate':     ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy':       ['LastChangedBy', 'Author'],
      'URL':                 ['URL', 'HeadURL'],
    }
    def repl(m):
      # "$Keyword:: value $" (fixed-width form) keeps its padding; the plain
      # "$Keyword: value $" form collapses to "$Keyword$".
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
  def GetUnknownFiles(self):
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      # '?' in column 0 marks an unversioned item.
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result
  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        # Cache both listings per directory so each dir is listed only once.
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Membership in the old/new listings decides Deleted/Modified/Added.
      # The synthesized strings are padded so GetBaseFile can index
      # status[3] like real "svn st" output.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status
  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary bases must be fetched byte-exact; text bases may have
        # newlines normalized.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          # Collapse expanded SVN keywords so the diff applies cleanly.
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""
  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}
  def GenerateDiff(self, extra_args):
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    raw_diff = RunShell(["git", "diff", "--full-index"] + extra_args)
    converted = []
    seen_files = 0
    current_file = None
    for line in raw_diff.splitlines():
      header = re.match(r"diff --git a/(.*) b/.*$", line)
      if header is not None:
        # A new per-file section: replace the git header with an svn-style
        # "Index:" line.
        seen_files += 1
        current_file = header.group(1)
        converted.append("Index: %s\n" % current_file)
        continue
      # The "index" line in a git diff looks like this (long hashes elided):
      #   index 82c0d44..b2cee3f 100755
      # We want to save the left hash, as that identifies the base file.
      index_line = re.match(r"index (\w+)\.\.", line)
      if index_line is not None:
        self.base_hashes[current_file] = index_line.group(1)
      converted.append(line + "\n")
    if not seen_files:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(converted)
  def GetUnknownFiles(self):
    """Return untracked (but not ignored) paths reported by git."""
    out = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                   silent_ok=True)
    return out.splitlines()
  def GetBaseFile(self, filename):
    blob_hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if blob_hash != "0" * 40:
      # A real blob hash: the file existed before; fetch its old contents.
      status = "M"
      base_content, returncode = RunShellWithReturnCode(
          ["git", "show", blob_hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % blob_hash)
    else:
      # All-zero hash indicates no base file (newly added).
      status = "A"
      base_content = ""
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""
  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the working directory's parent changeset id.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")
  def GenerateDiff(self, extra_args):
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      # "hg status" prints "<status-char> <path>"; '?' means unknown.
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files
  def GetBaseFile(self, filename):
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      # NOTE(review): this file handle is never explicitly closed.
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      # Only binary images need the new content uploaded alongside the base.
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
      data: A string containing the output of svn diff.

    Returns:
      A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
    """
    patches = []
    current_name = None
    current_lines = []
    for line in data.splitlines(True):
        next_name = None
        if line.startswith('Index:'):
            next_name = line.split(':', 1)[1].strip()
        elif line.startswith('Property changes on:'):
            # When a file is modified, paths use '/' between directories,
            # however when a property is modified '\' is used on Windows.
            # Normalize so the same file does not show up twice.
            prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
            if prop_name != current_name:
                # File has property changes but no content modifications:
                # start a new per-file diff for it.
                next_name = prop_name
        if next_name:
            # Flush the diff collected so far before starting the next file.
            if current_name and current_lines:
                patches.append((current_name, ''.join(current_lines)))
            current_name = next_name
            current_lines = [line]
            continue
        current_lines.append(line)
    if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
    return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
    """Uploads a separate patch for each file in the diff output.

    Returns a list of [patch_key, filename] for each file.
    """
    # Each patch is a (filename, diff_text) tuple produced by SplitPatch().
    patches = SplitPatch(data)
    rv = []
    for patch in patches:
        # Skip oversized per-file patches instead of failing the whole upload.
        if len(patch[1]) > MAX_UPLOAD_SIZE:
            print ("Not uploading the patch for " + patch[0] +
                   " because the file is too large.")
            continue
        form_fields = [("filename", patch[0])]
        if not options.download_base:
            # Tell the server that base file contents will be uploaded later.
            form_fields.append(("content_upload", "1"))
        files = [("data", "data.diff", patch[1])]
        ctype, body = EncodeMultipartFormData(form_fields, files)
        url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
        print "Uploading patch for " + patch[0]
        response_body = rpc_server.Send(url, body, content_type=ctype)
        # Expected server reply: first line "OK", second line the patch key.
        lines = response_body.splitlines()
        if not lines or lines[0] != "OK":
            StatusUpdate(" --> %s" % response_body)
            sys.exit(1)
        rv.append([lines[1], patch[0]])
    return rv
def GuessVCS(options):
    """Helper to guess the version control system.

    This examines the current directory, guesses which VersionControlSystem
    we're using, and returns an instance of the appropriate class. Exit with an
    error if we can't figure it out.

    Returns:
      A VersionControlSystem instance. Exits if the VCS can't be guessed.
    """
    # Mercurial has a command to get the base directory of a repository
    # Try running it, but don't die if we don't have hg installed.
    # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
    try:
        out, returncode = RunShellWithReturnCode(["hg", "root"])
        if returncode == 0:
            return MercurialVCS(options, out.strip())
    # Python 2-only tuple-unpacking except clause: errno/message come from the
    # OSError args.
    except OSError, (errno, message):
        if errno != 2:  # ENOENT -- they don't have hg installed.
            raise
    # Subversion has a .svn in all working directories.
    if os.path.isdir('.svn'):
        logging.info("Guessed VCS = Subversion")
        return SubversionVCS(options)
    # Git has a command to test if you're in a git tree.
    # Try running it, but don't die if we don't have git installed.
    try:
        out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                                  "--is-inside-work-tree"])
        if returncode == 0:
            return GitVCS(options)
    except OSError, (errno, message):
        if errno != 2:  # ENOENT -- they don't have git installed.
            raise
    ErrorExit(("Could not guess version control system. "
               "Are you in a working copy directory?"))
def RealMain(argv, data=None):
    """The real main function.

    Args:
      argv: Command line arguments.
      data: Diff contents. If None (default) the diff is generated by
        the VersionControlSystem implementation returned by GuessVCS().

    Returns:
      A 2-tuple (issue id, patchset id).
      The patchset id is None if the base files are not uploaded by this
      script (applies only to SVN checkouts).
    """
    logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                                "%(lineno)s %(message)s "))
    # Force a predictable locale so VCS tool output can be parsed reliably.
    os.environ['LC_ALL'] = 'C'
    options, args = parser.parse_args(argv[1:])
    global verbosity
    verbosity = options.verbose
    if verbosity >= 3:
        logging.getLogger().setLevel(logging.DEBUG)
    elif verbosity >= 2:
        logging.getLogger().setLevel(logging.INFO)
    vcs = GuessVCS(options)
    if isinstance(vcs, SubversionVCS):
        # base field is only allowed for Subversion.
        # Note: Fetching base files may become deprecated in future releases.
        base = vcs.GuessBase(options.download_base)
    else:
        base = None
    if not base and options.download_base:
        # NOTE(review): this assignment is a no-op (download_base is already
        # true in this branch); it seems to exist only to emit the log line.
        options.download_base = True
        logging.info("Enabled upload of base file")
    if not options.assume_yes:
        vcs.CheckForUnknownFiles()
    if data is None:
        data = vcs.GenerateDiff(args)
    files = vcs.GetBaseFiles(data)
    if verbosity >= 1:
        print "Upload server:", options.server, "(change with -s/--server)"
    if options.issue:
        prompt = "Message describing this patch set: "
    else:
        prompt = "New issue subject: "
    # Fall back to interactive prompt if no -m/--message was given.
    message = options.message or raw_input(prompt).strip()
    if not message:
        ErrorExit("A non-empty message is required")
    rpc_server = GetRpcServer(options)
    form_fields = [("subject", message)]
    if base:
        form_fields.append(("base", base))
    if options.issue:
        form_fields.append(("issue", str(options.issue)))
    if options.email:
        form_fields.append(("user", options.email))
    if options.reviewers:
        # Minimal address sanity check: domain must contain exactly one dot.
        for reviewer in options.reviewers.split(','):
            if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
                ErrorExit("Invalid email address: %s" % reviewer)
        form_fields.append(("reviewers", options.reviewers))
    if options.cc:
        for cc in options.cc.split(','):
            if "@" in cc and not cc.split("@")[1].count(".") == 1:
                ErrorExit("Invalid email address: %s" % cc)
        form_fields.append(("cc", options.cc))
    description = options.description
    if options.description_file:
        if options.description:
            ErrorExit("Can't specify description and description_file")
        file = open(options.description_file, 'r')
        description = file.read()
        file.close()
    if description:
        form_fields.append(("description", description))
    # Send a hash of all the base file so the server can determine if a copy
    # already exists in an earlier patchset.
    base_hashes = ""
    for file, info in files.iteritems():
        if not info[0] is None:
            checksum = md5.new(info[0]).hexdigest()
            if base_hashes:
                base_hashes += "|"
            base_hashes += checksum + ":" + file
    form_fields.append(("base_hashes", base_hashes))
    # If we're uploading base files, don't send the email before the uploads, so
    # that it contains the file status.
    if options.send_mail and options.download_base:
        form_fields.append(("send_mail", "1"))
    if not options.download_base:
        form_fields.append(("content_upload", "1"))
    if len(data) > MAX_UPLOAD_SIZE:
        print "Patch is large, so uploading file patches separately."
        uploaded_diff_file = []
        form_fields.append(("separate_patches", "1"))
    else:
        uploaded_diff_file = [("data", "data.diff", data)]
    ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
    response_body = rpc_server.Send("/upload", body, content_type=ctype)
    patchset = None
    if not options.download_base or not uploaded_diff_file:
        # Server reply: message line, patchset id line, then one line per patch.
        lines = response_body.splitlines()
        if len(lines) >= 2:
            msg = lines[0]
            patchset = lines[1].strip()
            patches = [x.split(" ", 1) for x in lines[2:]]
        else:
            msg = response_body
    else:
        msg = response_body
    StatusUpdate(msg)
    if not response_body.startswith("Issue created.") and \
       not response_body.startswith("Issue updated."):
        sys.exit(0)
    # The issue id is the trailing path component of the reported URL.
    issue = msg[msg.rfind("/")+1:]
    if not uploaded_diff_file:
        result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
        if not options.download_base:
            patches = result
    if not options.download_base:
        vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
    if options.send_mail:
        rpc_server.Send("/" + issue + "/mail", payload="")
    return issue, patchset
def main():
    """Entry point: run RealMain and turn Ctrl-C into a clean exit."""
    try:
        RealMain(sys.argv)
    except KeyboardInterrupt:
        print
        StatusUpdate("Interrupted.")
        sys.exit(1)


if __name__ == "__main__":
    main()
| bsd-3-clause |
eoghanmurray/jsonpickle_prev | tests/benchmark.py | 1 | 1321 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- 7oars.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import sys
import timeit

# True on Python 2.5 or older, where the stdlib `json` module is unavailable.
IS_25_DOWN = sys.version_info[:2] <= (2, 5)

# Number of encode/decode round-trips per timing run.
number = 1000

# Benchmark statement: parse an RSS document with feedparser, then round-trip
# it through jsonpickle using the cjson backend, sanity-checking one field.
cjson = """\
import feedparser
import jsonpickle
import jsonpickle.tests.thirdparty_tests as test
doc = feedparser.parse(test.RSS_DOC)
jsonpickle.set_preferred_backend('cjson')
pickled = jsonpickle.encode(doc)
unpickled = jsonpickle.decode(pickled)
if doc['feed']['title'] != unpickled['feed']['title']:
    print 'Not a match'
"""
print 'Using cjson'
cjson_test = timeit.Timer(stmt=cjson)
print "%.9f sec/pass " % (cjson_test.timeit(number=number) / number)

# Same benchmark against the stdlib json backend (simplejson on <= 2.5).
mod = 'json'
if IS_25_DOWN:
    mod = 'simplejson'
json = """\
import feedparser
import jsonpickle
import jsonpickle.tests.thirdparty_tests as test
doc = feedparser.parse(test.RSS_DOC)
jsonpickle.set_preferred_backend('%s')
pickled = jsonpickle.encode(doc)
unpickled = jsonpickle.decode(pickled)
if doc['feed']['title'] != unpickled['feed']['title']:
    print 'Not a match'
""" % mod
print 'Using %s' % mod
json_test = timeit.Timer(stmt=json)
print "%.9f sec/pass " % (json_test.timeit(number=number) / number)
| bsd-3-clause |
bverdu/onDemand | upnpy_spyne/services/test.py | 1 | 6017 | # encoding: utf-8
'''
Created on 17 mars 2015
@author: Bertrand Verdu
'''
import sys
from spyne.client import Service
from spyne.client import RemoteProcedureBase
from spyne.client import ClientBase
from spyne.application import Application
from spyne.decorator import rpc
from spyne.service import ServiceBase
from spyne.model.primitive import Integer
from spyne.model.primitive import Unicode
from spyne.model.complex import Iterable
from spyne.protocol.soap import Soap11
from spyne.server.twisted import TwistedWebResource
from spyne.client.twisted import TwistedHttpClient, _Producer, _Protocol
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web import error as werror
from twisted.python import log
from upnpy_spyne.services.templates import contentdirectory
from upnpy_spyne.utils import didl_decode
test_ok = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?><s:Envelope s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"><s:Body><u:Browse xmlns:u="urn:schemas-upnp-org:service:ContentDirectory:1"><ObjectID>0</ObjectID><BrowseFlag>BrowseDirectChildren</BrowseFlag><Filter>*</Filter><StartingIndex>0</StartingIndex><RequestedCount>16</RequestedCount><SortCriteria></SortCriteria></u:Browse></s:Body></s:Envelope>'''
class MyClass(ServiceBase):
    '''
    Scratch/demo spyne service used by the test code below.
    '''

    def __new__(self):
        # NOTE(review): this __new__ implicitly returns None, so MyClass()
        # evaluates to None and __init__ below is never invoked (Python only
        # calls __init__ when __new__ returns an instance of the class).
        # Looks intentional only as an experiment -- confirm before reuse.
        print('new')

    def __init__(self):
        print('init !')
        super(MyClass, self).__init__()

    @rpc(Unicode, Integer, _returns=Iterable(Unicode))
    def say_hello(ctx, name, times):  # @NoSelf
        # Stub: the working implementation lives in the titi subclass.
        pass
        # for i in range(times):
        #     yield 'Hello, %s' % name
class titi(MyClass):
    # NOTE(review): the parentheses do not make a tuple -- this is simply the
    # string 'do'. It is shadowed by the instance attribute set in __init__.
    player = ('do')

    def __init__(self):
        print('toto')
        self.player = 're'

    @rpc(Unicode, Integer, _returns=Iterable(Unicode))
    def say_hello(ctx, name, times):  # @NoSelf
        # Spyne RPC: yield one greeting per requested repetition.
        for i in range(times):  # @UnusedVariable
            yield 'Hello, %s' % name
# Spyne application exposing the titi service over SOAP 1.1 (lxml-validated
# input).
application = Application([titi],
                          tns='spyne.examples.hello',
                          in_protocol=Soap11(validator='lxml'),
                          out_protocol=Soap11())
class Custom_RemoteProcedure(RemoteProcedureBase):
    """RemoteProcedureBase variant that sends the SOAP request through a
    twisted.web Agent, adding a UPnP-style quoted SOAPACTION header."""

    def __call__(self, *args, **kwargs):
        # there's no point in having a client making the same request more than
        # once, so if there's more than just one context, it's rather a bug.
        # The comma-in-assignment trick is a pedantic way of getting the first
        # and the only variable from an iterable. so if there's more than one
        # element in the iterable, it'll fail miserably.
        self.ctx, = self.contexts
        self.get_out_object(self.ctx, args, kwargs)
        self.get_out_string(self.ctx)
        # self.ctx.out_string[0] = self.ctx.out_string[0].replace('tns:', '').replace('tns=', '')
        # self.ctx.out_string[0] = test_ok
        self.ctx.in_string = []
        header = {'User-Agent': ['onDemand Controller']}
        if self.ctx.out_header is not None:
            if 'Soapaction' in self.ctx.out_header:
                # UPnP expects: SOAPACTION: "<service-type>#<action-name>"
                self.ctx.out_header.update(
                    {'Soapaction': [
                        '"' + self.ctx.out_header['Soapaction'][0] +
                        '#' + self.ctx.method_request_string + '"']})
            header.update(self.ctx.out_header)
        agent = Agent(reactor)
        d = agent.request(
            'POST', self.url,
            Headers(header),
            _Producer(self.ctx.out_string)
        )

        def _process_response(_, response):
            # this sets ctx.in_error if there's an error, and ctx.in_object if
            # there's none.
            print(response.code)
            self.get_in_object(self.ctx)
            if self.ctx.in_error is not None:
                log.err(self.ctx.in_error)
                # raise self.ctx.in_error
            elif response.code >= 400:
                log.err(werror.Error(response.code))
            return self.ctx.in_object

        def _cb_request(response):
            # Stream the HTTP body into the protocol, then decode the SOAP
            # envelope once the body deferred fires.
            p = _Protocol(self.ctx)
            response.deliverBody(p)
            return p.deferred.addCallback(_process_response, response)

        d.addCallback(_cb_request)
        return d
class Client(ClientBase):
    """SOAP client whose remote calls run through Custom_RemoteProcedure."""

    def __init__(self, url, app):
        super(Client, self).__init__(url, app)
        # Replace the default service factory with our twisted-based one.
        self.service = Service(Custom_RemoteProcedure, url, app)
def show(res):
    """Debug callback: dump a Browse result, then schedule reactor shutdown."""
    log.msg('result: %s' % res)
    print(dir(res))
    if res:
        for i, r in enumerate(res):
            print('%s --> %s' % (i, r))
        # res.Result is assumed to hold a DIDL-Lite XML document -- decode it
        # into individual content items.
        for item in didl_decode(res.Result):
            print(item)
    reactor.callLater(2, reactor.stop)  # @UndefinedVariable
if __name__ == '__main__':
    log.startLogging(sys.stdout)

    def test():
        # Build a SOAP client against a hard-coded UPnP ContentDirectory
        # endpoint on the local network.
        client = Client(
            'http://192.168.0.134:58645/dev/106c66cc-d1be-6466-0000-00000156d879/svc/upnp-org/ContentDirectory/action', Application(
                [contentdirectory.ContentDirectory],
                contentdirectory.ContentDirectory.tns,
                in_protocol=Soap11(),
                out_protocol=Soap11()))
        client.set_options(
            out_header={
                'Content-Type': ['text/xml;charset="utf-8"'],
                'Soapaction': [contentdirectory.ContentDirectory.tns]})
        # Browse the root container ("0"); show() prints the result and stops
        # the reactor.
        d = client.service.Browse('0', 'BrowseDirectChildren', '*', 0, 0, '')
        d.addCallback(show)
        # client2 = od_TwistedHttpClient('http://127.0.0.1:8000', application)
        # d = client2.service.say_hello('word', 5)
        # d.addCallback(show)

    # resource = TwistedWebResource(application)
    # site = Site(resource)
    # reactor.listenTCP(8000, site, interface='0.0.0.0')  # @UndefinedVariable
    reactor.callWhenRunning(test)  # @UndefinedVariable
    # print(client.service.say_hello('word', 5))
    reactor.run()  # @UndefinedVariable
| agpl-3.0 |
davidzchen/tensorflow | tensorflow/python/kernel_tests/draw_bounding_box_op_test.py | 26 | 5086 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for draw_bounding_box_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DrawBoundingBoxOpTest(test.TestCase):

  def _fillBorder(self, image, color):
    """Fill the border of the image.

    Args:
      image: Numpy array of shape [height, width, depth].
      color: Numpy color of shape [depth] and either contents RGB/RGBA.

    Returns:
      image of original shape with border filled with "color".

    Raises:
      ValueError: Depths of image and color don"t match.
    """
    height, width, depth = image.shape
    if depth != color.shape[0]:
      raise ValueError("Image (%d) and color (%d) depths must match." %
                       (depth, color.shape[0]))
    # Paint the four one-pixel-wide edges (mutates `image` in place and
    # returns it).
    image[0:height, 0, 0:depth] = color
    image[0:height, width - 1, 0:depth] = color
    image[0, 0:width, 0:depth] = color
    image[height - 1, 0:width, 0:depth] = color
    return image

  def _testDrawBoundingBoxColorCycling(self, img, colors=None):
    """Tests if cycling works appropriately.

    Args:
      img: 3-D numpy image on which to draw.
      colors: optional color table; when None the table hard-coded in
        draw_bounding_box_op.cc is replicated here.
    """
    color_table = colors
    if colors is None:
      # THIS TABLE MUST MATCH draw_bounding_box_op.cc
      color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1],
                                [0, 1, 0, 1], [0.5, 0, 0.5,
                                               1], [0.5, 0.5, 0, 1],
                                [0.5, 0, 0, 1], [0, 0, 0.5, 1], [0, 1, 1, 1],
                                [1, 0, 1, 1]])
    assert len(img.shape) == 3
    depth = img.shape[2]
    assert depth <= color_table.shape[1]
    assert depth == 1 or depth == 3 or depth == 4
    ## Set red channel to 1 if image is GRY.
    if depth == 1:
      color_table[:, 0] = 1
    num_colors = color_table.shape[0]
    # Draw num_boxes identical full-image boxes and check the op picks the
    # expected color, wrapping around the table after num_colors boxes.
    for num_boxes in range(1, num_colors + 2):
      # Generate draw_bounding_box_op drawn image
      image = np.copy(img)
      color = color_table[(num_boxes - 1) % num_colors, 0:depth]
      # Expected result: the border of the image filled with `color`.
      test_drawn_image = self._fillBorder(image, color)
      bboxes = np.asarray([0, 0, 1, 1])
      bboxes = np.vstack([bboxes for _ in range(num_boxes)])
      bboxes = math_ops.cast(bboxes, dtypes.float32)
      bboxes = array_ops.expand_dims(bboxes, 0)
      image = ops.convert_to_tensor(image)
      image = image_ops_impl.convert_image_dtype(image, dtypes.float32)
      image = array_ops.expand_dims(image, 0)
      image = image_ops.draw_bounding_boxes(image, bboxes, colors=colors)
      with self.cached_session(use_gpu=False) as sess:
        op_drawn_image = np.squeeze(sess.run(image), 0)
      self.assertAllEqual(test_drawn_image, op_drawn_image)

  def testDrawBoundingBoxRGBColorCycling(self):
    """Test if RGB color cycling works correctly."""
    image = np.zeros([10, 10, 3], "float32")
    self._testDrawBoundingBoxColorCycling(image)

  def testDrawBoundingBoxRGBAColorCycling(self):
    """Test if RGBA color cycling works correctly."""
    image = np.zeros([10, 10, 4], "float32")
    self._testDrawBoundingBoxColorCycling(image)

  def testDrawBoundingBoxGRY(self):
    """Test if drawing bounding box on a GRY image works."""
    image = np.zeros([4, 4, 1], "float32")
    self._testDrawBoundingBoxColorCycling(image)

  def testDrawBoundingBoxRGBColorCyclingWithColors(self):
    """Test if RGB color cycling works correctly with provided colors."""
    image = np.zeros([10, 10, 3], "float32")
    colors = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [0.5, 0, 0.5, 1],
                         [0.5, 0.5, 0, 1], [0, 1, 1, 1], [1, 0, 1, 1]])
    self._testDrawBoundingBoxColorCycling(image, colors=colors)

  def testDrawBoundingBoxRGBAColorCyclingWithColors(self):
    """Test if RGBA color cycling works correctly with provided colors."""
    image = np.zeros([10, 10, 4], "float32")
    colors = np.asarray([[0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1], [0.5, 0, 0, 1],
                         [0, 0, 0.5, 1]])
    self._testDrawBoundingBoxColorCycling(image, colors=colors)
if __name__ == "__main__":
test.main()
| apache-2.0 |
pyq881120/pupy | pupy/pupylib/PupyCmd.py | 16 | 20773 | # --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
import sys
import readline
import cmd
import shlex
import string
import re
import os
import os.path
import traceback
try:
import ConfigParser as configparser
except ImportError:
import configparser
import random
import code
try:
import __builtin__ as builtins
except ImportError:
import builtins
from multiprocessing.pool import ThreadPool
import time
import logging
import traceback
import rpyc
import rpyc.utils.classic
from .PythonCompleter import PupyCompleter
from .PupyErrors import PupyModuleExit, PupyModuleError
from .PupyModule import PupyArgumentParser
from .PupyJob import PupyJob
import argparse
from pupysh import __version__
import copy
from functools import partial
BANNER="""
_____ _ _ _
___ ___ | _ |_ _ ___ _ _ ___| |_ ___| | | ___ ___
|___|___| | __| | | . | | | |_ -| | -_| | | |___|___|
|__| |___| _|_ | |___|_|_|___|_|_|
|_| |___|
%s
"""%__version__
def color_real(s, color, prompt=False, colors_enabled=True):
    """color a string using ansi escape characters. set prompt to true to add
    marks for readline to see invisible portions of the prompt
    cf. http://stackoverflow.com/questions/9468435/look-how-to-fix-column-calculation-in-python-readline-if-use-color-prompt"""
    if s is None:
        return ""
    text = str(s)
    if not colors_enabled:
        return text
    # \001/\002 tell readline that the bytes between them occupy zero
    # printable columns (used for the interactive prompt).
    mark_start, mark_end = ("\001", "\002") if prompt else ("", "")
    name = color.lower()
    if name == "random":
        name = random.choice(["blue", "red", "green", "yellow"])
    ansi_codes = {
        "blue": "34",
        "red": "31",
        "green": "32",
        "yellow": "33",
        "grey": "37",
        "darkgrey": "1;30",
    }
    code = ansi_codes.get(name)
    if code is None:
        # Unknown color names leave the text uncolored.
        return text
    return (mark_start + "\033[" + code + "m" + mark_end + text +
            mark_start + "\033[0m" + mark_end)
def get_columns_size(l):
    """Compute the display width of each column of a table.

    Args:
      l: list of dicts sharing the same keys (one dict per table row).

    Returns:
      dict mapping each column name to the length of its longest value;
      non-str values are measured after str() conversion.
    """
    size_dic = {}
    # .items() instead of the Python2-only .iteritems() so this helper also
    # works under Python 3 (behavior is identical on Python 2).
    for row in l:
        for name, value in row.items():
            if type(value) is not str:
                value = str(value)
            if len(value) > size_dic.get(name, -1):
                size_dic[name] = len(value)
    return size_dic
def obj2utf8(obj):
    # Recursively convert an object tree (dicts/lists/tuples with string
    # leaves) into utf8-encoded byte strings. Python 2 only: relies on the
    # `unicode` builtin and on py2 str/unicode .encode() semantics.
    if type(obj) == dict:
        # Dicts and lists are converted in place (and also returned).
        for k in obj:
            obj[k] = obj2utf8(obj[k])
    elif type(obj) == list:
        for i in range(0, len(obj)):
            obj[i] = obj2utf8(obj[i])
    elif type(obj) == tuple:
        # Tuples are immutable: rebuild through a temporary list.
        obj = list(obj)
        for i in range(0, len(obj)):
            obj[i] = obj2utf8(obj[i])
        obj = tuple(obj)
    elif type(obj) == unicode or type(obj) == str:
        # Undecodable characters are replaced rather than raising.
        return obj.encode('utf8', errors='replace')
    else:
        # Any other type is stringified.
        obj = str(obj)
    return obj
class PupyCmd(cmd.Cmd):
    def __init__(self, pupsrv, configFile="pupy.conf"):
        """Build the interactive shell around a pupy server instance,
        loading colors, banner and aliases from the configuration file."""
        cmd.Cmd.__init__(self)
        self.pupsrv = pupsrv
        self.pupsrv.register_handler(self)
        self.config = configparser.ConfigParser()
        self.config.read(configFile)
        self.init_readline()
        global color
        try:
            # Rebind the module-level color() helper with the configured
            # colors_enabled flag baked in.
            color = partial(color_real, colors_enabled=self.config.getboolean("cmdline", "colors"))
        except Exception:
            color = color_real
        self.intro = color(BANNER, 'green')
        self.prompt = color('>> ', 'blue', prompt=True)
        self.doc_header = 'Available commands :\n'
        # Commands whose completions should get a trailing space appended.
        self.complete_space = ['run']
        self.default_filter = None
        try:
            if not self.config.getboolean("cmdline", "display_banner"):
                self.intro = ""
        except Exception:
            # Option missing from config: keep the banner.
            pass
        self.aliases = {}
        try:
            # The [aliases] section maps a shell command to a module name.
            for command, alias in self.config.items("aliases"):
                logging.debug("adding alias: %s => %s" % (command, alias))
                self.aliases[command] = alias
        except Exception as e:
            logging.warning("error while parsing aliases from pupy.conf ! %s" % str(traceback.format_exc()))
    @staticmethod
    def table_format(diclist, wl=[], bl=[]):
        """
        this function takes a list a dictionaries to display in columns. Dictionnaries keys are the columns names.
        All dictionaries must have the same keys.
        wl is a whitelist of column names to display
        bl is a blacklist of columns names to hide
        """
        res = ""
        if diclist:
            diclist = obj2utf8(diclist)
            keys = [x for x in diclist[0].iterkeys()]
            if wl:
                keys = [x for x in wl if x in keys]
            if bl:
                keys = [x for x in keys if x not in bl]
            titlesdic = {}
            for k in keys:
                titlesdic[k] = k
            # Prepend a header row whose "values" are the column names.
            diclist.insert(0, titlesdic)
            colsize = get_columns_size(diclist)
            i = 0
            for c in diclist:
                if i == 1:
                    # Separator line right after the header row.
                    res += "-" * sum([k + 2 for k in [y for x, y in colsize.iteritems() if x in titlesdic]]) + "\n"
                i += 1
                for name in keys:
                    # NOTE(review): `is not unicode` compares the value to the
                    # type object and is therefore always true; it was probably
                    # meant to be `type(c[name]) is not unicode`.
                    if c[name] is not unicode:
                        value = str(c[name]).strip()
                    else:
                        value = c[name].strip()
                    # Extra padding to compensate multi-byte utf8 characters
                    # when left-justifying columns.
                    utf8align = len(value) - len(value.decode('utf8', errors='replace'))
                    res += value.ljust(colsize[name] + 2 + utf8align)
                res += "\n"
        return res
    def default(self, line):
        """Handle unknown commands: resolve aliases configured in pupy.conf
        and forward them to do_run with their options re-ordered."""
        tab = line.split(" ", 1)
        if tab[0] in self.aliases:
            arg_parser = PupyArgumentParser(prog=tab[0], add_help=False)
            arg_parser.add_argument('-f', '--filter', metavar='<client filter>', help="filter to a subset of all clients. All fields available in the \"info\" module can be used. example: run get_info -f 'platform:win release:7 os_arch:64'")
            arg_parser.add_argument('--bg', action='store_true', help="run in background")
            arg_parser.add_argument('arguments', nargs=argparse.REMAINDER, metavar='<arguments>', help="module arguments")
            if len(tab) == 1:
                # Alias used with no argument: run the module directly.
                self.do_run(self.aliases[tab[0]])
            else:
                left = []
                try:
                    modargs, left = arg_parser.parse_known_args(shlex.split(tab[1]))
                except PupyModuleExit:
                    # The parser already printed its usage/error message.
                    return
                # putting run arguments (-f and --bg) back at their place in case of aliases
                newargs_str = ""
                if modargs.bg:
                    newargs_str += " --bg"
                if modargs.filter:
                    # Re-quote the filter, escaping embedded single quotes.
                    newargs_str += " -f '" + modargs.filter.replace("'", "'\\''") + "'"
                newargs_str += " " + self.aliases[tab[0]]
                if left:
                    newargs_str += " " + ' '.join(left)
                if modargs.arguments:
                    newargs_str += " '" + (' '.join(modargs.arguments)).replace("'", "'\\''") + "'"
                self.do_run(newargs_str.strip())
        else:
            self.display_error("Unknown syntax: %s" % line)
    def init_readline(self):
        """Load persistent command history and install tab-completion."""
        try:
            readline.read_history_file(".pupy_history")
        except Exception:
            # Missing or unreadable history file: start with empty history.
            pass
        self.init_completer()
def cmdloop(self, intro=None):
try:
cmd.Cmd.cmdloop(self, intro)
except KeyboardInterrupt as e:
self.stdout.write('\n')
self.cmdloop(intro="")
    def init_completer(self):
        """Register the pre-input hook and restrict completion word breaks
        to whitespace (so tokens with ':' or '/' complete as a whole)."""
        readline.set_pre_input_hook(self.pre_input_hook)
        readline.set_completer_delims(" \t")
    def completenames(self, text, *ignored):
        """Complete command names, including configured aliases.

        When the typed text is listed in self.complete_space, a trailing
        space is appended to each candidate so the cursor moves past it.
        """
        dotext = 'do_' + text
        if text in self.complete_space:
            return [a[3:] + " " for a in self.get_names() if a.startswith(dotext)] + [x + " " for x in self.aliases.iterkeys() if x.startswith(text)]
        return [a[3:] for a in self.get_names() if a.startswith(dotext)] + [x for x in self.aliases.iterkeys() if x.startswith(text)]
    def pre_input_hook(self):
        # readline.redisplay()
        # Intentionally a no-op; registered in init_completer() as a
        # placeholder for future prompt redisplay logic.
        pass

    def emptyline(self):
        """ do nothing when an emptyline is entered """
        # Overrides cmd.Cmd's default of repeating the last command.
        pass
    def do_help(self, arg):
        """ show this help """
        if arg:
            # Help for a single command: prefer a help_<cmd> method, fall
            # back to the do_<cmd> docstring.
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                try:
                    doc = getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n" % str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n" % str(self.nohelp % (arg,)))
                return
            func()
        else:
            # Full listing: every do_* method plus configured aliases.
            names = self.get_names()
            cmds_doc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]] = 1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd = name[3:]
                    if cmd in help:
                        # NOTE(review): appends a bare string while all other
                        # branches append (command, doc) tuples; the unpacking
                        # loop below would raise for such entries. Latent bug,
                        # apparently never hit because no help_* methods exist.
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append((cmd, getattr(self, name).__doc__))
                    else:
                        cmds_doc.append((cmd, ""))
            for name in [x for x in self.aliases.iterkeys()]:
                # Aliases get the docstring of the module they point to.
                cmds_doc.append((name, self.pupsrv.get_module(self.aliases[name]).__doc__))
            self.stdout.write("%s\n" % str(self.doc_header))
            for command, doc in cmds_doc:
                self.stdout.write("- {:<10} {}\n".format(command, color(doc, 'grey')))
    @staticmethod
    def format_log(msg):
        """ return a formated log line (no prefix) """
        return msg.rstrip() + "\n"

    @staticmethod
    def format_error(msg):
        """ return a formated error log line (red [-] prefix) """
        return color('[-] ', 'red') + msg.rstrip() + "\n"

    @staticmethod
    def format_warning(msg):
        """ return a formated warning log line (yellow [!] prefix) """
        return color('[!] ', 'yellow') + msg.rstrip() + "\n"

    @staticmethod
    def format_success(msg):
        """ return a formated success log line (green [+] prefix) """
        return color('[+] ', 'green') + msg.rstrip() + "\n"

    @staticmethod
    def format_info(msg):
        """ return a formated info log line (dark-grey [%] prefix) """
        return color('[%] ', 'darkgrey') + msg.rstrip() + "\n"

    @staticmethod
    def format_srvinfo(msg):
        """ return a formated server-info log line (blue [*] prefix) """
        return color('[*] ', 'blue') + msg.rstrip() + "\n"

    @staticmethod
    def format_section(msg):
        """ return a formated section header line """
        return color('#>#> ', 'green') + color(msg.rstrip(), 'darkgrey') + color(' <#<#', 'green') + "\n"
    def display(self, msg, modifier=None):
        """Write msg to stdout, formatted according to modifier
        (error/success/info/srvinfo/warning, or plain when None)."""
        # Python 2: keep unicode as-is, stringify anything else.
        if not type(msg) is unicode:
            msg = str(msg)
        if msg:
            if modifier == "error":
                sys.stdout.write(PupyCmd.format_error(msg))
            elif modifier == "success":
                sys.stdout.write(PupyCmd.format_success(msg))
            elif modifier == "info":
                sys.stdout.write(PupyCmd.format_info(msg))
            elif modifier == "srvinfo":
                sys.stdout.write(PupyCmd.format_srvinfo(msg))
                # readline.redisplay()
            elif modifier == "warning":
                sys.stdout.write(PupyCmd.format_warning(msg))
            else:
                sys.stdout.write(PupyCmd.format_log(msg))

    # Convenience wrappers around display() with a fixed modifier.
    def display_srvinfo(self, msg):
        return self.display(msg, modifier="srvinfo")

    def display_success(self, msg):
        return self.display(msg, modifier="success")

    def display_error(self, msg):
        return self.display(msg, modifier="error")

    def display_warning(self, msg):
        return self.display(msg, modifier="warning")

    def display_info(self, msg):
        return self.display(msg, modifier="info")
    def postcmd(self, stop, line):
        # Persist history after every command so a crash loses nothing.
        readline.write_history_file('.pupy_history')

    def do_list_modules(self, arg):
        """ List available modules with a brief description """
        for m, d in self.pupsrv.list_modules():
            self.stdout.write("{:<20} {}\n".format(m, color(d, 'grey')))
def do_clients(self, arg):
""" alias for sessions """
self.do_sessions(arg)
    def do_sessions(self, arg):
        """ list/interact with established sessions """
        # The docstring above doubles as the argparse description (and the
        # `help sessions` text), so it must stay user-facing as-is.
        arg_parser = PupyArgumentParser(prog='sessions', description=self.do_sessions.__doc__)
        arg_parser.add_argument('-i', '--interact', metavar='<filter>', help="change the default --filter value for other commands")
        arg_parser.add_argument('-g', '--global-reset', action='store_true', help="reset --interact to the default global behavior")
        arg_parser.add_argument('-l', dest='list', action='store_true', help='List all active sessions')
        arg_parser.add_argument('-k', dest='kill', metavar='<id>', type=int, help='Kill the selected session')
        try:
            modargs=arg_parser.parse_args(shlex.split(arg))
        except PupyModuleExit:
            # PupyArgumentParser raises instead of calling sys.exit() on
            # parse errors / --help; just return to the prompt.
            return
        if modargs.global_reset:
            self.default_filter=None
            self.display_success("default filter reset to global !")
        elif modargs.interact:
            self.default_filter=modargs.interact
            self.display_success("default filter set to %s"%self.default_filter)
        elif modargs.kill:
            # NOTE(review): a session id of 0 would test falsy here and be
            # ignored -- confirm ids start at 1.
            selected_client = self.pupsrv.get_clients(modargs.kill)
            if selected_client:
                try:
                    # Best effort: ask the client to exit; errors while the
                    # connection tears down are expected and ignored.
                    selected_client[0].conn.exit()
                except Exception:
                    pass
        elif modargs.list or not arg:
            client_list=self.pupsrv.get_clients_list()
            self.display(PupyCmd.table_format([x.desc for x in client_list], wl=["id", "user", "hostname", "platform", "release", "os_arch", "address"]))
    def do_jobs(self, arg):
        """ manage jobs """
        arg_parser = PupyArgumentParser(prog='jobs', description='list or kill jobs')
        arg_parser.add_argument('-k', '--kill', metavar='<job_id>', help="print the job current output before killing it")
        arg_parser.add_argument('-l', '--list', action='store_true', help="list jobs")
        arg_parser.add_argument('-p', '--print-output', metavar='<job_id>', help="print a job output")
        try:
            modargs=arg_parser.parse_args(shlex.split(arg))
        except PupyModuleExit:
            # Parser printed its own error/help; back to the prompt.
            return
        try:
            if modargs.kill:
                # Show the job's final output before tearing it down.
                j=self.pupsrv.get_job(modargs.kill)
                self.display(j.result_summary())
                j.stop()
                del j
                self.display_success("job killed")
            elif modargs.print_output:
                j=self.pupsrv.get_job(modargs.print_output)
                self.display(j.result_summary())
            elif modargs.list:
                if len(self.pupsrv.jobs)>0:
                    # Build one row per job for the ASCII table renderer.
                    dictable=[]
                    for k,v in self.pupsrv.jobs.iteritems():
                        dic={"id":k, "job":str(v)}
                        status="running"
                        if v.is_finished():
                            status="finished"
                        dic["status"]=status
                        dic["clients_nb"]=str(v.get_clients_nb())
                        dictable.append(dic)
                    self.display(PupyCmd.table_format(dictable, wl=["id", "job", "clients_nb","status"]))
                else:
                    self.display_error("No jobs are currently running !")
            else: #display help
                try:
                    arg_parser.parse_args(["-h"])
                except PupyModuleExit:
                    return
        except PupyModuleError as e:
            self.display_error(e)
        except Exception as e:
            # Unexpected failure: show the full traceback in the shell.
            self.display_error(traceback.format_exc())
    def do_python(self,arg):
        """ start the local python interpreter (for debugging purposes) """
        # Neutralise exit()/quit() so a stray call inside the REPL cannot
        # kill the whole pupy shell; originals are restored in `finally`.
        orig_exit=builtins.exit
        orig_quit=builtins.quit
        def disabled_exit(*args, **kwargs):
            # Replacement for builtins.exit/quit while the REPL is active.
            self.display_warning("exit() disabled ! use ctrl+D to exit the python shell")
        builtins.exit=disabled_exit
        builtins.quit=disabled_exit
        oldcompleter=readline.get_completer()
        try:
            # Expose the running server in the REPL namespace and install a
            # tab-completer scoped to that namespace.
            local_ns={"pupsrv":self.pupsrv}
            readline.set_completer(PupyCompleter(local_ns=local_ns).complete)
            readline.parse_and_bind('tab: complete')
            code.interact(local=local_ns)
        except Exception as e:
            self.display_error(str(e))
        finally:
            # Restore the shell's own completer and the real exit()/quit().
            readline.set_completer(oldcompleter)
            readline.parse_and_bind('tab: complete')
            builtins.exit=orig_exit
            builtins.quit=orig_quit
    def do_run(self, arg):
        """ run a module on one or multiple clients"""
        arg_parser = PupyArgumentParser(prog='run', description='run a module on one or multiple clients')
        arg_parser.add_argument('module', metavar='<module>', help="module")
        arg_parser.add_argument('-f', '--filter', metavar='<client filter>', default=self.default_filter ,help="filter to a subset of all clients. All fields available in the \"info\" module can be used. example: run get_info -f 'platform:win release:7 os_arch:64'")
        arg_parser.add_argument('--bg', action='store_true', help="run in background")
        arg_parser.add_argument('arguments', nargs=argparse.REMAINDER, metavar='<arguments>', help="module arguments")
        pj=None
        try:
            modargs=arg_parser.parse_args(shlex.split(arg))
        except PupyModuleExit:
            return
        # Everything after the module name is forwarded verbatim to the module.
        if not modargs.arguments:
            args=""
        else:
            args=modargs.arguments
        # "*" selects every connected client unless a filter narrows it down.
        selected_clients="*"
        if modargs.filter:
            selected_clients=modargs.filter
        try:
            mod=self.pupsrv.get_module(modargs.module)
        except Exception as e:
            self.display_error("%s : %s"%(modargs.module,str(e)))
            return
        if not mod:
            self.display_error("unknown module %s !"%modargs.module)
            return
        #logging.debug("args passed to %s: %s"%(modargs.module,args))
        l=self.pupsrv.get_clients(selected_clients)
        if not l:
            # Distinguish "no clients at all" from "filter matched nothing".
            if not self.pupsrv.clients:
                self.display_error("no clients currently connected")
            else:
                self.display_error("no clients match this search!")
            return
        try:
            self.pupsrv.module_parse_args(modargs.module, args)
        except PupyModuleExit:
            return
        # max_clients == 0 means "unlimited".
        if mod.max_clients!=0 and len(l)>mod.max_clients:
            self.display_error("This module is limited to %s client(s) at a time and you selected %s clients"%(mod.max_clients, len(l)))
            return
        # Jobs already running this same module on one of the selected clients.
        modjobs=[x for x in self.pupsrv.jobs.itervalues() if str(type(x.pupymodules[0]))== str(mod) and x.pupymodules[0].client in l]
        #print [x for x in self.pupsrv.jobs.itervalues()]
        #print modjobs
        #if mod.unique_instance and len(modjobs)>=1:
        #    self.display_error("This module is limited to %s instances per client. Job(s) containing this modules are still running."%(len(modjobs)))
        #    return
        pj=None
        try:
            interactive=False
            if mod.daemon and mod.unique_instance and modjobs:
                # Reuse the already-running job for unique-instance daemons.
                pj=modjobs[0]
            else:
                pj=PupyJob(self.pupsrv,"%s %s"%(modargs.module, args))
                if len(l)==1 and not modargs.bg and not mod.daemon:
                    # Single foreground target: wire the module to our stdout.
                    ps=mod(l[0], pj, stdout=self.stdout)
                    pj.add_module(ps)
                    interactive=True
                else:
                    for c in l:
                        ps=mod(c, pj)
                        pj.add_module(ps)
                pj.start(args)
            if not modjobs:
                if modargs.bg:
                    # Background job: register it and return to the prompt.
                    self.pupsrv.add_job(pj)
                    return
                elif mod.daemon:
                    self.pupsrv.add_job(pj)
            error=pj.interactive_wait()
            if error and not modjobs:
                pj.stop()
        except KeyboardInterrupt:
            # Ctrl+C while waiting: interrupt the job, not the shell.
            self.display_warning("interrupting job ... (please wait)")
            pj.interrupt()
            self.display_warning("job interrupted")
        if not interactive:
            # Interactive runs already streamed their output to stdout.
            self.display(pj.result_summary())
        if pj:
            del pj
    #text : word match
    #line : complete line
    def complete_run(self, text, line, begidx, endidx):
        """Tab-completion for ``run``: offer module file names from ./modules.

        ``joker`` counts how many bare (non-option) tokens may still appear
        before the module-name position is consumed; it goes up for options
        that take a value and down for every other token.
        """
        mline = line.partition(' ')[2]
        joker=1
        found_module=False
        #handle autocompletion of modules with --filter argument
        for x in shlex.split(mline):
            if x in ("-f", "--filter"):#arguments with a param
                joker+=1
            elif x in ("--bg",):#arguments without parameter
                pass
            else:
                joker-=1
            if not x.startswith("-") and joker==0:
                # A bare token landed exactly on the module position.
                found_module=True
            if joker<0:
                # Already past the module position: nothing to complete.
                return
        if ((len(text)>0 and joker==0) or (len(text)==0 and not found_module and joker<=1)):
            # Strip .py/.pyc extensions and hide the package marker files.
            return [re.sub(r"(.*)\.pyc?$",r"\1",x) for x in os.listdir("./modules") if x.startswith(text) and not x=="__init__.py" and not x=="__init__.pyc"]
def do_exit(self, arg):
""" Quit Pupy Shell """
sys.exit()
    def do_read(self, arg):
        """ execute a list of commands from a file """
        try:
            if not arg:
                self.display_error("usage: read <filename>")
                return
            # Queue every line of the file; cmd.Cmd drains cmdqueue before
            # prompting again, so the commands execute in file order.
            with open(arg,'r') as f:
                self.cmdqueue.extend(f.read().splitlines())
        except Exception as e:
            # Unreadable file, bad path, etc.: report instead of crashing.
            self.display_error(str(e))
def _complete_path(self, path=None):
"Perform completion of filesystem path."
if not path:
return os.listdir('.')
dirname, rest = os.path.split(path)
tmp = dirname if dirname else '.'
res = [os.path.join(dirname, p)
for p in os.listdir(tmp) if p.startswith(rest)]
# more than one match, or single match which does not exist (typo)
if len(res) > 1 or not os.path.exists(path):
return res
# resolved to a single directory, so return list of files below it
if os.path.isdir(path):
return [os.path.join(path, p) for p in os.listdir(path)]
# exact file match terminates this completion
return [path + ' ']
def complete_read(self, text, line, begidx, endidx):
tab = line.split(' ',1)
if len(tab)>=2:
return self._complete_path(tab[1])
| bsd-3-clause |
blaggacao/odoo | openerp/addons/base/ir/ir_sequence.py | 83 | 14810 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
import openerp
from openerp.osv import osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class ir_sequence_type(openerp.osv.osv.osv):
    # Registry of sequence "types": a unique short code plus a human-readable
    # name. ir.sequence records reference these codes through their `code`
    # selection field (populated by the module-level _code_get helper).
    _name = 'ir.sequence.type'
    _order = 'name'
    _columns = {
        'name': openerp.osv.fields.char('Name', required=True),
        'code': openerp.osv.fields.char('Code', size=32, required=True),
    }
    _sql_constraints = [
        ('code_unique', 'unique(code)', '`code` must be unique.'),
    ]
def _code_get(self, cr, uid, context=None):
cr.execute('select code, name from ir_sequence_type')
return cr.fetchall()
class ir_sequence(openerp.osv.osv.osv):
    """ Sequence model.
    The sequence model allows to define and use so-called sequence objects.
    Such objects are used to generate unique identifiers in a transaction-safe
    way.
    """
    # Two implementations coexist (see the `implementation` column):
    # - 'standard': backed by a real PostgreSQL sequence named
    #   ir_sequence_<id zero-padded to 3 digits>; fast, but gaps can appear.
    # - 'no_gap': the counter lives in this table's `number_next` column and
    #   is bumped under a row lock; slower, but guaranteed gap-free.
    _name = 'ir.sequence'
    _order = 'name'
    def _get_number_next_actual(self, cr, user, ids, field_name, arg, context=None):
        '''Return number from ir_sequence row when no_gap implementation,
        and number from postgres sequence when standard implementation.'''
        res = dict.fromkeys(ids)
        for element in self.browse(cr, user, ids, context=context):
            if element.implementation != 'standard':
                res[element.id] = element.number_next
            else:
                # get number from postgres sequence. Cannot use
                # currval, because that might give an error when
                # not having used nextval before.
                statement = (
                    "SELECT last_value, increment_by, is_called"
                    " FROM ir_sequence_%03d"
                    % element.id)
                cr.execute(statement)
                (last_value, increment_by, is_called) = cr.fetchone()
                if is_called:
                    res[element.id] = last_value + increment_by
                else:
                    res[element.id] = last_value
        return res
    def _set_number_next_actual(self, cr, uid, id, name, value, args=None, context=None):
        # Inverse of the function field: persist the requested next number
        # (write() takes care of altering the backing postgres sequence).
        return self.write(cr, uid, id, {'number_next': value or 0}, context=context)
    _columns = {
        'name': openerp.osv.fields.char('Name', size=64, required=True),
        'code': openerp.osv.fields.selection(_code_get, 'Sequence Type', size=64),
        'implementation': openerp.osv.fields.selection( # TODO update the view
            [('standard', 'Standard'), ('no_gap', 'No gap')],
            'Implementation', required=True,
            help="Two sequence object implementations are offered: Standard "
            "and 'No gap'. The later is slower than the former but forbids any"
            " gap in the sequence (while they are possible in the former)."),
        'active': openerp.osv.fields.boolean('Active'),
        'prefix': openerp.osv.fields.char('Prefix', help="Prefix value of the record for the sequence"),
        'suffix': openerp.osv.fields.char('Suffix', help="Suffix value of the record for the sequence"),
        'number_next': openerp.osv.fields.integer('Next Number', required=True, help="Next number of this sequence"),
        'number_next_actual': openerp.osv.fields.function(_get_number_next_actual, fnct_inv=_set_number_next_actual, type='integer', required=True, string='Next Number', help='Next number that will be used. This number can be incremented frequently so the displayed value might already be obsolete'),
        'number_increment': openerp.osv.fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
        'padding' : openerp.osv.fields.integer('Number Padding', required=True, help="Odoo will automatically adds some '0' on the left of the 'Next Number' to get the required padding size."),
        'company_id': openerp.osv.fields.many2one('res.company', 'Company'),
    }
    _defaults = {
        'implementation': 'standard',
        'active': True,
        'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
        'number_increment': 1,
        'number_next': 1,
        'number_next_actual': 1,
        'padding' : 0,
    }
    def init(self, cr):
        # NOTE: everything after this `return` is intentionally dead code
        # (index creation deferred); kept for reference.
        return # Don't do the following index yet.
        # CONSTRAINT/UNIQUE INDEX on (code, company_id)
        # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
        # only support field names in constraint definitions, and we need a function here:
        # we need to special-case company_id to treat all NULL company_id as equal, otherwise
        # we would allow duplicate (code, NULL) ir_sequences.
        cr.execute("""
            SELECT indexname FROM pg_indexes WHERE indexname =
            'ir_sequence_unique_code_company_id_idx'""")
        if not cr.fetchone():
            cr.execute("""
                CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx
                ON ir_sequence (code, (COALESCE(company_id,-1)))""")
    def _create_sequence(self, cr, id, number_increment, number_next):
        """ Create a PostreSQL sequence.
        There is no access rights check.
        """
        if number_increment == 0:
            raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
        # `id` is interpolated into the SQL identifier, so it must be a plain
        # integer (guards against SQL injection through the id).
        assert isinstance(id, (int, long))
        sql = "CREATE SEQUENCE ir_sequence_%03d INCREMENT BY %%s START WITH %%s" % id
        cr.execute(sql, (number_increment, number_next))
    def _drop_sequence(self, cr, ids):
        """ Drop the PostreSQL sequence if it exists.
        There is no access rights check.
        """
        ids = ids if isinstance(ids, (list, tuple)) else [ids]
        assert all(isinstance(i, (int, long)) for i in ids), \
            "Only ids in (int, long) allowed."
        names = ','.join('ir_sequence_%03d' % i for i in ids)
        # RESTRICT is the default; it prevents dropping the sequence if an
        # object depends on it.
        cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names)
    def _alter_sequence(self, cr, id, number_increment, number_next=None):
        """ Alter a PostreSQL sequence.
        There is no access rights check.
        """
        if number_increment == 0:
            raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
        assert isinstance(id, (int, long))
        seq_name = 'ir_sequence_%03d' % (id,)
        cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name))
        if not cr.fetchone():
            # sequence is not created yet, we're inside create() so ignore it, will be set later
            return
        statement = "ALTER SEQUENCE %s INCREMENT BY %d" % (seq_name, number_increment)
        if number_next is not None:
            statement += " RESTART WITH %d" % (number_next, )
        cr.execute(statement)
    def create(self, cr, uid, values, context=None):
        """ Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
        """
        # Fill defaults first so implementation/number_* are always present.
        values = self._add_missing_default_values(cr, uid, values, context)
        values['id'] = super(ir_sequence, self).create(cr, uid, values, context)
        if values['implementation'] == 'standard':
            self._create_sequence(cr, values['id'], values['number_increment'], values['number_next'])
        return values['id']
    def unlink(self, cr, uid, ids, context=None):
        super(ir_sequence, self).unlink(cr, uid, ids, context)
        # Also remove the backing postgres sequences; harmless for no_gap
        # records since _drop_sequence uses DROP ... IF EXISTS.
        self._drop_sequence(cr, ids)
        return True
    def write(self, cr, uid, ids, values, context=None):
        if not isinstance(ids, (list, tuple)):
            ids = [ids]
        new_implementation = values.get('implementation')
        rows = self.read(cr, uid, ids, ['implementation', 'number_increment', 'number_next'], context)
        super(ir_sequence, self).write(cr, uid, ids, values, context)
        # Keep the postgres sequences in sync with the (possibly changed)
        # implementation: create/drop/alter depending on old vs new mode.
        for row in rows:
            # 4 cases: we test the previous impl. against the new one.
            i = values.get('number_increment', row['number_increment'])
            n = values.get('number_next', row['number_next'])
            if row['implementation'] == 'standard':
                if new_implementation in ('standard', None):
                    # Implementation has NOT changed.
                    # Only change sequence if really requested.
                    if row['number_next'] != n:
                        self._alter_sequence(cr, row['id'], i, n)
                    else:
                        # Just in case only increment changed
                        self._alter_sequence(cr, row['id'], i)
                else:
                    self._drop_sequence(cr, row['id'])
            else:
                if new_implementation in ('no_gap', None):
                    pass
                else:
                    self._create_sequence(cr, row['id'], i, n)
        return True
    def _interpolate(self, s, d):
        # %-interpolate pattern `s` with dict `d`; empty/None patterns yield ''.
        if s:
            return s % d
        return ''
    def _interpolation_dict(self):
        # Keys usable as %(key)s placeholders in sequence prefixes/suffixes.
        t = time.localtime() # Actually, the server is always in UTC.
        return {
            'year': time.strftime('%Y', t),
            'month': time.strftime('%m', t),
            'day': time.strftime('%d', t),
            'y': time.strftime('%y', t),
            'doy': time.strftime('%j', t),
            'woy': time.strftime('%W', t),
            'weekday': time.strftime('%w', t),
            'h24': time.strftime('%H', t),
            'h12': time.strftime('%I', t),
            'min': time.strftime('%M', t),
            'sec': time.strftime('%S', t),
        }
    def _next(self, cr, uid, ids, context=None):
        # Core generator: pick one sequence among `ids` (preferring the one
        # attached to the forced/current company), advance it, and render
        # prefix + zero-padded number + suffix.
        if not ids:
            return False
        if context is None:
            context = {}
        force_company = context.get('force_company')
        if not force_company:
            force_company = self.pool.get('res.users').browse(cr, uid, uid).company_id.id
        sequences = self.read(cr, uid, ids, ['name','company_id','implementation','number_next','prefix','suffix','padding'])
        preferred_sequences = [s for s in sequences if s['company_id'] and s['company_id'][0] == force_company ]
        seq = preferred_sequences[0] if preferred_sequences else sequences[0]
        if seq['implementation'] == 'standard':
            # fetchone() returns a 1-tuple; the final '%d' interpolation
            # below accepts it as its single format argument.
            cr.execute("SELECT nextval('ir_sequence_%03d')" % seq['id'])
            seq['number_next'] = cr.fetchone()
        else:
            # no_gap: lock the row (NOWAIT fails fast instead of blocking)
            # and bump the stored counter inside the transaction.
            cr.execute("SELECT number_next FROM ir_sequence WHERE id=%s FOR UPDATE NOWAIT", (seq['id'],))
            cr.execute("UPDATE ir_sequence SET number_next=number_next+number_increment WHERE id=%s ", (seq['id'],))
            self.invalidate_cache(cr, uid, ['number_next'], [seq['id']], context=context)
        d = self._interpolation_dict()
        try:
            interpolated_prefix = self._interpolate(seq['prefix'], d)
            interpolated_suffix = self._interpolate(seq['suffix'], d)
        except ValueError:
            raise osv.except_osv(_('Warning'), _('Invalid prefix or suffix for sequence \'%s\'') % (seq.get('name')))
        return interpolated_prefix + '%%0%sd' % seq['padding'] % seq['number_next'] + interpolated_suffix
    def next_by_id(self, cr, uid, sequence_id, context=None):
        """ Draw an interpolated string using the specified sequence."""
        self.check_access_rights(cr, uid, 'read')
        # Restrict to sequences visible through the user's companies
        # (False = sequences with no company set).
        company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
        ids = self.search(cr, uid, ['&',('id','=', sequence_id),('company_id','in',company_ids)])
        return self._next(cr, uid, ids, context)
    def next_by_code(self, cr, uid, sequence_code, context=None):
        """ Draw an interpolated string using a sequence with the requested code.
        If several sequences with the correct code are available to the user
        (multi-company cases), the one from the user's current company will
        be used.
        :param dict context: context dictionary may contain a
            ``force_company`` key with the ID of the company to
            use instead of the user's current company for the
            sequence selection. A matching sequence for that
            specific company will get higher priority.
        """
        self.check_access_rights(cr, uid, 'read')
        company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
        ids = self.search(cr, uid, ['&', ('code', '=', sequence_code), ('company_id', 'in', company_ids)])
        return self._next(cr, uid, ids, context)
    def get_id(self, cr, uid, sequence_code_or_id, code_or_id='id', context=None):
        """ Draw an interpolated string using the specified sequence.
        The sequence to use is specified by the ``sequence_code_or_id``
        argument, which can be a code or an id (as controlled by the
        ``code_or_id`` argument. This method is deprecated.
        """
        # TODO: bump up to warning after 6.1 release
        _logger.debug("ir_sequence.get() and ir_sequence.get_id() are deprecated. "
            "Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().")
        if code_or_id == 'id':
            return self.next_by_id(cr, uid, sequence_code_or_id, context)
        else:
            return self.next_by_code(cr, uid, sequence_code_or_id, context)
    def get(self, cr, uid, code, context=None):
        """ Draw an interpolated string using the specified sequence.
        The sequence to use is specified by its code. This method is
        deprecated.
        """
        return self.get_id(cr, uid, code, 'code', context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/mis.py | 4 | 2762 | # -*- coding: utf-8 -*-
# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $
# Leo Lopes <leo.lopes@monash.edu>
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Leo Lopes <leo.lopes@monash.edu>
# Loïc Séguin-C. <loicseguin@gmail.com>
"""
Algorithm to find a maximal (not maximum) independent set.
"""
import networkx as nx
from networkx.utils import not_implemented_for
from networkx.utils import py_random_state
__all__ = ['maximal_independent_set']
@py_random_state(2)
@not_implemented_for('directed')
def maximal_independent_set(G, nodes=None, seed=None):
    """Return a random maximal independent set guaranteed to contain
    a given set of nodes.
    An independent set is a set of nodes such that the subgraph
    of G induced by these nodes contains no edges. A maximal
    independent set is an independent set such that it is not possible
    to add a new node and still get an independent set.
    Parameters
    ----------
    G : NetworkX graph
    nodes : list or iterable
       Nodes that must be part of the independent set. This set of nodes
       must be independent.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    Returns
    -------
    indep_nodes : list
       List of nodes that are part of a maximal independent set.
    Raises
    ------
    NetworkXUnfeasible
       If the nodes in the provided list are not part of the graph or
       do not form an independent set, an exception is raised.
    NetworkXNotImplemented
        If `G` is directed.
    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> nx.maximal_independent_set(G) # doctest: +SKIP
    [4, 0, 2]
    >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP
    [1, 3]
    Notes
    -----
    This algorithm does not solve the maximum independent set problem.
    """
    if not nodes:
        # No required nodes given: start from one uniformly chosen node.
        nodes = set([seed.choice(list(G))])
    else:
        nodes = set(nodes)
    if not nodes.issubset(G):
        raise nx.NetworkXUnfeasible(
            "%s is not a subset of the nodes of G" % nodes)
    # Union of the neighborhoods of all required nodes; any edge inside
    # `nodes` puts one endpoint in here too, which the next check catches.
    neighbors = set.union(*[set(G.adj[v]) for v in nodes])
    if set.intersection(neighbors, nodes):
        raise nx.NetworkXUnfeasible(
            "%s is not an independent set of G" % nodes)
    indep_nodes = list(nodes)
    # Candidates: nodes neither already chosen nor adjacent to a chosen one.
    available_nodes = set(G.nodes()).difference(neighbors.union(nodes))
    while available_nodes:
        # Greedily add a random candidate, then remove it and its whole
        # neighborhood from the pool; when the pool is empty no node can be
        # added, which is exactly maximality.
        node = seed.choice(list(available_nodes))
        indep_nodes.append(node)
        available_nodes.difference_update(list(G.adj[node]) + [node])
    return indep_nodes
| gpl-3.0 |
CamLib/AltmetricClient | altmetric_client/output_writer_csv/csv_writer_subject.py | 1 | 2120 | from csv import DictWriter
from altmetric_client.output_writer_csv.csv_writer_base import CSVWriterBase
class CSVWriterSubject(CSVWriterBase):
    """Writes a list of altmetric subjects to a CSV file, one row per subject."""

    def __init__(self,
                 output_file_name=None,
                 output_directory_name=None,
                 subjects_list=None):
        CSVWriterBase.__init__(self, output_file_name, output_directory_name)
        self.__subjects_list = subjects_list

    @property
    def subjects_list(self):
        """The subject objects (each exposing .doi, .scheme, .name) to write."""
        return self.__subjects_list

    @subjects_list.setter
    def subjects_list(self, subjects_list):
        self.__subjects_list = subjects_list

    def write_subjects(self):
        """Create or append the output CSV with one row per subject.

        The header row is written only when the base class reports write
        mode 'w' (fresh file). Failures are reported to stdout rather than
        raised so one bad write does not abort a long harvesting run.
        """
        output_file_path = '{0}{1}'.format(self.output_directory_name, self.output_file_name)
        write_mode = self._get_write_mode(output_file_path)
        fieldnames = ['doi', 'subject_scheme', 'subject_name']
        try:
            with open(output_file_path, write_mode) as output_csv:
                output_writer = DictWriter(output_csv, fieldnames=fieldnames)
                if write_mode == 'w':
                    output_writer.writeheader()
                for subject in self.__subjects_list:
                    output_dict = dict(doi=subject.doi,
                                       subject_scheme=subject.scheme,
                                       subject_name=subject.name)
                    output_writer.writerow(output_dict)
        except Exception as error:
            # Narrowed from a bare ``except:`` (which also swallowed
            # SystemExit and KeyboardInterrupt) and now reports the actual
            # cause to make failures diagnosable.
            print('Something totally unexpected happened when trying to write to a Subjects CSV file')
            print('The error was: {0!r}'.format(error))
            print('The writer was setup to write to a file called {0}{1}'.format(self.output_directory_name,
                                                                                 self.output_file_name))
            if write_mode == 'a':
                print('The writer thought this file existed and was trying to append to it.')
            elif write_mode == 'w':
                print('The writer thought this was a brand new file and was trying to create it.')
            else:
                print('The writer could not determine whether or not the file existed.')
| gpl-3.0 |
kracwarlock/neon | neon/transforms/tests/test_rectified.py | 13 | 3070 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from nose.plugins.attrib import attr
import numpy as np
from neon.backends.cpu import CPU, CPUTensor
from neon.transforms.rectified import RectLin
from neon.util.testing import assert_tensor_equal
def compare_cpu_tensors(inputs, outputs, deriv=False):
    """Apply RectLin (or, if deriv=True, its derivative) on the CPU backend
    and assert the result equals *outputs*."""
    rlin = RectLin()
    be = CPU()
    temp = be.zeros(inputs.shape)
    if deriv is True:
        rlin.apply_derivative(be, CPUTensor(inputs), temp)
    else:
        rlin.apply_function(be, CPUTensor(inputs), temp)
    # result - expected must be all zeros for the tensors to match.
    be.subtract(temp, CPUTensor(outputs), temp)
    assert_tensor_equal(temp, be.zeros(inputs.shape))
def compare_cc2_tensors(inputs, outputs, deriv=False):
    """Same check as compare_cpu_tensors but on the CUDA (cc2) backend."""
    # Imported lazily so CPU-only environments can still load this module.
    from neon.backends.cc2 import GPU, GPUTensor
    rlin = RectLin()
    be = GPU()
    temp = be.zeros(inputs.shape)
    if deriv is True:
        rlin.apply_derivative(be, GPUTensor(inputs), temp)
    else:
        rlin.apply_function(be, GPUTensor(inputs), temp)
    be.subtract(temp, GPUTensor(outputs), temp)
    assert_tensor_equal(temp, be.zeros(inputs.shape))
def test_rectlin_positives():
    """RectLin is the identity for positive inputs."""
    inputs = np.array([1, 3, 2])
    outputs = np.array([1, 3, 2])
    compare_cpu_tensors(inputs, outputs)
def test_rectlin_negatives():
    """RectLin clamps negative inputs to zero."""
    inputs = np.array([[-1, -3], [-2, -4]])
    outputs = np.array([[0, 0], [0, 0]])
    compare_cpu_tensors(inputs, outputs)
def test_rectlin_mixed():
    """RectLin keeps non-negatives and zeros out negatives elementwise."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[4, 0], [0, 9]])
    compare_cpu_tensors(inputs, outputs)
@attr('cuda')
def test_rectlin_cc2tensor():
    """RectLin forward pass matches expectations on the CUDA backend."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[4, 0], [0, 9]])
    compare_cc2_tensors(inputs, outputs)
def test_rectlin_derivative_positives():
    """RectLin derivative is 1 for positive inputs."""
    inputs = np.array([1, 3, 2])
    outputs = np.array([1, 1, 1])
    compare_cpu_tensors(inputs, outputs, deriv=True)
def test_rectlin_derivative_negatives():
    """RectLin derivative is 0 for negative inputs."""
    inputs = np.array([[-1, -3], [-2, -4]])
    outputs = np.array([[0, 0], [0, 0]])
    compare_cpu_tensors(inputs, outputs, deriv=True)
def test_rectlin_derivative_mixed():
    """RectLin derivative is elementwise 1 for positives, 0 otherwise."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[1, 0], [0, 1]])
    compare_cpu_tensors(inputs, outputs, deriv=True)
@attr('cuda')
def test_rectlin_derivative_cc2tensor():
    """RectLin derivative matches expectations on the CUDA backend."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[1, 0], [0, 1]])
    compare_cc2_tensors(inputs, outputs, deriv=True)
| apache-2.0 |
Eigenlabs/EigenD | tools/packages/SCons/Tool/sunar.py | 2 | 2551 | """engine.SCons.Tool.sunar
Tool-specific initialization for Solaris (Forte) ar (library archive). If CC
exists, static libraries should be built with it, so that template
instantians can be resolved.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunar.py 4577 2009/12/27 19:43:56 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
    """Add Builders and construction variables for ar to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)
    if env.Detect('CC'):
        # Prefer the Sun/Forte C++ compiler driver: "CC -xar" archives and
        # resolves C++ template instantiations at the same time.
        env['AR'] = 'CC'
        env['ARFLAGS'] = SCons.Util.CLVar('-xar')
        env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
    else:
        # Fall back to the plain POSIX archiver.
        env['AR'] = 'ar'
        env['ARFLAGS'] = SCons.Util.CLVar('r')
        env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
    # Shared libraries go through the normal link driver with Solaris' -G.
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
    env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBPREFIX'] = 'lib'
    env['LIBSUFFIX'] = '.a'
def exists(env):
    """Detect a usable archiver: prefer the Sun CC driver, else plain ar."""
    cc_path = env.Detect('CC')
    if cc_path:
        return cc_path
    return env.Detect('ar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
PythonCharmers/bokeh | examples/app/sliders_applet/sliders_app.py | 43 | 4913 | """
This file demonstrates a bokeh applet, which can be viewed directly
on a bokeh-server. See the README.md file in this directory for
instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import numpy as np
from bokeh.plotting import figure
from bokeh.models import Plot, ColumnDataSource
from bokeh.properties import Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import HBox, Slider, TextInput, VBoxForm
class SlidersApp(HBox):
    """An example of a browser-based, interactive plot with slider controls."""
    extra_generated_classes = [["SlidersApp", "SlidersApp", "HBox"]]
    # Layout container holding all the input widgets.
    inputs = Instance(VBoxForm)
    # Widgets controlling the rendered sine wave: a*sin(k*x + w) + b.
    text = Instance(TextInput)
    offset = Instance(Slider)
    amplitude = Instance(Slider)
    phase = Instance(Slider)
    freq = Instance(Slider)
    # The figure and the data source backing its line glyph.
    plot = Instance(Plot)
    source = Instance(ColumnDataSource)
    @classmethod
    def create(cls):
        """One-time creation of app's objects.
        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        obj = cls()
        obj.source = ColumnDataSource(data=dict(x=[], y=[]))
        obj.text = TextInput(
            title="title", name='title', value='my sine wave'
        )
        obj.offset = Slider(
            title="offset", name='offset',
            value=0.0, start=-5.0, end=5.0, step=0.1
        )
        obj.amplitude = Slider(
            title="amplitude", name='amplitude',
            value=1.0, start=-5.0, end=5.0
        )
        obj.phase = Slider(
            title="phase", name='phase',
            value=0.0, start=0.0, end=2*np.pi
        )
        obj.freq = Slider(
            title="frequency", name='frequency',
            value=1.0, start=0.1, end=5.1
        )
        toolset = "crosshair,pan,reset,resize,save,wheel_zoom"
        # Generate a figure container
        plot = figure(title_text_font_size="12pt",
                      plot_height=400,
                      plot_width=400,
                      tools=toolset,
                      title=obj.text.value,
                      x_range=[0, 4*np.pi],
                      y_range=[-2.5, 2.5]
        )
        # Plot the line by the x,y values in the source property
        plot.line('x', 'y', source=obj.source,
                  line_width=3,
                  line_alpha=0.6
        )
        obj.plot = plot
        # Populate the (currently empty) data source before first render.
        obj.update_data()
        obj.inputs = VBoxForm(
            children=[
                obj.text, obj.offset, obj.amplitude, obj.phase, obj.freq
            ]
        )
        obj.children.append(obj.inputs)
        obj.children.append(obj.plot)
        return obj
    def setup_events(self):
        """Attaches the on_change event to the value property of the widget.
        The callback is set to the input_change method of this app.
        """
        super(SlidersApp, self).setup_events()
        # During the first construction pass the widgets do not exist yet.
        if not self.text:
            return
        # Text box event registration
        self.text.on_change('value', self, 'input_change')
        # Slider event registration
        for w in ["offset", "amplitude", "phase", "freq"]:
            getattr(self, w).on_change('value', self, 'input_change')
    def input_change(self, obj, attrname, old, new):
        """Executes whenever the input form changes.
        It is responsible for updating the plot, or anything else you want.
        Args:
            obj : the object that changed
            attrname : the attr that changed
            old : old value of attr
            new : new value of attr
        """
        self.update_data()
        # Keep the plot title in sync with the text box.
        self.plot.title = self.text.value
    def update_data(self):
        """Called each time that any watched property changes.
        This updates the sin wave data with the most recent values of the
        sliders. This is stored as two numpy arrays in a dict into the app's
        data source property.
        """
        N = 200
        # Get the current slider values
        a = self.amplitude.value
        b = self.offset.value
        w = self.phase.value
        k = self.freq.value
        # Generate the sine wave
        x = np.linspace(0, 4*np.pi, N)
        y = a*np.sin(k*x + w) + b
        logging.debug(
            "PARAMS: offset: %s amplitude: %s", self.offset.value,
            self.amplitude.value
        )
        # Assigning to .data pushes the new values to connected clients.
        self.source.data = dict(x=x, y=y)
# The following code adds a "/bokeh/sliders/" url to the bokeh-server. This
# URL will render this sine wave sliders app. If you don't want to serve this
# applet from a Bokeh server (for instance if you are embedding in a separate
# Flask application), then just remove this block of code.
@bokeh_app.route("/bokeh/sliders/")
@object_page("sin")
def make_sliders():
    """Build and return the sliders applet served at /bokeh/sliders/."""
    return SlidersApp.create()
| bsd-3-clause |
talhajaved/nyuadmarket | flask/lib/python2.7/site-packages/whoosh/legacy.py | 95 | 3459 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains code for maintaining backwards compatibility with old
index formats.
"""
from whoosh.util.loading import RenamingUnpickler
def load_110_toc(stream, gen, schema, version):
    """Load a table-of-contents stream written by index format version -110.

    Between version -110 and version -111 the modules were reorganized and
    the implementation of the NUMERIC field changed, so when an old on-disk
    schema has to be read, the unpickler must remap the old class/function
    paths to their current locations.

    :param stream: the structured stream, positioned at the pickled schema.
    :param gen: the generation number the caller expects the TOC to have.
    :param schema: an optional schema object; when given, the pickled schema
        on disk is skipped and this one is used instead.
    :param version: the TOC version number (dispatch happens via
        ``toc_loaders``; not used inside this function).
    :returns: a ``(schema, segments)`` tuple.
    """
    # The pickled schema is preceded by its length in bytes
    pickled_size = stream.read_varint()
    if schema:
        # The caller supplied a schema: skip over the pickled one on disk
        stream.seek(pickled_size, 1)
    else:
        # Module-path shortcuts used by the remapping table below
        shortcuts = {"wf": "whoosh.fields",
                     "wsn": "whoosh.support.numeric",
                     "wcw2": "whoosh.codec.whoosh2"}
        # Old dotted path -> new dotted path for the moved classes/functions
        remap = {"%(wf)s.NUMERIC": "%(wcw2)s.OLD_NUMERIC",
                 "%(wf)s.DATETIME": "%(wcw2)s.OLD_DATETIME"}
        for func_name in ("int_to_text", "text_to_int",
                          "long_to_text", "text_to_long",
                          "float_to_text", "text_to_float"):
            remap["%(wsn)s." + func_name] = "%(wcw2)s." + func_name
        unpickler = RenamingUnpickler(stream, remap, shortcuts=shortcuts)
        schema = unpickler.load()
    # The generation recorded in the TOC must match the expected one
    index_gen = stream.read_int()
    assert gen == index_gen
    # Skip an unused/reserved number
    stream.read_int()
    # The remainder of the stream is the pickled list of segment objects
    segments = stream.read_pickle()
    return schema, segments
# Map TOC version numbers to the function able to load that version's TOC.
toc_loaders = {-110: load_110_toc}
# Map segment class names to functions to load the segment
# (no legacy segment formats are registered here).
segment_loaders = {}
| mit |
evaschalde/odoo | addons/account_test/report/account_test_report.py | 194 | 3819 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.tools.safe_eval import safe_eval as eval
#
# Use period and Journal for selection or resources
#
class report_assert_account(report_sxw.rml_parse):
    """RML report parser that runs the stored accounting-test snippets.

    Exposes ``execute_code`` (plus ``time``/``datetime``) to the report
    template so each account test's Python code can be evaluated and its
    outcome rendered as a list of strings.
    """

    def __init__(self, cr, uid, name, context):
        super(report_assert_account, self).__init__(cr, uid, name, context=context)
        # Make these helpers available inside the report template context.
        self.localcontext.update( {
            'time': time,
            'datetime': datetime,
            'execute_code': self.execute_code,
        })

    def execute_code(self, code_exec):
        """Evaluate one test's Python code and return its outcome.

        :param code_exec: Python source stored on the account-test record;
            it must leave its outcome in a variable named ``result`` and may
            optionally set ``column_order`` to control dict formatting.
        :returns: a list of translated strings — a single "passed" message
            when ``result`` is empty, otherwise one formatted line per item.
        """
        def reconciled_inv():
            """
            returns the list of invoices that are set as reconciled = True
            """
            return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)])

        def order_columns(item, cols=None):
            """
            This function is used to display a dictionary as a string, with its columns in the order chosen.

            :param item: dict
            :param cols: list of field names
            :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the
                returned values are following the order given by cols
            :rtype: [(key, value)]
            """
            if cols is None:
                cols = item.keys()
            return [(col, item.get(col)) for col in cols if col in item.keys()]

        # Names visible to the evaluated test code.
        localdict = {
            'cr': self.cr,
            'uid': self.uid,
            'reconciled_inv': reconciled_inv,  # specific function used in different tests
            'result': None,  # used to store the result of the test
            'column_order': None,  # used to choose the display order of columns (in case you are returning a list of dict)
        }
        # nocopy=True makes safe_eval mutate localdict in place, so the
        # snippet's assignments to 'result'/'column_order' are read back below.
        eval(code_exec, localdict, mode="exec", nocopy=True)
        result = localdict['result']
        column_order = localdict.get('column_order', None)
        # Normalize a scalar outcome to a list so the template can iterate.
        if not isinstance(result, (tuple, list, set)):
            result = [result]
        if not result:
            result = [_('The test was passed successfully')]
        else:
            def _format(item):
                # Dicts are flattened to "key: value" pairs in the chosen order.
                if isinstance(item, dict):
                    return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)])
                else:
                    return item
            result = [_(_format(rec)) for rec in result]
        return result
class report_accounttest(osv.AbstractModel):
    """Abstract report model binding the account_test template to its parser.

    Registers ``report_assert_account`` as the parser wrapped around the
    ``account_test.report_accounttest`` template.
    """
    _name = 'report.account_test.report_accounttest'
    _inherit = 'report.abstract_report'
    _template = 'account_test.report_accounttest'
    _wrapped_report_class = report_assert_account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Haleyo/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_sort_k_test.py | 13 | 6107 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test interface functionality of frame.sort"""
import unittest
from sparktkregtests.lib import sparktk_test
# Related bugs:
# @DPNG-9405 - multiple params with different ascending/descending values
# @DPNG-9407 - tuples
class FrameSortTest(sparktk_test.SparkTKTestCase):
    """Exercises frame.sorted_k: single/multi-column ordering and bad input.

    Column indexes used throughout (from the schema built in setUp):
    0 = age, 3 = weight, 4 = hair_type.
    """

    def setUp(self):
        """Build the test frame from the dogs.csv dataset."""
        super(FrameSortTest, self).setUp()
        dataset = self.get_file("dogs.csv")
        schema = [("age", int),
                  ("name", str),
                  ("owner", str),
                  ("weight", int),
                  ("hair_type", str)]
        self.frame = self.context.frame.import_csv(dataset, schema=schema, header=True)

    def test_frame_sortedk_col_single_descending(self):
        """ Test single-column sorting descending"""
        topk_frame = self.frame.sorted_k(5, [("weight", False)])
        down_take = topk_frame.take(20)
        # Each weight must be >= the next one.
        for i in range(0, len(down_take) - 1):
            self.assertGreaterEqual(
                down_take[i][3], down_take[i + 1][3])

    def test_frame_sortedk_col_single_ascending(self):
        """ Test single-column sorting ascending"""
        topk_frame = self.frame.sorted_k(5, [("weight", True)])
        up_take_expl = topk_frame.take(20)
        # Each weight must be <= the next one.
        for i in range(0, len(up_take_expl) - 1):
            self.assertLessEqual(
                up_take_expl[i][3], up_take_expl[i+1][3])

    def test_frame_sortedk_col_multiple_ascending(self):
        """ Test multiple-column sorting, ascending"""
        topk_frame = self.frame.sorted_k(
            5, [("weight", True), ("hair_type", True)])
        up_take = topk_frame.take(20)
        for i in range(0, len(up_take) - 1):
            # If 1st sort key is equal, compare the 2nd
            if up_take[i][3] == up_take[i + 1][3]:
                self.assertLessEqual(up_take[i][4],
                                     up_take[i + 1][4])
            else:
                self.assertLessEqual(
                    up_take[i][3], up_take[i + 1][3])

    def test_frame_sortedk_col_multiple_descending(self):
        """ Test multiple-column sorting, descending"""
        topk_frame = self.frame.sorted_k(
            5, [("weight", False), ("hair_type", False)])
        down_take = topk_frame.take(20)
        for i in range(0, len(down_take) - 1):
            # If 1st sort key is equal, compare the 2nd
            if down_take[i][3] == down_take[i + 1][3]:
                self.assertGreaterEqual(
                    down_take[i][4], down_take[i + 1][4])
            else:
                self.assertGreaterEqual(
                    down_take[i][3], down_take[i + 1][3])

    def test_frame_sortedk_col_multiple_mixed(self):
        """ Test multiple-column sorting, mixed ascending/descending"""
        # age descending, then hair_type ascending, then weight ascending.
        topk_frame = self.frame.sorted_k(
            5, [("age", False), ("hair_type", True), ("weight", True)])
        mixed_take = topk_frame.take(20)
        for i in range(0, len(mixed_take) - 1):
            # If 1st sort key is equal, compare the 2nd
            if mixed_take[i][0] == mixed_take[i + 1][0]:
                # If 2nd sort key is also equal, compare the 3rd
                if mixed_take[i][4] == mixed_take[i + 1][4]:
                    self.assertLessEqual(
                        mixed_take[i][3], mixed_take[i + 1][3])
                else:
                    self.assertLessEqual(
                        mixed_take[i][4], mixed_take[i + 1][4])
            else:
                self.assertGreaterEqual(
                    mixed_take[i][0], mixed_take[i + 1][0])

    def test_frame_sortedk_bad_k(self):
        """Test sortedk with a bad type of k"""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.frame.sorted_k("5", [("weight", False)])

    def test_frame_sortedk_negative_k(self):
        """Test sortedk with a negative k value"""
        with self.assertRaisesRegexp(Exception, "k should be greater than zero"):
            self.frame.sorted_k(-1, [("weight", False)])

    def test_frame_sortedk_k_0(self):
        """Test sorted k with k equal to 0"""
        with self.assertRaisesRegexp(Exception, "k should be greater than zero"):
            self.frame.sorted_k(0, [("weight", False)])

    def test_frame_sortedk_bad_depth(self):
        """Test sorted k with a tree depth type error"""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.frame.sorted_k(5, [("weight", False)], reduce_tree_depth="5")

    def test_frame_sortedk_negative_depth(self):
        """Test sortedk with a negative depth"""
        with self.assertRaisesRegexp(Exception, "Depth of reduce tree"):
            self.frame.sorted_k(5, [("weight", False)], reduce_tree_depth=-1)

    def test_frame_sorted_k_0_depth(self):
        """test sorted k with a depth of 0"""
        with self.assertRaisesRegexp(Exception, "Depth of reduce tree"):
            self.frame.sorted_k(5, [("weight", False)], reduce_tree_depth=0)

    def test_frame_sortedk_bad_column(self):
        """Test sorted k errors on bad column"""
        with self.assertRaisesRegexp(Exception, "Invalid column name"):
            self.frame.sorted_k(5, [('no-such-column', True)])

    def test_frame_sort_typerror(self):
        """Test sort with no arguments raises a type error"""
        with self.assertRaisesRegexp(TypeError, "2 arguments"):
            self.frame.sort()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.