repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
yandexdataschool/Practical_RL | week03_model_free/crawler_and_pacman/seminar_py3/layout.py | 1 | 5690 | # layout.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from util import manhattanDistance
from game import Grid
import os
import random
from functools import reduce
VISIBILITY_MATRIX_CACHE = {}
class Layout:
    """
    A Layout manages the static information about the game board.
    """

    def __init__(self, layoutText):
        # layoutText is a list of equal-length strings, one per board row,
        # top row first (see processLayoutText for the character legend).
        self.width = len(layoutText[0])
        self.height = len(layoutText)
        self.walls = Grid(self.width, self.height, False)
        self.food = Grid(self.width, self.height, False)
        self.capsules = []
        self.agentPositions = []
        self.numGhosts = 0
        self.processLayoutText(layoutText)
        self.layoutText = layoutText
        # self.initializeVisibilityMatrix()

    def getNumGhosts(self):
        """Return how many ghost agents appear in the layout."""
        return self.numGhosts

    def initializeVisibilityMatrix(self):
        """Precompute, for each non-wall cell and compass direction, the set
        of half-step points visible from that cell until the first wall.

        Results are cached globally in VISIBILITY_MATRIX_CACHE, keyed by the
        concatenated layout text, so identical boards share one matrix.
        """
        global VISIBILITY_MATRIX_CACHE
        if reduce(str.__add__, self.layoutText) not in VISIBILITY_MATRIX_CACHE:
            from game import Directions
            vecs = [(-0.5, 0), (0.5, 0), (0, -0.5), (0, 0.5)]
            dirs = [Directions.NORTH, Directions.SOUTH,
                    Directions.WEST, Directions.EAST]
            # NOTE(review): a single dict is passed as the Grid default value;
            # if Grid stores the same reference in every cell, all cells share
            # one direction->set mapping -- confirm against Grid's constructor.
            vis = Grid(self.width, self.height, {Directions.NORTH: set(), Directions.SOUTH: set(
            ), Directions.EAST: set(), Directions.WEST: set(), Directions.STOP: set()})
            for x in range(self.width):
                for y in range(self.height):
                    if not self.walls[x][y]:
                        for vec, direction in zip(vecs, dirs):
                            dx, dy = vec
                            nextx, nexty = x + dx, y + dy
                            while (nextx + nexty) != int(nextx) + int(nexty) or not self.walls[int(nextx)][int(nexty)]:
                                vis[x][y][direction].add((nextx, nexty))
                                # BUG FIX: advance along the ray.  The original
                                # re-assigned x + dx, y + dy here, which never
                                # moves past the first point and loops forever.
                                nextx, nexty = nextx + dx, nexty + dy
            self.visibility = vis
            VISIBILITY_MATRIX_CACHE[reduce(str.__add__, self.layoutText)] = vis
        else:
            self.visibility = VISIBILITY_MATRIX_CACHE[reduce(
                str.__add__, self.layoutText)]

    def isWall(self, pos):
        """Return True if the (x, y) grid position *pos* is a wall."""
        x, y = pos
        return self.walls[x][y]

    def getRandomLegalPosition(self):
        """Return a uniformly random non-wall (x, y) position (rejection
        sampling over the whole grid)."""
        x = random.randrange(self.width)
        y = random.randrange(self.height)
        while self.isWall((x, y)):
            x = random.randrange(self.width)
            y = random.randrange(self.height)
        return (x, y)

    def getRandomCorner(self):
        """Return one of the four inner corners, chosen uniformly."""
        poses = [(1, 1), (1, self.height - 2), (self.width - 2, 1),
                 (self.width - 2, self.height - 2)]
        return random.choice(poses)

    def getFurthestCorner(self, pacPos):
        """Return the inner corner with the largest Manhattan distance
        from *pacPos*."""
        poses = [(1, 1), (1, self.height - 2), (self.width - 2, 1),
                 (self.width - 2, self.height - 2)]
        dist, pos = max([(manhattanDistance(p, pacPos), p) for p in poses])
        return pos

    def isVisibleFrom(self, ghostPos, pacPos, pacDirection):
        """Return True if *ghostPos* is in the precomputed visibility set for
        *pacPos* facing *pacDirection*.

        Requires initializeVisibilityMatrix() to have populated
        self.visibility first.
        """
        row, col = [int(x) for x in pacPos]
        return ghostPos in self.visibility[row][col][pacDirection]

    def __str__(self):
        return "\n".join(self.layoutText)

    def deepCopy(self):
        """Return a fresh Layout rebuilt from a copy of the layout text."""
        return Layout(self.layoutText[:])

    def processLayoutText(self, layoutText):
        """
        Coordinates are flipped from the input format to the (x,y) convention here
        The shape of the maze.  Each character
        represents a different type of object.
         % - Wall
         . - Food
         o - Capsule
         G - Ghost
         P - Pacman
        Other characters are ignored.
        """
        # Row 0 of the text is the TOP of the board, so flip the y axis.
        maxY = self.height - 1
        for y in range(self.height):
            for x in range(self.width):
                layoutChar = layoutText[maxY - y][x]
                self.processLayoutChar(x, y, layoutChar)
        self.agentPositions.sort()
        # Pacman was appended with key 0, so after sorting the boolean flag
        # marks "is Pacman" on the first entry.
        self.agentPositions = [(i == 0, pos) for i, pos in self.agentPositions]

    def processLayoutChar(self, x, y, layoutChar):
        """Record the object encoded by *layoutChar* at grid cell (x, y)."""
        if layoutChar == '%':
            self.walls[x][y] = True
        elif layoutChar == '.':
            self.food[x][y] = True
        elif layoutChar == 'o':
            self.capsules.append((x, y))
        elif layoutChar == 'P':
            self.agentPositions.append((0, (x, y)))
        elif layoutChar == 'G':
            self.agentPositions.append((1, (x, y)))
            self.numGhosts += 1
        elif layoutChar in ['1', '2', '3', '4']:
            # Numbered ghosts keep their index for deterministic ordering.
            self.agentPositions.append((int(layoutChar), (x, y)))
            self.numGhosts += 1
def getLayout(name, back=2):
    """Locate and load a layout called *name*.

    Tries the layouts/ subdirectory first, then the bare name, appending
    '.lay' when the caller omitted it.  If nothing is found, retries from
    the parent directory up to *back* more levels before giving up.
    """
    if name.endswith('.lay'):
        candidates = ['layouts/' + name, name]
    else:
        candidates = ['layouts/' + name + '.lay', name + '.lay']

    layout = None
    for fullname in candidates:
        layout = tryToLoad(fullname)
        if layout is not None:
            break

    if layout is None and back >= 0:
        # Not found here: look one directory up, then restore the cwd.
        curdir = os.path.abspath('.')
        os.chdir('..')
        layout = getLayout(name, back - 1)
        os.chdir(curdir)
    return layout
def tryToLoad(fullname):
    """Load a Layout from the file *fullname*, or return None if the file
    does not exist."""
    if not os.path.exists(fullname):
        return None
    # The context manager guarantees the handle is closed even if Layout
    # construction raises (replaces the manual try/finally close).
    with open(fullname) as f:
        return Layout([line.strip() for line in f])
| unlicense |
nathanaevitas/odoo | openerp/addons/sale/edi/__init__.py | 454 | 1065 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arjunbm13/youtube-dl | youtube_dl/extractor/nba.py | 101 | 2338 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
remove_end,
parse_duration,
)
class NBAIE(InfoExtractor):
    _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
    _TESTS = [{
        'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
        'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
        'info_dict': {
            'id': '0021200253-okc-bkn-recap.nba',
            'ext': 'mp4',
            'title': 'Thunder vs. Nets',
            'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
            'duration': 181,
        },
    }, {
        'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
        'only_matching': True,
    }, {
        'url': 'http://watch.nba.com/nba/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
        'info_dict': {
            'id': '0041400301-cle-atl-recap.nba',
            'ext': 'mp4',
            'title': 'NBA GAME TIME | Video: Hawks vs. Cavaliers Game 1',
            'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d',
            'duration': 228,
        },
        'params': {
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The CDN URL is built directly from the path component of the page
        # URL; the display id is its last path segment.
        video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
        shortened_video_id = video_id.rpartition('/')[2]

        title = remove_end(
            self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com')
        description = self._og_search_description(webpage)

        # Prefer the duration meta tag; fall back to scraping the visible
        # "Duration:" label when the tag is absent.
        duration_text = self._html_search_meta(
            'duration', webpage, 'duration', default=None)
        if not duration_text:
            duration_text = self._html_search_regex(
                r'Duration:</b>\s*(\d+:\d+)', webpage, 'duration', fatal=False)

        return {
            'id': shortened_video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'duration': parse_duration(duration_text),
        }
| unlicense |
donspaulding/adspygoogle | examples/adspygoogle/dfp/v201211/get_user.py | 4 | 1504 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a user by its id. To create users, run
create_user.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))

# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201211')

# Set the id of the user to get.  Replace the placeholder with a real user
# id before running (ids are printed by create_user.py).
user_id = 'INSERT_USER_ID_HERE'

# Get user.  GetUser returns a sequence; the single matching record is at
# index 0.
user = user_service.GetUser(user_id)[0]

# Display results.
print ('User with id \'%s\', email \'%s\', and role \'%s\' was found.'
       % (user['id'], user['email'], user['roleName']))
| apache-2.0 |
seadsystem/website | web2py/scripts/tickets2slack.py | 10 | 2321 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Post error tickets to slack on a 5 minute schedule.
#
# Proper use depends on having created a web-hook through Slack, and having set
# that value in your app's model as the value of global_settings.slack_hook.
# Details on creating web-hooks can be found at https://slack.com/integrations
#
# requires the Requests module for posting to slack, other requirements are
# standard or provided by web2py
#
# Usage (on Unices), replace myapp with the name of your application and run:
# nohup python web2py.py -S myapp -M -R scripts/tickets2slack.py &
import sys
import os
import time
import pickle
import json
try:
import requests
except ImportError as e:
print "missing module 'Requests', aborting."
sys.exit(1)
from gluon import URL
from gluon.utils import md5_hash
from gluon.restricted import RestrictedError
from gluon.settings import global_settings
path = os.path.join(request.folder, 'errors')
sent_errors_file = os.path.join(path, 'slack_errors.pickle')
hashes = {}
if os.path.exists(sent_errors_file):
try:
with open(sent_errors_file, 'rb') as f:
hashes = pickle.load(f)
except Exception as _:
pass
# ## CONFIGURE HERE
SLEEP_MINUTES = 5
ALLOW_DUPLICATES = False
global_settings.slack_hook = global_settings.slack_hook or \
'https://hooks.slack.com/services/your_service'
# ## END CONFIGURATION
while 1:
for file_name in os.listdir(path):
if file_name == 'slack_errors.pickle':
continue
if not ALLOW_DUPLICATES:
key = md5_hash(file_name)
if key in hashes:
continue
hashes[key] = 1
error = RestrictedError()
try:
error.load(request, request.application, file_name)
except Exception as _:
continue # not an exception file?
url = URL(a='admin', f='ticket', args=[request.application, file],
scheme=True)
payload = json.dumps(dict(text="Error in %(app)s.\n%(url)s" %
dict(app=request.application, url=url)))
requests.post(global_settings.slack_hook, data=dict(payload=payload))
with open(sent_errors_file, 'wb') as f:
pickle.dump(hashes, f)
time.sleep(SLEEP_MINUTES * 60)
| mit |
SlateScience/MozillaJS | js/src/gdb/taskpool.py | 9 | 8808 | import fcntl, os, select, time
from subprocess import Popen, PIPE
# Run a series of subprocesses. Try to keep up to a certain number going in
# parallel at any given time. Enforce time limits.
#
# This is implemented using non-blocking I/O, and so is Unix-specific.
#
# We assume that, if a task closes its standard error, then it's safe to
# wait for it to terminate. So an ill-behaved task that closes its standard
# output and then hangs will hang us, as well. However, as it takes special
# effort to close one's standard output, this seems unlikely to be a
# problem in practice.
class TaskPool(object):
    """Run a collection of Task subprocesses.

    Keeps up to |job_limit| subprocesses alive at once, delivers their
    stdout/stderr to per-task callbacks, and kills any task that outlives
    |timeout| seconds.  Built on non-blocking pipes and select(), so this
    is Unix-only (see the module header comment).
    """

    # A task we should run in a subprocess. Users should subclass this and
    # fill in the methods as given.
    class Task(object):
        def __init__(self):
            # Popen object while running; None until start() is called.
            self.pipe = None
            # NOTE(review): start_time is initialized but never read in this
            # file; the deadline attribute set by start() tracks the timeout.
            self.start_time = None

        # Record that this task is running, with |pipe| as its Popen object,
        # and should time out at |deadline|.
        def start(self, pipe, deadline):
            self.pipe = pipe
            self.deadline = deadline

        # Return a shell command (a string or sequence of arguments) to be
        # passed to Popen to run the task. The command will be given
        # /dev/null as its standard input, and pipes as its standard output
        # and error.
        def cmd(self):
            raise NotImplementedError

        # TaskPool calls this method to report that the process wrote
        # |string| to its standard output.
        def onStdout(self, string):
            raise NotImplementedError

        # TaskPool calls this method to report that the process wrote
        # |string| to its standard error.
        def onStderr(self, string):
            raise NotImplementedError

        # TaskPool calls this method to report that the process terminated,
        # yielding |returncode|.
        def onFinished(self, returncode):
            raise NotImplementedError

        # TaskPool calls this method to report that the process timed out and
        # was killed.
        def onTimeout(self):
            raise NotImplementedError

    # If a task output handler (onStdout, onStderr) throws this, we terminate
    # the task.
    class TerminateTask(Exception):
        pass

    def __init__(self, tasks, cwd='.', job_limit=4, timeout=150):
        # Iterator over the tasks that have not been started yet.
        self.pending = iter(tasks)
        # Working directory for every spawned subprocess.
        self.cwd = cwd
        # Maximum number of concurrently running subprocesses.
        self.job_limit = job_limit
        # Per-task wall-clock limit, in seconds.
        self.timeout = timeout
        self.next_pending = self.get_next_pending()

    # Set self.next_pending to the next task that has not yet been executed.
    def get_next_pending(self):
        try:
            return self.pending.next()
        except StopIteration:
            return None

    def run_all(self):
        """Run every pending task to completion (or timeout); returns None."""
        # The currently running tasks: a set of Task instances.
        running = set()
        with open(os.devnull, 'r') as devnull:
            while True:
                # Top up the pool: spawn tasks until it is full or the
                # pending iterator is exhausted.
                while len(running) < self.job_limit and self.next_pending:
                    t = self.next_pending
                    p = Popen(t.cmd(), bufsize=16384,
                              stdin=devnull, stdout=PIPE, stderr=PIPE,
                              cwd=self.cwd)

                    # Put the stdout and stderr pipes in non-blocking mode. See
                    # the post-'select' code below for details.
                    flags = fcntl.fcntl(p.stdout, fcntl.F_GETFL)
                    fcntl.fcntl(p.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
                    flags = fcntl.fcntl(p.stderr, fcntl.F_GETFL)
                    fcntl.fcntl(p.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)

                    t.start(p, time.time() + self.timeout)
                    running.add(t)
                    self.next_pending = self.get_next_pending()

                # If we have no tasks running, and the above wasn't able to
                # start any new ones, then we must be done!
                if not running:
                    break

                # How many seconds do we have until the earliest deadline?
                now = time.time()
                secs_to_next_deadline = max(min([t.deadline for t in running]) - now, 0)

                # Wait for output or a timeout.
                stdouts_and_stderrs = ([t.pipe.stdout for t in running]
                                       + [t.pipe.stderr for t in running])
                (readable, w, x) = select.select(stdouts_and_stderrs, [], [], secs_to_next_deadline)
                finished = set()
                terminate = set()
                for t in running:
                    # Since we've placed the pipes in non-blocking mode, these
                    # 'read's will simply return as many bytes as are available,
                    # rather than blocking until they have accumulated the full
                    # amount requested (or reached EOF). The 'read's should
                    # never throw, since 'select' has told us there was
                    # something available.
                    if t.pipe.stdout in readable:
                        output = t.pipe.stdout.read(16384)
                        if output != "":
                            try:
                                t.onStdout(output)
                            except TerminateTask:
                                terminate.add(t)
                    if t.pipe.stderr in readable:
                        output = t.pipe.stderr.read(16384)
                        if output != "":
                            try:
                                t.onStderr(output)
                            except TerminateTask:
                                terminate.add(t)
                        else:
                            # We assume that, once a task has closed its stderr,
                            # it will soon terminate. If a task closes its
                            # stderr and then hangs, we'll hang too, here.
                            t.pipe.wait()
                            t.onFinished(t.pipe.returncode)
                            finished.add(t)

                # Remove the finished tasks from the running set. (Do this here
                # to avoid mutating the set while iterating over it.)
                running -= finished

                # Terminate any tasks whose handlers have asked us to do so.
                for t in terminate:
                    t.pipe.terminate()
                    t.pipe.wait()
                    running.remove(t)

                # Terminate any tasks which have missed their deadline.
                finished = set()
                for t in running:
                    if now >= t.deadline:
                        t.pipe.terminate()
                        t.pipe.wait()
                        t.onTimeout()
                        finished.add(t)

                # Remove the finished tasks from the running set. (Do this here
                # to avoid mutating the set while iterating over it.)
                running -= finished
        return None
def get_cpu_count():
    """
    Guess at a reasonable parallelism count to set as the default for the
    current machine and run.
    """
    # Preferred source: the multiprocessing module (Python 2.6+).
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass

    # Fall back to the POSIX sysconf interface.
    try:
        n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if n > 0:
            return n
    except (AttributeError, ValueError):
        pass

    # Fall back to the Windows environment variable.
    try:
        n = int(os.environ['NUMBER_OF_PROCESSORS'])
        if n > 0:
            return n
    except (KeyError, ValueError):
        pass

    # Last resort: assume a single processor.
    return 1
if __name__ == '__main__':
    # Test TaskPool by using it to implement the unique 'sleep sort' algorithm.
    def sleep_sort(ns, timeout):
        # Each task sleeps for its own value, so tasks finish (and append)
        # in ascending order of n.
        sorted=[]

        class SortableTask(TaskPool.Task):
            def __init__(self, n):
                super(SortableTask, self).__init__()
                self.n = n

            def start(self, pipe, deadline):
                super(SortableTask, self).start(pipe, deadline)

            def cmd(self):
                # Emit a line on stdout, sleep n seconds, emit one on stderr
                # (closing stderr is what tells the pool the task is done).
                return ['sh', '-c', 'echo out; sleep %d; echo err>&2' % (self.n,)]

            def onStdout(self, text):
                print '%d stdout: %r' % (self.n, text)

            def onStderr(self, text):
                print '%d stderr: %r' % (self.n, text)

            def onFinished(self, returncode):
                print '%d (rc=%d)' % (self.n, returncode)
                sorted.append(self.n)

            def onTimeout(self):
                print '%d timed out' % (self.n,)

        p = TaskPool([SortableTask(_) for _ in ns], job_limit=len(ns), timeout=timeout)
        p.run_all()
        return sorted

    # With a 15-second timeout, the 21 and 34 tasks are expected to be
    # killed and reported via onTimeout rather than sorted.
    print repr(sleep_sort([1,1,2,3,5,8,13,21,34], 15))
| mpl-2.0 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/jinja2/parser.py | 89 | 35875 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import imap
#: Statement tags that are handled by a dedicated ``parse_<tag>`` method
#: on the parser (dispatched via getattr in parse_statement).
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
                                 'macro', 'include', 'from', 'import',
                                 'set', 'with', 'autoescape'])
#: Token types recognised as comparison operators by parse_compare().
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])

#: Maps arithmetic token types to the AST node classes that implement them.
_math_nodes = {
    'add': nodes.Add,
    'sub': nodes.Sub,
    'mul': nodes.Mul,
    'div': nodes.Div,
    'floordiv': nodes.FloorDiv,
    'mod': nodes.Mod,
}
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(('name:endset',),
drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test('name:else'):
result.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
break
return result
def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
while self.stream.current.type != 'block_end':
lineno = self.stream.current.lineno
if targets:
self.stream.expect('comma')
target = self.parse_assign_target()
target.set_ctx('param')
targets.append(target)
self.stream.expect('assign')
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(('name:endwith',),
drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [
nodes.Keyword('autoescape', self.parse_expression())
]
node.body = self.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
self.stream.expect('name')
if not hasattr(node, 'with_context'):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
self.fail('non-default argument follows default argument')
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None, with_namespace=False):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
if with_namespace and self.stream.look().type == 'dot':
token = self.stream.expect('name')
next(self.stream) # dot
attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
    def parse_compare(self):
        """Parse chained comparisons (``a < b <= c``, ``in``,
        ``not in``) into a single :class:`nodes.Compare` carrying an
        operand list; plain expressions pass through unchanged.
        """
        lineno = self.stream.current.lineno
        expr = self.parse_math1()
        ops = []
        while 1:
            token_type = self.stream.current.type
            if token_type in _compare_operators:
                next(self.stream)
                ops.append(nodes.Operand(token_type, self.parse_math1()))
            elif self.stream.skip_if('name:in'):
                ops.append(nodes.Operand('in', self.parse_math1()))
            elif (self.stream.current.test('name:not') and
                  self.stream.look().test('name:in')):
                # two tokens spell the single operator ``not in``
                self.stream.skip(2)
                ops.append(nodes.Operand('notin', self.parse_math1()))
            else:
                break
            lineno = self.stream.current.lineno
        if not ops:
            return expr
        return nodes.Compare(expr, ops, lineno=lineno)
    def parse_math1(self):
        """Parse the additive level: left-associative ``+`` and ``-``."""
        lineno = self.stream.current.lineno
        left = self.parse_concat()
        while self.stream.current.type in ('add', 'sub'):
            # _math_nodes maps the token type to the matching AST class
            cls = _math_nodes[self.stream.current.type]
            next(self.stream)
            right = self.parse_concat()
            left = cls(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
    def parse_math2(self):
        """Parse the multiplicative level: left-associative ``*``, ``/``,
        ``//`` and ``%``.
        """
        lineno = self.stream.current.lineno
        left = self.parse_pow()
        while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
            # _math_nodes maps the token type to the matching AST class
            cls = _math_nodes[self.stream.current.type]
            next(self.stream)
            right = self.parse_pow()
            left = cls(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
    def parse_unary(self, with_filter=True):
        """Parse unary ``-``/``+`` followed by postfix operations.
        Filters are only applied when `with_filter` is true; the
        recursive calls below pass ``False`` so the operand of a unary
        operator does not greedily consume a trailing filter.
        """
        token_type = self.stream.current.type
        lineno = self.stream.current.lineno
        if token_type == 'sub':
            next(self.stream)
            node = nodes.Neg(self.parse_unary(False), lineno=lineno)
        elif token_type == 'add':
            next(self.stream)
            node = nodes.Pos(self.parse_unary(False), lineno=lineno)
        else:
            node = self.parse_primary()
        node = self.parse_postfix(node)
        if with_filter:
            node = self.parse_filter_expr(node)
        return node
    def parse_primary(self):
        """Parse an atomic expression: a name, boolean/none constant,
        string literal, number, parenthesized tuple, list or dict.
        """
        token = self.stream.current
        if token.type == 'name':
            if token.value in ('true', 'false', 'True', 'False'):
                node = nodes.Const(token.value in ('true', 'True'),
                                   lineno=token.lineno)
            elif token.value in ('none', 'None'):
                node = nodes.Const(None, lineno=token.lineno)
            else:
                node = nodes.Name(token.value, 'load', lineno=token.lineno)
            next(self.stream)
        elif token.type == 'string':
            next(self.stream)
            buf = [token.value]
            lineno = token.lineno
            # adjacent string literals are concatenated, as in Python
            while self.stream.current.type == 'string':
                buf.append(self.stream.current.value)
                next(self.stream)
            node = nodes.Const(''.join(buf), lineno=lineno)
        elif token.type in ('integer', 'float'):
            next(self.stream)
            node = nodes.Const(token.value, lineno=token.lineno)
        elif token.type == 'lparen':
            next(self.stream)
            node = self.parse_tuple(explicit_parentheses=True)
            self.stream.expect('rparen')
        elif token.type == 'lbracket':
            node = self.parse_list()
        elif token.type == 'lbrace':
            node = self.parse_dict()
        else:
            self.fail("unexpected '%s'" % describe_token(token), token.lineno)
        return node
    def parse_tuple(self, simplified=False, with_condexpr=True,
                    extra_end_rules=None, explicit_parentheses=False):
        """Works like `parse_expression` but if multiple expressions are
        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a regular expression instead of a tuple
        if no commas were found.

        The default parsing mode is a full tuple.  If `simplified` is `True`
        only names and literals are parsed.  The `with_condexpr` parameter is
        forwarded to :meth:`parse_expression`.

        Because tuples do not require delimiters and may end in a bogus comma
        an extra hint is needed that marks the end of a tuple.  For example
        for loops support tuples between `for` and `in`.  In that case the
        `extra_end_rules` is set to ``['name:in']``.

        `explicit_parentheses` is true if the parsing was triggered by an
        expression in parentheses.  This is used to figure out if an empty
        tuple is a valid expression or not.
        """
        lineno = self.stream.current.lineno
        # choose the item parser once, based on the requested mode
        if simplified:
            parse = self.parse_primary
        elif with_condexpr:
            parse = self.parse_expression
        else:
            parse = lambda: self.parse_expression(with_condexpr=False)
        args = []
        is_tuple = False
        while 1:
            if args:
                self.stream.expect('comma')
            if self.is_tuple_end(extra_end_rules):
                break
            args.append(parse())
            if self.stream.current.type == 'comma':
                # a trailing comma forces tuple-ness even for one item
                is_tuple = True
            else:
                break
            lineno = self.stream.current.lineno
        if not is_tuple:
            if args:
                return args[0]
            # if we don't have explicit parentheses, an empty tuple is
            # not a valid expression. This would mean nothing (literally
            # nothing) in the spot of an expression would be an empty
            # tuple.
            if not explicit_parentheses:
                self.fail('Expected an expression, got \'%s\'' %
                          describe_token(self.stream.current))
        return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
    def parse_dict(self):
        """Parse a ``{key: value, ...}`` dict literal; a trailing comma
        is allowed.
        """
        token = self.stream.expect('lbrace')
        items = []
        while self.stream.current.type != 'rbrace':
            if items:
                self.stream.expect('comma')
            # stop after a trailing comma
            if self.stream.current.type == 'rbrace':
                break
            key = self.parse_expression()
            self.stream.expect('colon')
            value = self.parse_expression()
            items.append(nodes.Pair(key, value, lineno=key.lineno))
        self.stream.expect('rbrace')
        return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
    def parse_filter_expr(self, node):
        """Apply trailing filters (``|name``), tests (``is name``) and
        calls to *node* until none remain.
        """
        while 1:
            token_type = self.stream.current.type
            if token_type == 'pipe':
                node = self.parse_filter(node)
            elif token_type == 'name' and self.stream.current.value == 'is':
                node = self.parse_test(node)
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            elif token_type == 'lparen':
                node = self.parse_call(node)
            else:
                break
        return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
    def parse_subscribed(self):
        """Parse one item inside ``[...]``: either a plain expression or
        a slice with optional start/stop/step parts.
        """
        lineno = self.stream.current.lineno
        if self.stream.current.type == 'colon':
            # slice with omitted start, e.g. ``[:stop]``
            next(self.stream)
            args = [None]
        else:
            node = self.parse_expression()
            if self.stream.current.type != 'colon':
                # no colon follows: a plain index expression
                return node
            next(self.stream)
            args = [node]
        # optional stop part
        if self.stream.current.type == 'colon':
            args.append(None)
        elif self.stream.current.type not in ('rbracket', 'comma'):
            args.append(self.parse_expression())
        else:
            args.append(None)
        # optional step part (only after a second colon)
        if self.stream.current.type == 'colon':
            next(self.stream)
            if self.stream.current.type not in ('rbracket', 'comma'):
                args.append(self.parse_expression())
            else:
                args.append(None)
        else:
            args.append(None)
        return nodes.Slice(lineno=lineno, *args)
    def parse_call(self, node):
        """Parse a call argument list.  If *node* is ``None`` the raw
        ``(args, kwargs, dyn_args, dyn_kwargs)`` tuple is returned (this
        form is used by filter and test parsing); otherwise a ``Call``
        node wrapping *node* is returned.
        """
        token = self.stream.expect('lparen')
        args = []
        kwargs = []
        dyn_args = dyn_kwargs = None
        require_comma = False
        def ensure(expr):
            # an argument-ordering constraint was violated -> syntax error
            if not expr:
                self.fail('invalid syntax for function call expression',
                          token.lineno)
        while self.stream.current.type != 'rparen':
            if require_comma:
                self.stream.expect('comma')
                # support for trailing comma
                if self.stream.current.type == 'rparen':
                    break
            if self.stream.current.type == 'mul':
                # ``*args`` must come before ``**kwargs`` and appear once
                ensure(dyn_args is None and dyn_kwargs is None)
                next(self.stream)
                dyn_args = self.parse_expression()
            elif self.stream.current.type == 'pow':
                ensure(dyn_kwargs is None)
                next(self.stream)
                dyn_kwargs = self.parse_expression()
            else:
                # plain arguments may not follow * or ** arguments
                ensure(dyn_args is None and dyn_kwargs is None)
                if self.stream.current.type == 'name' and \
                    self.stream.look().type == 'assign':
                    # keyword argument ``name=value``
                    key = self.stream.current.value
                    self.stream.skip(2)
                    value = self.parse_expression()
                    kwargs.append(nodes.Keyword(key, value,
                                                lineno=value.lineno))
                else:
                    # positional arguments may not follow keyword ones
                    ensure(not kwargs)
                    args.append(self.parse_expression())
            require_comma = True
        self.stream.expect('rparen')
        if node is None:
            return args, kwargs, dyn_args, dyn_kwargs
        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
                          lineno=token.lineno)
    def parse_filter(self, node, start_inline=False):
        """Parse one or more ``|filter`` applications onto *node*.  With
        `start_inline` the first filter name is read without requiring a
        leading pipe (used by the ``{% filter %}`` tag).
        """
        while self.stream.current.type == 'pipe' or start_inline:
            if not start_inline:
                next(self.stream)
            token = self.stream.expect('name')
            name = token.value
            # dotted filter names are allowed, e.g. ``foo.bar``
            while self.stream.current.type == 'dot':
                next(self.stream)
                name += '.' + self.stream.expect('name').value
            if self.stream.current.type == 'lparen':
                args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
            else:
                args = []
                kwargs = []
                dyn_args = dyn_kwargs = None
            node = nodes.Filter(node, name, args, kwargs, dyn_args,
                                dyn_kwargs, lineno=token.lineno)
            start_inline = False
        return node
    def parse_test(self, node):
        """Parse an ``is [not] testname [arg]`` expression applied to
        *node*, returning a ``Test`` node (wrapped in ``Not`` when
        negated).
        """
        token = next(self.stream)
        if self.stream.current.test('name:not'):
            next(self.stream)
            negated = True
        else:
            negated = False
        name = self.stream.expect('name').value
        # dotted test names are allowed, e.g. ``foo.bar``
        while self.stream.current.type == 'dot':
            next(self.stream)
            name += '.' + self.stream.expect('name').value
        dyn_args = dyn_kwargs = None
        kwargs = []
        if self.stream.current.type == 'lparen':
            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
        elif (self.stream.current.type in ('name', 'string', 'integer',
                                           'float', 'lparen', 'lbracket',
                                           'lbrace') and not
              self.stream.current.test_any('name:else', 'name:or',
                                           'name:and')):
            # bare-argument form, e.g. ``foo is divisibleby 3``
            if self.stream.current.test('name:is'):
                self.fail('You cannot chain multiple tests with is')
            args = [self.parse_primary()]
        else:
            args = []
        node = nodes.Test(node, name, args, kwargs, dyn_args,
                          dyn_kwargs, lineno=token.lineno)
        if negated:
            node = nodes.Not(node, lineno=token.lineno)
        return node
    def subparse(self, end_tokens=None):
        """Parse template body statements until one of `end_tokens`
        starts a block (or the stream is exhausted).  Consecutive data
        chunks and ``{{ ... }}`` outputs are buffered and flushed as a
        single ``Output`` node.
        """
        body = []
        data_buffer = []
        add_data = data_buffer.append
        if end_tokens is not None:
            self._end_token_stack.append(end_tokens)
        def flush_data():
            # collapse the buffered output nodes into one Output statement
            if data_buffer:
                lineno = data_buffer[0].lineno
                body.append(nodes.Output(data_buffer[:], lineno=lineno))
                del data_buffer[:]
        try:
            while self.stream:
                token = self.stream.current
                if token.type == 'data':
                    if token.value:
                        add_data(nodes.TemplateData(token.value,
                                                    lineno=token.lineno))
                    next(self.stream)
                elif token.type == 'variable_begin':
                    next(self.stream)
                    add_data(self.parse_tuple(with_condexpr=True))
                    self.stream.expect('variable_end')
                elif token.type == 'block_begin':
                    flush_data()
                    next(self.stream)
                    # an end token terminates this nested body early
                    if end_tokens is not None and \
                       self.stream.current.test_any(*end_tokens):
                        return body
                    rv = self.parse_statement()
                    if isinstance(rv, list):
                        body.extend(rv)
                    else:
                        body.append(rv)
                    self.stream.expect('block_end')
                else:
                    raise AssertionError('internal parsing error')
            flush_data()
        finally:
            # pop the end-token stack even on an early return or failure
            if end_tokens is not None:
                self._end_token_stack.pop()
        return body
    def parse(self):
        """Parse the whole template into a :class:`nodes.Template` node
        bound to this parser's environment.
        """
        result = nodes.Template(self.subparse(), lineno=1)
        result.set_environment(self.environment)
        return result
| apache-2.0 |
renyi533/tensorflow | tensorflow/python/kernel_tests/as_string_op_test.py | 30 | 9324 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for as_string_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class AsStringOpTest(test.TestCase):
  """Exercises `string_ops.as_string` across float/int/bool/complex dtypes."""
  @test_util.run_deprecated_v1
  def testFloat(self):
    """Float formatting: shortest/scientific/fixed plus width, fill, precision."""
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
    ]
    with self.cached_session():
      for dtype in (dtypes.float32, dtypes.float64):
        input_ = array_ops.placeholder(dtype)
        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        s = lambda strs: [x.decode("ascii") for x in strs]
        self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])
        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])
        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])
        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])
        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])
        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])
        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])
        # invalid option combinations must be rejected by the op
        with self.assertRaisesOpError("Cannot select both"):
          output = string_ops.as_string(input_, scientific=True, shortest=True)
          output.eval(feed_dict={input_: float_inputs_})
        with self.assertRaisesOpError("Fill string must be one or fewer"):
          output = string_ops.as_string(input_, fill="ab")
          output.eval(feed_dict={input_: float_inputs_})
  @test_util.run_deprecated_v1
  def testInt(self):
    """Integer formatting plus rejection of float-only options."""
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
    s = lambda strs: [x.decode("ascii") for x in strs]
    with self.cached_session():
      for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
        input_ = array_ops.placeholder(dtype)
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])
        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])
        # shortest/scientific/precision only make sense for floats
        with self.assertRaisesOpError("scientific and shortest"):
          output = string_ops.as_string(input_, scientific=True)
          output.eval(feed_dict={input_: int_inputs_})
        with self.assertRaisesOpError("scientific and shortest"):
          output = string_ops.as_string(input_, shortest=True)
          output.eval(feed_dict={input_: int_inputs_})
        with self.assertRaisesOpError("precision not supported"):
          output = string_ops.as_string(input_, precision=0)
          output.eval(feed_dict={input_: int_inputs_})
  @test_util.run_deprecated_v1
  def testLargeInt(self):
    """int32/int64 extreme values format correctly."""
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    s = lambda strs: [x.decode("ascii") for x in strs]
    with self.cached_session():
      input_ = array_ops.placeholder(dtypes.int32)
      int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
      input_ = array_ops.placeholder(dtypes.int64)
      int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
  @test_util.run_deprecated_v1
  def testHalfInt(self):
    """int16 extreme values format correctly."""
    s = lambda strs: [x.decode("ascii") for x in strs]
    with self.cached_session():
      input_ = array_ops.placeholder(dtypes.int16)
      int_inputs_ = [np.iinfo(np.int16).min, np.iinfo(np.int16).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
  @test_util.run_deprecated_v1
  def testBool(self):
    """Booleans render as "false"/"true"."""
    bool_inputs_ = [False, True]
    s = lambda strs: [x.decode("ascii") for x in strs]
    with self.cached_session():
      for dtype in (dtypes.bool,):
        input_ = array_ops.placeholder(dtype)
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: bool_inputs_})
        self.assertAllEqual(s(result), ["false", "true"])
  @test_util.run_deprecated_v1
  def testComplex(self):
    """complex64/128 format as "(real,imag)" pairs."""
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
        complex("-INF")
    ]
    complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]
    with self.cached_session():
      for dtype in (dtypes.complex64, dtypes.complex128):
        input_ = array_ops.placeholder(dtype)
        def clean_nans(s_l):
          # the sign of NaN is platform-dependent; normalize "-nan" to "nan"
          return [s.decode("ascii").replace("-nan", "nan") for s in s_l]
        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])
        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])
        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])
        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])
        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])
        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])
        with self.assertRaisesOpError("Cannot select both"):
          output = string_ops.as_string(input_, scientific=True, shortest=True)
          output.eval(feed_dict={input_: complex_inputs_})
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
javelinanddart/Canuck | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events with no dedicated handler below
# (autodict comes from perf's Core helper module, imported above).
unhandled = autodict()
def trace_begin():
    # Invoked by perf once before any trace events are dispatched.
    print "trace_begin"
    pass
def trace_end():
    # Invoked by perf after the last event; report anything unhandled.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    vec):
    # Handler for irq:softirq_entry events: prints the common header
    # fields plus the softirq vector decoded to its symbolic name.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
    (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    call_site, ptr, bytes_req, bytes_alloc,
    gfp_flags):
    # Handler for kmem:kmalloc events: prints the common header fields
    # plus the allocation details, with gfp_flags decoded symbolically.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
    "bytes_alloc=%u, gfp_flags=%s\n" % \
    (call_site, ptr, bytes_req, bytes_alloc,
    flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  `unhandled` is an
    # autodict: the first += on a missing key raises TypeError, which is
    # used here to initialize the counter to 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Fixed-width line prefix shared by all event handlers above.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Pulls the preempt count, flags and lock depth back out of the
    # opaque perf context object.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
    % (common_pc(context), trace_flag_str(common_flags(context)), \
    common_lock_depth(context))
def print_unhandled():
    # Prints a summary table of event types that had no handler.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
    "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
romain-li/edx-platform | lms/djangoapps/verify_student/tests/test_signals.py | 47 | 1999 | """
Unit tests for the VerificationDeadline signals
"""
from datetime import datetime, timedelta
from pytz import UTC
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from lms.djangoapps.verify_student.models import VerificationDeadline
from lms.djangoapps.verify_student.signals import _listen_for_course_publish
class VerificationDeadlineSignalTest(ModuleStoreTestCase):
    """
    Tests for the VerificationDeadline signal
    """
    def setUp(self):
        # Build a fresh course ending a week from now and clear any
        # deadlines created as a side effect of course creation.
        super(VerificationDeadlineSignalTest, self).setUp()
        self.end = datetime.now(tz=UTC).replace(microsecond=0) + timedelta(days=7)
        self.course = CourseFactory.create(end=self.end)
        VerificationDeadline.objects.all().delete()
    def test_no_deadline(self):
        """ Verify the signal sets deadline to course end when no deadline exists."""
        _listen_for_course_publish('store', self.course.id)
        self.assertEqual(VerificationDeadline.deadline_for_course(self.course.id), self.course.end)
    def test_deadline(self):
        """ Verify deadline is set to course end date by signal when changed. """
        deadline = datetime.now(tz=UTC) - timedelta(days=7)
        VerificationDeadline.set_deadline(self.course.id, deadline)
        _listen_for_course_publish('store', self.course.id)
        self.assertEqual(VerificationDeadline.deadline_for_course(self.course.id), self.course.end)
    def test_deadline_explicit(self):
        """ Verify deadline is unchanged by signal when explicitly set. """
        deadline = datetime.now(tz=UTC) - timedelta(days=7)
        VerificationDeadline.set_deadline(self.course.id, deadline, is_explicit=True)
        _listen_for_course_publish('store', self.course.id)
        actual_deadline = VerificationDeadline.deadline_for_course(self.course.id)
        self.assertNotEqual(actual_deadline, self.course.end)
        self.assertEqual(actual_deadline, deadline)
| agpl-3.0 |
cogeorg/black_rhino | examples/degroot/networkx/tests/benchmark.py | 37 | 10636 | from timeit import Timer
# This is gratefully modeled after the benchmarks found in
# the numpy svn repository. http://svn.scipy.org/svn/numpy/trunk
class Benchmark(object):
    """
    Benchmark a method or simple bit of code using different Graph classes.

    If the test code is the same for each graph class, then you can set it
    during instantiation through the argument test_string.
    The argument test_string can also be a tuple of test code and setup code.
    The code is entered as a string valid for use with the timeit module.

    Example:
    >>> b=Benchmark(['Graph','XGraph'])
    >>> b['Graph']=('G.add_nodes_from(nlist)','nlist=range(100)')
    >>> b.run()
    """
    def __init__(self, graph_classes, title='', test_string=None,
                 runs=3, reps=1000):
        """Create a benchmark over `graph_classes` (class names such as
        'Graph').  `test_string` may be a statement string or a
        ``(statement, setup)`` tuple applied to every class; `runs` and
        `reps` are forwarded to ``timeit.Timer.repeat``.
        """
        self.runs = runs
        self.reps = reps
        self.title = title
        # class name -> timeit.Timer ('' placeholder until a test is set)
        self.class_tests = {gc: '' for gc in graph_classes}
        # set up the test string if it is the same for all classes
        if test_string is not None:
            if isinstance(test_string, tuple):
                self['all'] = test_string
            else:
                self['all'] = (test_string, '')

    def __setitem__(self, graph_class, some_strs):
        """
        Set a simple bit of code and setup string for the test.
        Use this for cases where the code differs from one class to another.
        `graph_class` may be a single class name, a list of names, or the
        string 'all' to target every registered class.
        """
        test_str, setup_str = some_strs
        if graph_class == 'all':
            graph_class = list(self.class_tests)
        elif not isinstance(graph_class, list):
            graph_class = [graph_class]
        for GC in graph_class:
            # every Timer first imports networkx and instantiates the class
            setup_string = 'import networkx as NX\nG=NX.%s.%s()\n' % \
                (GC.lower(), GC) + setup_str
            self.class_tests[GC] = Timer(test_str, setup_string)

    def run(self):
        """Run the benchmark for each class and print results.

        Results are sorted fastest-first and shown as a percentage of the
        fastest time.  Classes whose benchmark raises are reported and
        skipped.
        """
        column_len = max(len(G) for G in self.class_tests)
        print('=' * 72)
        if self.title:
            print("%s: %s runs, %s reps" % (self.title, self.runs, self.reps))
        print('=' * 72)
        times = []
        for GC, timer in self.class_tests.items():
            name = GC.ljust(column_len)
            try:
                t = sum(timer.repeat(self.runs, self.reps)) / self.runs
                times.append((t, name))
            except Exception as e:
                print("%s: Failed to benchmark (%s)." % (name, e))
        # Bug fix: previously an empty `times` (every class failed)
        # crashed with IndexError on times[0][0].
        if not times:
            print("No successful benchmarks.")
            print('-' * 72)
            print()
            return
        times.sort()
        tmin = times[0][0]
        for t, name in times:
            print("%s: %5.2f %s" % (name, t / tmin * 100., t))
        print('-' * 72)
        print()
if __name__ == "__main__":
    # Ad-hoc benchmark driver comparing common operations across the four
    # NetworkX graph classes.  Edit `tests` below to choose what runs.
    # set up for all routines:
    classes=['Graph','MultiGraph','DiGraph','MultiDiGraph']
    all_tests=['add_nodes','add_edges','remove_nodes','remove_edges',\
        'neighbors','edges','degree','dijkstra','shortest path',\
        'subgraph','edgedata_subgraph','laplacian']
    # Choose which tests to run
    tests=all_tests
    # NOTE: the next line overrides the full list above; comment it out
    # to run every benchmark.
    tests=['subgraph','edgedata_subgraph']
    #tests=all_tests[-1:]
    N=100
    if 'add_nodes' in tests:
        title='Benchmark: Adding nodes'
        test_string=('G.add_nodes_from(nlist)','nlist=range(%i)'%N)
        b=Benchmark(classes,title,test_string,runs=3,reps=1000)
        b.run()
    if 'add_edges' in tests:
        title='Benchmark: Adding edges'
        setup='elist=[(i,i+3) for i in range(%s-3)]\nG.add_nodes_from(range(%i))'%(N,N)
        test_string=('G.add_edges_from(elist)',setup)
        b=Benchmark(classes,title,test_string,runs=3,reps=1000)
        b.run()
    if 'remove_nodes' in tests:
        title='Benchmark: Adding and Deleting nodes'
        setup='nlist=range(%i)'%N
        test_string=('G.add_nodes_from(nlist)\nG.remove_nodes_from(nlist)',setup)
        b=Benchmark(classes,title,test_string,runs=3,reps=1000)
        b.run()
    if 'remove_edges' in tests:
        title='Benchmark: Adding and Deleting edges'
        setup='elist=[(i,i+3) for i in range(%s-3)]'%N
        test_string=('G.add_edges_from(elist)\nG.remove_edges_from(elist)',setup)
        b=Benchmark(classes,title,test_string,runs=3,reps=1000)
        b.run()
    if 'neighbors' in tests:
        N=500
        p=0.3
        title='Benchmark: reporting neighbors'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='for n in G:\n for nbr in G.neighbors(n):\n  pass'
        all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)\n'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'edges' in tests:
        N=500
        p=0.3
        title='Benchmark: reporting edges'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='for n in G:\n for e in G.edges(n):\n  pass'
        all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)\n'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'degree' in tests:
        N=500
        p=0.3
        title='Benchmark: reporting degree'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='for d in G.degree():\n pass'
        all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)\n'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'dijkstra' in tests:
        N=500
        p=0.3
        title='dijkstra single source shortest path'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='p=NX.single_source_dijkstra(G,i)'
        all_setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'shortest path' in tests:
        N=500
        p=0.3
        title='single source shortest path'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='p=NX.single_source_shortest_path(G,i)'
        all_setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'subgraph' in tests:
        N=500
        p=0.3
        title='subgraph method'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='G.subgraph(nlist)'
        all_setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'edgedata_subgraph' in tests:
        N=500
        p=0.3
        title='subgraph method with edge data present'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='G.subgraph(nlist)'
        all_setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v,hi=3)'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)],hi=2)'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v,hi=1)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)],hi=2)'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
    if 'laplacian' in tests:
        N=500
        p=0.3
        title='creation of laplacian matrix'
        b=Benchmark(classes,title,runs=3,reps=1)
        test_string='NX.laplacian(G)'
        all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
        setup=all_setup+'G.add_edge(u,v)'
        if 'Graph' in classes: b['Graph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edge(u,v)'
        if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
        setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
        if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
        b.run()
| gpl-3.0 |
allthroughthenight/aces | web2py/scripts/extract_sqlite_models.py | 11 | 3960 | # -*- coding: utf-8 -*-
'''
Create the web2py model code needed to access your sqlite legacy db.
Usage:
python extract_sqlite_models.py
Access your tables with:
legacy_db(legacy_db.mytable.id>0).select()
extract_sqlite_models.py -- Copyright (C) Michele Comitini
This code is distributed with web2py.
The regexp code and the dictionary type map was extended from
extact_mysql_models.py that comes with web2py. extact_mysql_models.py is Copyright (C) Falko Krause.
'''
import re
import sys
import sqlite3
# Map sqlite/mysql column type names (lowercased) to web2py DAL field types.
data_type_map = {
    'varchar': 'string',
    'int': 'integer',
    'integer': 'integer',
    'tinyint': 'integer',
    'smallint': 'integer',
    'mediumint': 'integer',
    'bigint': 'integer',
    'float': 'double',
    'double': 'double',
    'char': 'string',
    'decimal': 'integer',
    'date': 'date',
    'time': 'time',
    'timestamp': 'datetime',
    'datetime': 'datetime',
    'binary': 'blob',
    'blob': 'blob',
    'tinyblob': 'blob',
    'mediumblob': 'blob',
    'longblob': 'blob',
    'text': 'text',
    'tinytext': 'text',
    'mediumtext': 'text',
    'longtext': 'text',
    'bit': 'boolean',
    'nvarchar': 'text',
    'numeric': 'decimal(30,15)',
    'real': 'decimal(30,15)',
}
def get_foreign_keys(sql_lines):
    """Scan the body of a CREATE TABLE statement for FOREIGN KEY clauses.

    Returns a dict mapping local column name -> (referenced table,
    referenced column).  The first and last lines of *sql_lines* (the
    CREATE header and the closing paren) are skipped.
    """
    fk_pattern = re.compile(
        r'FOREIGN\s+KEY\s+\("(\S+)"\)\s+REFERENCES\s+"(\S+)"\s+\("(\S+)"\)')
    foreign_keys = {}
    for body_line in sql_lines[1:-1]:
        match = fk_pattern.search(body_line)
        if match:
            foreign_keys[match.group(1)] = (match.group(2), match.group(3))
    return foreign_keys
def sqlite(database_name):
    """Generate web2py DAL model code for every user table of an
    existing sqlite database.

    Returns the model source as a single string: a DAL connection line
    followed by one ``define_table`` call per table (``migrate=False``
    so web2py will not try to alter the legacy schema).

    NOTE(review): relies on the Python 2 ``unicode`` builtin, so this
    module is Python 2 only.
    """
    conn = sqlite3.connect(database_name)
    c = conn.cursor()
    # Fetch the CREATE TABLE statement of each user table; sqlite
    # internal tables and names starting with '_' are filtered out.
    r = c.execute(r"select name,sql from sqlite_master where type='table' and not name like '\_%' and not lower(name) like 'sqlite_%'")
    tables = r.fetchall()
    connection_string = "legacy_db = DAL('sqlite://%s')" % database_name.split('/')[-1]
    legacy_db_table_web2py_code = []
    for table_name, sql_create_stmnt in tables:
        if table_name.startswith('_'):
            continue
        if 'CREATE' in sql_create_stmnt: # check if the table exists
            #remove garbage lines from sql statement
            sql_lines = sql_create_stmnt.split('\n')
            sql_lines = [x for x in sql_lines if not(
                x.startswith('--') or x.startswith('/*') or x == '')]
            #generate the web2py code from the create statement
            web2py_table_code = ''
            fks = get_foreign_keys(sql_lines)
            for line in sql_lines[1:-1]:
                # Skip key/constraint lines and the closing paren; only
                # plain [column] type declarations become Fields.
                if re.search('KEY', line) or re.search('PRIMARY', line) or re.search('"ID"', line) or line.startswith(')'):
                    continue
                hit = re.search(r'\[(\S+)\]\s+(\w+(\(\S+\))?),?( .*)?', line)
                if hit is not None:
                    name, d_type = hit.group(1), hit.group(2)
                    # Strip any size qualifier, e.g. varchar(32) -> varchar.
                    d_type = re.sub(r'(\w+)\(.*', r'\1', d_type)
                    name = unicode(re.sub('`', '', name))
                    if name in fks.keys():
                        # Foreign keys become DAL reference fields.
                        if fks[name][1].lower() == 'id':
                            field_type = 'reference %s' % (fks[name][0])
                        else:
                            field_type = 'reference %s.%s' % (fks[name][0], fks[name][1])
                    else:
                        field_type = data_type_map[d_type.lower()]
                    web2py_table_code += "\n Field('%s','%s')," % (
                        name, field_type)
            web2py_table_code = "legacy_db.define_table('%s',%s\n migrate=False)" % (table_name, web2py_table_code)
            legacy_db_table_web2py_code.append(web2py_table_code)
    #----------------------------------------
    # Assemble the final model source string.
    legacy_db_web2py_code = connection_string + "\n\n"
    legacy_db_web2py_code += "\n\n#--------\n".join(
        legacy_db_table_web2py_code)
    return legacy_db_web2py_code
# Script entry point: expects the sqlite database path as the only
# argument and prints the generated model code to stdout (Python 2
# print statements).  The usage string still names the mysql variant
# this script was derived from.
if len(sys.argv) < 2:
    print 'USAGE:\n\n extract_mysql_models.py data_basename\n\n'
else:
    print "# -*- coding: utf-8 -*-"
    print sqlite(sys.argv[1])
| gpl-3.0 |
TathagataChakraborti/resource-conflicts | PLANROB-2015/py2.5/lib/python2.5/email/mime/image.py | 573 | 1764 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing image/* type MIME documents."""
__all__ = ['MIMEImage']
import imghdr
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
class MIMEImage(MIMENonMultipart):
    """Class for generating image/* type MIME documents."""

    def __init__(self, _imagedata, _subtype=None,
                 _encoder=encoders.encode_base64, **_params):
        """Create an image/* type MIME document.

        _imagedata is a string containing the raw image data.  When no
        explicit _subtype is given, the standard `imghdr' module is asked
        to identify the data; a TypeError is raised if it cannot.

        _encoder performs the transfer encoding of the payload.  It
        receives this instance and should use get_payload() and
        set_payload() to replace the payload with its encoded form, adding
        any Content-Transfer-Encoding headers required.  The default
        encoder is base64.

        Remaining keyword arguments become Content-Type parameters via
        the base class constructor.
        """
        subtype = _subtype if _subtype is not None \
            else imghdr.what(None, _imagedata)
        if subtype is None:
            raise TypeError('Could not guess image MIME subtype')
        MIMENonMultipart.__init__(self, 'image', subtype, **_params)
        self.set_payload(_imagedata)
        _encoder(self)
| mit |
MihaiMoldovanu/ansible | lib/ansible/modules/network/f5/bigip_gtm_datacenter.py | 26 | 11100 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP
version_added: "2.2"
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
enabled:
description:
- Whether the data center should be enabled. At least one of C(state) and
C(enabled) are required.
choices:
- yes
- no
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: true
state:
description:
- The state of the datacenter on the BIG-IP. When C(present), guarantees
that the data center exists. When C(absent) removes the data center
from the BIG-IP. C(enabled) will enable the data center and C(disabled)
will ensure the data center is disabled. At least one of state and
enabled are required.
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create data center "New York"
bigip_gtm_datacenter:
server: "big-ip"
name: "New York"
location: "222 West 23rd"
delegate_to: localhost
'''
RETURN = '''
contact:
description: The contact that was set on the datacenter
returned: changed
type: string
sample: "admin@root.local"
description:
description: The description that was set for the datacenter
returned: changed
type: string
sample: "Datacenter in NYC"
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
location:
description: The location that is set for the datacenter
returned: changed
type: string
sample: "222 West 23rd"
name:
description: Name of the datacenter being manipulated
returned: changed
type: string
sample: "foo"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import F5ModuleError, f5_argument_spec
class BigIpGtmDatacenter(object):
    """Manage a single GTM datacenter object on a BIG-IP.

    The public entry point is ``flush()`` which dispatches to
    ``present()``/``absent()`` according to the ``state`` parameter and
    returns the dict of changed parameters for the Ansible result.
    """

    def __init__(self, *args, **kwargs):
        if not HAS_F5SDK:
            raise F5ModuleError("The python f5-sdk module is required")

        # The params that change in the module
        self.cparams = dict()

        # Stores the params that are sent to the module
        self.params = kwargs
        self.api = ManagementRoot(kwargs['server'],
                                  kwargs['user'],
                                  kwargs['password'],
                                  port=kwargs['server_port'])

    def create(self):
        """Create the datacenter on the device.

        Always returns True.  Honors check mode by returning before any
        API call is made.
        """
        params = dict()

        check_mode = self.params['check_mode']
        contact = self.params['contact']
        description = self.params['description']
        location = self.params['location']
        name = self.params['name']
        partition = self.params['partition']
        enabled = self.params['enabled']

        # Specifically check for None because a person could supply empty
        # values which would technically still be valid
        if contact is not None:
            params['contact'] = contact

        if description is not None:
            params['description'] = description

        if location is not None:
            params['location'] = location

        # BUG FIX: the previous code set enabled=True whenever *any*
        # value was supplied (including False), so `enabled: no` created
        # an enabled datacenter.  Honor the requested value instead, and
        # send nothing when it was omitted (device default applies).
        if enabled is not None:
            if enabled:
                params['enabled'] = True
            else:
                params['disabled'] = True

        params['name'] = name
        params['partition'] = partition

        self.cparams = camel_dict_to_snake_dict(params)
        if check_mode:
            return True

        d = self.api.tm.gtm.datacenters.datacenter
        d.create(**params)

        if not self.exists():
            raise F5ModuleError("Failed to create the datacenter")
        return True

    def read(self):
        """Read information and transform it

        The values that are returned by BIG-IP in the f5-sdk can have encoding
        attached to them as well as be completely missing in some cases.
        Therefore, this method will transform the data from the BIG-IP into a
        format that is more easily consumable by the rest of the class and the
        parameters that are supported by the module.
        """
        p = dict()
        name = self.params['name']
        partition = self.params['partition']
        r = self.api.tm.gtm.datacenters.datacenter.load(
            name=name,
            partition=partition
        )

        if hasattr(r, 'servers'):
            # Deliberately using sets to suppress duplicates
            p['servers'] = set([str(x) for x in r.servers])
        if hasattr(r, 'contact'):
            p['contact'] = str(r.contact)
        if hasattr(r, 'location'):
            p['location'] = str(r.location)
        if hasattr(r, 'description'):
            p['description'] = str(r.description)
        if r.enabled:
            p['enabled'] = True
        else:
            p['enabled'] = False
        p['name'] = name
        return p

    def update(self):
        """Reconcile the device object with the requested parameters.

        Returns True when anything changed on the device (or would have
        changed, in check mode), False otherwise.
        """
        changed = False
        params = dict()
        current = self.read()

        check_mode = self.params['check_mode']
        contact = self.params['contact']
        description = self.params['description']
        location = self.params['location']
        name = self.params['name']
        partition = self.params['partition']
        enabled = self.params['enabled']

        if contact is not None:
            if 'contact' in current:
                if contact != current['contact']:
                    params['contact'] = contact
            else:
                params['contact'] = contact

        if description is not None:
            if 'description' in current:
                if description != current['description']:
                    params['description'] = description
            else:
                params['description'] = description

        if location is not None:
            if 'location' in current:
                if location != current['location']:
                    params['location'] = location
            else:
                params['location'] = location

        if enabled is not None:
            if current['enabled'] != enabled:
                if enabled is True:
                    params['enabled'] = True
                    params['disabled'] = False
                else:
                    params['disabled'] = True
                    params['enabled'] = False

        if params:
            changed = True
            if check_mode:
                return changed
            self.cparams = camel_dict_to_snake_dict(params)
        else:
            return changed

        r = self.api.tm.gtm.datacenters.datacenter.load(
            name=name,
            partition=partition
        )
        r.update(**params)
        r.refresh()

        return True

    def delete(self):
        """Remove the datacenter from the device; True on success."""
        params = dict()
        check_mode = self.params['check_mode']

        params['name'] = self.params['name']
        params['partition'] = self.params['partition']

        self.cparams = camel_dict_to_snake_dict(params)
        if check_mode:
            return True

        dc = self.api.tm.gtm.datacenters.datacenter.load(**params)
        dc.delete()

        if self.exists():
            raise F5ModuleError("Failed to delete the datacenter")
        return True

    def present(self):
        """Ensure the datacenter exists and matches the parameters."""
        changed = False

        if self.exists():
            changed = self.update()
        else:
            changed = self.create()

        return changed

    def absent(self):
        """Ensure the datacenter does not exist."""
        changed = False

        if self.exists():
            changed = self.delete()

        return changed

    def exists(self):
        """Return True when the datacenter exists on the device."""
        name = self.params['name']
        partition = self.params['partition']

        return self.api.tm.gtm.datacenters.datacenter.exists(
            name=name,
            partition=partition
        )

    def flush(self):
        """Apply the requested state and return the Ansible result dict."""
        result = dict()
        # BUG FIX: initialize `changed` so that an unexpected state value
        # no longer raises UnboundLocalError at the result.update() below.
        changed = False
        state = self.params['state']
        enabled = self.params['enabled']

        if state is None and enabled is None:
            raise F5ModuleError("Neither 'state' nor 'enabled' set")

        try:
            if state == "present":
                changed = self.present()

                # Ensure that this field is not returned to the user since it
                # is not a valid parameter to the module.
                if 'disabled' in self.cparams:
                    del self.cparams['disabled']
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        result.update(**self.cparams)
        result.update(dict(changed=changed))
        return result
def main():
    """Module entry point: build the argument spec, run the manager, and
    report the result through the Ansible module API."""
    argument_spec = f5_argument_spec()
    argument_spec.update(dict(
        contact=dict(required=False, default=None),
        description=dict(required=False, default=None),
        enabled=dict(required=False, type='bool', default=None),
        location=dict(required=False, default=None),
        name=dict(required=True)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    try:
        manager = BigIpGtmDatacenter(check_mode=module.check_mode,
                                     **module.params)
        module.exit_json(**manager.flush())
    except F5ModuleError as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/bug_details_test.py | 1 | 3154 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import unittest
import mock
import webapp2
import webtest
# pylint: disable=unused-import
from dashboard import mock_oauth2_decorator
# pylint: enable=unused-import
from dashboard import bug_details
from dashboard.common import testing_common
from dashboard.services import issue_tracker_service
GET_ISSUE_DATA = {
'owner': {'name': 'sullivan@chromium.org'},
'state': 'open',
'status': 'Untriaged',
'summary': 'Regression in sunspider',
'published': '2017-02-17T23:08:44',
}
GET_COMMENTS_DATA = [
{
'author': 'foo@chromium.org',
'content': 'This is the first comment',
'published': '2017-02-17T09:59:55',
}, {
'author': 'bar@chromium.org',
'content': 'This is the second comment',
'published': '2017-02-17T10:00:0',
}, {
'author': 'bugdroid1@chromium.org',
'content': 'The following revision refers to this bug:\n'
' https://chromium.googlesource.com/chromium/src.git/+/'
'9ac6e6466cc0df7e1a3ad4488c5c8bdc2db4da36\n\n'
'Review-Url: https://codereview.chromium.org/2707483002\n\n',
'published': '2017-02-17T23:08:44',
}
]
class BugDetailsHandlerTest(testing_common.TestCase):
  """Webapp tests for the /bug_details endpoint.

  Issue-tracker access is fully mocked (see the decorators on testPost),
  so only the handler's transformation of issue data is exercised.
  """

  def setUp(self):
    # Route the handler under test through a throwaway WSGI app.
    super(BugDetailsHandlerTest, self).setUp()
    app = webapp2.WSGIApplication([(
        '/bug_details', bug_details.BugDetailsHandler)])
    self.testapp = webtest.TestApp(app)

  # Mocks fetching bugs from issue tracker.
  @unittest.skipIf(sys.platform.startswith('linux'), 'oauth2 mock error')
  @mock.patch('services.issue_tracker_service.discovery.build',
              mock.MagicMock())
  @mock.patch.object(
      issue_tracker_service.IssueTrackerService, 'GetIssue',
      mock.MagicMock(return_value=GET_ISSUE_DATA))
  @mock.patch.object(
      issue_tracker_service.IssueTrackerService, 'GetIssueComments',
      mock.MagicMock(return_value=GET_COMMENTS_DATA))
  def testPost(self):
    # POST a bug id and verify each field copied from the mocked issue
    # data, plus the review URLs scraped from the bugdroid comment.
    response = self.testapp.post('/bug_details', {'bug_id': '12345'})
    self.assertEqual(
        'Regression in sunspider',
        self.GetJsonValue(response, 'summary'))
    self.assertEqual(
        'sullivan@chromium.org',
        self.GetJsonValue(response, 'owner'))
    self.assertEqual(
        '2017-02-17T23:08:44',
        self.GetJsonValue(response, 'published'))
    self.assertEqual(
        'open',
        self.GetJsonValue(response, 'state'))
    self.assertEqual(
        'Untriaged',
        self.GetJsonValue(response, 'status'))
    comments = self.GetJsonValue(response, 'comments')
    self.assertEqual(3, len(comments))
    self.assertEqual('This is the second comment', comments[1]['content'])
    # NOTE(review): assertItemsEqual is Python 2 only (assertCountEqual
    # in Python 3) — confirm against the repo's target interpreter.
    self.assertItemsEqual(
        ['https://codereview.chromium.org/2707483002'],
        self.GetJsonValue(response, 'review_urls'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
michaelpacer/networkx | networkx/algorithms/connectivity/kcutsets.py | 12 | 7220 | # -*- coding: utf-8 -*-
"""
Kanevsky all minimum node k cutsets algorithm.
"""
from operator import itemgetter
import networkx as nx
from .utils import build_auxiliary_node_connectivity
from networkx.algorithms.flow import (
build_residual_network,
edmonds_karp,
shortest_augmenting_path,
)
default_flow_func = edmonds_karp
__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
__all__ = ['all_node_cuts']
def all_node_cuts(G, k=None, flow_func=None):
    r"""Returns all minimum k cutsets of an undirected graph G.

    This implementation is based on Kanevsky's algorithm [1]_ for finding all
    minimum-size node cut-sets of an undirected graph G; ie the set (or sets)
    of nodes of cardinality equal to the node connectivity of G. Thus if
    removed, would break G into two or more connected components.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    k : Integer
        Node connectivity of the input graph. If k is None, then it is
        computed. Default value: None.

    flow_func : function
        Function to perform the underlying flow computations. Default value
        edmonds_karp. This function performs better in sparse graphs with
        right tailed degree distributions. shortest_augmenting_path will
        perform better in denser graphs.

    Returns
    -------
    cuts : a generator of node cutsets
        Each node cutset has cardinality equal to the node connectivity of
        the input graph.

    Examples
    --------
    >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2
    >>> G = nx.grid_2d_graph(5, 5)
    >>> cutsets = list(nx.all_node_cuts(G))
    >>> len(cutsets)
    4
    >>> all(2 == len(cutset) for cutset in cutsets)
    True
    >>> nx.node_connectivity(G)
    2

    Notes
    -----
    This implementation is based on the sequential algorithm for finding all
    minimum-size separating vertex sets in a graph [1]_. The main idea is to
    compute minimum cuts using local maximum flow computations among a set
    of nodes of highest degree and all other non-adjacent nodes in the Graph.
    Once we find a minimum cut, we add an edge between the high degree
    node and the target node of the local maximum flow computation to make
    sure that we will not find that minimum cut again.

    See also
    --------
    node_connectivity
    edmonds_karp
    shortest_augmenting_path

    References
    ----------
    .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex
        sets in a graph. Networks 23(6), 533--541.
        http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError('Input graph is disconnected.')

    # Initialize data structures.
    # Keep track of the cuts already computed so we do not repeat them.
    seen = []
    # Even-Tarjan reduction is what we call auxiliary digraph
    # for node connectivity.
    H = build_auxiliary_node_connectivity(G)
    mapping = H.graph['mapping']
    R = build_residual_network(H, 'capacity')
    # Shared kwargs for every local max-flow call below.
    kwargs = dict(capacity='capacity', residual=R)
    # Define default flow function
    if flow_func is None:
        flow_func = default_flow_func
    if flow_func is shortest_augmenting_path:
        kwargs['two_phase'] = True
    # Begin the actual algorithm
    # step 1: Find node connectivity k of G
    if k is None:
        k = nx.node_connectivity(G, flow_func=flow_func)
    # step 2:
    # Find k nodes with top degree, call it X:
    X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}
    # Check if X is a k-node-cutset
    if _is_separating_set(G, X):
        seen.append(X)
        yield X

    for x in X:
        # step 3: Compute local connectivity flow of x with all other
        # non adjacent nodes in G
        non_adjacent = set(G) - X - set(G[x])
        for v in non_adjacent:
            # step 4: compute maximum flow in an Even-Tarjan reduction H of G
            # and step:5 build the associated residual network R
            R = flow_func(H, '%sB' % mapping[x], '%sA' % mapping[v], **kwargs)
            flow_value = R.graph['flow_value']

            if flow_value == k:
                # A flow equal to k means the (x, v) pair realizes a
                # minimum cut; mine the residual network for cutsets.
                ## Remove saturated edges form the residual network
                saturated_edges = [(u, w, d) for (u, w, d) in
                                   R.edges(data=True)
                                   if d['capacity'] == d['flow']]
                R.remove_edges_from(saturated_edges)

                # step 6: shrink the strongly connected components of
                # residual flow network R and call it L
                L = nx.condensation(R)
                cmap = L.graph['mapping']
                # step 7: Compute antichains of L; they map to closed sets in H
                # Any edge in H that links a closed set is part of a cutset
                for antichain in nx.antichains(L):
                    # Nodes in an antichain of the condensation graph of
                    # the residual network map to a closed set of nodes that
                    # define a node partition of the auxiliary digraph H.
                    S = {n for n, scc in cmap.items() if scc in antichain}
                    # Find the cutset that links the node partition (S,~S) in H
                    cutset = set()
                    for u in S:
                        cutset.update((u, w) for w in H[u] if w not in S)
                    # The edges in H that form the cutset are internal edges
                    # (ie edges that represent a node of the original graph G)
                    node_cut = {H.node[n]['id'] for edge in cutset for n in edge}

                    if len(node_cut) == k:
                        if node_cut not in seen:
                            yield node_cut
                            seen.append(node_cut)
                        # Add an edge (x, v) to make sure that we do not
                        # find this cutset again. This is equivalent
                        # of adding the edge in the input graph
                        # G.add_edge(x, v) and then regenerate H and R:
                        # Add edges to the auxiliary digraph.
                        H.add_edge('%sB' % mapping[x], '%sA' % mapping[v],
                                   capacity=1)
                        H.add_edge('%sB' % mapping[v], '%sA' % mapping[x],
                                   capacity=1)
                        # Add edges to the residual network.
                        R.add_edge('%sB' % mapping[x], '%sA' % mapping[v],
                                   capacity=1)
                        R.add_edge('%sA' % mapping[v], '%sB' % mapping[x],
                                   capacity=1)
                        break
                # Add again the saturated edges to reuse the residual network
                R.add_edges_from(saturated_edges)
def _is_separating_set(G, cut):
    """Return True if removing ``cut`` disconnects G.

    Assumes that the input graph is connected.
    """
    # Removing all but one node trivially separates a connected graph.
    if len(cut) == len(G) - 1:
        return True

    remainder = G.copy()
    remainder.remove_nodes_from(cut)
    return not nx.is_connected(remainder)
| bsd-3-clause |
naousse/odoo | addons/sale/wizard/sale_make_invoice.py | 344 | 3410 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_make_invoice(osv.osv_memory):
    """Transient wizard that creates invoices for the selected sale
    orders (OpenERP 7 / Odoo 8 old API)."""
    _name = "sale.make.invoice"
    _description = "Sales Make Invoice"
    _columns = {
        'grouped': fields.boolean('Group the invoices', help='Check the box to group the invoices for the same customers'),
        'invoice_date': fields.date('Invoice Date'),
    }

    _defaults = {
        'grouped': False,
        'invoice_date': fields.date.context_today,
    }

    def view_init(self, cr, uid, fields_list, context=None):
        """Reject opening the wizard on a draft sale order.

        NOTE(review): only the record in context['active_id'] is checked,
        even though make_invoices later operates on 'active_ids'.
        """
        if context is None:
            context = {}
        record_id = context and context.get('active_id', False)
        order = self.pool.get('sale.order').browse(cr, uid, record_id, context=context)
        if order.state == 'draft':
            raise osv.except_osv(_('Warning!'), _('You cannot create invoice when sales order is not confirmed.'))
        return False

    def make_invoices(self, cr, uid, ids, context=None):
        """Create invoices for every order in context['active_ids'] and
        return an act_window showing the newly created invoices."""
        order_obj = self.pool.get('sale.order')
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        newinv = []
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        # Only orders awaiting manual invoicing may be processed here.
        for sale_order in order_obj.browse(cr, uid, context.get(('active_ids'), []), context=context):
            if sale_order.state != 'manual':
                raise osv.except_osv(_('Warning!'), _("You shouldn't manually invoice the following sale order %s") % (sale_order.name))

        order_obj.action_invoice_create(cr, uid, context.get(('active_ids'), []), data['grouped'], date_invoice=data['invoice_date'])

        # Collect the ids of every invoice now linked to the orders.
        orders = order_obj.browse(cr, uid, context.get(('active_ids'), []), context=context)
        for o in orders:
            for i in o.invoice_ids:
                newinv.append(i.id)
        # Dummy call to workflow, will not create another invoice but bind the new invoice to the subflow
        order_obj.signal_workflow(cr, uid, [o.id for o in orders if o.order_policy == 'manual'], 'manual_invoice')
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        # Restrict the invoice list view to the invoices created above.
        result['domain'] = "[('id','in', [" + ','.join(map(str, newinv)) + "])]"
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ckprice/bedrock | vendor-local/lib/python/pyquery/pyquery.py | 10 | 38409 | #-*- coding:utf-8 -*-
#
# Copyright (C) 2008 - Olivier Lauzanne <olauzanne@gmail.com>
#
# Distributed under the BSD license, see LICENSE.txt
from .cssselectpatch import selector_to_xpath
from copy import deepcopy
from lxml import etree
import lxml.html
import sys
PY3k = sys.version_info >= (3,)
if PY3k:
from urllib.request import urlopen
from urllib.parse import urlencode
from urllib.parse import urljoin
basestring = (str, bytes)
unicode = str
else:
from urllib2 import urlopen
from urllib import urlencode
from urlparse import urljoin
def func_globals(f):
    """Return a function's globals dict (attribute name differs on py2/py3)."""
    return f.__globals__ if PY3k else f.func_globals
def func_code(f):
    """Return a function's code object (attribute name differs on py2/py3)."""
    return f.__code__ if PY3k else f.func_code
def fromstring(context, parser=None, custom_parser=None):
    """Parse *context* (markup string or file-like object) into a list of
    lxml elements.

    *parser* selects a parser by name ('xml', 'html', 'soup',
    'html_fragments'); when None, etree is tried first and lxml.html is
    used as a fallback for non-well-formed input.  *custom_parser*, when
    given, overrides the name-based selection.

    Raises ValueError for an unknown *parser* name.
    """
    def _as_element_list(result):
        # Normalize the various parser return types to a flat list.
        if isinstance(result, list):
            return result
        if isinstance(result, etree._ElementTree):
            return [result.getroot()]
        return [result]

    # File-like objects go through parse(); strings through fromstring().
    if hasattr(context, 'read') and hasattr(context.read, '__call__'):
        meth = 'parse'
    else:
        meth = 'fromstring'
    if custom_parser is None:
        if parser is None:
            try:
                result = getattr(etree, meth)(context)
            except etree.XMLSyntaxError:
                # use html parser if we don't have clean xml
                result = getattr(lxml.html, meth)(context)
            return _as_element_list(result)
        elif parser == 'xml':
            custom_parser = getattr(etree, meth)
        elif parser == 'html':
            custom_parser = getattr(lxml.html, meth)
        elif parser == 'soup':
            from lxml.html import soupparser
            custom_parser = getattr(lxml.html.soupparser, meth)
        elif parser == 'html_fragments':
            custom_parser = lxml.html.fragments_fromstring
        else:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so an unknown parser name fell through to a confusing
            # "'NoneType' is not callable" error below.
            raise ValueError('No such parser: "%s"' % parser)

    return _as_element_list(custom_parser(context))
def callback(func, *args):
    """Call *func*, truncating *args* to the number of positional
    parameters its signature declares."""
    accepted = func_code(func).co_argcount
    return func(*args[:accepted])
class NoDefault(object):
    """Sentinel type used to tell "argument omitted" apart from None."""

    def __repr__(self):
        # Short form so Sphinx renders a clean default value.
        return '<NoDefault>'


# Single shared sentinel instance; the class is then removed from the
# module namespace so only ``no_default`` can be referenced.
no_default = NoDefault()
del NoDefault
class FlexibleElement(object):
    """property to allow a flexible api

    Descriptor whose __get__ returns a proxy object on which attribute
    access, item access, assignment and plain calls all route to *pget*,
    mimicking the jQuery call style (e.g. ``d.attr.name`` and
    ``d.attr('name')`` behave the same).
    """
    def __init__(self, pget, pset=no_default, pdel=no_default):
        # pget/pset/pdel: getter, setter and deleter callables; the
        # latter two default to the no_default sentinel (unsupported).
        self.pget = pget
        self.pset = pset
        self.pdel = pdel
    def __get__(self, instance, klass):
        class _element(object):
            """real element to support set/get/del attr and item and js call
            style"""
            # NOTE: inside this nested class the first parameter is named
            # ``prop`` so that ``self``/``instance`` still refer to the
            # enclosing descriptor and owner via the closure.
            def __call__(prop, *args, **kwargs):
                return self.pget(instance, *args, **kwargs)
            # Every access form (getattr, getitem, setattr, setitem,
            # call) is deliberately aliased to the same getter call.
            __getattr__ = __getitem__ = __setattr__ = __setitem__ = __call__
            def __delitem__(prop, name):
                if self.pdel is not no_default:
                    return self.pdel(instance, name)
                else:
                    raise NotImplementedError()
            __delattr__ = __delitem__
            def __repr__(prop):
                return '<flexible_element %s>' % self.pget.__name__
        return _element()
    def __set__(self, instance, value):
        # Direct assignment on the owner delegates to pset when provided.
        if self.pset is not no_default:
            self.pset(instance, value)
        else:
            raise NotImplementedError()
class PyQuery(list):
"""The main class
"""
def __init__(self, *args, **kwargs):
html = None
elements = []
self._base_url = None
self.parser = kwargs.get('parser', None)
if 'parser' in kwargs:
del kwargs['parser']
if len(args) >= 1 and isinstance(args[0], basestring) \
and args[0].startswith('http://'):
kwargs['url'] = args[0]
if len(args) >= 2:
kwargs['data'] = args[1]
args = []
if 'parent' in kwargs:
self._parent = kwargs.pop('parent')
else:
self._parent = no_default
if kwargs:
# specific case to get the dom
if 'filename' in kwargs:
html = open(kwargs['filename'])
elif 'url' in kwargs:
url = kwargs.pop('url')
if 'opener' in kwargs:
opener = kwargs.pop('opener')
html = opener(url)
else:
method = kwargs.get('method')
data = kwargs.get('data')
if type(data) in (dict, list, tuple):
data = urlencode(data)
if isinstance(method, basestring) and method.lower() == 'get' and data:
if '?' not in url:
url += '?'
elif url[-1] not in ('?', '&'):
url += '&'
url += data
data = None
if data and PY3k:
data = data.encode('utf-8')
html = urlopen(url, data)
if not self.parser:
self.parser = 'html'
self._base_url = url
else:
raise ValueError('Invalid keyword arguments %s' % kwargs)
elements = fromstring(html, self.parser)
else:
# get nodes
# determine context and selector if any
selector = context = no_default
length = len(args)
if len(args) == 1:
context = args[0]
elif len(args) == 2:
selector, context = args
else:
raise ValueError("You can't do that." +\
" Please, provide arguments")
# get context
if isinstance(context, basestring):
try:
elements = fromstring(context, self.parser)
except Exception:
raise ValueError(context)
elif isinstance(context, self.__class__):
# copy
elements = context[:]
elif isinstance(context, list):
elements = context
elif isinstance(context, etree._Element):
elements = [context]
# select nodes
if elements and selector is not no_default:
xpath = selector_to_xpath(selector)
results = [tag.xpath(xpath) for tag in elements]
# Flatten the results
elements = []
for r in results:
elements.extend(r)
list.__init__(self, elements)
def __call__(self, *args):
"""return a new PyQuery instance
"""
length = len(args)
if length == 0:
raise ValueError('You must provide at least a selector')
if args[0] == '':
return self.__class__([])
if len(args) == 1 and isinstance(args[0], str) and not args[0].startswith('<'):
args += (self,)
result = self.__class__(*args, **dict(parent=self))
return result
# keep original list api prefixed with _
_append = list.append
_extend = list.extend
# improve pythonic api
def __add__(self, other):
assert isinstance(other, self.__class__)
return self.__class__(self[:] + other[:])
def extend(self, other):
assert isinstance(other, self.__class__)
self._extend(other[:])
    def __str__(self):
        """xml representation of current nodes::

        >>> xml = PyQuery('<script><![[CDATA[ ]></script>', parser='html_fragments')
        >>> print(str(xml))
        <script><![[CDATA[ ]></script>
        """
        # encoding=str makes lxml return native unicode strings on
        # Python 3; on Python 2 the default (bytes) is already str.
        if PY3k:
            return ''.join([etree.tostring(e, encoding=str) for e in self])
        else:
            return ''.join([etree.tostring(e) for e in self])
    def __unicode__(self):
        """xml representation of current nodes"""
        # encoding=unicode makes lxml emit text instead of bytes (py2).
        return unicode('').join([etree.tostring(e, encoding=unicode) for e in self])
    def __html__(self):
        """html representation of current nodes::

        >>> html = PyQuery('<script><![[CDATA[ ]></script>', parser='html_fragments')
        >>> print(html.__html__())
        <script><![[CDATA[ ]></script>
        """
        # Uses the html serializer (lxml.html.tostring) rather than the
        # xml one used by __str__/__unicode__.
        return unicode('').join([lxml.html.tostring(e, encoding=unicode) for e in self])
    def __repr__(self):
        """CSS-like summary of the selection, e.g. ``[<div#id.cls>]``.

        Falls back to the plain list repr when the nodes are not lxml
        elements (text nodes raise AttributeError on .get/.tag).
        """
        r = []
        try:
            for el in self:
                c = el.get('class')
                c = c and '.' + '.'.join(c.split(' ')) or ''
                id = el.get('id')
                id = id and '#' + id or ''
                r.append('<%s%s%s>' % (el.tag, id, c))
            return '[' + (', '.join(r)) + ']'
        except AttributeError:
            if PY3k:
                return list.__repr__(self)
            else:
                # Python 2: encode unicode items so repr stays bytes.
                for el in self:
                    if isinstance(el, unicode):
                        r.append(el.encode('utf-8'))
                    else:
                        r.append(el)
                return repr(r)
    @property
    def root(self):
        """return the xml root element

        When this instance was derived from another PyQuery (has a
        parent), the parent's tree is the authoritative root.
        """
        if self._parent is not no_default:
            return self._parent.getroottree()
        return self[0].getroottree()
    @property
    def encoding(self):
        """return the xml encoding of the root element

        Returns None implicitly when no root tree is available.
        """
        root = self.root
        if root is not None:
            return self.root.docinfo.encoding
##############
# Traversing #
##############
def _filter_only(self, selector, elements, reverse=False, unique=False):
    """Filters the selection set only, as opposed to also including
    descendants.

    :param selector: CSS selector string, or None for no filtering
    :param elements: iterable of lxml elements to filter
    :param reverse: reverse the result order before wrapping
    :param unique: drop duplicates, keeping the first occurrence
    """
    if selector is None:
        results = elements
    else:
        # 'self::' axis restricts matching to the elements themselves
        # instead of their descendants
        xpath = selector_to_xpath(selector, 'self::')
        results = []
        for tag in elements:
            results.extend(tag.xpath(xpath))
    if reverse:
        results.reverse()
    if unique:
        # order-preserving de-duplication (O(n^2), lists are small)
        result_list = results
        results = []
        for item in result_list:
            if not item in results:
                results.append(item)
    return self.__class__(results, **dict(parent=self))
def parent(self, selector=None):
return self._filter_only(selector, [e.getparent() for e in self if e.getparent() is not None], unique = True)
def prev(self, selector=None):
return self._filter_only(selector, [e.getprevious() for e in self if e.getprevious() is not None])
def next(self, selector=None):
return self._filter_only(selector, [e.getnext() for e in self if e.getnext() is not None])
def _traverse(self, method):
    """Generator: starting from each element, repeatedly call the lxml
    traversal method named *method* ('getnext'/'getprevious'), yielding
    every node reached until None terminates the walk."""
    for e in self:
        current = getattr(e, method)()
        while current is not None:
            yield current
            current = getattr(current, method)()
def _traverse_parent_topdown(self):
    """Yield every ancestor of each element, outermost (root) first."""
    for element in self:
        ancestors = []
        node = element.getparent()
        while node is not None:
            ancestors.append(node)
            node = node.getparent()
        # collected bottom-up; emit top-down
        for ancestor in reversed(ancestors):
            yield ancestor
def _nextAll(self):
return [e for e in self._traverse('getnext')]
def nextAll(self, selector=None):
"""
>>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p><img scr=""/></span>')
>>> d('p:last').nextAll()
[<img>]
"""
return self._filter_only(selector, self._nextAll())
def _prevAll(self):
return [e for e in self._traverse('getprevious')]
def prevAll(self, selector=None):
"""
>>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p><img scr=""/></span>')
>>> d('p:last').prevAll()
[<p.hello>]
"""
return self._filter_only(selector, self._prevAll(), reverse = True)
def siblings(self, selector=None):
"""
>>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p><img scr=""/></span>')
>>> d('.hello').siblings()
[<p>, <img>]
>>> d('.hello').siblings('img')
[<img>]
"""
return self._filter_only(selector, self._prevAll() + self._nextAll())
def parents(self, selector=None):
"""
>>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p></span>')
>>> d('p').parents()
[<span>]
>>> d('.hello').parents('span')
[<span>]
>>> d('.hello').parents('p')
[]
"""
return self._filter_only(
selector,
[e for e in self._traverse_parent_topdown()],
unique = True
)
def children(self, selector=None):
    """Filter elements that are direct children of self using optional selector.

    >>> d = PyQuery('<span><p class="hello">Hi</p><p>Bye</p></span>')
    >>> d
    [<span>]
    >>> d.children()
    [<p.hello>, <p>]
    >>> d.children('.hello')
    [<p.hello>]
    """
    direct = []
    for tag in self:
        direct.extend(tag.getchildren())
    return self._filter_only(selector, direct)
def closest(self, selector=None):
    """Return, for each element, the closest ancestor (or the element
    itself) that matches *selector*; non-matching chains are dropped.

    >>> d = PyQuery('<div class="hello"><p>This is a <strong class="hello">test</strong></p></div>')
    >>> d('strong').closest('div')
    [<div.hello>]
    >>> d('strong').closest('.hello')
    [<strong.hello>]
    >>> d('strong').closest('form')
    []
    """
    result = []
    for current in self:
        # walk upward from the element itself until a match or the root
        while current is not None and not self.__class__(current).is_(selector):
            current = current.getparent()
        if current is not None:
            result.append(current)
    return self.__class__(result, **dict(parent=self))
def filter(self, selector):
    """Filter elements in self using selector (string or function).

    >>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p>')
    >>> d('p')
    [<p.hello>, <p>]
    >>> d('p').filter('.hello')
    [<p.hello>]
    >>> d('p').filter(lambda i: i == 1)
    [<p>]
    >>> d('p').filter(lambda i: PyQuery(this).text() == 'Hi')
    [<p.hello>]
    """
    if not hasattr(selector, '__call__'):
        return self._filter_only(selector, self)
    else:
        elements = []
        try:
            for i, this in enumerate(self):
                # expose the current element as 'this' in the callback's
                # globals, jQuery-style
                func_globals(selector)['this'] = this
                if callback(selector, i):
                    elements.append(this)
        finally:
            # always remove the injected 'this' global again
            f_globals = func_globals(selector)
            if 'this' in f_globals:
                del f_globals['this']
        return self.__class__(elements, **dict(parent=self))
def not_(self, selector):
    """Return elements that don't match the given selector.

    >>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p><div></div>')
    >>> d('p').not_('.hello')
    [<p>]
    """
    excluded = set(self.__class__(selector, self))
    kept = [element for element in self if element not in excluded]
    return self.__class__(kept, **dict(parent=self))
def is_(self, selector):
"""Returns True if selector matches at least one current element, else False::
>>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p><div></div>')
>>> d('p').eq(0).is_('.hello')
True
>>> d('p').eq(1).is_('.hello')
False
..
"""
return bool(self.__class__(selector, self))
def find(self, selector):
"""Find elements using selector traversing down from self::
>>> m = '<p><span><em>Whoah!</em></span></p><p><em> there</em></p>'
>>> d = PyQuery(m)
>>> d('p').find('em')
[<em>, <em>]
>>> d('p').eq(1).find('em')
[<em>]
..
"""
xpath = selector_to_xpath(selector)
results = [child.xpath(xpath) for tag in self for child in tag.getchildren()]
# Flatten the results
elements = []
for r in results:
elements.extend(r)
return self.__class__(elements, **dict(parent=self))
def eq(self, index):
    """Return PyQuery of only the element with the provided index::

        >>> d = PyQuery('<p class="hello">Hi</p><p>Bye</p><div></div>')
        >>> d('p').eq(0)
        [<p.hello>]
        >>> d('p').eq(1)
        [<p>]
        >>> d('p').eq(2)
        []
    """
    # Slicing (rather than indexing) silently yields an empty
    # selection for out-of-range indexes, matching jQuery.
    selection = self[index:index + 1]
    return self.__class__(selection, **dict(parent=self))
def each(self, func):
    """apply func on each nodes

    The current element is exposed as 'this' in *func*'s globals for
    the duration of each call; returning False from *func* stops the
    iteration early, as in jQuery.
    """
    try:
        for i, element in enumerate(self):
            func_globals(func)['this'] = element
            if callback(func, i, element) == False:
                break
    finally:
        # always remove the injected 'this' global again
        f_globals = func_globals(func)
        if 'this' in f_globals:
            del f_globals['this']
    return self
def map(self, func):
    """Returns a new PyQuery after transforming current items with func.

    func should take two arguments - 'index' and 'element'. Elements can
    also be referred to as 'this' inside of func::

        >>> d = PyQuery('<p class="hello">Hi there</p><p>Bye</p><br />')
        >>> d('p').map(lambda i, e: PyQuery(e).text())
        ['Hi there', 'Bye']
        >>> d('p').map(lambda i, e: len(PyQuery(this).text()))
        [8, 3]
        >>> d('p').map(lambda i, e: PyQuery(this).text().split())
        ['Hi', 'there', 'Bye']
    """
    items = []
    try:
        for i, element in enumerate(self):
            func_globals(func)['this'] = element
            result = callback(func, i, element)
            if result is not None:
                # None results are skipped; list results are flattened,
                # matching jQuery.map semantics
                if not isinstance(result, list):
                    items.append(result)
                else:
                    items.extend(result)
    finally:
        # always remove the injected 'this' global again
        f_globals = func_globals(func)
        if 'this' in f_globals:
            del f_globals['this']
    return self.__class__(items, **dict(parent=self))
@property
def length(self):
return len(self)
def size(self):
return len(self)
def end(self):
"""Break out of a level of traversal and return to the parent level.
>>> m = '<p><span><em>Whoah!</em></span></p><p><em> there</em></p>'
>>> d = PyQuery(m)
>>> d('p').eq(1).find('em').end().end()
[<p>, <p>]
"""
return self._parent
##############
# Attributes #
##############
def attr(self, *args, **kwargs):
    """Attributes manipulation

    Usage:
      - ``attr(name)``: get the attribute from the first element
        (None on empty selection)
      - ``attr(name, value)``: set the attribute on every element
      - ``attr(name=value, ...)``: set several attributes; the keys
        'class_' and 'for_' map to 'class' and 'for'

    Setting a value of None or '' removes the attribute instead.
    """
    mapping = {'class_': 'class', 'for_': 'for'}
    attr = value = no_default
    length = len(args)
    if length == 1:
        attr = args[0]
        attr = mapping.get(attr, attr)
    elif length == 2:
        attr, value = args
        attr = mapping.get(attr, attr)
    elif kwargs:
        attr = {}
        for k, v in kwargs.items():
            attr[mapping.get(k, k)] = v
    else:
        raise ValueError('Invalid arguments %s %s' % (args, kwargs))
    if not self:
        # empty selection: nothing to get or set
        return None
    elif isinstance(attr, dict):
        for tag in self:
            for key, value in attr.items():
                tag.set(key, value)
    elif value is no_default:
        # getter: first element only, like jQuery
        return self[0].get(attr)
    elif value is None or value == '':
        return self.removeAttr(attr)
    else:
        for tag in self:
            tag.set(attr, value)
    return self
def removeAttr(self, name):
"""Remove an attribute::
>>> d = PyQuery('<div id="myid"></div>')
>>> d.removeAttr('id')
[<div>]
..
"""
for tag in self:
del tag.attrib[name]
return self
attr = FlexibleElement(pget=attr, pdel=removeAttr)
#######
# CSS #
#######
def height(self, value=no_default):
"""set/get height of element
"""
return self.attr('height', value)
def width(self, value=no_default):
"""set/get width of element
"""
return self.attr('width', value)
def hasClass(self, name):
"""Return True if element has class::
>>> d = PyQuery('<div class="myclass"></div>')
>>> d.hasClass('myclass')
True
..
"""
return self.is_('.%s' % name)
def addClass(self, value):
    """Add a css class to elements::

        >>> d = PyQuery('<div></div>')
        >>> d.addClass('myclass')
        [<div.myclass>]
    """
    # the classes to add are the same for every tag; split once
    new_classes = value.split(' ')
    for tag in self:
        current = set((tag.get('class') or '').split())
        current.update(new_classes)
        current.discard('')
        tag.set('class', ' '.join(current))
    return self
def removeClass(self, value):
"""Remove a css class to elements::
>>> d = PyQuery('<div class="myclass"></div>')
>>> d.removeClass('myclass')
[<div>]
..
"""
for tag in self:
values = value.split(' ')
classes = set((tag.get('class') or '').split())
classes.difference_update(values)
classes.difference_update([''])
tag.set('class', ' '.join(classes))
return self
def toggleClass(self, value):
"""Toggle a css class to elements
>>> d = PyQuery('<div></div>')
>>> d.toggleClass('myclass')
[<div.myclass>]
"""
for tag in self:
values = set(value.split(' '))
classes = set((tag.get('class') or '').split())
values_to_add = values.difference(classes)
classes.difference_update(values)
classes = classes.union(values_to_add)
classes.difference_update([''])
tag.set('class', ' '.join(classes))
return self
def css(self, *args, **kwargs):
    """css attributes manipulation

    Usage: ``css(prop)``, ``css(prop, value)`` or
    ``css(prop=value, ...)``. Underscores in property names are
    converted to dashes (font_size -> font-size). Any existing
    declaration for the same property in an element's style attribute
    is replaced.
    """
    attr = value = no_default
    length = len(args)
    if length == 1:
        attr = args[0]
    elif length == 2:
        attr, value = args
    elif kwargs:
        attr = kwargs
    else:
        raise ValueError('Invalid arguments %s %s' % (args, kwargs))
    if isinstance(attr, dict):
        for tag in self:
            # keep every existing declaration that is not overridden
            stripped_keys = [key.strip().replace('_', '-')
                             for key in attr.keys()]
            current = [el.strip()
                       for el in (tag.get('style') or '').split(';')
                       if el.strip()
                       and not el.split(':')[0].strip() in stripped_keys]
            for key, value in attr.items():
                key = key.replace('_', '-')
                current.append('%s: %s' % (key, value))
            tag.set('style', '; '.join(current))
    elif isinstance(value, basestring):
        attr = attr.replace('_', '-')
        for tag in self:
            # same replace-then-append dance for a single property
            current = [el.strip()
                       for el in (tag.get('style') or '').split(';')
                       if el.strip()
                       and not el.split(':')[0].strip() == attr.strip()]
            current.append('%s: %s' % (attr, value))
            tag.set('style', '; '.join(current))
    return self
css = FlexibleElement(pget=css, pset=css)
###################
# CORE UI EFFECTS #
###################
def hide(self):
"""remove display:none to elements style
>>> print(PyQuery('<div style="display:none;"/>').hide())
<div style="display: none"/>
"""
return self.css('display', 'none')
def show(self):
"""add display:block to elements style
>>> print(PyQuery('<div />').show())
<div style="display: block"/>
"""
return self.css('display', 'block')
########
# HTML #
########
def val(self, value=no_default):
"""Set the attribute value::
>>> d = PyQuery('<input />')
>>> d.val('Youhou')
[<input>]
Get the attribute value::
>>> d.val()
'Youhou'
"""
return self.attr('value', value)
def html(self, value=no_default):
    """Get or set the html representation of sub nodes.

    Get the text value::

        >>> d = PyQuery('<div><span>toto</span></div>')
        >>> print(d.html())
        <span>toto</span>

    Set the text value::

        >>> d.html('<span>Youhou !</span>')
        [<div>]
        >>> print(d)
        <div><span>Youhou !</span></div>
    """
    if value is no_default:
        if not self:
            return None
        tag = self[0]
        children = tag.getchildren()
        if not children:
            return tag.text
        # serialize the leading text plus every child of the first element
        html = tag.text or ''
        html += unicode('').join([etree.tostring(e, encoding=unicode) for e in children])
        return html
    else:
        if isinstance(value, self.__class__):
            new_html = unicode(value)
        elif isinstance(value, basestring):
            new_html = value
        elif not value:
            new_html = ''
        else:
            raise ValueError(type(value))
        for tag in self:
            for child in tag.getchildren():
                tag.remove(child)
            # wrap in <root> so fragments with several top-level nodes parse
            root = fromstring(unicode('<root>') + new_html + unicode('</root>'), self.parser)[0]
            children = root.getchildren()
            if children:
                tag.extend(children)
            tag.text = root.text
            tag.tail = root.tail
    return self
def outerHtml(self):
"""Get the html representation of the first selected element::
>>> d = PyQuery('<div><span class="red">toto</span> rocks</div>')
>>> print(d('span'))
<span class="red">toto</span> rocks
>>> print(d('span').outerHtml())
<span class="red">toto</span>
>>> S = PyQuery('<p>Only <b>me</b> & myself</p>')
>>> print(S('b').outerHtml())
<b>me</b>
..
"""
if not self:
return None
e0 = self[0]
if e0.tail:
e0 = deepcopy(e0)
e0.tail = ''
return lxml.html.tostring(e0, encoding=unicode)
def text(self, value=no_default):
    """Get or set the text representation of sub nodes.

    Get the text value::

        >>> doc = PyQuery('<div><span>toto</span><span>tata</span></div>')
        >>> print(doc.text())
        toto tata

    Set the text value::

        >>> doc.text('Youhou !')
        [<div>]
        >>> print(doc)
        <div>Youhou !</div>
    """
    if value is no_default:
        if not self:
            return None

        text = []

        def add_text(tag, no_tail=False):
            # depth-first collection: element text, children, then tail
            if tag.text:
                text.append(tag.text)
            for child in tag.getchildren():
                add_text(child)
            if not no_tail and tag.tail:
                text.append(tag.tail)

        for tag in self:
            add_text(tag, no_tail=True)
        # whitespace-normalized, space-joined — like jQuery's .text()
        return ' '.join([t.strip() for t in text if t.strip()])

    # setter: drop all children and install the plain text
    for tag in self:
        for child in tag.getchildren():
            tag.remove(child)
        tag.text = value
    return self
################
# Manipulating #
################
def _get_root(self, value):
    """Normalize *value* (string / lxml Element / PyQuery) into a
    sequence of elements plus leading text, for the insertion methods.

    :returns: ``(root, root_text)`` where *root* iterates over the new
        elements and *root_text* is the text preceding the first of them.
    :raises TypeError: for unsupported value types.
    """
    if isinstance(value, basestring):
        # wrap in <root> so fragments with several top-level nodes parse
        root = fromstring(unicode('<root>') + value + unicode('</root>'), self.parser)[0]
    elif isinstance(value, etree._Element):
        root = self.__class__(value)
    elif isinstance(value, PyQuery):
        root = value
    else:
        raise TypeError(
            'Value must be string, PyQuery or Element. Got %r' % value)
    if hasattr(root, 'text') and isinstance(root.text, basestring):
        root_text = root.text
    else:
        root_text = ''
    return root, root_text
def append(self, value):
    """append value to each nodes

    The leading text of *value* goes onto the tag's text (or onto the
    last child's tail), then the elements are moved in. For every
    target after the first, a deep copy is inserted so each node gets
    its own subtree.
    """
    root, root_text = self._get_root(value)
    for i, tag in enumerate(self):
        if len(tag) > 0:  # if the tag has children
            last_child = tag[-1]
            if not last_child.tail:
                last_child.tail = ''
            last_child.tail += root_text
        else:
            if not tag.text:
                tag.text = ''
            tag.text += root_text
        if i > 0:
            root = deepcopy(list(root))
        tag.extend(root)
        # re-read the moved elements from their new parent for next round
        root = tag[-len(root):]
    return self
def appendTo(self, value):
"""append nodes to value
"""
value.append(self)
return self
def prepend(self, value):
"""prepend value to nodes
"""
root, root_text = self._get_root(value)
for i, tag in enumerate(self):
if not tag.text:
tag.text = ''
if len(root) > 0:
root[-1].tail = tag.text
tag.text = root_text
else:
tag.text = root_text + tag.text
if i > 0:
root = deepcopy(list(root))
tag[:0] = root
root = tag[:len(root)]
return self
def prependTo(self, value):
"""prepend nodes to value
"""
value.prepend(self)
return self
def after(self, value):
    """Add *value* (string, Element or PyQuery) after each node.

    The leading text of *value* is appended to each tag's tail; the
    elements are spliced into the parent just after the tag. Every
    target after the first receives a deep copy of the elements.
    """
    root, root_text = self._get_root(value)
    for i, tag in enumerate(self):
        if not tag.tail:
            tag.tail = ''
        tag.tail += root_text
        if i > 0:
            root = deepcopy(list(root))
        parent = tag.getparent()
        index = parent.index(tag) + 1
        parent[index:index] = root
        # Re-read the freshly inserted elements from their new parent.
        # The slice must be offset by `index`; the original sliced
        # parent[index:len(root)], which is empty/wrong once index > 0.
        root = parent[index:index + len(root)]
    return self
def insertAfter(self, value):
"""insert nodes after value
"""
value.after(self)
return self
def before(self, value):
    """Insert *value* (string, Element or PyQuery) before each node.

    The leading text of *value* goes onto the previous sibling's tail,
    or onto the parent's text when the tag is the first child; the
    elements are spliced into the parent just before the tag.
    """
    root, root_text = self._get_root(value)
    for i, tag in enumerate(self):
        previous = tag.getprevious()
        # identity/None check, not rich comparison (lxml elements
        # define __eq__-like behavior; the original used `!= None`)
        if previous is not None:
            if not previous.tail:
                previous.tail = ''
            previous.tail += root_text
        else:
            parent = tag.getparent()
            if not parent.text:
                parent.text = ''
            parent.text += root_text
        if i > 0:
            root = deepcopy(list(root))
        parent = tag.getparent()
        index = parent.index(tag)
        parent[index:index] = root
        # Slice must be offset by `index`; the original sliced
        # parent[index:len(root)], which is empty/wrong once index > 0.
        root = parent[index:index + len(root)]
    return self
def insertBefore(self, value):
"""insert nodes before value
"""
value.before(self)
return self
def wrap(self, value):
"""A string of HTML that will be created on the fly and wrapped around
each target::
>>> d = PyQuery('<span>youhou</span>')
>>> d.wrap('<div></div>')
[<div>]
>>> print(d)
<div><span>youhou</span></div>
"""
assert isinstance(value, basestring)
value = fromstring(value)[0]
nodes = []
for tag in self:
wrapper = deepcopy(value)
# FIXME: using iterchildren is probably not optimal
if not wrapper.getchildren():
wrapper.append(deepcopy(tag))
else:
childs = [c for c in wrapper.iterchildren()]
child = childs[-1]
child.append(deepcopy(tag))
nodes.append(wrapper)
parent = tag.getparent()
if parent is not None:
for t in parent.iterchildren():
if t is tag:
t.addnext(wrapper)
parent.remove(t)
break
self[:] = nodes
return self
def wrapAll(self, value):
"""Wrap all the elements in the matched set into a single wrapper element::
>>> d = PyQuery('<div><span>Hey</span><span>you !</span></div>')
>>> print(d('span').wrapAll('<div id="wrapper"></div>'))
<div id="wrapper"><span>Hey</span><span>you !</span></div>
..
"""
if not self:
return self
assert isinstance(value, basestring)
value = fromstring(value)[0]
wrapper = deepcopy(value)
if not wrapper.getchildren():
child = wrapper
else:
childs = [c for c in wrapper.iterchildren()]
child = childs[-1]
replace_childs = True
parent = self[0].getparent()
if parent is None:
parent = no_default
# add nodes to wrapper and check parent
for tag in self:
child.append(deepcopy(tag))
if tag.getparent() is not parent:
replace_childs = False
# replace nodes i parent if possible
if parent is not no_default and replace_childs:
childs = [c for c in parent.iterchildren()]
if len(childs) == len(self):
for tag in self:
parent.remove(tag)
parent.append(wrapper)
self[:] = [wrapper]
return self
def replaceWith(self, value):
"""replace nodes by value
"""
if hasattr(value, '__call__'):
for i, element in enumerate(self):
self.__class__(element).before(value(i, element) + (element.tail or ''))
parent = element.getparent()
parent.remove(element)
else:
for tag in self:
self.__class__(tag).before(value + (tag.tail or ''))
parent = tag.getparent()
parent.remove(tag)
return self
def replaceAll(self, expr):
"""replace nodes by expr
"""
if self._parent is no_default:
raise ValueError(
'replaceAll can only be used with an object with parent')
self._parent(expr).replaceWith(self)
return self
def clone(self):
"""return a copy of nodes
"""
self[:] = [deepcopy(tag) for tag in self]
return self
def empty(self):
"""remove nodes content
"""
for tag in self:
tag.text = None
tag[:] = []
return self
def remove(self, expr=no_default):
    """remove nodes

    >>> d = PyQuery('<div>Maybe <em>she</em> does <strong>NOT</strong> know</div>')
    >>> d('strong').remove()
    [<strong>]
    >>> print(d)
    <div>Maybe <em>she</em> does know</div>

    With *expr*, only matching descendants are removed. A removed
    node's tail text is re-attached (with a separating space) to the
    previous sibling, or to the parent's text, so no text is lost.
    """
    if expr is no_default:
        for tag in self:
            parent = tag.getparent()
            if parent is not None:
                if tag.tail:
                    prev = tag.getprevious()
                    if prev is None:
                        # first child: tail moves onto the parent's text
                        if not parent.text:
                            parent.text = ''
                        parent.text += ' ' + tag.tail
                    else:
                        if not prev.tail:
                            prev.tail = ''
                        prev.tail += ' ' + tag.tail
                parent.remove(tag)
    else:
        results = self.__class__(expr, self)
        results.remove()
    return self
class Fn(object):
"""Hook for defining custom function (like the jQuery.fn)
>>> PyQuery.fn.listOuterHtml = lambda: this.map(lambda i, el: PyQuery(this).outerHtml())
>>> S = PyQuery('<ol> <li>Coffee</li> <li>Tea</li> <li>Milk</li> </ol>')
>>> S('li').listOuterHtml()
['<li>Coffee</li>', '<li>Tea</li>', '<li>Milk</li>']
"""
def __setattr__(self, name, func):
def fn(self, *args):
func_globals(func)['this'] = self
return func(*args)
fn.__name__ = name
setattr(PyQuery, name, fn)
fn = Fn()
#####################################################
# Additional methods that are not in the jQuery API #
#####################################################
@property
def base_url(self):
"""Return the url of current html document or None if not available.
"""
if self._base_url is not None:
return self._base_url
if self._parent is not no_default:
return self._parent.base_url
def make_links_absolute(self, base_url=None):
    """Make all links absolute.

    :param base_url: base used to resolve relative hrefs; defaults to
        the document's own ``base_url``.
    :raises ValueError: when no base URL is available at all.
    """
    if base_url is None:
        base_url = self.base_url
        if base_url is None:
            # original message ran "links" and "absolute" together;
            # the trailing space restores readable wording
            raise ValueError('You need a base URL to make your links '
                             'absolute. It can be provided by the base_url parameter.')
    self('a').each(lambda: self(this).attr('href', urljoin(base_url, self(this).attr('href'))))
    return self
| mpl-2.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 1009 | 2281 | import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """
    Our embarrassingly-simple replacement for mimetools.choose_boundary.

    Returns the 32-character lowercase hex digest of a fresh UUID4.
    """
    token = uuid4()
    return token.hex
def iter_field_objects(fields):
    """
    Iterate over fields, yielding RequestField instances.

    Supports a dict, a list of (k, v) tuples, and lists of
    :class:`~urllib3.fields.RequestField`; plain tuples are converted
    on the fly via ``RequestField.from_tuples``.
    """
    if isinstance(fields, dict):
        source = six.iteritems(fields)
    else:
        source = iter(fields)

    for item in source:
        if isinstance(item, RequestField):
            yield item
        else:
            yield RequestField.from_tuples(*item)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.

    :returns: tuple of ``(body bytes, Content-Type header value)``.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        body.write(b('--%s\r\n' % (boundary)))

        # headers are text: route them through the utf-8 stream writer
        writer(body).write(field.render_headers())
        data = field.data

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            # already bytes: write raw
            body.write(data)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
| apache-2.0 |
waynechu/PythonProject | dns/rdtypes/IN/AAAA.py | 2 | 2015 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.inet
import dns.rdata
import dns.tokenizer
class AAAA(dns.rdata.Rdata):

    """AAAA record.

    @ivar address: an IPv6 address
    @type address: string (in the standard IPv6 format)"""

    __slots__ = ['address']

    def __init__(self, rdclass, rdtype, address):
        """Store *address* after validating it parses as IPv6."""
        super(AAAA, self).__init__(rdclass, rdtype)
        # check that it's OK
        dns.inet.inet_pton(dns.inet.AF_INET6, address)
        self.address = address

    def to_text(self, origin=None, relativize=True, **kw):
        # presentation format is simply the stored address string
        return self.address

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse a single address token (then end-of-line) from *tok*."""
        address = tok.get_identifier()
        tok.get_eol()
        return cls(rdclass, rdtype, address)

    def to_wire(self, file, compress=None, origin=None):
        # wire format: the 16 raw bytes of the IPv6 address
        file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.address))

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Decode *rdlen* bytes at offset *current* into an AAAA rdata."""
        address = dns.inet.inet_ntop(dns.inet.AF_INET6,
                                     wire[current: current + rdlen])
        return cls(rdclass, rdtype, address)
| mit |
shakhat/os-failures | os_failures/api/cloud_management.py | 1 | 1457 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CloudManagement(object):
    """Abstract base for drivers that talk to a cloud deployment."""

    def __init__(self):
        # concrete power driver is injected later via set_power_management
        self.power_management = None

    def set_power_management(self, power_management):
        """Attach the power-management driver used to control nodes."""
        self.power_management = power_management

    @abc.abstractmethod
    def verify(self):
        """Verify connection to the cloud.
        """
        pass

    @abc.abstractmethod
    def get_nodes(self, fqdns=None):
        """Get nodes in the cloud

        This function returns NodesCollection representing all nodes in the
        cloud or only those that has specified FQDNs.
        :param fqdns list of FQDNs or None to retrieve all nodes
        :return: NodesCollection
        """
        pass

    @abc.abstractmethod
    def get_service(self, name):
        """Get service with specified name

        :param name: name of the service
        :return: Service
        """
        pass
Endika/edx-platform | openedx/tests/xblock_integration/test_done.py | 60 | 3745 | '''
Tests for the DoneXBlock.
This is nice as a simple example of the edX XBlock test framework.
'''
from openedx.tests.xblock_integration.xblock_testcase import XBlockTestCase
# pylint: disable=abstract-method
class TestDone(XBlockTestCase):
"""
Simple tests for the completion XBlock. We set up a page with two
of the block, make sure the page renders, toggle them a few times,
make sure they've toggled, and reconfirm the page renders.
"""
olx_scenarios = { # Currently not used
"two_done_block_test_case": """<vertical>
<done urlname="done0"/>
<done urlname="done1"/>
</vertical>"""
}
# This is a stop-gap until we can load OLX and/or OLX from
# normal workbench scenarios
test_configuration = [
{
"urlname": "two_done_block_test_case_0",
#"olx": self.olx_scenarios[0],
"xblocks": [ # Stopgap until we handle OLX
{
'blocktype': 'done',
'urlname': 'done_0'
}
]
},
{
"urlname": "two_done_block_test_case_1",
#"olx": self.olx_scenarios[0],
"xblocks": [ # Stopgap until we handle OLX
{
'blocktype': 'done',
'urlname': 'done_1'
}
]
}
]
def toggle_button(self, block, data, desired_state):
"""
Make an AJAX call to the XBlock, and assert the state is as
desired.
"""
resp = self.ajax('toggle_button', block, data)
self.assertEqual(resp.status_code, 200)
# pylint: disable=no-member
self.assertEqual(resp.data, {"state": desired_state})
# pylint: disable=unused-argument
def check_response(self, block_urlname, rendering):
"""
Confirm that we have a 200 response code (no server error)
In the future, visual diff test the response.
"""
response = self.render_block(block_urlname)
self.assertEqual(response.status_code, 200)
# To do: Below method needs to be implemented
#self.assertXBlockScreenshot(block_urlname, rendering)
def test_done(self):
"""
Walk through a few toggles. Make sure the blocks don't mix up
state between them, initial state is correct, and final state
is correct.
"""
# We confirm we don't have errors rendering the student view
self.check_response('done_0', 'done-unmarked')
self.check_response('done_1', 'done-unmarked')
# We confirm the block is initially false
self.toggle_button('done_0', {}, False)
self.reset_published_events()
self.toggle_button('done_1', {}, False)
self.assert_no_events_published("edx.done.toggled")
# We confirm we can toggle state both ways
self.reset_published_events()
self.toggle_button('done_0', {'done': True}, True)
self.assert_event_published('edx.done.toggled', event_fields={"done": True})
self.reset_published_events()
self.toggle_button('done_1', {'done': False}, False)
self.assert_event_published('edx.done.toggled', event_fields={"done": False})
self.toggle_button('done_0', {'done': False}, False)
self.assert_grade(0)
self.toggle_button('done_1', {'done': True}, True)
self.assert_grade(1)
# We confirm state sticks around
self.toggle_button('done_0', {}, False)
self.toggle_button('done_1', {}, True)
# And confirm we render correctly
self.check_response('done_0', 'done-unmarked')
self.check_response('done_1', 'done-marked')
| agpl-3.0 |
lnawrot/traffic-simulator | site_scons/site_tools/qt5/test/basic/CPPPATH/CPPPATH/sconstest-CPPPATH.py | 2 | 1765 | #!/usr/bin/env python
#
# Copyright (c) 2001-2010,2011,2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test that CPPPATH works with generated files.
In the SConscript we really add the necessary path, such that
the compile run is successful. See also the accompanying test
that is supposed to fail.
"""
import os.path
import TestSCons
test = TestSCons.TestSCons()
test.dir_fixture('image')
test.file_fixture('SConscript-after','SConscript')
test.file_fixture('../../../qtenv.py')
test.file_fixture('../../../../__init__.py','site_scons/site_tools/qt5/__init__.py')
test.run(stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
jAlpedrinha/DeclRY | declry/templatetags/view_tag.py | 2 | 3969 | # _*_ encoding: utf-8 _*_
from copy import copy
from django.template import Library, loader, Context
from django.contrib.admin.templatetags.admin_static import static
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.core.serializers import serialize
from django.utils.safestring import mark_safe
from django.db.models.query import QuerySet
from django.utils import simplejson
from django.core.urlresolvers import reverse
register = Library()
@register.simple_tag(takes_context=True)
def instance_card(context, instance=None):
new_context= copy(context)
if not instance:
instance = context['instance']
else:
new_context['instance'] = instance
templates = ['{0}/{1}/{1}_card.html'.format(instance._meta.app_label,instance._meta.module_name), 'card.html']
t = loader.select_template(templates)
return t.render(new_context)
def get_edit_url(instance):
try:
return reverse('%s_%s_edit_foto' % (instance._meta.app_label, instance._meta.module_name),
args=(instance.pk,))
except:
return None
def has_edit_perm(user, instance):
return user.has_perm('{}_{}_{}'.format(instance._meta.app_label, instance._meta.module_name,'edit_foto'), obj= instance)
@register.simple_tag(takes_context=True)
def instance_photo(context, size= 64, instance= None, edit_foto = False):
    """Render the photo (or icon) markup for *instance*.

    Falls back to context['instance'] when no instance is passed, and
    to static placeholder images when no foto is set. The edit-link
    overlay path is currently disabled (commented out below), so the
    plain image format is always used.
    """
    no_edit_image_format = u'<img width="{2}" src="{0}" alt="{1}" />'
    edit_image_format = u"""
    <div class="inst-image" >
        <img width="{2}" src="{0}" alt="{1}" />
        <div class="foto-edit">
            <a href="{3}">
                <i class="icon-edit"></i>{4}
            </a>
        </div>
    </div>
    """
    if not instance:
        instance = context['instance']
    # user = None
    # if 'request' in context:
    #     user = context['request'].user
    # if user and edit_foto and has_edit_perm(user, instance):
    #     image_format = edit_image_format
    #     edit_url = get_edit_url(instance)
    # else:
    image_format = no_edit_image_format
    edit_url = None
    if hasattr(instance, 'foto'):
        if instance.foto:
            url = instance.foto.url
        else:
            url = static("img/empty.png")
    else:
        # no foto field at all: prefer the instance's icon when set
        if instance.icon:
            return format_html(u'<div class="inst-icon" ><i class="icon-4x {}"> </i></div>', instance.icon)
        else:
            url = static("img/school.png")
    return format_html(image_format, url, force_text(instance), size, edit_url, _("Edit"))
@register.simple_tag(takes_context=True)
def get_module_name(context):
    """Return the verbose model name of the instance held in context."""
    return context['instance']._meta.verbose_name
@register.filter
def display_string(name):
    """Turn 'some_field_name' into 'Some Field Name'."""
    words = name.split('_')
    return ' '.join(capfirst(word) for word in words)
@register.filter
def ellipsis(value, limit=10):
    """Truncate *value* to *limit* characters, appending '...' when cut.

    A non-numeric limit returns the value unchanged.
    """
    try:
        max_len = int(limit)
    except ValueError:
        return value
    text = value if isinstance(value, unicode) else unicode(value)
    if len(text) <= max_len:
        return text
    try:
        return u'{}{}'.format(text[:max_len], '...')
    except Exception as e:
        return e.args
@register.filter
def cap_letters(value):
    """Return only the uppercase characters of *value* (e.g. an acronym)."""
    uppers = [ch for ch in value if ch.isupper()]
    return ''.join(uppers)
@register.filter
def dict_get(value, arg):
    """Template-side dictionary lookup: {{ mydict|dict_get:key }}."""
    item = value[arg]
    return item
@register.filter
def jsonify(obj):
    """Serialize *obj* to JSON; QuerySets go through Django's serializer."""
    if not isinstance(obj, QuerySet):
        return mark_safe(simplejson.dumps(obj))
    return serialize('json', obj)
@register.filter
def simple_timeframe(value):
    """Show only the text before the first '-' of the value's string form."""
    text = unicode(value)
    head, _sep, _rest = text.partition('-')
    return head
@register.filter
def can_edit(value, user):
    """True when *user* (which must expose 'roles') may edit *value*."""
    if not hasattr(user, 'roles'):
        return False
    return value.has_instance_perm(user, "edit")
# Capture the builtin before the filter below shadows the module-level name:
# calling plain getattr() inside the filter body resolved to the filter
# itself, recursing infinitely on any hit.
_builtin_getattr = getattr

@register.filter
def getattr(obj, attr):
    """Template filter: return obj.<attr> if present, else None."""
    if hasattr(obj, attr):
        return _builtin_getattr(obj, attr)
timokoola/finnkinotxt | botocore/vendored/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# HZ-GB-2312 escape-scheme state machine.
# *_cls maps every byte value (0x00-0xff) to a character class; *_st is the
# flattened transition table indexed by state * classFactor + class; the
# model dict bundles both for the coding state machine.
HZ_cls = (
1,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,0,0,0,0,  # 20 - 27
0,0,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,0,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,4,0,5,2,0,  # 78 - 7f
1,1,1,1,1,1,1,1,  # 80 - 87
1,1,1,1,1,1,1,1,  # 88 - 8f
1,1,1,1,1,1,1,1,  # 90 - 97
1,1,1,1,1,1,1,1,  # 98 - 9f
1,1,1,1,1,1,1,1,  # a0 - a7
1,1,1,1,1,1,1,1,  # a8 - af
1,1,1,1,1,1,1,1,  # b0 - b7
1,1,1,1,1,1,1,1,  # b8 - bf
1,1,1,1,1,1,1,1,  # c0 - c7
1,1,1,1,1,1,1,1,  # c8 - cf
1,1,1,1,1,1,1,1,  # d0 - d7
1,1,1,1,1,1,1,1,  # d8 - df
1,1,1,1,1,1,1,1,  # e0 - e7
1,1,1,1,1,1,1,1,  # e8 - ef
1,1,1,1,1,1,1,1,  # f0 - f7
1,1,1,1,1,1,1,1,  # f8 - ff
)
HZ_st = (
eStart,eError,     3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,     4,eError,# 10-17
     5,eError,     6,eError,     5,     5,     4,eError,# 18-1f
     4,eError,     4,     4,     4,eError,     4,eError,# 20-27
     4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
# Per-class character length contribution (all zero for this scheme).
HZCharLenTable = (0, 0, 0, 0, 0, 0)
HZSMModel = {'classTable': HZ_cls,
             'classFactor': 6,
             'stateTable': HZ_st,
             'charLenTable': HZCharLenTable,
             'name': "HZ-GB-2312"}
# ISO-2022-CN escape-sequence state machine (same table layout as HZ above).
ISO2022CN_cls = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,0,0,0,0,  # 20 - 27
0,3,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,4,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)
ISO2022CN_st = (
eStart,     3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError,     4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
     5,     6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
# Per-class character length contribution (all zero for this scheme).
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
                    'classFactor': 9,
                    'stateTable': ISO2022CN_st,
                    'charLenTable': ISO2022CNCharLenTable,
                    'name': "ISO-2022-CN"}
# ISO-2022-JP escape-sequence state machine (same table layout as HZ above).
ISO2022JP_cls = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,2,2,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,7,0,0,0,  # 20 - 27
3,0,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
6,0,4,0,8,0,0,0,  # 40 - 47
0,9,5,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)
ISO2022JP_st = (
eStart,     3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError,     5,eError,eError,eError,     4,eError,eError,# 20-27
eError,eError,eError,     6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
# Per-class character length contribution (all zero for this scheme).
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
                    'classFactor': 10,
                    'stateTable': ISO2022JP_st,
                    'charLenTable': ISO2022JPCharLenTable,
                    'name': "ISO-2022-JP"}
# ISO-2022-KR escape-sequence state machine (same table layout as HZ above).
ISO2022KR_cls = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,3,0,0,0,  # 20 - 27
0,4,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,5,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)
ISO2022KR_st = (
eStart,     3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError,     4,eError,eError,# 10-17
eError,eError,eError,eError,     5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
# Per-class character length contribution (all zero for this scheme).
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
                    'classFactor': 6,
                    'stateTable': ISO2022KR_st,
                    'charLenTable': ISO2022KRCharLenTable,
                    'name': "ISO-2022-KR"}
# flake8: noqa
| apache-2.0 |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/xdg/Menu.py | 2 | 36309 | """
Implementation of the XDG Menu Specification Version 1.0.draft-1
http://standards.freedesktop.org/menu-spec/
"""
from __future__ import generators

import locale
import os
import subprocess
import xml.dom.minidom

from xdg.BaseDirectory import *
from xdg.DesktopEntry import *
from xdg.Exceptions import *
import xdg.Locale
import xdg.Config
ELEMENT_NODE = xml.dom.Node.ELEMENT_NODE
# for python <= 2.3
try:
    reversed = reversed
except NameError:
    # Pre-2.4 interpreters lack the reversed() builtin; a slice is equivalent
    # for the sequences used here.
    def reversed(x):
        return x[::-1]
class Menu:
    """One node of the menu tree: its .directory metadata, entries and
    submenus, plus the transient state the .menu XML parser fills in."""
    def __init__(self):
        # Public stuff
        self.Name = ""
        self.Directory = None
        self.Entries = []
        self.Doc = ""
        self.Filename = ""
        self.Depth = 0
        self.Parent = None
        self.NotInXml = False
        # Can be one of Deleted/NoDisplay/Hidden/Empty/NotShowIn or True
        self.Show = True
        self.Visible = 0
        # Private stuff, only needed for parsing
        self.AppDirs = []
        self.DefaultLayout = None
        self.Deleted = "notset"
        self.Directories = []
        self.DirectoryDirs = []
        self.Layout = None
        self.MenuEntries = []
        self.Moves = []
        self.OnlyUnallocated = "notset"
        self.Rules = []
        self.Submenus = []
    def __str__(self):
        return self.Name
    def __add__(self, other):
        # Merge another same-named <Menu> definition; later settings win.
        for dir in other.AppDirs:
            self.AppDirs.append(dir)
        for dir in other.DirectoryDirs:
            self.DirectoryDirs.append(dir)
        for directory in other.Directories:
            self.Directories.append(directory)
        if other.Deleted != "notset":
            self.Deleted = other.Deleted
        if other.OnlyUnallocated != "notset":
            self.OnlyUnallocated = other.OnlyUnallocated
        if other.Layout:
            self.Layout = other.Layout
        if other.DefaultLayout:
            self.DefaultLayout = other.DefaultLayout
        for rule in other.Rules:
            self.Rules.append(rule)
        for move in other.Moves:
            self.Moves.append(move)
        for submenu in other.Submenus:
            self.addSubmenu(submenu)
        return self
    # FIXME: Performance: cache getName()
    def __cmp__(self, other):
        # Python 2 ordering: locale-aware comparison of display names.
        return locale.strcoll(self.getName(), other.getName())
    def __eq__(self, other):
        # Menus compare equal to anything whose str() matches their raw Name.
        if self.Name == str(other):
            return True
        else:
            return False
    """ PUBLIC STUFF """
    def getEntries(self, hidden=False):
        # Yield visible entries; hidden=True yields everything.
        for entry in self.Entries:
            if hidden == True:
                yield entry
            elif entry.Show == True:
                yield entry
    # FIXME: Add searchEntry/searchMenu function
    # search for name/comment/genericname/desktopfileid
    # return multiple items
    def getMenuEntry(self, desktopfileid, deep = False):
        for menuentry in self.MenuEntries:
            if menuentry.DesktopFileID == desktopfileid:
                return menuentry
        if deep == True:
            # NOTE(review): the recursive call's result is discarded, so a
            # deep lookup still returns None for entries found in submenus.
            for submenu in self.Submenus:
                submenu.getMenuEntry(desktopfileid, deep)
    def getMenu(self, path):
        # Resolve a "A/B/C" path one component at a time.
        array = path.split("/", 1)
        for submenu in self.Submenus:
            if submenu.Name == array[0]:
                if len(array) > 1:
                    return submenu.getMenu(array[1])
                else:
                    return submenu
    def getPath(self, org=False, toplevel=False):
        # Join the names of all ancestors; org=True uses raw Names instead of
        # display names, toplevel=True keeps the root's name in the path.
        parent = self
        names=[]
        while 1:
            if org:
                names.append(parent.Name)
            else:
                names.append(parent.getName())
            if parent.Depth > 0:
                parent = parent.Parent
            else:
                break
        names.reverse()
        path = ""
        if toplevel == False:
            names.pop(0)
        for name in names:
            path = os.path.join(path, name)
        return path
    def getName(self):
        # Display name from the .directory entry; raw Name when there is none.
        try:
            return self.Directory.DesktopEntry.getName()
        except AttributeError:
            return self.Name
    def getGenericName(self):
        try:
            return self.Directory.DesktopEntry.getGenericName()
        except AttributeError:
            return ""
    def getComment(self):
        try:
            return self.Directory.DesktopEntry.getComment()
        except AttributeError:
            return ""
    def getIcon(self):
        try:
            return self.Directory.DesktopEntry.getIcon()
        except AttributeError:
            return ""
    """ PRIVATE STUFF """
    def addSubmenu(self, newmenu):
        # Merge into an existing submenu of the same name, else append.
        for submenu in self.Submenus:
            if submenu == newmenu:
                submenu += newmenu
                break
        else:
            self.Submenus.append(newmenu)
            newmenu.Parent = self
            newmenu.Depth = self.Depth + 1
class Move:
    """A <Move> operation: relocate submenu at path Old to path New."""
    def __init__(self, node=None):
        if node:
            self.parseNode(node)
        else:
            self.Old = ""
            self.New = ""
    def __cmp__(self, other):
        # Python 2 ordering by source path.
        return cmp(self.Old, other.Old)
    def parseNode(self, node):
        # A <Move> element carries one <Old> and one <New> child.
        for child in node.childNodes:
            if child.nodeType == ELEMENT_NODE:
                if child.tagName == "Old":
                    try:
                        self.parseOld(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('Old cannot be empty', '??')
                elif child.tagName == "New":
                    try:
                        self.parseNew(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('New cannot be empty', '??')
    def parseOld(self, value):
        self.Old = value
    def parseNew(self, value):
        self.New = value
class Layout:
    """Order and inline behaviour of a menu's entries, from a <Layout> or
    <DefaultLayout> element; .order is a list of layout instructions."""
    def __init__(self, node=None):
        self.order = []
        if node:
            # Attribute defaults per the menu spec; inline_limit is numeric.
            self.show_empty = node.getAttribute("show_empty") or "false"
            self.inline = node.getAttribute("inline") or "false"
            self.inline_limit = node.getAttribute("inline_limit") or 4
            self.inline_header = node.getAttribute("inline_header") or "true"
            self.inline_alias = node.getAttribute("inline_alias") or "false"
            self.inline_limit = int(self.inline_limit)
            self.parseNode(node)
        else:
            # Default layout: merge submenus first, then desktop entries.
            self.show_empty = "false"
            self.inline = "false"
            self.inline_limit = 4
            self.inline_header = "true"
            self.inline_alias = "false"
            self.order.append(["Merge", "menus"])
            self.order.append(["Merge", "files"])
    def parseNode(self, node):
        for child in node.childNodes:
            if child.nodeType == ELEMENT_NODE:
                if child.tagName == "Menuname":
                    try:
                        self.parseMenuname(
                            child.childNodes[0].nodeValue,
                            child.getAttribute("show_empty") or "false",
                            child.getAttribute("inline") or "false",
                            child.getAttribute("inline_limit") or 4,
                            child.getAttribute("inline_header") or "true",
                            child.getAttribute("inline_alias") or "false" )
                    except IndexError:
                        raise ValidationError('Menuname cannot be empty', "")
                elif child.tagName == "Separator":
                    self.parseSeparator()
                elif child.tagName == "Filename":
                    try:
                        self.parseFilename(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('Filename cannot be empty', "")
                elif child.tagName == "Merge":
                    self.parseMerge(child.getAttribute("type") or "all")
    def parseMenuname(self, value, empty="false", inline="false", inline_limit=4, inline_header="true", inline_alias="false"):
        self.order.append(["Menuname", value, empty, inline, inline_limit, inline_header, inline_alias])
        # Index 4 is inline_limit; XML attribute values arrive as strings.
        self.order[-1][4] = int(self.order[-1][4])
    def parseSeparator(self):
        self.order.append(["Separator"])
    def parseFilename(self, value):
        self.order.append(["Filename", value])
    def parseMerge(self, type="all"):
        self.order.append(["Merge", type])
class Rule:
    "Include / Exclude Rules Class"
    def __init__(self, type, node=None):
        # Type is Include or Exclude
        self.Type = type
        # Rule is a python expression
        self.Rule = ""
        # Private attributes, only needed for parsing
        self.Depth = 0
        self.Expr = [ "or" ]
        self.New = True
        # Begin parsing
        if node:
            self.parseNode(node)
            self.compile()
    def __str__(self):
        return self.Rule
    def compile(self):
        # Builds a do() function in this instance's __dict__ from self.Rule.
        # NOTE(review): parseFilename escapes quotes before embedding the
        # value, but parseCategory only strips whitespace — a quote inside a
        # Category value would break (or alter) the generated expression.
        exec("""
def do(menuentries, type, run):
    for menuentry in menuentries:
        if run == 2 and ( menuentry.MatchedInclude == True \
        or menuentry.Allocated == True ):
            continue
        elif %s:
            if type == "Include":
                menuentry.Add = True
                menuentry.MatchedInclude = True
            else:
                menuentry.Add = False
    return menuentries
        """ % self.Rule) in self.__dict__
    def parseNode(self, node):
        for child in node.childNodes:
            if child.nodeType == ELEMENT_NODE:
                if child.tagName == 'Filename':
                    try:
                        self.parseFilename(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('Filename cannot be empty', "???")
                elif child.tagName == 'Category':
                    try:
                        self.parseCategory(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('Category cannot be empty', "???")
                elif child.tagName == 'All':
                    self.parseAll()
                elif child.tagName == 'And':
                    self.parseAnd(child)
                elif child.tagName == 'Or':
                    self.parseOr(child)
                elif child.tagName == 'Not':
                    self.parseNot(child)
    def parseNew(self, set=True):
        # Emit the current boolean connective before every term but the first.
        if not self.New:
            self.Rule += " " + self.Expr[self.Depth] + " "
        if not set:
            self.New = True
        elif set:
            self.New = False
    def parseFilename(self, value):
        self.parseNew()
        self.Rule += "menuentry.DesktopFileID == '%s'" % value.strip().replace("\\", r"\\").replace("'", r"\'")
    def parseCategory(self, value):
        self.parseNew()
        self.Rule += "'%s' in menuentry.Categories" % value.strip()
    def parseAll(self):
        self.parseNew()
        self.Rule += "True"
    def parseAnd(self, node):
        self.parseNew(False)
        self.Rule += "("
        self.Depth += 1
        self.Expr.append("and")
        self.parseNode(node)
        self.Depth -= 1
        self.Expr.pop()
        self.Rule += ")"
    def parseOr(self, node):
        self.parseNew(False)
        self.Rule += "("
        self.Depth += 1
        self.Expr.append("or")
        self.parseNode(node)
        self.Depth -= 1
        self.Expr.pop()
        self.Rule += ")"
    def parseNot(self, node):
        self.parseNew(False)
        self.Rule += "not ("
        self.Depth += 1
        self.Expr.append("or")
        self.parseNode(node)
        self.Depth -= 1
        self.Expr.pop()
        self.Rule += ")"
class MenuEntry:
    "Wrapper for 'Menu Style' Desktop Entries"
    def __init__(self, filename, dir="", prefix=""):
        # Create entry
        self.DesktopEntry = DesktopEntry(os.path.join(dir,filename))
        self.setAttributes(filename, dir, prefix)
        # Can be one of Deleted/Hidden/Empty/NotShowIn/NoExec or True
        self.Show = True
        # Semi-Private
        self.Original = None
        self.Parents = []
        # Private Stuff
        self.Allocated = False
        self.Add = False
        self.MatchedInclude = False
        # Caching
        self.Categories = self.DesktopEntry.getCategories()
    def save(self):
        # Only write back entries that were actually modified.
        if self.DesktopEntry.tainted == True:
            self.DesktopEntry.write()
    def getDir(self):
        return self.DesktopEntry.filename.replace(self.Filename, '')
    def getType(self):
        # Can be one of System/User/Both
        if xdg.Config.root_mode == False:
            if self.Original:
                return "Both"
            elif xdg_data_dirs[0] in self.DesktopEntry.filename:
                return "User"
            else:
                return "System"
        else:
            return "User"
    def setAttributes(self, filename, dir="", prefix=""):
        self.Filename = filename
        self.Prefix = prefix
        self.DesktopFileID = os.path.join(prefix,filename).replace("/", "-")
        if not os.path.isabs(self.DesktopEntry.filename):
            self.__setFilename()
    def updateAttributes(self):
        # Editing a system entry: keep the pristine copy in Original and
        # retarget this entry at the writable location.
        if self.getType() == "System":
            self.Original = MenuEntry(self.Filename, self.getDir(), self.Prefix)
            self.__setFilename()
    def __setFilename(self):
        # Point the entry at the writable (user, or in root mode system) dir.
        if xdg.Config.root_mode == False:
            path = xdg_data_dirs[0]
        else:
            path= xdg_data_dirs[1]
        if self.DesktopEntry.getType() == "Application":
            dir = os.path.join(path, "applications")
        else:
            dir = os.path.join(path, "desktop-directories")
        self.DesktopEntry.filename = os.path.join(dir, self.Filename)
    def __cmp__(self, other):
        # Python 2 ordering: locale-aware comparison of display names.
        return locale.strcoll(self.DesktopEntry.getName(), other.DesktopEntry.getName())
    def __eq__(self, other):
        # Entries compare equal to anything whose str() matches their ID.
        if self.DesktopFileID == str(other):
            return True
        else:
            return False
    def __repr__(self):
        return self.DesktopFileID
class Separator:
    """Placeholder entry representing a separator line inside a menu."""

    def __init__(self, parent):
        # Menu that owns this separator; sorting may later hide redundant ones.
        self.Parent = parent
        self.Show = True
class Header:
    """Inline header shown in place of an inlined submenu's own entry."""

    def __init__(self, name, generic_name, comment):
        self.Name = name
        self.GenericName = generic_name
        self.Comment = comment

    def __str__(self):
        return self.Name
tmp = {}
def __getFileName(filename):
    """Locate *filename* under the 'menus' subdir of the XDG config dirs
    (skipping the user dir in root mode); return its path or None."""
    search_dirs = xdg_config_dirs[:]
    if xdg.Config.root_mode == True:
        search_dirs.pop(0)
    for conf_dir in search_dirs:
        candidate = os.path.join(conf_dir, "menus", filename)
        if os.path.isdir(conf_dir) and os.path.isfile(candidate):
            return candidate
def parse(filename=None):
    """Parse a .menu file (default: $XDG_MENU_PREFIX + 'applications.menu')
    and return the fully generated, sorted root Menu."""
    # convert to absolute path
    if filename and not os.path.isabs(filename):
        filename = __getFileName(filename)
    # use default if no filename given
    if not filename:
        candidate = os.environ.get('XDG_MENU_PREFIX', '') + "applications.menu"
        filename = __getFileName(candidate)
    if not filename:
        raise ParsingError('File not found', "/etc/xdg/menus/%s" % candidate)
    # check if it is a .menu file
    if not os.path.splitext(filename)[1] == ".menu":
        raise ParsingError('Not a .menu file', filename)
    # create xml parser
    try:
        doc = xml.dom.minidom.parse(filename)
    except xml.parsers.expat.ExpatError:
        raise ParsingError('Not a valid .menu file', filename)
    # reset the module-level parse state, then parse the menu file
    tmp["Root"] = ""
    tmp["mergeFiles"] = []
    tmp["DirectoryDirs"] = []
    tmp["cache"] = MenuEntryCache()
    __parse(doc, filename, tmp["Root"])
    __parsemove(tmp["Root"])
    __postparse(tmp["Root"])
    tmp["Root"].Doc = doc
    tmp["Root"].Filename = filename
    # generate the menu (two passes: allocated, then only-unallocated)
    __genmenuNotOnlyAllocated(tmp["Root"])
    __genmenuOnlyAllocated(tmp["Root"])
    # and finally sort
    sort(tmp["Root"])
    return tmp["Root"]
def __parse(node, filename, parent=None):
    """Dispatch every child element of a <Menu> node to its handler,
    filling in *parent* (a Menu) as it goes."""
    for child in node.childNodes:
        if child.nodeType == ELEMENT_NODE:
            if child.tagName == 'Menu':
                __parseMenu(child, filename, parent)
            elif child.tagName == 'AppDir':
                try:
                    __parseAppDir(child.childNodes[0].nodeValue, filename, parent)
                except IndexError:
                    raise ValidationError('AppDir cannot be empty', filename)
            elif child.tagName == 'DefaultAppDirs':
                __parseDefaultAppDir(filename, parent)
            elif child.tagName == 'DirectoryDir':
                try:
                    __parseDirectoryDir(child.childNodes[0].nodeValue, filename, parent)
                except IndexError:
                    raise ValidationError('DirectoryDir cannot be empty', filename)
            elif child.tagName == 'DefaultDirectoryDirs':
                __parseDefaultDirectoryDir(filename, parent)
            elif child.tagName == 'Name' :
                try:
                    parent.Name = child.childNodes[0].nodeValue
                except IndexError:
                    raise ValidationError('Name cannot be empty', filename)
            elif child.tagName == 'Directory' :
                try:
                    parent.Directories.append(child.childNodes[0].nodeValue)
                except IndexError:
                    raise ValidationError('Directory cannot be empty', filename)
            elif child.tagName == 'OnlyUnallocated':
                parent.OnlyUnallocated = True
            elif child.tagName == 'NotOnlyUnallocated':
                parent.OnlyUnallocated = False
            elif child.tagName == 'Deleted':
                parent.Deleted = True
            elif child.tagName == 'NotDeleted':
                parent.Deleted = False
            elif child.tagName == 'Include' or child.tagName == 'Exclude':
                parent.Rules.append(Rule(child.tagName, child))
            elif child.tagName == 'MergeFile':
                # type="parent" merges the same relative file from another
                # XDG config dir instead of a named file.
                try:
                    if child.getAttribute("type") == "parent":
                        __parseMergeFile("applications.menu", child, filename, parent)
                    else:
                        __parseMergeFile(child.childNodes[0].nodeValue, child, filename, parent)
                except IndexError:
                    raise ValidationError('MergeFile cannot be empty', filename)
            elif child.tagName == 'MergeDir':
                try:
                    __parseMergeDir(child.childNodes[0].nodeValue, child, filename, parent)
                except IndexError:
                    raise ValidationError('MergeDir cannot be empty', filename)
            elif child.tagName == 'DefaultMergeDirs':
                __parseDefaultMergeDirs(child, filename, parent)
            elif child.tagName == 'Move':
                parent.Moves.append(Move(child))
            elif child.tagName == 'Layout':
                # Ignore empty <Layout/> elements.
                if len(child.childNodes) > 1:
                    parent.Layout = Layout(child)
            elif child.tagName == 'DefaultLayout':
                if len(child.childNodes) > 1:
                    parent.DefaultLayout = Layout(child)
            elif child.tagName == 'LegacyDir':
                try:
                    __parseLegacyDir(child.childNodes[0].nodeValue, child.getAttribute("prefix"), filename, parent)
                except IndexError:
                    raise ValidationError('LegacyDir cannot be empty', filename)
            elif child.tagName == 'KDELegacyDirs':
                __parseKDELegacyDirs(filename, parent)
def __parsemove(menu):
    """Apply <Move> operations bottom-up, creating intermediate target
    menus that did not appear in the XML as needed."""
    for submenu in menu.Submenus:
        __parsemove(submenu)
    # parse move operations
    for move in menu.Moves:
        move_from_menu = menu.getMenu(move.Old)
        if move_from_menu:
            # NOTE(review): move_to_menu is computed but never used.
            move_to_menu = menu.getMenu(move.New)
            menus = move.New.split("/")
            oldparent = None
            # Walk/create the destination path component by component.
            while len(menus) > 0:
                if not oldparent:
                    oldparent = menu
                newmenu = oldparent.getMenu(menus[0])
                if not newmenu:
                    newmenu = Menu()
                    newmenu.Name = menus[0]
                    if len(menus) > 1:
                        newmenu.NotInXml = True
                    oldparent.addSubmenu(newmenu)
                oldparent = newmenu
                menus.pop(0)
            # Merge the moved menu into the destination and detach it.
            newmenu += move_from_menu
            move_from_menu.Parent.Submenus.remove(move_from_menu)
def __postparse(menu):
    """Finalize defaults, inherit dirs/layouts from the parent, recurse,
    then resolve each menu's .directory file."""
    # unallocated / deleted
    if menu.Deleted == "notset":
        menu.Deleted = False
    if menu.OnlyUnallocated == "notset":
        menu.OnlyUnallocated = False
    # Layout Tags: fill whichever of Layout/DefaultLayout is missing,
    # inheriting from the parent where possible.
    if not menu.Layout or not menu.DefaultLayout:
        if menu.DefaultLayout:
            menu.Layout = menu.DefaultLayout
        elif menu.Layout:
            if menu.Depth > 0:
                menu.DefaultLayout = menu.Parent.DefaultLayout
            else:
                menu.DefaultLayout = Layout()
        else:
            if menu.Depth > 0:
                menu.Layout = menu.Parent.DefaultLayout
                menu.DefaultLayout = menu.Parent.DefaultLayout
            else:
                menu.Layout = Layout()
                menu.DefaultLayout = Layout()
    # add parent's app/directory dirs
    if menu.Depth > 0:
        menu.AppDirs = menu.Parent.AppDirs + menu.AppDirs
        menu.DirectoryDirs = menu.Parent.DirectoryDirs + menu.DirectoryDirs
    # remove duplicates
    menu.Directories = __removeDuplicates(menu.Directories)
    menu.DirectoryDirs = __removeDuplicates(menu.DirectoryDirs)
    menu.AppDirs = __removeDuplicates(menu.AppDirs)
    # go recursive through all menus
    for submenu in menu.Submenus:
        __postparse(submenu)
    # reverse so handling is easier
    menu.Directories.reverse()
    menu.DirectoryDirs.reverse()
    menu.AppDirs.reverse()
    # get the valid .directory file out of the list
    for directory in menu.Directories:
        for dir in menu.DirectoryDirs:
            if os.path.isfile(os.path.join(dir, directory)):
                menuentry = MenuEntry(directory, dir)
                if not menu.Directory:
                    menu.Directory = menuentry
                elif menuentry.getType() == "System":
                    if menu.Directory.getType() == "User":
                        menu.Directory.Original = menuentry
        if menu.Directory:
            break
# Menu parsing stuff
def __parseMenu(child, filename, parent):
    """Parse a <Menu> element into a Menu object, attaching it to *parent*
    (or installing it as the root when there is no parent)."""
    menu = Menu()
    __parse(child, filename, menu)
    if not parent:
        tmp["Root"] = menu
    else:
        parent.addSubmenu(menu)
# helper function
def __check(value, filename, type):
    """Resolve *value* relative to *filename*'s directory and return the
    absolute path if it exists as the requested type ('dir'/'file'),
    else False."""
    base = os.path.dirname(filename)
    if not os.path.isabs(value):
        value = os.path.join(base, value)
    value = os.path.abspath(value)
    if not os.path.exists(value):
        return False
    if type == "dir" and os.path.isdir(value):
        return value
    if type == "file" and os.path.isfile(value):
        return value
    return False
# App/Directory Dir Stuff
def __parseAppDir(value, filename, parent):
    """Register an existing application directory on *parent*."""
    checked = __check(value, filename, "dir")
    if checked:
        parent.AppDirs.append(checked)
def __parseDefaultAppDir(filename, parent):
    """Add <datadir>/applications for every XDG data dir, lowest priority first."""
    for base in reversed(xdg_data_dirs):
        __parseAppDir(os.path.join(base, "applications"), filename, parent)
def __parseDirectoryDir(value, filename, parent):
    """Register an existing .directory-file directory on *parent*."""
    checked = __check(value, filename, "dir")
    if checked:
        parent.DirectoryDirs.append(checked)
def __parseDefaultDirectoryDir(filename, parent):
    """Add <datadir>/desktop-directories for every XDG data dir, lowest first."""
    for base in reversed(xdg_data_dirs):
        __parseDirectoryDir(os.path.join(base, "desktop-directories"), filename, parent)
# Merge Stuff
def __parseMergeFile(value, child, filename, parent):
    """Merge another .menu file; with type="parent" the same relative file
    is looked up in the other XDG config dirs instead of *value*."""
    if child.getAttribute("type") == "parent":
        for dir in xdg_config_dirs:
            # Strip the config-dir prefix to get the file's relative path.
            rel_file = filename.replace(dir, "").strip("/")
            if rel_file != filename:
                for p in xdg_config_dirs:
                    if dir == p:
                        continue
                    if os.path.isfile(os.path.join(p,rel_file)):
                        __mergeFile(os.path.join(p,rel_file),child,parent)
                        break
    else:
        value = __check(value, filename, "file")
        if value:
            __mergeFile(value, child, parent)
def __parseMergeDir(value, child, filename, parent):
    """Merge every *.menu file found in the given directory."""
    checked = __check(value, filename, "dir")
    if not checked:
        return
    for entry in os.listdir(checked):
        try:
            if os.path.splitext(entry)[1] == ".menu":
                __mergeFile(os.path.join(checked, entry), child, parent)
        except UnicodeDecodeError:
            continue
def __parseDefaultMergeDirs(child, filename, parent):
    """Merge <confdir>/menus/<basename>-merged for each XDG config dir,
    lowest priority first."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    for base in reversed(xdg_config_dirs):
        __parseMergeDir(os.path.join(base, "menus", stem + "-merged"), child, filename, parent)
def __mergeFile(filename, child, parent):
    """Parse another .menu file and merge its first top-level element's
    children into *parent*; errors are fatal only in debug mode."""
    # check for infinite loops
    if filename in tmp["mergeFiles"]:
        if debug:
            raise ParsingError('Infinite MergeFile loop detected', filename)
        else:
            return
    tmp["mergeFiles"].append(filename)
    # load file
    try:
        doc = xml.dom.minidom.parse(filename)
    except IOError:
        if debug:
            raise ParsingError('File not found', filename)
        else:
            return
    except xml.parsers.expat.ExpatError:
        if debug:
            raise ParsingError('Not a valid .menu file', filename)
        else:
            return
    # append file
    # NOTE(review): this loop rebinds the 'child' parameter, which is not
    # needed past this point.
    for child in doc.childNodes:
        if child.nodeType == ELEMENT_NODE:
            __parse(child,filename,parent)
            break
# Legacy Dir Stuff
def __parseLegacyDir(dir, prefix, filename, parent):
    """Merge a legacy (pre-XDG) menu directory tree into *parent*."""
    merged = __mergeLegacyDir(dir, prefix, filename, parent)
    if merged:
        parent += merged
def __mergeLegacyDir(dir, prefix, filename, parent):
    """Recursively build a Menu from a legacy directory tree; entries
    without Categories get explicit Include rules and, outside the
    parent's AppDirs, a synthetic 'Legacy' category."""
    dir = __check(dir,filename,"dir")
    if dir and dir not in tmp["DirectoryDirs"]:
        tmp["DirectoryDirs"].append(dir)
        m = Menu()
        m.AppDirs.append(dir)
        m.DirectoryDirs.append(dir)
        m.Name = os.path.basename(dir)
        m.NotInXml = True
        for item in os.listdir(dir):
            try:
                if item == ".directory":
                    m.Directories.append(item)
                elif os.path.isdir(os.path.join(dir,item)):
                    m.addSubmenu(__mergeLegacyDir(os.path.join(dir,item), prefix, filename, parent))
            except UnicodeDecodeError:
                continue
        # legacy=True marks the cached entries as coming from a legacy dir
        tmp["cache"].addMenuEntries([dir],prefix, True)
        menuentries = tmp["cache"].getMenuEntries([dir], False)
        for menuentry in menuentries:
            categories = menuentry.Categories
            if len(categories) == 0:
                r = Rule("Include")
                r.parseFilename(menuentry.DesktopFileID)
                r.compile()
                m.Rules.append(r)
            if not dir in parent.AppDirs:
                categories.append("Legacy")
            menuentry.Categories = categories
        return m
def __parseKDELegacyDirs(filename, parent):
    """Merge the KDE legacy application dirs reported by
    'kde-config --path apps'.

    Fixes two defects of the os.popen3 version: the three pipe file objects
    it returned were never closed (a resource leak, and popen3 is
    deprecated), and the first output line kept its trailing newline, so
    the last ':'-separated path could never match an existing directory.
    """
    try:
        proc = subprocess.Popen(["kde-config", "--path", "apps"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        output = proc.communicate()[0]
    except OSError:
        # kde-config not installed: behave like the old empty-output case.
        return
    lines = output.splitlines()
    try:
        for dir in lines[0].split(":"):
            __parseLegacyDir(dir, "kde", filename, parent)
    except IndexError:
        pass
# remove duplicate entries from a list
def __removeDuplicates(list):
    """Return *list* with duplicates dropped, keeping each element's LAST
    occurrence in its original relative order.  Note: the argument is
    reversed in place as a side effect, as before."""
    list.reverse()
    seen = {}
    kept = []
    for element in list:
        if element not in seen:
            seen[element] = element
            kept.append(element)
    kept.reverse()
    return kept
# Finally generate the menu
def __genmenuNotOnlyAllocated(menu):
    """First generation pass: run the rules of every menu WITHOUT
    <OnlyUnallocated>, marking matched entries as allocated."""
    for submenu in menu.Submenus:
        __genmenuNotOnlyAllocated(submenu)
    if menu.OnlyUnallocated == False:
        tmp["cache"].addMenuEntries(menu.AppDirs)
        menuentries = []
        for rule in menu.Rules:
            # run=1: first pass, allocation state is ignored by do().
            menuentries = rule.do(tmp["cache"].getMenuEntries(menu.AppDirs), rule.Type, 1)
        for menuentry in menuentries:
            if menuentry.Add == True:
                menuentry.Parents.append(menu)
                menuentry.Add = False
                menuentry.Allocated = True
                menu.MenuEntries.append(menuentry)
def __genmenuOnlyAllocated(menu):
    """Second generation pass: menus WITH <OnlyUnallocated> only match
    entries not already allocated (or include-matched) in the first pass."""
    for submenu in menu.Submenus:
        __genmenuOnlyAllocated(submenu)
    if menu.OnlyUnallocated == True:
        tmp["cache"].addMenuEntries(menu.AppDirs)
        menuentries = []
        for rule in menu.Rules:
            # run=2: do() skips entries already matched/allocated.
            menuentries = rule.do(tmp["cache"].getMenuEntries(menu.AppDirs), rule.Type, 2)
        for menuentry in menuentries:
            if menuentry.Add == True:
                menuentry.Parents.append(menu)
            #   menuentry.Add = False
            #   menuentry.Allocated = True
                menu.MenuEntries.append(menuentry)
# And sorting ...
def sort(menu):
    """Build menu.Entries in <Layout> order and compute menu.Visible.

    Walks the tree depth-first, then applies the menu's Layout order:
    explicitly placed Filename/Menuname items and Separators first, then
    the remaining ("Merge") entries and submenus sorted alphabetically.
    Finally every entry's Show attribute is set according to the
    Deleted / NoDisplay / Hidden / TryExec / OnlyShowIn rules, and empty
    submenus are hidden per their show_empty setting.
    """
    menu.Entries = []
    menu.Visible = 0
    for submenu in menu.Submenus:
        sort(submenu)
    # collect the explicitly placed entries (tmp_e) and submenus (tmp_s)
    # so the "Merge" step below can skip them
    tmp_s = []
    tmp_e = []
    for order in menu.Layout.order:
        if order[0] == "Filename":
            tmp_e.append(order[1])
        elif order[0] == "Menuname":
            tmp_s.append(order[1])
    for order in menu.Layout.order:
        if order[0] == "Separator":
            separator = Separator(menu)
            # collapse two consecutive separators into one visible one
            if len(menu.Entries) > 0 and isinstance(menu.Entries[-1], Separator):
                separator.Show = False
            menu.Entries.append(separator)
        elif order[0] == "Filename":
            menuentry = menu.getMenuEntry(order[1])
            if menuentry:
                menu.Entries.append(menuentry)
        elif order[0] == "Menuname":
            submenu = menu.getMenu(order[1])
            if submenu:
                __parse_inline(submenu, menu)
        elif order[0] == "Merge":
            # everything not explicitly placed above, in sorted order
            if order[1] == "files" or order[1] == "all":
                menu.MenuEntries.sort()
                for menuentry in menu.MenuEntries:
                    if menuentry not in tmp_e:
                        menu.Entries.append(menuentry)
            elif order[1] == "menus" or order[1] == "all":
                menu.Submenus.sort()
                for submenu in menu.Submenus:
                    if submenu.Name not in tmp_s:
                        __parse_inline(submenu, menu)
    # getHidden / NoDisplay / OnlyShowIn / NotOnlyShowIn / Deleted / NoExec
    for entry in menu.Entries:
        entry.Show = True
        menu.Visible += 1
        if isinstance(entry, Menu):
            if entry.Deleted == True:
                entry.Show = "Deleted"
                menu.Visible -= 1
            elif isinstance(entry.Directory, MenuEntry):
                if entry.Directory.DesktopEntry.getNoDisplay() == True:
                    entry.Show = "NoDisplay"
                    menu.Visible -= 1
                elif entry.Directory.DesktopEntry.getHidden() == True:
                    entry.Show = "Hidden"
                    menu.Visible -= 1
        elif isinstance(entry, MenuEntry):
            if entry.DesktopEntry.getNoDisplay() == True:
                entry.Show = "NoDisplay"
                menu.Visible -= 1
            elif entry.DesktopEntry.getHidden() == True:
                entry.Show = "Hidden"
                menu.Visible -= 1
            elif entry.DesktopEntry.getTryExec() and not __try_exec(entry.DesktopEntry.getTryExec()):
                entry.Show = "NoExec"
                menu.Visible -= 1
            elif xdg.Config.windowmanager:
                if ( entry.DesktopEntry.getOnlyShowIn() != [] and xdg.Config.windowmanager not in entry.DesktopEntry.getOnlyShowIn() ) \
                or xdg.Config.windowmanager in entry.DesktopEntry.getNotShowIn():
                    entry.Show = "NotShowIn"
                    menu.Visible -= 1
        elif isinstance(entry,Separator):
            # separators never count towards the visible-entry total
            menu.Visible -= 1
    # remove separators at the beginning and at the end
    if len(menu.Entries) > 0:
        if isinstance(menu.Entries[0], Separator):
            menu.Entries[0].Show = False
    if len(menu.Entries) > 1:
        if isinstance(menu.Entries[-1], Separator):
            menu.Entries[-1].Show = False
    # show_empty tag
    for entry in menu.Entries:
        if isinstance(entry,Menu) and entry.Layout.show_empty == "false" and entry.Visible == 0:
            entry.Show = "Empty"
            menu.Visible -= 1
            # NOTE(review): removing from menu.Entries while iterating it
            # skips the element after the removed one — confirm upstream
            if entry.NotInXml == True:
                menu.Entries.remove(entry)
def __try_exec(executable):
    """Return True if *executable* resolves to an executable file.

    A path that is already a regular file is only checked for the execute
    bit; otherwise the bare name is searched on every $PATH component.
    """
    if os.path.isfile(executable):
        return bool(os.access(executable, os.X_OK))
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, executable)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return True
    return False
# inline tags
def __parse_inline(submenu, menu):
    """Append *submenu* to menu.Entries, honouring its <Layout> inline tags.

    With inline="true" the submenu's entries may be hoisted directly into
    the parent: a single entry can replace the submenu outright
    (inline_alias, taking over its name and comment), and submenus within
    inline_limit (0 = unlimited) are flattened, optionally preceded by a
    Header (inline_header).  In every other case the submenu itself is
    appended as a normal entry.
    """
    if submenu.Layout.inline == "true":
        if len(submenu.Entries) == 1 and submenu.Layout.inline_alias == "true":
            # the lone entry replaces the submenu and inherits its identity
            menuentry = submenu.Entries[0]
            menuentry.DesktopEntry.set("Name", submenu.getName(), locale = True)
            menuentry.DesktopEntry.set("GenericName", submenu.getGenericName(), locale = True)
            menuentry.DesktopEntry.set("Comment", submenu.getComment(), locale = True)
            menu.Entries.append(menuentry)
        elif len(submenu.Entries) <= submenu.Layout.inline_limit or submenu.Layout.inline_limit == 0:
            # small enough to flatten into the parent menu
            if submenu.Layout.inline_header == "true":
                header = Header(submenu.getName(), submenu.getGenericName(), submenu.getComment())
                menu.Entries.append(header)
            for entry in submenu.Entries:
                menu.Entries.append(entry)
        else:
            menu.Entries.append(submenu)
    else:
        menu.Entries.append(submenu)
class MenuEntryCache:
    """Cache of parsed Desktop Entries, scanned once per directory."""

    def __init__(self):
        # MenuEntry objects per scanned directory; the special key
        # 'legacy' additionally collects everything found in legacy dirs
        self.cacheEntries = {}
        self.cacheEntries['legacy'] = []
        # memoized getMenuEntries() results, keyed by the joined dir names
        self.cache = {}

    def addMenuEntries(self, dirs, prefix="", legacy=False):
        """Scan each directory in *dirs* for .desktop files (once per dir)."""
        for dir in dirs:
            # dict.has_key() was removed in Python 3; the 'in' membership
            # test is the portable equivalent
            if dir not in self.cacheEntries:
                self.cacheEntries[dir] = []
                self.__addFiles(dir, "", prefix, legacy)

    def __addFiles(self, dir, subdir, prefix, legacy):
        """Recursively collect parseable .desktop files below dir/subdir.

        Unparseable files are skipped; legacy directories are not descended
        into (their subdirectories become submenus elsewhere).
        """
        for item in os.listdir(os.path.join(dir, subdir)):
            if os.path.splitext(item)[1] == ".desktop":
                try:
                    menuentry = MenuEntry(os.path.join(subdir, item), dir, prefix)
                except ParsingError:
                    continue
                self.cacheEntries[dir].append(menuentry)
                if legacy == True:
                    self.cacheEntries['legacy'].append(menuentry)
            elif os.path.isdir(os.path.join(dir, subdir, item)) and legacy == False:
                self.__addFiles(dir, os.path.join(subdir, item), prefix, legacy)

    def getMenuEntries(self, dirs, legacy=True):
        """Return de-duplicated MenuEntries for *dirs* (memoized).

        Entries sharing a DesktopFileID collapse onto the first one seen;
        a later "System" duplicate of a "User" entry is attached to it as
        .Original so the user override can fall back to it.
        """
        result = []
        ids = []
        # handle legacy items
        appdirs = dirs[:]
        if legacy == True:
            appdirs.append("legacy")
        # cache the results again
        key = "".join(appdirs)
        try:
            return self.cache[key]
        except KeyError:
            pass
        for dir in appdirs:
            for menuentry in self.cacheEntries[dir]:
                try:
                    if menuentry.DesktopFileID not in ids:
                        ids.append(menuentry.DesktopFileID)
                        result.append(menuentry)
                    elif menuentry.getType() == "System":
                        # FIXME: This is only 99% correct, but still...
                        i = result.index(menuentry)
                        existing = result[i]
                        if existing.getType() == "User":
                            existing.Original = menuentry
                except UnicodeDecodeError:
                    continue
        self.cache[key] = result
        return result
| gpl-3.0 |
nvbn/python-social-auth | social/tests/backends/test_bitbucket.py | 5 | 1630 | import json
from httpretty import HTTPretty
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class BitbucketOAuth1Test(OAuth1Test):
    """OAuth1 login-flow test for the Bitbucket backend.

    The canned HTTP bodies below are what the backend expects the
    Bitbucket API to return during the handshake; HTTPretty serves them
    in place of the real service.
    """
    backend_path = 'social.backends.bitbucket.BitbucketOAuth'
    # user lookup happens by the (verified) e-mail address
    user_data_url = 'https://bitbucket.org/api/1.0/users/foo@bar.com'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    emails_body = json.dumps([{
        'active': True,
        'email': 'foo@bar.com',
        'primary': True
    }])
    user_data_body = json.dumps({
        'user': {
            'username': 'foobar',
            'first_name': 'Foo',
            'last_name': 'Bar',
            'display_name': 'Foo Bar',
            'is_team': False,
            'avatar': 'https://secure.gravatar.com/avatar/'
                      '5280f15cedf540b544eecc30fcf3027c?'
                      'd=https%3A%2F%2Fd3oaxc4q5k2d6q.cloudfront.net%2Fm%2F'
                      '9e262ba34f96%2Fimg%2Fdefault_avatar%2F32%2F'
                      'user_blue.png&s=32',
            'resource_uri': '/1.0/users/foobar'
        }
    })

    def test_login(self):
        """Full login: the backend first resolves the primary e-mail."""
        HTTPretty.register_uri(HTTPretty.GET,
                               'https://bitbucket.org/api/1.0/emails/',
                               status=200, body=self.emails_body)
        self.do_login()

    def test_partial_pipeline(self):
        """Login resumed from a partially completed pipeline."""
        self.do_partial_pipeline()
| bsd-3-clause |
thaumos/ansible | lib/ansible/modules/web_infrastructure/deploy_helper.py | 149 | 19571 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: deploy_helper
version_added: "2.0"
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects.
description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
- "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
C(project_path), whatever you set in the path parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
C(unfinished_filename), the file to check for to recognize unfinished builds,
C(previous_release), the release the 'current' symlink is pointing to,
C(previous_release_path), the full path to the 'current' symlink target,
C(new_release), either the 'release' parameter or a generated timestamp,
C(new_release_path), the path to the new release folder (not created by the module)."
options:
path:
required: True
aliases: ['dest']
description:
- the root path of the project. Alias I(dest).
Returned in the C(deploy_helper.project_path) fact.
state:
description:
- the state of the project.
C(query) will only gather facts,
C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
C(finalize) will remove the unfinished_filename file, create a symlink to the newly
deployed release and optionally clean old releases,
C(clean) will remove failed & old releases,
C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent))
choices: [ present, finalize, absent, clean, query ]
default: present
release:
description:
- the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
description:
- the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
default: releases
shared_path:
description:
- the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
description:
- the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
Returned in the C(deploy_helper.current_path) fact.
default: current
unfinished_filename:
description:
- the name of the file that indicates a deploy has not finished. All folders in the releases_path that
contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
automatically deleted from the I(new_release_path) during C(state=finalize).
default: DEPLOY_UNFINISHED
clean:
description:
- Whether to run the clean procedure in case of C(state=finalize).
type: bool
default: 'yes'
keep_releases:
description:
- the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
default: 5
notes:
- Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
be much of a problem.
'''
EXAMPLES = '''
# General explanation, starting with an example folder structure for a project:
# root:
# releases:
# - 20140415234508
# - 20140415235146
# - 20140416082818
#
# shared:
# - sessions
# - uploads
#
# current: releases/20140416082818
# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.
# Typical usage
- name: Initialize the deploy root and gather facts
deploy_helper:
path: /path/to/root
- name: Clone the project to the new release folder
git:
repo: git://foosball.example.org/path/to/repo.git
dest: '{{ deploy_helper.new_release_path }}'
version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
file:
path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
state: touch
- name: Perform some build steps, like running your dependency manager for example
composer:
command: install
working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
file:
path: '{{ deploy_helper.shared_path }}/{{ item }}'
state: directory
with_items:
- sessions
- uploads
- name: Add symlinks from the new release to the shared folder
file:
path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- path: app/sessions
src: sessions
- path: web/uploads
src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
deploy_helper:
path: /path/to/root
state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: present
# all paths can be absolute or relative (to the 'path' parameter)
- deploy_helper:
path: /path/to/root
releases_path: /var/www/project/releases
shared_path: /var/www/shared
current_path: /var/www/active
# Using your own naming strategy for releases (a version tag in this case):
- deploy_helper:
path: /path/to/root
release: v1.1.1
state: present
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Using a different unfinished_filename:
- deploy_helper:
path: /path/to/root
unfinished_filename: README.md
release: '{{ deploy_helper.new_release }}'
state: finalize
# Postponing the cleanup of older builds:
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
clean: False
- deploy_helper:
path: /path/to/root
state: clean
# Or running the cleanup ahead of the new deploy
- deploy_helper:
path: /path/to/root
state: clean
- deploy_helper:
path: /path/to/root
state: present
# Keeping more old releases:
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- deploy_helper:
path: /path/to/root
state: clean
keep_releases: 10
# Removing the entire project root folder
- deploy_helper:
path: /path/to/root
state: absent
# Debugging the facts returned by the module
- deploy_helper:
path: /path/to/root
- debug:
var: deploy_helper
'''
import os
import shutil
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class DeployHelper(object):
    """Implements the folder/symlink bookkeeping for the deploy_helper module.

    All state comes from the AnsibleModule parameters; every mutating
    method honours check mode and returns whether it changed (or would
    change) anything.
    """

    def __init__(self, module):
        self.module = module
        self.file_args = module.load_file_common_arguments(module.params)

        self.clean = module.params['clean']
        self.current_path = module.params['current_path']
        self.keep_releases = module.params['keep_releases']
        self.path = module.params['path']
        self.release = module.params['release']
        self.releases_path = module.params['releases_path']
        self.shared_path = module.params['shared_path']
        self.state = module.params['state']
        self.unfinished_filename = module.params['unfinished_filename']

    def gather_facts(self):
        """Return the deploy_helper fact dict (paths, previous/new release)."""
        current_path = os.path.join(self.path, self.current_path)
        releases_path = os.path.join(self.path, self.releases_path)
        if self.shared_path:
            shared_path = os.path.join(self.path, self.shared_path)
        else:
            shared_path = None

        previous_release, previous_release_path = self._get_last_release(current_path)

        # only the states that may create a release generate a timestamp
        if not self.release and (self.state == 'query' or self.state == 'present'):
            self.release = time.strftime("%Y%m%d%H%M%S")

        if self.release:
            new_release_path = os.path.join(releases_path, self.release)
        else:
            new_release_path = None

        return {
            'project_path': self.path,
            'current_path': current_path,
            'releases_path': releases_path,
            'shared_path': shared_path,
            'previous_release': previous_release,
            'previous_release_path': previous_release_path,
            'new_release': self.release,
            'new_release_path': new_release_path,
            'unfinished_filename': self.unfinished_filename
        }

    def delete_path(self, path):
        """Recursively delete directory *path*; return True if it existed."""
        if not os.path.lexists(path):
            return False

        if not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        if not self.module.check_mode:
            try:
                shutil.rmtree(path, ignore_errors=False)
            except Exception as e:
                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())

        return True

    def create_path(self, path):
        """Create directory *path* (and parents) and apply file attributes."""
        changed = False

        if not os.path.lexists(path):
            changed = True
            if not self.module.check_mode:
                os.makedirs(path)

        elif not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)

        return changed

    def check_link(self, path):
        """Fail early if *path* exists but is not a symlink."""
        if os.path.lexists(path):
            if not os.path.islink(path):
                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)

    def create_link(self, source, link_name):
        """Point symlink *link_name* at *source*; return whether it changed.

        An existing link is replaced via a temporary link + rename so there
        is never a moment without a valid 'current' symlink.
        """
        changed = False

        if os.path.islink(link_name):
            norm_link = os.path.normpath(os.path.realpath(link_name))
            norm_source = os.path.normpath(os.path.realpath(source))
            if norm_link == norm_source:
                changed = False
            else:
                changed = True
                if not self.module.check_mode:
                    if not os.path.lexists(source):
                        # message grammar fixed ("doesn't exists" -> "does not exist")
                        self.module.fail_json(msg="the symlink target %s does not exist" % source)
                    tmp_link_name = link_name + '.' + self.unfinished_filename
                    if os.path.islink(tmp_link_name):
                        os.unlink(tmp_link_name)
                    os.symlink(source, tmp_link_name)
                    os.rename(tmp_link_name, link_name)
        else:
            changed = True
            if not self.module.check_mode:
                os.symlink(source, link_name)

        return changed

    def remove_unfinished_file(self, new_release_path):
        """Delete the unfinished-marker file of the new release, if present."""
        changed = False
        unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
        if os.path.lexists(unfinished_file_path):
            changed = True
            if not self.module.check_mode:
                os.remove(unfinished_file_path)

        return changed

    def remove_unfinished_builds(self, releases_path):
        """Delete every release folder still carrying the unfinished marker."""
        changes = 0

        for release in os.listdir(releases_path):
            if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
                if self.module.check_mode:
                    changes += 1
                else:
                    changes += self.delete_path(os.path.join(releases_path, release))

        return changes

    def remove_unfinished_link(self, path):
        """Remove a leftover temporary symlink from an aborted finalize."""
        changed = False

        # state=clean may run without a release name; there is then no
        # release-specific temporary link to look for (previously this
        # raised TypeError on the None + str concatenation below)
        if not self.release:
            return changed

        tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
        # lexists() also matches a dangling symlink, which exists() would
        # miss, and check mode must still report the pending removal
        if os.path.lexists(tmp_link_name):
            changed = True
            if not self.module.check_mode:
                os.remove(tmp_link_name)

        return changed

    def cleanup(self, releases_path, reserve_version):
        """Delete all but the newest keep_releases releases; return a count.

        *reserve_version* (the release being finalized) is never deleted.
        """
        changes = 0

        if os.path.lexists(releases_path):
            releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
            try:
                releases.remove(reserve_version)
            except ValueError:
                pass

            if not self.module.check_mode:
                # newest first by creation time; everything past the keep
                # window is removed
                releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
                for release in releases[self.keep_releases:]:
                    changes += self.delete_path(os.path.join(releases_path, release))
            elif len(releases) > self.keep_releases:
                changes += (len(releases) - self.keep_releases)

        return changes

    def _get_file_args(self, path):
        """Return the module's common file arguments rebased onto *path*."""
        file_args = self.file_args.copy()
        file_args['path'] = path
        return file_args

    def _get_last_release(self, current_path):
        """Resolve the release the 'current' symlink points at (or None)."""
        previous_release = None
        previous_release_path = None

        if os.path.lexists(current_path):
            previous_release_path = os.path.realpath(current_path)
            previous_release = os.path.basename(previous_release_path)

        return previous_release, previous_release_path
def main():
    """Module entry point: dispatch on the requested state, report changes."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(aliases=['dest'], required=True, type='path'),
            release=dict(required=False, type='str', default=None),
            releases_path=dict(required=False, type='str', default='releases'),
            shared_path=dict(required=False, type='path', default='shared'),
            current_path=dict(required=False, type='path', default='current'),
            keep_releases=dict(required=False, type='int', default=5),
            clean=dict(required=False, type='bool', default=True),
            unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
            state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
        ),
        add_file_common_args=True,
        supports_check_mode=True
    )

    deploy_helper = DeployHelper(module)
    facts = deploy_helper.gather_facts()

    result = {
        'state': deploy_helper.state
    }

    # each helper call returns the number of (would-be) changes; any
    # non-zero total marks the task as changed
    changes = 0

    if deploy_helper.state == 'query':
        # facts only, no filesystem changes
        result['ansible_facts'] = {'deploy_helper': facts}

    elif deploy_helper.state == 'present':
        deploy_helper.check_link(facts['current_path'])
        changes += deploy_helper.create_path(facts['project_path'])
        changes += deploy_helper.create_path(facts['releases_path'])
        if deploy_helper.shared_path:
            changes += deploy_helper.create_path(facts['shared_path'])

        result['ansible_facts'] = {'deploy_helper': facts}

    elif deploy_helper.state == 'finalize':
        if not deploy_helper.release:
            module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
        if deploy_helper.keep_releases <= 0:
            module.fail_json(msg="'keep_releases' should be at least 1")

        changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
        changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
        if deploy_helper.clean:
            changes += deploy_helper.remove_unfinished_link(facts['project_path'])
            changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
            changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])

    elif deploy_helper.state == 'clean':
        changes += deploy_helper.remove_unfinished_link(facts['project_path'])
        changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
        changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])

    elif deploy_helper.state == 'absent':

        # destroy the facts
        result['ansible_facts'] = {'deploy_helper': []}
        changes += deploy_helper.delete_path(facts['project_path'])

    if changes > 0:
        result['changed'] = True
    else:
        result['changed'] = False

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
immesys/smap | python/smap/drivers/htp.py | 6 | 2294 | """
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Drivers for HTP devices
"""
import struct
from smap.drivers import modbus
from smap.drivers.modbus import ModbusRegister as R
from smap.iface.modbustcp.ModbusTCP import ModbusRTU, FUNC_READ_INPUT
# every register below holds a big-endian signed 16-bit value scaled by 0.1
t = modbus.FactorStruct('>h', 0.1)

class VersaFlame(modbus.ModbusDriver):
    """sMAP Modbus driver for the HTP VersaFlame water heater.

    NOTE(review): register numbers and their meanings are taken verbatim
    from the table below; confirm against the device's Modbus register map
    before extending it.
    """
    # this guy used RTU
    CLIENT = ModbusRTU
    # cap on registers fetched per Modbus read request
    MAX_READ_RANGE = 5
    REGISTERS = {
        # input registers
        51 : R("/ntc6", "F", t, "NTC 6 Temperature", FUNC_READ_INPUT),
        52 : R("/ntc7", "F", t, "NTC 7 Temperature", FUNC_READ_INPUT),
        53 : R("/solar_panel_temp", "F", t, "", FUNC_READ_INPUT),
        54 : R("/solar_flow", "LPM", t, "", FUNC_READ_INPUT),
        55 : R("/solar_temp", "F", t, "", FUNC_READ_INPUT),

        # holding registers
        4 : R("/tank_setpoint", "F", t, "Tank set point"),
        5 : R("/tank_temperature", "F", t, "Tank temperature"),
        6 : R("/oat", "F", t, "Outside air temperature"),
        }
| bsd-2-clause |
nathaniel-mahieu/bitcoin | qa/rpc-tests/abandonconflict.py | 13 | 7700 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import urllib.parse
class AbandonConflictTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
assert(len(self.nodes[0].getrawmempool()) == 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert(unconfbalance == newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
assert(len(self.nodes[0].getrawmempool()) == 0)
assert(self.nodes[0].getbalance() == balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
assert(len(self.nodes[0].getrawmempool()) == 0)
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert(newbalance == balance - Decimal("10"))
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
print(str(balance) + " -> " + str(newbalance) + " ?")
# Run the abandoned-transaction conflict test when executed as a script.
if __name__ == '__main__':
    AbandonConflictTest().main()
| mit |
kwinkunks/lasio | tests/test_enhancements.py | 1 | 1908 | import os, sys; sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import fnmatch
import numpy
import pytest
import math
from lasio import las, read, exceptions
# Directory containing this test module.
test_dir = os.path.dirname(__file__)


def egfn(fn):
    """Return the full path of example file *fn* under tests/examples."""
    return os.path.join(os.path.dirname(__file__), "examples", fn)


def stegfn(vers, fn):
    """Return the full path of example file *fn* under tests/examples/<vers>."""
    return os.path.join(os.path.dirname(__file__), "examples", vers, fn)
def test_autodepthindex():
    """Smoke test: files indexed in metres, feet and mixed units all load."""
    for example in ("autodepthindex_M.las", "autodepthindex_F.las",
                    "autodepthindex_FT.las", "autodepthindex_M_FT.las"):
        read(egfn(example))
def test_autodepthindex_inconsistent():
    """A file mixing depth units raises LASUnknownUnitError on conversion."""
    las = read(egfn("autodepthindex_M_FT.las"))
    with pytest.raises(exceptions.LASUnknownUnitError):
        print(las.depth_m)
def test_autodepthindex_m():
    """Metre-indexed file: depth_ft converts back to the original index."""
    las = read(egfn("autodepthindex_M.las"))
    assert las.depth_ft[-1] * 0.3048 == las.index[-1]
def test_autodepthindex_f():
    """Feet-indexed ('F') file: depth_m converts back to the original index."""
    las = read(egfn("autodepthindex_F.las"))
    assert las.depth_m[-1] / 0.3048 == las.index[-1]
def test_autodepthindex_ft():
    """Feet-indexed ('FT') file: depth_m converts back to the original index."""
    las = read(egfn("autodepthindex_FT.las"))
    assert las.depth_m[-1] / 0.3048 == las.index[-1]
def test_df_indexing():
    """DataFrame depth indexing agrees with positional curve indexing."""
    las = read(egfn("6038187_v1.2.las"))
    depth = 9.05
    step = las.well["STEP"].value
    # Translate the physical depth into a row number via STRT/STEP.
    row = int(math.floor((depth / step) - (las.well["STRT"].value / step)))
    assert las["GAMN"][row] == las.df()["GAMN"][depth]
# TODO: make above test in reverse-ordered LAS (e.g. STRT > STOP)
def test_df_reverse():
    """DataFrame indexing works on a reverse-ordered (STRT > STOP) file."""
    las = read(egfn("sample_rev.las"))
    depth = 1667
    step = las.well["STEP"].value
    # NOTE(review): floor division (//) is used here where test_df_indexing
    # uses true division — confirm this is intentional for negative STEP.
    row = int(math.floor((depth // step) - (las.well["STRT"].value // step)))
    assert las["DT"][row] == las.df()["DT"][depth]
def test_df_curve_names():
    """DataFrame columns are the curve names minus the index curve."""
    las = read(egfn("sample_rev.las"))
    assert las.keys()[1:] == list(las.df().columns.values)
nkalodimas/invenio | modules/websubmit/lib/functions/Move_Revised_Files_to_Storage.py | 24 | 17765 | ## $Id: Move_Revised_Files_to_Storage.py,v 1.20 2009/03/26 13:48:42 jerome Exp $
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit function - Archives uploaded files
TODO:
- Add parameter 'elementNameToFilename' so that files to revise can
be matched by name instead of doctype.
- Icons are created only for uploaded files, but not for related format
created on the fly.
"""
__revision__ = "$Id$"
import time
import os
from invenio.bibdocfile import \
InvenioBibDocFileError, \
BibRecDocs
from invenio.errorlib import register_exception
from invenio.websubmit_icon_creator import \
create_icon, InvenioWebSubmitIconCreatorError
from invenio.config import CFG_BINDIR
from invenio.dbquery import run_sql
from invenio.websubmit_functions.Shared_Functions import \
createRelatedFormats
from invenio.bibdocfile_managedocfiles import get_description_and_comment
def Move_Revised_Files_to_Storage(parameters, curdir, form, user_info=None):
    """
    The function revises the files of a record with the newly uploaded
    files.

    This function can work only if you can define a mapping from the
    WebSubmit element name that uploads the file, to the doctype of
    the file. In most cases, the doctype is equivalent to the element
    name, or just map to 'Main' doctype. That is typically the case if
    you use the Move_Files_to_Storage.py function to upload the files
    at submission step. For eg. with the DEMOBOO submission of the
    Atlantis Demo site, a file is uploaded thanks to the DEMOBOO_FILE
    element/File input, which is mapped to doctype DEMOBOO_FILE.

    The function ignores files for which multiple files exist for a
    single doctype in the record, or when several files are uploaded
    with the same element name. If the record to revise does not have
    a corresponding file, the file is inserted

    This function is similar to Move_Uploaded_Files_to_Storage.py,
    excepted that Move_Uploaded_Files_to_Storage relies on files
    uploaded from the web interface created by
    Create_Upload_Files_Interface.py, while this function relies on
    the files uploaded by a regular WebSubmit page that you have built
    from WebSubmit admin:

    Regular WebSubmit interface --(upload file)--> Move_Revised_Files_to_Storage.py
    Create_Upload_Files_Interface.py --(upload file)--> Move_Uploaded_Files_to_Storage.py

    The main advantages of this function over the functions
    Create_Upload_Files_Interface.py/Move_Uploaded_Files_to_Storage is
    that it lets you customize the display of your submission in the
    way you want, which could be simpler for your users if you usually
    only upload a few and fixed number of files per record. The
    disadvantages are that this function is not capable of : deleting
    files, adding an alternative format to a file, add a variable
    number of files, does not allow to set permissions at the level of
    file, does not support user comments, renaming, etc.

    @param parameters:(dictionary) - must contain:
      + elementNameToDoctype: maps an element/field name to a doctype.
                              Eg. the file uploaded from the
                              DEMOBOO_FILE element (input file tag)
                              should revise the file with document
                              type (doctype) "Main":
                                  DEMOBOO_FILE=Main|DEMOBOO_FILE_2=ADDITIONAL
                              ('=' separates element name and doctype
                               '|' separates each doctype/element name group)
                              In most cases, the element name == doctype:
                                  DEMOBOO_FILE=DEMOBOO_FILE|DEMOBOO_FILE_2=DEMOBOO_FILE_2

      + createIconDoctypes: the list of doctypes for which an icon
                            should be created when revising the file.
                            Eg:
                                Figure|Graph
                            ('|' separated values)
                            Use '*' for all doctypes

      + iconsize: size of the icon to create (when applicable)

      + keepPreviousVersionDoctypes: the list of doctypes for which
                                     the function should keep previous
                                     versions visible when revising a
                                     file.
                                     Eg:
                                         Main|Additional
                                     ('|' separated values)
                                     Default is all

      + createRelatedFormats: if uploaded files get converted to
                              whatever format we can (1) or not (0)
    """
    # pylint: disable=E0602
    # sysno is defined in the WebSubmit functions sandbox.
    global sysno

    bibrecdocs = BibRecDocs(int(sysno))

    # Wash function parameters
    (element_name_and_doctype, create_icon_doctypes, iconsize,
     keep_previous_version_doctypes, createRelatedFormats_p) = \
     wash_function_parameters(parameters, curdir)

    for element_name, doctype in element_name_and_doctype:
        _do_log(curdir, "Processing " + element_name)
        # Check if there is a corresponding file.  read_file() returns
        # None when the element value is missing or points outside
        # curdir: treat that as "no file uploaded" instead of letting
        # os.path.join() fail on the None component.
        uploaded_filename = read_file(curdir, element_name)
        if uploaded_filename:
            file_path = os.path.join(curdir, 'files', element_name,
                                     uploaded_filename)
        else:
            file_path = None
        if file_path and os.path.exists(file_path):
            # Now identify which file to revise
            files_in_record = bibrecdocs.list_bibdocs(doctype)
            if len(files_in_record) == 1:
                # Ok, we can revise
                bibdoc_name = bibrecdocs.get_docname(files_in_record[0].id)
                revise(bibrecdocs, curdir, sysno, file_path,
                       bibdoc_name, doctype, iconsize,
                       create_icon_doctypes,
                       keep_previous_version_doctypes,
                       createRelatedFormats_p)
            elif len(files_in_record) == 0:
                # We must add the file
                add(bibrecdocs, curdir, sysno, file_path,
                    doctype, iconsize, create_icon_doctypes,
                    createRelatedFormats_p)
            else:
                # files_in_record holds BibDoc objects: join their
                # docnames, not the objects themselves (joining the
                # objects raised TypeError and aborted the function).
                docnames = [bibrecdocs.get_docname(bibdoc.id) \
                            for bibdoc in files_in_record]
                _do_log(curdir, "  %s ignored, because multiple files found for same doctype %s in record %s: %s" %\
                        (element_name, doctype, sysno,
                         ', '.join(docnames)))
        else:
            _do_log(curdir, "  No corresponding file found (%s)" % file_path)

    # Update the MARC
    bibdocfile_bin = os.path.join(CFG_BINDIR, 'bibdocfile --yes-i-know')
    os.system(bibdocfile_bin + " --fix-marc --recid=" + sysno)

    # Delete the HB BibFormat cache in the DB, so that the fulltext
    # links do not point to possible dead files
    run_sql("DELETE LOW_PRIORITY from bibfmt WHERE format='HB' AND id_bibrec=%s", (sysno,))
    # pylint: enable=E0602
def add(bibrecdocs, curdir, sysno, file_path, doctype,
        iconsize, create_icon_doctypes, createRelatedFormats_p):
    """
    Adds the file using bibdocfile

    @param bibrecdocs: (BibRecDocs) files manager of the record to update
    @param curdir: (string) submission directory, used for logging
    @param sysno: record id, used only in the error report
    @param file_path: (string) full path of the file to attach
    @param doctype: (string) document type to attach the file under
    @param iconsize: icon scaling passed to _create_icon()
    @param create_icon_doctypes: (list) doctypes for which an icon is made
                                 ('*' means all doctypes)
    @param createRelatedFormats_p: if True, also derive related formats
    """
    try:
        # Add file
        bibdoc = bibrecdocs.add_new_file(file_path,
                                         doctype,
                                         never_fail=True)
        _do_log(curdir, '  Added ' + bibrecdocs.get_docname(bibdoc.id) + ': ' + \
                file_path)

        # Add icon
        iconpath = ''
        if doctype in create_icon_doctypes or \
               '*' in create_icon_doctypes:
            iconpath = _create_icon(file_path, iconsize)
            # _create_icon() returns None on failure: skip quietly.
            if iconpath is not None:
                bibdoc.add_icon(iconpath)
                _do_log(curdir, '  Added icon to ' + \
                        bibrecdocs.get_docname(bibdoc.id) + ': ' + iconpath)

        # Automatically create additional formats when
        # possible.
        additional_formats = []
        if createRelatedFormats_p:
            additional_formats = createRelatedFormats(file_path,
                                                      overwrite=False)

        for additional_format in additional_formats:
            bibdoc.add_new_format(additional_format,
                                  bibrecdocs.get_docname(bibdoc.id))
            # Log
            # NOTE(review): this log line appends 'iconpath', which looks
            # like it was meant to be the format path — confirm.
            _do_log(curdir, '  Added format ' + additional_format + \
                    ' to ' + bibrecdocs.get_docname(bibdoc.id) + ': ' + iconpath)

    except InvenioBibDocFileError, e:
        # Format already existed. How come? We should
        # have checked this in Create_Upload_Files_Interface.py
        register_exception(prefix='Move_Revised_Files_to_Storage ' \
                           'tried to add already existing file %s ' \
                           'to record %i. %s' % \
                           (file_path, sysno, curdir),
                           alert_admin=True)
def revise(bibrecdocs, curdir, sysno, file_path, bibdoc_name, doctype,
           iconsize, create_icon_doctypes,
           keep_previous_version_doctypes, createRelatedFormats_p):
    """
    Revises the given bibdoc with a new file

    If the doctype is listed in keep_previous_version_doctypes, the new
    file is attached as a new version; otherwise the existing bibdoc is
    soft-deleted first and the file re-added under the same name, with
    the previous description/comment carried over.

    @param bibdoc_name: (string) docname of the bibdoc to revise
    (other parameters: see add())
    """
    try:
        # Retrieve the current description and comment, or they
        # will be lost when revising
        latest_files = bibrecdocs.list_bibdocs(doctype)[0].list_latest_files()
        prev_desc, prev_comment = get_description_and_comment(latest_files)

        if doctype in keep_previous_version_doctypes:
            # Standard procedure, keep previous version
            bibdoc = bibrecdocs.add_new_version(file_path,
                                                bibdoc_name,
                                                prev_desc,
                                                prev_comment)
            _do_log(curdir, '  Revised ' + bibrecdocs.get_docname(bibdoc.id) + \
                    ' with : ' + file_path)

        else:
            # Soft-delete previous versions, and add new file
            # (we need to get the doctype before deleting)
            if bibrecdocs.has_docname_p(bibdoc_name):
                # Delete only if bibdoc originally
                # existed
                bibrecdocs.delete_bibdoc(bibdoc_name)
                _do_log(curdir, '  Deleted ' + bibdoc_name)
            try:
                bibdoc = bibrecdocs.add_new_file(file_path,
                                                 doctype,
                                                 bibdoc_name,
                                                 never_fail=True,
                                                 description=prev_desc,
                                                 comment=prev_comment)
                _do_log(curdir, '  Added ' + bibrecdocs.get_docname(bibdoc.id) + ': ' + \
                        file_path)

            except InvenioBibDocFileError, e:
                _do_log(curdir, str(e))
                register_exception(prefix='Move_Uploaded_Files_to_Storage ' \
                                   'tried to revise a file %s ' \
                                   'named %s in record %i. %s' % \
                                   (file_path, bibdoc_name, sysno, curdir),
                                   alert_admin=True)

        # Add icon
        iconpath = ''
        if doctype in create_icon_doctypes or \
               '*' in create_icon_doctypes:
            iconpath = _create_icon(file_path, iconsize)
            # _create_icon() returns None on failure: skip quietly.
            if iconpath is not None:
                bibdoc.add_icon(iconpath)
                _do_log(curdir, 'Added icon to ' + \
                        bibrecdocs.get_docname(bibdoc.id) + ': ' + iconpath)

        # Automatically create additional formats when
        # possible.
        additional_formats = []
        if createRelatedFormats_p:
            additional_formats = createRelatedFormats(file_path,
                                                      overwrite=False)

        for additional_format in additional_formats:
            bibdoc.add_new_format(additional_format,
                                  bibdoc_name,
                                  prev_desc,
                                  prev_comment)
            # Log
            # NOTE(review): 'Addeded' is a typo in this log message, and
            # 'iconpath' here is probably meant to be the format path.
            _do_log(curdir, '  Addeded format ' + additional_format + \
                    ' to ' + bibrecdocs.get_docname(bibdoc.id) + ': ' + iconpath)

    except InvenioBibDocFileError, e:
        # Format already existed. How come? We should
        # have checked this in Create_Upload_Files_Interface.py
        register_exception(prefix='Move_Revised_Files_to_Storage ' \
                           'tried to revise a file %s ' \
                           'named %s in record %i. %s' % \
                           (file_path, bibdoc_name, sysno, curdir),
                           alert_admin=True)
def wash_function_parameters(parameters, curdir):
    """
    Returns the functions (admin-defined) parameters washed and
    initialized properly, as a tuple:

    Parameters:

        check Move_Revised_Files_to_Storage(..) docstring

    Returns:

        tuple (element_name_and_doctype, create_icon_doctypes, iconsize,
               keep_previous_version_doctypes, createRelatedFormats_p)
    """
    # The mapping element name -> doctype.
    # '|' is used to separate mapping groups, and '=' to separate
    # element name and doctype.
    # Eg: DEMOBOO_FILE=Main|DEMOBOO_FILEADDITIONAL=Additional File
    element_name_and_doctype = [mapping.strip().split("=") for mapping \
                                in parameters['elementNameToDoctype'].split('|') \
                                if mapping.strip() != '']

    # The list of doctypes for which we want to create an icon
    # (list of values separated by "|")
    create_icon_doctypes = [doctype.strip() for doctype \
                            in parameters['createIconDoctypes'].split('|') \
                            if doctype.strip() != '']

    # If we should create additional formats when applicable (1) or
    # not (0)
    try:
        createRelatedFormats_p = int(parameters['createRelatedFormats'])
    except ValueError, e:
        # Non-numeric admin value: default to "do not create formats".
        createRelatedFormats_p = False

    # Icons size
    iconsize = parameters.get('iconsize')

    # The list of doctypes for which we want to keep previous versions
    # of files visible.
    # (list of values separated by "|")
    keep_previous_version_doctypes = [doctype.strip() for doctype \
                                      in parameters['keepPreviousVersionDoctypes'].split('|') \
                                      if doctype.strip() != '']

    if not keep_previous_version_doctypes:
        # Nothing specified: keep all by default
        keep_previous_version_doctypes = [doctype for (elem, doctype) \
                                          in element_name_and_doctype]

    return (element_name_and_doctype, create_icon_doctypes, iconsize,
            keep_previous_version_doctypes, createRelatedFormats_p)
def _do_log(log_dir, msg):
"""
Log what we have done, in case something went wrong.
Nice to compare with bibdocactions.log
Should be removed when the development is over.
"""
log_file = os.path.join(log_dir, 'performed_actions.log')
file_desc = open(log_file, "a+")
file_desc.write("%s --> %s\n" %(time.strftime("%Y-%m-%d %H:%M:%S"), msg))
file_desc.close()
def _create_icon(file_path, icon_size, format='gif', verbosity=9):
    """
    Creates icon of given file.

    Returns path to the icon. If creation fails, return None, and
    register exception (send email to admin).

    Parameters:

       - file_path : *str* full path to icon
       - icon_size : *int* the scaling information to be used for the
                     creation of the new icon.
       - format : *str* image file format of the created icon
       - verbosity : *int* the verbosity level under which the program
                     is to run;
    """
    # NOTE(review): the 'format' parameter shadows the builtin of the
    # same name; kept as-is since callers may pass it by keyword.
    icon_path = None
    try:
        filename = os.path.splitext(os.path.basename(file_path))[0]
        (icon_dir, icon_name) = create_icon(
            {'input-file':file_path,
             'icon-name': "icon-%s" % filename,
             'multipage-icon': False,
             'multipage-icon-delay': 0,
             'icon-scale': icon_size,
             'icon-file-format': format,
             'verbosity': verbosity})
        icon_path = icon_dir + os.sep + icon_name
    except InvenioWebSubmitIconCreatorError, e:
        # Icon creation is best-effort: report to the admin but do not
        # fail the submission.
        register_exception(prefix='Icon for file %s could not be created: %s' % \
                           (file_path, str(e)),
                           alert_admin=False)
    return icon_path
def read_file(curdir, filename):
    """
    Reads a file in curdir.

    Returns None if does not exist, cannot be read, or if file is not
    really in curdir (protects against '..' or absolute components in
    *filename* escaping the directory).
    """
    try:
        file_path = os.path.abspath(os.path.join(curdir, filename))
        # Compare against the directory prefix *including* the path
        # separator, so that e.g. '/tmp/foo-evil' does not pass the
        # check for curdir '/tmp/foo' (plain startswith() did).
        allowed_prefix = os.path.join(os.path.abspath(curdir), '')
        if not file_path.startswith(allowed_prefix):
            return None
        # open() instead of the Python2-only file() builtin; the context
        # manager guarantees the handle is closed.
        with open(file_path, 'r') as file_desc:
            content = file_desc.read()
    except Exception:
        # Best-effort by design: any failure yields None.  (The original
        # bare 'except:' also swallowed KeyboardInterrupt/SystemExit.)
        content = None
    return content
| gpl-2.0 |
ec-geolink/d1lod | d1lod/d1lod/people/processing.py | 1 | 3993 | """
file: processing.py
author: Bryce Mecum (mecum@nceas.ucsb.edu)
Processes the scientific metadata documents in ./documents for person
and organization information. For each document, the script tries to find
the person in an existing list. Matches are currently made off of all
information available but future versions should be more loose about this.
The document a person/organization was found in are also added to that
person/organization so the documents belonging to that person/organization
can be attributed to them and used in later graph generation activities.
"""
import os
import re
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
from d1lod.metadata import eml
from d1lod.metadata import dryad
from d1lod.metadata import fgdc
def processDirectory(job):
    """Parse every file in job.directory and process it for creators.

    Files that fail to parse as XML are skipped silently and are not
    counted in the final total.
    """
    filenames = os.listdir("%s" % job.directory)
    i = 0

    for filename in filenames:
        # Progress marker every 1000 successfully-processed documents.
        if i % 1000 == 0:
            print "%d..." % i
        try:
            xmldoc = ET.parse("%s/%s" % (job.directory, filename))
        except ParseError:
            # Not well-formed XML: skip this file.
            continue

        processDocument(job, xmldoc, filename)
        i += 1

    print "Processed a total of %d documents" % i
def detectMetadataFormat(xmldoc):
    """ Detect the format of the metadata in `xmldoc`.

    Returns one of "eml", "dryad", "fgdc" or "unknown", decided by the
    tag of the root element.
    """
    tag = xmldoc.tag

    if re.search("eml$", tag):
        return "eml"
    if re.search("Dryad", tag):
        return "dryad"
    if re.search("metadata", tag):
        return "fgdc"
    return "unknown"
def extractCreators(identifier, doc):
    """
    Detect the format of and extract people/organization creators from a document.

    Arguments:
        identifier: str
            The document's PID

        doc:
            An XML document of the scientific metadata

    Returns:
        List of records (empty when doc is None or the format is unknown).
    """
    if doc is None:
        return []

    # Detect the format
    metadata_format = detectMetadataFormat(doc)

    # Process the document for people/orgs with the matching extractor
    if metadata_format == "eml":
        records = eml.process(doc, identifier)
    elif metadata_format == "dryad":
        records = dryad.process(doc, identifier)
    elif metadata_format == "fgdc":
        records = fgdc.process(doc, identifier)
    else:
        print "Unknown format."
        records = []

    return records
def processDocument(job, xmldoc, filename):
    """ Process an individual document."""
    document = filename

    # Strip trailing revision number from filename
    revision_match = re.match("(autogen.\d+)\.\d", document)
    if revision_match is not None:
        document = revision_match.groups(0)[0]

    # Map the filename to its PID if we have a map to go off of
    if job.identifier_map is not None and document in job.identifier_map:
        document = job.identifier_map[document]

    # Null out the document PID if it's not public
    if job.public_pids is not None and document not in job.public_pids:
        document = ''

    records = extractCreators(document, xmldoc)
    if records is not None:
        saveRecords(job, records)
def saveRecords(job, records):
    """Saves an array of records to disk, according to their filename.

    Person records go through job.writePerson; when a person record has
    a non-empty 'organization', a derived organization record is written
    too. Organization records go through job.writeOrganization. Records
    without a 'type' key are skipped.
    """
    if records is None:
        return

    for entry in records:
        # Skip empty records
        if 'type' not in entry:
            continue

        kind = entry['type']
        if kind == 'person':
            job.writePerson(entry)
            # Add their organization too (if applicable)
            if 'organization' in entry and len(entry['organization']) > 0:
                job.writeOrganization({
                    'name': entry['organization'],
                    'format': entry['format'],
                    'source': entry['source'],
                    'document': entry['document']
                })
        elif kind == 'organization':
            job.writeOrganization(entry)
| apache-2.0 |
lenarother/moderna | moderna/analyze/HBondCalculator.py | 1 | 6621 | #!/usr/bin/env python
#
# HBondCalculator.py
#
# Calculates hydrogen bonds between residues.
#
# http://iimcb.genesilico.pl/moderna/
#
__author__ = "Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Kristian Rother"
__email__ = "krother@genesilico.pl"
__status__ = "Prototype"
# -*- coding: cp1250 -*-
from Bio.PDB.Vector import calc_angle
from math import degrees
from Bio.PDB.Vector import Vector
from Constants import MAX_C1_DIST, MAXD1, MAXD2
from moderna.Constants import BACKBONE_RIBOSE_ATOMS
#TODO: bifurcated bonds
#TODO: different parameter sets
#TODO: h-bond tolerance depending on b-factor
def sq3_norm(a):
    """Return the length (norm) of vector ``a``."""
    return a.norm()
class HBond:
    """A single hydrogen bond between a donor atom and an acceptor atom."""

    def __init__(self, donor, acceptor, dist):
        # Atoms and their parent residues; dist is the donor-acceptor
        # distance in Angstroms.
        self.donor = donor
        self.acceptor = acceptor
        self.dist = dist
        self.donor_resi = donor.parent
        self.acceptor_resi = acceptor.parent

    def is_primary(self):
        """Returns True if hbond fulfills criteria for primary hbond:
        It should be a H-bond that occurs between donor and acceptor
        atoms, both of which belong to a base, and with the distance
        between the two atoms (N or O) <3.4A.
        """
        names = (self.donor.name, self.acceptor.name)
        for name in names:
            if name in BACKBONE_RIBOSE_ATOMS:
                return False
            if name[0] == 'C':
                return False
        return self.dist < 3.4

    def is_secondary(self):
        """Returns True if hbond fulfills criteria for secondary hbond:
        The second H-bond is determined with extended distances.
        The distance between donor and acceptor
        for base (N, O) - base (N, O)<3.75A,
        for base (N, O) - base (CH) <3.9A
        for base (N, O) - ribose (O20) <3.75A
        """
        limit = 3.9 if self.donor.name[0] == 'C' else 3.75
        return self.dist < limit

    def is_tertiary(self):
        """Returns True if hbond fulfills criteria for tertiary interaction hbond."""
        return True

    def __repr__(self):
        return "%s-%s" % (self.donor.name, self.acceptor.name)
class HBondParameterCalculator:
    """Class that performs the vector calculations for an hbond."""

    def __init__(self, donor, hydrogen, acceptor, acc_support):
        """Takes four sets of coord vectors."""
        self.donor = Vector(donor)
        self.acceptor = Vector(acceptor)
        self.hydrogen = Vector(hydrogen)
        self.acc_support = Vector(acc_support)
        # helper vectors (kept on the instance for backward compatibility;
        # the distance/angle properties below recompute what they need)
        self.dh = self.hydrogen - self.donor
        self.ha = self.hydrogen - self.acceptor
        self.acs = self.acc_support - self.acceptor

    # distances and angles, exposed as read-only properties
    def _get_d1(self):
        # hydrogen--acceptor distance
        return (self.acceptor - self.hydrogen).norm()

    def _get_d2(self):
        # donor--acceptor distance
        return (self.donor - self.acceptor).norm()

    def _get_alpha(self):
        # donor-hydrogen-acceptor angle, in degrees
        return degrees(calc_angle(self.donor, self.hydrogen, self.acceptor))

    def _get_beta(self):
        # hydrogen-acceptor-support angle, in degrees
        return degrees(calc_angle(self.hydrogen, self.acceptor, self.acc_support))

    def _get_gamma(self):
        # donor-hydrogen-support angle, in degrees
        return degrees(calc_angle(self.donor, self.hydrogen, self.acc_support))

    d1 = property(_get_d1)
    d2 = property(_get_d2)
    alpha = property(_get_alpha)
    beta = property(_get_beta)
    gamma = property(_get_gamma)

    def is_valid(self):
        """
        Returns boolean if the parameters are within range
        Criteria set I (default values) according to Suehnel/HBExplore
           d1 < 3.9 A, d2 < 2.5 A , a > 90, b > 90, c > 90.
        There is a mistake in the figure. If you read the text you find
        that d1 and d2 are mixed up! It should be d1<2.5 and d2<3.9
        """
        distances_ok = self.d2 < MAXD2 and self.d1 < MAXD1
        angles_ok = self.alpha > 90 and self.beta > 90 and self.gamma > 90
        if distances_ok and angles_ok:
            return True

    def __repr__(self):
        return """alpha:%5.1f beta:%5.1f gamma:%5.1f d1:%5.2f d2:%5.2f""" % (
            self.alpha, self.beta, self.gamma, self.d1, self.d2)
class HBondCalculator:
    """
    Calculates hydrogen bonds out of pairs of Residue objects.
    (moderna.ModernaResidue or Bio.PDB.Residue)
    """
    def calc_hbond_list(self, res1, res2):
        """
        Calculates and returns a list of HBond objects.
        res1, res2 - Residue objects
        """
        # shortcut: compare c1'-c1' distance first
        c1dist = res1["C1'"] - res2["C1'"]
        if c1dist > MAX_C1_DIST:
            return []
        hbonds = []
        # check donor/acceptor pairs in both directions
        for acceptor in res1.get_hbond_acceptors():
            for donor in res2.get_hbond_donors():
                hb = self.calc_hbond(donor, acceptor)
                if hb:
                    hbonds.append(hb)
        for acceptor in res2.get_hbond_acceptors():
            for donor in res1.get_hbond_donors():
                hb = self.calc_hbond(donor, acceptor)
                if hb:
                    hbonds.append(hb)
        return hbonds

    def get_acceptor_support(self, acceptor):
        """
        Returns coordinates of the support point behind the acceptor atom.
        According to Fig.1 in the Suehnel HBExplore pare, this is A1 if there is
        only one neighbor, and Am if there are two.
        Atoms with 3 or more neighbors will be rejected.
        """
        acc_support = None
        neighbors = acceptor.parent.get_neighbors(acceptor)
        if len(neighbors) == 1:
            acc_support = neighbors[0].coord
        elif len(neighbors) == 2:
            # midpoint between the two neighbors
            acc_support = (neighbors[0].coord + neighbors[1].coord) * 0.5
        return acc_support

    def calc_hbond(self, donor, acceptor):
        """
        Returns a HBond object for two atoms, or None.
        donor, acceptor - Bio.PDB.Atom objects
        """
        dist = donor - acceptor
        if dist < MAXD2:  # TODO: refactor this one out (checked twice)
            acc_support = self.get_acceptor_support(acceptor)
            # Use identity comparison: acc_support may be a numpy coord
            # array, for which '!= None' is elementwise and makes the
            # truth test ambiguous.
            if acc_support is not None:
                for hydrogen in donor.parent.get_donor_hydrogens(donor):
                    params = HBondParameterCalculator(donor.coord, hydrogen,
                                                      acceptor.coord, acc_support)
                    if params.is_valid():
                        return HBond(donor, acceptor, dist)
| gpl-3.0 |
fritsvanveen/QGIS | python/ext-libs/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")

    dnsnames = []
    # First try all dNSName entries of subjectAltName.
    for key, value in cert.get('subjectAltName', ()):
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)

    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)

    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
nvoron23/statsmodels | statsmodels/stats/tests/test_corrpsd.py | 31 | 16765 | # -*- coding: utf-8 -*-
"""Tests for finding a positive semi-definite correlation or covariance matrix
Created on Mon May 27 12:07:02 2013
Author: Josef Perktold
"""
import numpy as np
import scipy.sparse as sparse
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_equal)
from statsmodels.stats.correlation_tools import (
corr_nearest, corr_clipped, cov_nearest,
_project_correlation_factors, corr_nearest_factor, _spg_optim,
corr_thresholded, cov_nearest_factor_homog, FactoredPSDMatrix)
import warnings
def norm_f(x, y):
    '''Frobenius norm (square root of the summed squared differences)
    between two arrays
    '''
    delta = x - y
    return np.sqrt((delta ** 2).sum())
class Holder(object):
    """Bare attribute container used to hold R reference results."""
    pass
# R library Matrix results
# Reference values produced by R's Matrix::nearPD; the commented '#>'
# lines record the R calls that generated them.
cov1_r = Holder()
#> nc <- nearPD(pr, conv.tol = 1e-7, keepDiag = TRUE, doDykstra =FALSE, corr=TRUE)
#> cat_items(nc, prefix="cov1_r.")
# NOTE: this first 'mat' assignment (the raw R repr) is intentionally
# overwritten by the numeric matrix a few lines below.
cov1_r.mat = '''<S4 object of class structure("dpoMatrix", package = "Matrix")>'''
cov1_r.eigenvalues = np.array([
    4.197315628646795, 0.7540460243978023, 0.5077608149667492,
    0.3801267599652769, 0.1607508970775889, 4.197315628646795e-08
    ])
cov1_r.corr = '''TRUE'''
cov1_r.normF = 0.0743805226512533
cov1_r.iterations = 11
cov1_r.rel_tol = 8.288594638441735e-08
cov1_r.converged = '''TRUE'''
#> mkarray2(as.matrix(nc$mat), name="cov1_r.mat")
cov1_r.mat = np.array([
    1, 0.487968018215892, 0.642651880010906, 0.4906386709070835,
    0.6440990530811909, 0.8087111845493985, 0.487968018215892, 1,
    0.5141147294352735, 0.2506688108312097, 0.672351311297074,
    0.725832055882795, 0.642651880010906, 0.5141147294352735, 1,
    0.596827778712154, 0.5821917790519067, 0.7449631633814129,
    0.4906386709070835, 0.2506688108312097, 0.596827778712154, 1,
    0.729882058012399, 0.772150225146826, 0.6440990530811909,
    0.672351311297074, 0.5821917790519067, 0.729882058012399, 1,
    0.813191720191944, 0.8087111845493985, 0.725832055882795,
    0.7449631633814129, 0.772150225146826, 0.813191720191944, 1
    ]).reshape(6,6, order='F')

# Same computation, but as a covariance matrix (corr=FALSE) with the
# diagonal inflated by 0.01.
cov_r = Holder()
#nc <- nearPD(pr+0.01*diag(6), conv.tol = 1e-7, keepDiag = TRUE, doDykstra =FALSE, corr=FALSE)
#> cat_items(nc, prefix="cov_r.")
#cov_r.mat = '''<S4 object of class structure("dpoMatrix", package = "Matrix")>'''
cov_r.eigenvalues = np.array([
    4.209897516692652, 0.7668341923072066, 0.518956980021938,
    0.390838551407132, 0.1734728460460068, 4.209897516692652e-08
    ])
cov_r.corr = '''FALSE'''
cov_r.normF = 0.0623948693159157
cov_r.iterations = 11
cov_r.rel_tol = 5.83987595937896e-08
cov_r.converged = '''TRUE'''
#> mkarray2(as.matrix(nc$mat), name="cov_r.mat")
cov_r.mat = np.array([
    1.01, 0.486207476951913, 0.6428524769306785, 0.4886092840296514,
    0.645175579158233, 0.811533860074678, 0.486207476951913, 1.01,
    0.514394615153752, 0.2478398278204047, 0.673852495852274,
    0.7297661648968664, 0.6428524769306785, 0.514394615153752, 1.01,
    0.5971503271420517, 0.582018469844712, 0.7445177382760834,
    0.4886092840296514, 0.2478398278204047, 0.5971503271420517, 1.01,
    0.73161232298669, 0.7766852947049376, 0.645175579158233,
    0.673852495852274, 0.582018469844712, 0.73161232298669, 1.01,
    0.8107916469252828, 0.811533860074678, 0.7297661648968664,
    0.7445177382760834, 0.7766852947049376, 0.8107916469252828, 1.01
    ]).reshape(6,6, order='F')
def test_corr_psd():
    """A matrix that is already positive definite passes through unchanged."""
    corr = np.array([[1, -0.2, -0.9], [-0.2, 1, -0.2], [-0.9, -0.2, 1]])

    result = corr_nearest(corr, n_fact=100)
    assert_almost_equal(corr, result, decimal=14)

    result = corr_clipped(corr)
    assert_almost_equal(corr, result, decimal=14)

    result = cov_nearest(corr, n_fact=100)
    assert_almost_equal(corr, result, decimal=14)

    # the same holds for a covariance matrix with an inflated diagonal
    cov = corr + 0.001 * np.eye(3)
    result = cov_nearest(cov, n_fact=100)
    assert_almost_equal(cov, result, decimal=14)
class CheckCorrPSDMixin(object):
    """Shared checks comparing PSD-repair results against R's nearPD.

    Subclasses set ``cls.x`` (the input matrix) and ``cls.res`` (a Holder
    with the R reference values).
    """

    def test_nearest(self):
        expected = self.res
        nearest = corr_nearest(self.x, threshold=1e-7, n_fact=100)
        assert_almost_equal(nearest, expected.mat, decimal=3)
        dist = norm_f(self.x, nearest)
        assert_allclose(dist, expected.normF, rtol=0.0015)
        evals = np.linalg.eigvalsh(nearest)
        # R reports eigenvalues in descending order; ours are ascending
        assert_allclose(evals, expected.eigenvalues[::-1], rtol=0.003, atol=1e-7)
        assert_allclose(evals[0], 1e-7, rtol=1e-6)

    def test_clipped(self):
        expected = self.res
        clipped = corr_clipped(self.x, threshold=1e-7)
        # clipping is cruder than the nearest-PSD search: wider tolerances
        assert_almost_equal(clipped, expected.mat, decimal=1)
        dist = norm_f(self.x, clipped)
        assert_allclose(dist, expected.normF, rtol=0.15)
        evals = np.linalg.eigvalsh(clipped)
        assert_allclose(evals, expected.eigenvalues[::-1], rtol=0.1, atol=1e-7)
        assert_allclose(evals[0], 1e-7, rtol=0.02)

    def test_cov_nearest(self):
        expected = self.res
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nearest = cov_nearest(self.x, method='nearest', threshold=1e-7)
        assert_almost_equal(nearest, expected.mat, decimal=2)
        dist = norm_f(self.x, nearest)
        assert_allclose(dist, expected.normF, rtol=0.0015)
class TestCovPSD(object):
    """Compare cov_nearest against the R nearPD results for a 6x6 covariance."""

    @classmethod
    def setup_class(cls):
        corr = np.array([1, 0.477, 0.644, 0.478, 0.651, 0.826,
                         0.477, 1, 0.516, 0.233, 0.682, 0.75,
                         0.644, 0.516, 1, 0.599, 0.581, 0.742,
                         0.478, 0.233, 0.599, 1, 0.741, 0.8,
                         0.651, 0.682, 0.581, 0.741, 1, 0.798,
                         0.826, 0.75, 0.742, 0.8, 0.798, 1]).reshape(6, 6)
        # bump the diagonal to 1.01, matching the R run (pr + 0.01*diag(6))
        cls.x = corr + 0.01 * np.eye(6)
        cls.res = cov_r

    def test_cov_nearest(self):
        mat = self.x
        expected = self.res

        nearest = cov_nearest(mat, method='nearest')
        assert_almost_equal(nearest, expected.mat, decimal=3)
        assert_allclose(norm_f(mat, nearest), expected.normF, rtol=0.001)

        clipped = cov_nearest(mat, method='clipped')
        # clipping is approximate, so the tolerances are much looser
        assert_almost_equal(clipped, expected.mat, decimal=2)
        assert_allclose(norm_f(mat, clipped), expected.normF, rtol=0.15)
class TestCorrPSD1(CheckCorrPSDMixin):
    """Run the mixin checks on the raw correlation matrix (unit diagonal)."""

    @classmethod
    def setup_class(cls):
        vals = [1, 0.477, 0.644, 0.478, 0.651, 0.826,
                0.477, 1, 0.516, 0.233, 0.682, 0.75,
                0.644, 0.516, 1, 0.599, 0.581, 0.742,
                0.478, 0.233, 0.599, 1, 0.741, 0.8,
                0.651, 0.682, 0.581, 0.741, 1, 0.798,
                0.826, 0.75, 0.742, 0.8, 0.798, 1]
        cls.x = np.array(vals).reshape(6, 6)
        cls.res = cov1_r
def test_corrpsd_threshold():
    """The smallest eigenvalue of the corrected matrix should land on the
    requested threshold for all four correction entry points."""
    mat = np.array([[1, -0.9, -0.9], [-0.9, 1, -0.9], [-0.9, -0.9, 1]])

    for threshold in [0, 1e-15, 1e-10, 1e-6]:
        fixed = corr_nearest(mat, n_fact=100, threshold=threshold)
        assert_allclose(np.linalg.eigvalsh(fixed)[0], threshold,
                        rtol=1e-6, atol=1e-15)

        # clipping only gets the eigenvalue approximately right
        fixed = corr_clipped(mat, threshold=threshold)
        assert_allclose(np.linalg.eigvalsh(fixed)[0], threshold,
                        rtol=0.25, atol=1e-15)

        fixed = cov_nearest(mat, method='nearest', n_fact=100,
                            threshold=threshold)
        assert_allclose(np.linalg.eigvalsh(fixed)[0], threshold,
                        rtol=1e-6, atol=1e-15)

        # default method of cov_nearest is the clipped variant
        fixed = cov_nearest(mat, n_fact=100, threshold=threshold)
        assert_allclose(np.linalg.eigvalsh(fixed)[0], threshold,
                        rtol=0.25, atol=1e-15)
class Test_Factor(object):
    """Tests for the factor-structured (low-rank) correlation/covariance
    routines: corr_nearest_factor, cov_nearest_factor_homog,
    FactoredPSDMatrix, _spg_optim and corr_thresholded."""

    def test_corr_nearest_factor_arrpack(self):
        """Regression test pinning the scipy.sparse.linalg.svds output
        used internally by the rank-1 factor fit."""

        # regression results for svds call
        u2 = np.array([[
            6.39407581e-19, 9.15225947e-03, 1.82631698e-02,
            2.72917181e-02, 3.61975557e-02, 4.49413101e-02,
            5.34848732e-02, 6.17916613e-02, 6.98268388e-02,
            7.75575058e-02, 8.49528448e-02, 9.19842264e-02,
            9.86252769e-02, 1.04851906e-01, 1.10642305e-01,
            1.15976906e-01, 1.20838331e-01, 1.25211306e-01,
            1.29082570e-01, 1.32440778e-01, 1.35276397e-01,
            1.37581605e-01, 1.39350201e-01, 1.40577526e-01,
            1.41260396e-01, 1.41397057e-01, 1.40987160e-01,
            1.40031756e-01, 1.38533306e-01, 1.36495727e-01,
            1.33924439e-01, 1.30826443e-01, 1.27210404e-01,
            1.23086750e-01, 1.18467769e-01, 1.13367717e-01,
            1.07802909e-01, 1.01791811e-01, 9.53551023e-02,
            8.85157320e-02, 8.12989329e-02, 7.37322125e-02,
            6.58453049e-02, 5.76700847e-02, 4.92404406e-02,
            4.05921079e-02, 3.17624629e-02, 2.27902803e-02,
            1.37154584e-02, 4.57871801e-03, -4.57871801e-03,
            -1.37154584e-02, -2.27902803e-02, -3.17624629e-02,
            -4.05921079e-02, -4.92404406e-02, -5.76700847e-02,
            -6.58453049e-02, -7.37322125e-02, -8.12989329e-02,
            -8.85157320e-02, -9.53551023e-02, -1.01791811e-01,
            -1.07802909e-01, -1.13367717e-01, -1.18467769e-01,
            -1.23086750e-01, -1.27210404e-01, -1.30826443e-01,
            -1.33924439e-01, -1.36495727e-01, -1.38533306e-01,
            -1.40031756e-01, -1.40987160e-01, -1.41397057e-01,
            -1.41260396e-01, -1.40577526e-01, -1.39350201e-01,
            -1.37581605e-01, -1.35276397e-01, -1.32440778e-01,
            -1.29082570e-01, -1.25211306e-01, -1.20838331e-01,
            -1.15976906e-01, -1.10642305e-01, -1.04851906e-01,
            -9.86252769e-02, -9.19842264e-02, -8.49528448e-02,
            -7.75575058e-02, -6.98268388e-02, -6.17916613e-02,
            -5.34848732e-02, -4.49413101e-02, -3.61975557e-02,
            -2.72917181e-02, -1.82631698e-02, -9.15225947e-03,
            -3.51829569e-17]]).T
        s2 = np.array([24.88812183])

        d = 100
        dm = 1

        # Construct a test matrix with exact factor structure
        X = np.zeros((d, dm), dtype=np.float64)
        x = np.linspace(0, 2 * np.pi, d)
        for j in range(dm):
            X[:, j] = np.sin(x * (j + 1))
        _project_correlation_factors(X)
        X *= 0.7
        mat = np.dot(X, X.T)
        np.fill_diagonal(mat, 1.)

        from scipy.sparse.linalg import svds
        u, s, vt = svds(mat, dm)

        # svds returns singular vectors with arbitrary sign; align before
        # comparing against the stored reference
        dsign = np.sign(u[1]) * np.sign(u2[1])

        assert_allclose(u, dsign * u2, rtol=1e-6, atol=1e-14)
        assert_allclose(s, s2, rtol=1e-6)

    def test_corr_nearest_factor(self):
        """Recover a known rank-dm factor structure from the full matrix."""
        # expected first objective values per rank (loose regression check)
        objvals = [np.array([6241.8, 6241.8, 579.4, 264.6, 264.3]),
                   np.array([2104.9, 2104.9, 710.5, 266.3, 286.1])]

        d = 100
        for dm in 1, 2:

            # Construct a test matrix with exact factor structure; the tiny
            # noise keeps the factors away from degenerate configurations
            X = np.zeros((d, dm), dtype=np.float64)
            x = np.linspace(0, 2 * np.pi, d)
            np.random.seed(10)
            for j in range(dm):
                X[:, j] = np.sin(x * (j + 1)) + 1e-10 * np.random.randn(d)
            _project_correlation_factors(X)
            assert np.isfinite(X).all()
            X *= 0.7
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, 1.)

            # Try to recover the structure
            rslt = corr_nearest_factor(mat, dm)
            err_msg = 'rank=%d, niter=%d' % (dm, len(rslt.objective_values))
            assert_allclose(rslt.objective_values[:5], objvals[dm - 1],
                            rtol=0.5, err_msg=err_msg)
            assert_equal(rslt.Converged, True, err_msg=err_msg)
            mat1 = rslt.corr.to_matrix()
            assert_allclose(mat, mat1, rtol=0.25, atol=1e-3, err_msg=err_msg)

    # Test that we get the same result if the input is dense or sparse
    def test_corr_nearest_factor_sparse(self):
        d = 100
        for dm in 1, 2:

            # Generate a test matrix of factors
            X = np.zeros((d, dm), dtype=np.float64)
            x = np.linspace(0, 2 * np.pi, d)
            np.random.seed(10)
            for j in range(dm):
                X[:, j] = np.sin(x * (j + 1)) + 1e-10 * np.random.randn(d)

            # Get the correlation matrix
            _project_correlation_factors(X)
            X *= 0.7
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, 1)

            # Threshold it
            mat *= (np.abs(mat) >= 0.4)
            smat = sparse.csr_matrix(mat)

            # BUG FIX: the dense-input fit previously used `smat` as well,
            # so the test compared two identical sparse runs and could
            # never detect a dense/sparse discrepancy.
            rslt = corr_nearest_factor(mat, dm)
            assert_equal(rslt.Converged, True)
            mat_dense = rslt.corr.to_matrix()

            rslt = corr_nearest_factor(smat, dm)
            assert_equal(rslt.Converged, True)
            mat_sparse = rslt.corr.to_matrix()

            assert_allclose(mat_dense, mat_sparse, rtol=0.25, atol=1e-3)

    # Test on a quadratic function.
    def test_spg_optim(self):
        dm = 100
        ind = np.arange(dm)
        indmat = np.abs(ind[:, None] - ind[None, :])
        # positive definite Toeplitz matrix, so the unique minimum is x = 0
        M = 0.8 ** indmat

        def obj(x):
            return np.dot(x, np.dot(M, x))

        def grad(x):
            return 2 * np.dot(M, x)

        def project(x):
            # unconstrained problem: projection is the identity
            return x

        x = np.random.normal(size=dm)
        rslt = _spg_optim(obj, grad, x, project)
        xnew = rslt.params
        assert_equal(rslt.Converged, True)
        assert_almost_equal(obj(xnew), 0, decimal=3)

    def test_decorrelate(self):
        d = 30
        dg = np.linspace(1, 2, d)
        root = np.random.normal(size=(d, 4))
        fac = FactoredPSDMatrix(dg, root)
        mat = fac.to_matrix()
        rmat = np.linalg.cholesky(mat)
        # decorrelating the Cholesky factor of the matrix itself must
        # produce an exactly white (identity-covariance) result
        dcr = fac.decorrelate(rmat)
        idm = np.dot(dcr, dcr.T)
        assert_almost_equal(idm, np.eye(d))

        # for a general rhs, decorrelate(rhs)'.decorrelate(rhs) equals
        # rhs' M^{-1} rhs
        rhs = np.random.normal(size=(d, 5))
        mat2 = np.dot(rhs.T, np.linalg.solve(mat, rhs))
        mat3 = fac.decorrelate(rhs)
        mat3 = np.dot(mat3.T, mat3)
        assert_almost_equal(mat2, mat3)

    def test_logdet(self):
        d = 30
        dg = np.linspace(1, 2, d)
        root = np.random.normal(size=(d, 4))
        fac = FactoredPSDMatrix(dg, root)
        mat = fac.to_matrix()

        # factored log-determinant must agree with the dense slogdet
        _, ld = np.linalg.slogdet(mat)
        ld2 = fac.logdet()
        assert_almost_equal(ld, ld2)

    def test_solve(self):
        d = 30
        dg = np.linspace(1, 2, d)
        root = np.random.normal(size=(d, 2))
        fac = FactoredPSDMatrix(dg, root)
        rhs = np.random.normal(size=(d, 5))

        # factored solve must agree with the dense solve
        sr1 = fac.solve(rhs)
        mat = fac.to_matrix()
        sr2 = np.linalg.solve(mat, rhs)
        assert_almost_equal(sr1, sr2)

    def test_cov_nearest_factor_homog(self):
        """Recover factor structure plus a homogeneous diagonal term."""
        d = 100

        for dm in 1, 2:

            # Construct a test matrix with exact factor structure
            X = np.zeros((d, dm), dtype=np.float64)
            x = np.linspace(0, 2 * np.pi, d)
            for j in range(dm):
                X[:, j] = np.sin(x * (j + 1))
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, np.diag(mat) + 3.1)

            # Try to recover the structure
            rslt = cov_nearest_factor_homog(mat, dm)
            mat1 = rslt.to_matrix()
            assert_allclose(mat, mat1, rtol=0.25, atol=1e-3)

    # Check that dense and sparse inputs give the same result
    def test_cov_nearest_factor_homog_sparse(self):
        d = 100

        for dm in 1, 2:

            # Construct a test matrix with exact factor structure
            X = np.zeros((d, dm), dtype=np.float64)
            x = np.linspace(0, 2 * np.pi, d)
            for j in range(dm):
                X[:, j] = np.sin(x * (j + 1))
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, np.diag(mat) + 3.1)

            # Fit to dense
            rslt = cov_nearest_factor_homog(mat, dm)
            mat1 = rslt.to_matrix()

            # Fit to sparse
            smat = sparse.csr_matrix(mat)
            rslt = cov_nearest_factor_homog(smat, dm)
            mat2 = rslt.to_matrix()

            assert_allclose(mat1, mat2, rtol=0.25, atol=1e-3)

    def test_corr_thresholded(self):
        # thresholded sparse correlation must match a thresholded dense
        # corrcoef (unused wall-clock timing code removed)
        X = np.random.normal(size=(2000, 10))
        tcor = corr_thresholded(X, 0.2, max_elt=4e6)

        fcor = np.corrcoef(X)
        fcor *= (np.abs(fcor) >= 0.2)
        assert_allclose(tcor.todense(), fcor, rtol=0.25, atol=1e-3)
| bsd-3-clause |
PhonologicalCorpusTools/CorpusTools | tests/test_gui_views.py | 1 | 1683 |
from corpustools.gui.views import *
from corpustools.gui.models import CorpusModel
def test_discourse_view(qtbot):
    # Smoke test: the discourse view can be constructed and registered.
    view = DiscourseView()
    qtbot.addWidget(view)
def test_lexicon_view(qtbot, unspecified_test_corpus, settings):
    # End-to-end check of LexiconView search behaviour: repeated searches
    # cycle through the matches, and highlightType selects the row of a
    # given word object.
    widget = LexiconView()
    model = CorpusModel(unspecified_test_corpus, settings)
    qtbot.addWidget(widget)
    # NOTE(review): CorpusModel is a table model, not a widget -- confirm
    # that registering it with qtbot.addWidget is intentional.
    qtbot.addWidget(model)
    widget.setModel(model)

    # an empty query selects nothing
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 0)

    widget.searchField.setText('ma')
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 1)
    assert(widget.table.selectionModel().selectedRows()[0].row() == 0)

    # searching again with the same text advances to the next match (row 2)
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 1)
    assert(widget.table.selectionModel().selectedRows()[0].row() == 2)

    # a query with no match clears the selection
    widget.searchField.setText('matemma')
    widget.search()
    assert(len(widget.table.selectionModel().selectedRows()) == 0)

    # highlightType selects exactly the row of the given word
    w = model.wordObject(0)
    widget.highlightType(w)
    assert(len(widget.table.selectionModel().selectedRows()) == 1)
    assert(widget.table.selectionModel().selectedRows()[0].row() == 0)
#def test_phono_search_results():
# widget = PhonoSearchResults()
#qtbot.addWidget(widget)
def test_tree_widget(qtbot):
    # Smoke test: the tree widget can be constructed and registered.
    tree = TreeWidget()
    qtbot.addWidget(tree)
def test_table_widget(qtbot):
    # Smoke test: the table widget can be constructed and registered.
    table = TableWidget()
    qtbot.addWidget(table)
def test_text_view(qtbot):
    # Smoke test: the text view can be constructed and registered.
    view = TextView()
    qtbot.addWidget(view)
def test_variant_view(qtbot, unspecified_test_corpus):
    # Smoke test: build a VariantView for a known word from the test corpus.
    word = unspecified_test_corpus['atema']
    view = VariantView(None, word)
    qtbot.addWidget(view)
| bsd-3-clause |
barbuza/django | django/conf/locale/sv/formats.py | 504 | 1569 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d',  # '2006-10-25'
    # NOTE(review): the month-first formats below are US conventions;
    # Sweden normally uses ISO 8601 -- confirm they are intentionally
    # accepted for this locale.
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y',  # '10/25/06'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
jonathonwalz/ansible | lib/ansible/modules/network/junos/junos_vlan.py | 2 | 5010 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: junos_vlan
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage VLANs on Juniper JUNOS network devices
description:
- This module provides declarative management of VLANs
on Juniper JUNOS network devices.
options:
name:
description:
- Name of the VLAN.
required: true
vlan_id:
description:
- ID of the VLAN.
required: true
description:
description:
- Text description of VLANs.
interfaces:
description:
- List of interfaces to check the VLAN has been
configured correctly.
collection:
description: List of VLANs definitions.
purge:
description:
- Purge VLANs not defined in the collections parameter.
default: no
state:
description:
- State of the VLAN configuration.
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
choices: [True, False]
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
- name: configure VLAN ID and name
junos_vlan:
vlan_name: test
vlan_id: 20
name: test-vlan
- name: remove VLAN configuration
junos_vlan:
vlan_name: test
state: absent
- name: deactive VLAN configuration
junos_vlan:
vlan_name: test
state: present
active: False
- name: activate VLAN configuration
junos_vlan:
vlan_name: test
state: present
active: True
"""
RETURN = """
rpc:
description: load-configuration RPC send to the device
returned: when configuration is changed on device
type: string
sample: "<vlans><vlan><name>test-vlan-4</name></vlan></vlans>"
"""
import collections
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import load_config, map_params_to_obj, map_obj_to_ele
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
USE_PERSISTENT_CONNECTION = True
def validate_vlan_id(value, module):
    """Fail the module run unless ``value`` is a valid 802.1Q VLAN id."""
    if value < 1 or value > 4094:
        module.fail_json(msg='vlan_id must be between 1 and 4094')
def validate_param_values(module, obj):
    """Run every module-level ``validate_<param>`` function that exists
    for the keys of ``obj`` against the corresponding module parameter."""
    for param in obj:
        # validate the param value (if a validator function exists)
        checker = globals().get('validate_%s' % param)
        if callable(checker):
            checker(module.params.get(param), module)
def main():
    """ main entry point for module execution

    Builds the module argument spec, maps the parameters to Junos XML
    configuration elements, pushes the configuration over netconf, and
    exits with a changed/diff result.
    """
    argument_spec = dict(
        name=dict(required=True),
        vlan_id=dict(required=True, type='int'),
        description=dict(),
        interfaces=dict(),
        collection=dict(),
        purge=dict(default=False, type='bool'),
        state=dict(default='present', choices=['present', 'absent']),
        active=dict(default=True, type='bool')
    )

    # add the connection arguments shared by all junos modules
    argument_spec.update(junos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False}

    if warnings:
        result['warnings'] = warnings

    # xpath of the configuration subtree this module manages
    top = 'vlans/vlan'

    # Map module parameters to configuration xpaths.  A dict value marks
    # extra options (here: 'name' is the list key); a plain string is the
    # target xpath.  Ordering matters, hence OrderedDict.
    param_to_xpath_map = collections.OrderedDict()
    param_to_xpath_map.update([
        ('name', {'xpath': 'name', 'is_key': True}),
        ('vlan_id', 'vlan-id'),
        ('description', 'description')
    ])

    # runs validate_vlan_id (and any other validate_* helper) on the params
    validate_param_values(module, param_to_xpath_map)

    want = list()
    want.append(map_params_to_obj(module, param_to_xpath_map))
    ele = map_obj_to_ele(module, want, top)

    # in check mode, load the candidate config but do not commit it
    kwargs = {'commit': not module.check_mode}
    kwargs['action'] = 'replace'

    diff = load_config(module, tostring(ele), warnings, **kwargs)

    if diff:
        result.update({
            'changed': True,
            'diff': {'prepared': diff},
            'rpc': tostring(ele)
        })

    module.exit_json(**result)


if __name__ == "__main__":
    main()
| gpl-3.0 |
mgedmin/ansible | lib/ansible/playbook/task_include.py | 92 | 2704 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.task import Task
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskInclude']
class TaskInclude(Task):
    """
    A task include is derived from a regular task to handle the special
    circumstances related to the `- include: ...` task.
    """

    # =================================================================================
    # ATTRIBUTES

    # Presumably controls static vs. dynamic resolution of the include
    # (None = decide automatically) -- confirm against the strategy code.
    _static = FieldAttribute(isa='bool', default=None)

    def __init__(self, block=None, role=None, task_include=None):
        super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
        # flipped to True elsewhere once the include is expanded at parse time
        self.statically_loaded = False

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        # Factory helper: construct a TaskInclude and populate it from the
        # raw task data via the inherited load_data().
        t = TaskInclude(block=block, role=role, task_include=task_include)
        return t.load_data(data, variable_manager=variable_manager, loader=loader)

    def copy(self, exclude_parent=False, exclude_tasks=False):
        # The base copy() does not know about statically_loaded; carry it over.
        new_me = super(TaskInclude, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
        new_me.statically_loaded = self.statically_loaded
        return new_me

    def get_vars(self):
        '''
        We override the parent Task() classes get_vars here because
        we need to include the args of the include into the vars as
        they are params to the included tasks.
        '''
        all_vars = dict()
        if self._parent:
            all_vars.update(self._parent.get_vars())

        all_vars.update(self.vars)
        all_vars.update(self.args)

        # 'tags' and 'when' apply to the include statement itself, not to
        # the included tasks, so they must not leak into the variables
        if 'tags' in all_vars:
            del all_vars['tags']
        if 'when' in all_vars:
            del all_vars['when']

        return all_vars
| gpl-3.0 |
keeeener/nicki | platform/external/webkit/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py | 15 | 3786 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.mocktool import MockTool
class MockSheriffBot(object):
    """Minimal stand-in for the sheriff IRC bot used by SheriffTest."""

    name = "mock-sheriff-bot"
    watchers = ["watcher@example.com"]

    def run_webkit_patch(self, args):
        # Pretend the webkit-patch invocation filed a bug and echo its URL.
        return "Created bug https://bugs.webkit.org/show_bug.cgi?id=36936\n"
class SheriffTest(unittest.TestCase):
    # Verifies the exact bug-comment text the sheriff posts when a revision
    # may have broken builders, for zero, one and two failing tests.
    def test_post_blame_comment_on_bug(self):
        def run():
            sheriff = Sheriff(MockTool(), MockSheriffBot())
            builders = [
                Builder("Foo", None),
                Builder("Bar", None),
            ]
            commit_info = Mock()
            commit_info.bug_id = lambda: None
            commit_info.revision = lambda: 4321
            # Should do nothing with no bug_id
            sheriff.post_blame_comment_on_bug(commit_info, builders, [])
            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
            # Should try to post a comment to the bug, but MockTool.bugs does nothing.
            commit_info.bug_id = lambda: 1234
            sheriff.post_blame_comment_on_bug(commit_info, builders, [])
            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])

        # Expected MOCK output for the three bug_id=1234 calls above; the
        # string must match the captured stderr byte-for-byte.
        expected_stderr = u"""MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
mock-test-2
--- End comment ---
"""
        OutputCapture().assert_outputs(self, run, expected_stderr=expected_stderr)
| gpl-2.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/manifold/mds.py | 18 | 15138 | """
Multi-dimensional Scaling (MDS)
"""
# author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
import numpy as np
import warnings
from ..base import BaseEstimator
from ..metrics import euclidean_distances
from ..utils import check_random_state, check_array, check_symmetric
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..isotonic import IsotonicRegression
def _smacof_single(similarities, metric=True, n_components=2, init=None,
                   max_iter=300, verbose=0, eps=1e-3, random_state=None):
    """
    Computes multidimensional scaling using SMACOF algorithm

    Parameters
    ----------
    similarities: symmetric ndarray, shape [n * n]
        similarities between the points

    metric: boolean, optional, default: True
        compute metric or nonmetric SMACOF algorithm

    n_components: int, optional, default: 2
        number of dimension in which to immerse the similarities
        overwritten if initial array is provided.

    init: {None or ndarray}, optional
        if None, randomly chooses the initial configuration
        if ndarray, initialize the SMACOF algorithm with this array

    max_iter: int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run

    verbose: int, optional, default: 0
        level of verbosity

    eps: float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare converge

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    X: ndarray (n_samples, n_components), float
        coordinates of the n_samples points in a n_components-space

    stress_: float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points)

    n_iter : int
        Number of iterations run.
    """
    similarities = check_symmetric(similarities, raise_exception=True)

    n_samples = similarities.shape[0]
    random_state = check_random_state(random_state)

    # flatten the strict upper triangle; zero entries are treated as
    # missing values in the nonmetric branch below
    sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)

        if metric:
            disparities = similarities
        else:
            dis_flat = dis.ravel()
            # similarities with 0 are considered as missing values
            dis_flat_w = dis_flat[sim_flat != 0]

            # Compute the disparities using a monotonic regression
            disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
            disparities = dis_flat.copy()
            disparities[sim_flat != 0] = disparities_flat
            disparities = disparities.reshape((n_samples, n_samples))
            # rescale the disparities to a fixed sum of squares so the
            # stress is comparable across iterations
            disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
                                   (disparities ** 2).sum())

        # Compute stress
        stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2

        # Update X using the Guttman transform
        dis[dis == 0] = 1e-5  # avoid division by zero for coincident points
        ratio = disparities / dis
        B = - ratio
        B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
        X = 1. / n_samples * np.dot(B, X)

        # `dis` is reused here as a normalization term for the stress
        dis = np.sqrt((X ** 2).sum(axis=1)).sum()
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            # convergence test on the change of normalized stress
            # (old_stress already holds the previous stress / dis)
            if(old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' % (it,
                                                                       stress))
                break
        old_stress = stress / dis

    return X, stress, it + 1
def smacof(similarities, metric=True, n_components=2, init=None, n_init=8,
           n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None,
           return_n_iter=False):
    """
    Computes multidimensional scaling using SMACOF (Scaling by Majorizing a
    Complicated Function) algorithm

    The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes
    a objective function, the *stress*, using a majorization technique. The
    Stress Majorization, also known as the Guttman Transform, guarantees a
    monotone convergence of Stress, and is more powerful than traditional
    techniques such as gradient descent.

    The SMACOF algorithm for metric MDS can summarized by the following steps:

    1. Set an initial start configuration, randomly or not.
    2. Compute the stress
    3. Compute the Guttman Transform
    4. Iterate 2 and 3 until convergence.

    The nonmetric algorithm adds a monotonic regression steps before computing
    the stress.

    Parameters
    ----------
    similarities : symmetric ndarray, shape (n_samples, n_samples)
        similarities between the points

    metric : boolean, optional, default: True
        compute metric or nonmetric SMACOF algorithm

    n_components : int, optional, default: 2
        number of dimension in which to immerse the similarities
        overridden if initial array is provided.

    init : {None or ndarray of shape (n_samples, n_components)}, optional
        if None, randomly chooses the initial configuration
        if ndarray, initialize the SMACOF algorithm with this array

    n_init : int, optional, default: 8
        Number of time the smacof algorithm will be run with different
        initialisation. The final results will be the best output of the
        n_init consecutive runs in terms of stress.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run

    verbose : int, optional, default: 0
        level of verbosity

    eps : float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare converge

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    return_n_iter : bool
        Whether or not to return the number of iterations.

    Returns
    -------
    X : ndarray (n_samples,n_components)
        Coordinates of the n_samples points in a n_components-space

    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points)

    n_iter : int
        The number of iterations corresponding to the best stress.
        Returned only if `return_n_iter` is set to True.

    Notes
    -----
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)

    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)

    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """
    similarities = check_array(similarities)
    random_state = check_random_state(random_state)

    if hasattr(init, '__array__'):
        init = np.asarray(init).copy()
        # an explicit starting configuration makes multiple restarts
        # pointless -- every run would be identical
        if not n_init == 1:
            warnings.warn(
                'Explicit initial positions passed: '
                'performing only one init of the MDS instead of %d'
                % n_init)
            n_init = 1

    best_pos, best_stress = None, None
    if n_jobs == 1:
        # sequential restarts, keeping the configuration with lowest stress
        for it in range(n_init):
            pos, stress, n_iter_ = _smacof_single(
                similarities, metric=metric,
                n_components=n_components, init=init,
                max_iter=max_iter, verbose=verbose,
                eps=eps, random_state=random_state)
            if best_stress is None or stress < best_stress:
                best_stress = stress
                best_pos = pos.copy()
                best_iter = n_iter_
    else:
        # parallel restarts: each worker gets its own deterministic seed
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
            delayed(_smacof_single)(
                similarities, metric=metric, n_components=n_components,
                init=init, max_iter=max_iter, verbose=verbose, eps=eps,
                random_state=seed)
            for seed in seeds)
        positions, stress, n_iters = zip(*results)
        best = np.argmin(stress)
        best_stress = stress[best]
        best_pos = positions[best]
        best_iter = n_iters[best]

    if return_n_iter:
        return best_pos, best_stress, best_iter
    else:
        return best_pos, best_stress
class MDS(BaseEstimator):
    """Multidimensional scaling
    Read more in the :ref:`User Guide <multidimensional_scaling>`.
    Parameters
    ----------
    metric : boolean, optional, default: True
        compute metric or nonmetric SMACOF (Scaling by Majorizing a
        Complicated Function) algorithm
    n_components : int, optional, default: 2
        number of dimension in which to immerse the similarities
        overridden if initial array is provided.
    n_init : int, optional, default: 4
        Number of time the smacof algorithm will be run with different
        initialisation. The final results will be the best output of the
        n_init consecutive runs in terms of stress.
    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run
    verbose : int, optional, default: 0
        level of verbosity
    eps : float, optional, default: 1e-6
        relative tolerance w.r.t stress to declare converge
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    dissimilarity : string
        Which dissimilarity measure to use.
        Supported are 'euclidean' and 'precomputed'.
    Attributes
    ----------
    embedding_ : array-like, shape [n_components, n_samples]
        Stores the position of the dataset in the embedding space
    stress_ : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points)
    References
    ----------
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)
    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)
    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """
    def __init__(self, n_components=2, metric=True, n_init=4,
                 max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
                 random_state=None, dissimilarity="euclidean"):
        self.n_components = n_components
        self.dissimilarity = dissimilarity
        self.metric = metric
        self.n_init = n_init
        self.max_iter = max_iter
        self.eps = eps
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state

    @property
    def _pairwise(self):
        # Bug fix: MDS has no ``kernel`` attribute, so the previous
        # ``self.kernel == "precomputed"`` raised AttributeError whenever this
        # property was queried (e.g. by cross-validation utilities).  The
        # precomputed flag lives in the ``dissimilarity`` parameter.
        return self.dissimilarity == "precomputed"

    def fit(self, X, y=None, init=None):
        """
        Computes the position of the points in the embedding space
        Parameters
        ----------
        X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
                if dissimilarity='precomputed'
            Input data.
        init : {None or ndarray, shape (n_samples,)}, optional
            If None, randomly chooses the initial configuration
            if ndarray, initialize the SMACOF algorithm with this array.
        """
        self.fit_transform(X, init=init)
        return self

    def fit_transform(self, X, y=None, init=None):
        """
        Fit the data from X, and returns the embedded coordinates
        Parameters
        ----------
        X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
                if dissimilarity='precomputed'
            Input data.
        init : {None or ndarray, shape (n_samples,)}, optional
            If None, randomly chooses the initial configuration
            if ndarray, initialize the SMACOF algorithm with this array.
        """
        X = check_array(X)
        # A square input with the default dissimilarity is ambiguous: warn the
        # user in case they meant to pass a precomputed matrix.
        if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
            warnings.warn("The MDS API has changed. ``fit`` now constructs an"
                          " dissimilarity matrix from data. To use a custom "
                          "dissimilarity matrix, set "
                          "``dissimilarity='precomputed'``.")
        if self.dissimilarity == "precomputed":
            self.dissimilarity_matrix_ = X
        elif self.dissimilarity == "euclidean":
            self.dissimilarity_matrix_ = euclidean_distances(X)
        else:
            raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
                             " Got %s instead" % str(self.dissimilarity))
        self.embedding_, self.stress_, self.n_iter_ = smacof(
            self.dissimilarity_matrix_, metric=self.metric,
            n_components=self.n_components, init=init, n_init=self.n_init,
            n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
            eps=self.eps, random_state=self.random_state,
            return_n_iter=True)
        return self.embedding_
| mit |
glastopf/conpot | conpot/core/virtual_fs.py | 1 | 7135 | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import os
import sys
import fs
import conpot
from fs import open_fs, errors, subfs
from conpot.core.filesystem import AbstractFS, SubAbstractFS
logger = logging.getLogger(__name__)
class VirtualFS(object):
    """
    Conpot's virtual file system. Based on Pyfilesystem2, it would allow us to have
    arbitrary file uploads while sand boxing them for later analysis. This is how it should look like:
        [_conpot_vfs]
        |
        |-- data_fs (persistent)
        |   |-- ftp/uploads
        |   `-- misc.
        |
        `-- protocol_fs (temporary, refreshed at startup)
            |-- common
            |-- telnet
            |-- http
            |-- snmp
            `-- ftp etc.
    :param data_fs_path: Path for storing data_fs. A dictionary with attribute name _protocol_vfs stores all the
    fs folders made by all the individual protocols.
    :type data_fs_path: fs.open_fs
    """
    def __init__(self, data_fs_path=None):
        # dictionary to keep all the protocol vfs instances, maintain easy
        # access for individual mounted protocols with paths
        self._conpot_vfs = dict()
        if data_fs_path is None:
            try:
                self.data_fs = open_fs(os.path.join('/'.join(conpot.__file__.split('/')[:-1]), 'tests', 'data',
                                                    'data_temp_fs'))
            except fs.errors.FSError:
                logger.exception('Unable to create persistent storage for Conpot. Exiting')
                sys.exit(3)
        else:
            try:
                assert data_fs_path and isinstance(data_fs_path, str)
                self.data_fs = open_fs(data_fs_path)  # Specify the place where you would place the uploads
            except AssertionError:
                logger.exception('Incorrect FS url specified. Please check documentation for more details.')
                sys.exit(3)
            except fs.errors.CreateFailed:
                logger.exception('Unexpected error occurred while creating Conpot FS.')
                sys.exit(3)
        self.protocol_fs = None

    def initialize_vfs(self, fs_path=None, data_fs_path=None, temp_dir=None):
        # (Re)create the temporary protocol file system; an explicit
        # data_fs_path re-initializes the persistent store as well.
        if data_fs_path is not None:
            logger.info('Opening path {} for persistent storage of files.'.format(data_fs_path))
            self.__init__(data_fs_path=data_fs_path)
        if fs_path is None:
            fs_path = 'tar://' + os.path.join('/'.join(conpot.__file__.split('/')[:-1]), 'data.tar')
            logger.warning('Using default FS path. {}'.format(fs_path))
        self.protocol_fs = AbstractFS(src_path=fs_path, temp_dir=temp_dir)

    def add_protocol(self,
                     protocol_name: str,
                     data_fs_subdir: str,
                     vfs_dst_path: str,
                     src_path=None,
                     owner_uid=0,
                     group_gid=0,
                     perms=0o755) -> (SubAbstractFS, subfs.SubFS):
        """
        Method that would be used by protocols to initialize vfs. May be called by each protocol individually. This
        creates a chroot jail sub file system env which makes easier handling. It also creates a data_fs sub file system
        for managing protocol specific uploads.
        :param protocol_name: name of the protocol for which VFS is being created.
        :param data_fs_subdir: sub-folder name within data_fs that would be storing the uploads for later analysis
        :param vfs_dst_path: protocol specific sub-folder path in the fs.
        :param src_path: Source from where the files are to copied.
        :param owner_uid: UID of a registered user. This is the default owner in the sub file system
        :param group_gid: GID of a existing group.
        :param perms: Default permissions of the sub file system.
        :return: fs object
        **Note:** The owner_uid and group_gid must be already registered with the fs. Otherwise an exception
        would be raised.
        """
        assert isinstance(protocol_name, str) and protocol_name
        assert isinstance(data_fs_subdir, str) and data_fs_subdir
        assert isinstance(vfs_dst_path, str) and vfs_dst_path
        if src_path:
            assert isinstance(src_path, str)
            if not os.path.isdir(src_path):
                logger.exception('Protocol directory is not a valid directory.')
                sys.exit(3)
        logger.info('Creating persistent data store for protocol: {}'.format(protocol_name))
        # create a sub directory for persistent storage.
        if self.data_fs.isdir(data_fs_subdir):
            sub_data_fs = self.data_fs.opendir(path=data_fs_subdir)
        else:
            sub_data_fs = self.data_fs.makedir(path=data_fs_subdir)
        if protocol_name not in self._conpot_vfs.keys():
            sub_protocol_fs = self.protocol_fs.mount_fs(vfs_dst_path,
                                                        src_path,
                                                        owner_uid,
                                                        group_gid,
                                                        perms)
            self._conpot_vfs[protocol_name] = (sub_protocol_fs, sub_data_fs)
        return self._conpot_vfs[protocol_name]

    def close(self, force=False):
        """
        Close the filesystem properly. Better and more graceful than __del__
        :param force: Force close. This would close the AbstractFS instance - without close closing data_fs File Systems
        """
        # Bug fix: the original did ``del self._conpot_vfs[_fs][0]``, which is
        # a TypeError (tuples do not support item deletion), and it mutated the
        # dict while iterating over its live keys.  Iterate over a snapshot of
        # the keys and drop each whole entry after closing both sub systems.
        if self._conpot_vfs and not force:
            for name in list(self._conpot_vfs.keys()):
                sub_protocol_fs, sub_data_fs = self._conpot_vfs[name]
                try:
                    # First let us close all the data_fs instances.
                    sub_data_fs.close()
                    # Then close the protocol_fs sub dirs for that protocol.
                    sub_protocol_fs.close()
                except fs.errors.FSError:
                    logger.exception('Error occurred while closing FS {}'.format(name))
                del self._conpot_vfs[name]
        self.protocol_fs.close()
        self.protocol_fs.clean()
Spiderlover/Toontown | toontown/town/TownBattleSOSPetSearchPanel.py | 5 | 1541 | from pandac.PandaModules import *
from direct.fsm import StateData
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
class TownBattleSOSPetSearchPanel(StateData.StateData):
    """Panel shown while the game searches for a SOS pet during battle."""

    def __init__(self, doneEvent):
        StateData.StateData.__init__(self, doneEvent)

    def load(self):
        # Pull the shared battle GUI art, then build the waiting frame and the
        # back button out of it.  The model node is released once both widgets
        # hold their own copies of the textures.
        battleGui = loader.loadModel('phase_3.5/models/gui/battle_gui')
        self.frame = DirectFrame(
            relief=None,
            image=battleGui.find('**/Waiting4Others'),
            text_align=TextNode.ALeft,
            pos=(0, 0, 0),
            scale=0.65)
        self.frame.hide()
        backImages = (battleGui.find('**/PckMn_BackBtn'),
                      battleGui.find('**/PckMn_BackBtn_Dn'),
                      battleGui.find('**/PckMn_BackBtn_Rlvr'))
        self.backButton = DirectButton(
            parent=self.frame,
            relief=None,
            image=backImages,
            pos=(-0.647, 0, -0.011),
            scale=1.05,
            text=TTLocalizer.TownBattleWaitBack,
            text_scale=0.05,
            text_pos=(0.01, -0.012),
            text_fg=Vec4(0, 0, 0.8, 1),
            command=self.__handleBack)
        battleGui.removeNode()
        return

    def unload(self):
        # Destroy the frame (which owns the back button) and drop references.
        self.frame.destroy()
        del self.frame
        del self.backButton

    def enter(self, petId, petName):
        # Remember which pet is being searched for and show the panel with a
        # localized title naming that pet.
        self.petId = petId
        self.petName = petName
        self.frame['text'] = TTLocalizer.TownBattleSOSPetSearchTitle % petName
        self.frame['text_pos'] = (0, 0.01, 0)
        self.frame['text_scale'] = TTLocalizer.TBSOSPSPenter
        self.frame.show()

    def exit(self):
        self.frame.hide()

    def __handleBack(self):
        # Tell the owner of this panel that the player backed out.
        messenger.send(self.doneEvent, [{'mode': 'Back'}])
| mit |
skg-net/ansible | lib/ansible/modules/network/nxos/nxos_vrf_interface.py | 68 | 7978 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf_interface
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages interface specific VRF configuration.
description:
- Manages interface specific VRF configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VRF needs to be added globally with M(nxos_vrf) before
adding a VRF to an interface.
- Remove a VRF from an interface will still remove
all L3 attributes just as it does from CLI.
- VRF is not read from an interface until IP address is
configured on that interface.
options:
vrf:
description:
- Name of VRF to be managed.
required: true
interface:
description:
- Full name of interface to be managed, i.e. Ethernet1/1.
required: true
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrf ntc exists on Eth1/1
nxos_vrf_interface:
vrf: ntc
interface: Ethernet1/1
state: present
- name: Ensure ntc VRF does not exist on Eth1/1
nxos_vrf_interface:
vrf: ntc
interface: Ethernet1/1
state: absent
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface loopback16", "vrf member ntc"]
'''
import re
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import get_interface_type
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
    """Run one show command on the device and return its first result."""
    # 'show run' output has no structured JSON form, so request plain text
    # for it; every other show command is parsed as JSON.
    output = 'text' if 'show run' in command else 'json'
    return run_commands(module, [{'command': command, 'output': output}])[0]
def get_interface_mode(interface, intf_type, module):
    """Return the port mode of *interface*: 'layer2', 'layer3' or 'unknown'.

    :param interface: full interface name, e.g. ``Ethernet1/1``
    :param intf_type: interface type string from ``get_interface_type``
    :param module: AnsibleModule used to talk to the device
    """
    command = 'show interface {0}'.format(interface)
    # Bug fix: the original rebound the ``interface`` parameter to an unused
    # empty dict here ("interface = {}"), shadowing the argument for no reason.
    mode = 'unknown'
    if intf_type in ['ethernet', 'portchannel']:
        body = execute_show_command(command, module)
        try:
            interface_table = body['TABLE_interface']['ROW_interface']
        except KeyError:
            return mode
        if interface_table and isinstance(interface_table, dict):
            # NX-OS omits eth_mode for routed ports, so default to layer3.
            mode = str(interface_table.get('eth_mode', 'layer3'))
            if mode in ('access', 'trunk'):
                mode = 'layer2'
        else:
            return mode
    elif intf_type == 'loopback' or intf_type == 'svi':
        # Loopbacks and SVIs are always routed interfaces.
        mode = 'layer3'
    return mode
def get_vrf_list(module):
    """Return the names of all VRFs configured on the device."""
    body = execute_show_command('show vrf all', module)
    try:
        vrf_table = body['TABLE_vrf']['ROW_vrf']
    except (KeyError, AttributeError):
        # No VRF data in the reply: report an empty list.
        return []
    return [str(entry['vrf_name']) for entry in vrf_table]
def get_interface_info(interface, module):
    """Return the VRF configured on *interface*, or '' when none is set."""
    if not interface.startswith('loopback'):
        # The running config stores interface names capitalized.
        interface = interface.capitalize()
    try:
        running = execute_show_command(
            'show run interface {0}'.format(interface), module)
        match_vrf = re.match(
            r".*vrf\s+member\s+(?P<vrf>\S+).*", running, re.DOTALL)
        return match_vrf.groupdict()["vrf"]
    except (AttributeError, TypeError):
        # AttributeError: no 'vrf member' line matched (match is None);
        # TypeError: the command body was not a string.
        return ""
def is_default(interface, module):
    """Report whether *interface* carries only its default configuration.

    Returns True when only the bare 'interface ...' header is present,
    False when extra configuration exists, and the string 'DNE' when the
    interface does not exist on the switch.
    """
    try:
        body = execute_show_command(
            'show run interface {0}'.format(interface), module)
        config_lines = body.split('\n')
        # On a default interface the last config line is the header itself.
        return config_lines[-1].startswith('interface')
    except (KeyError, IndexError):
        return 'DNE'
def main():
    """Entry point: ensure the requested VRF membership state on an interface."""
    argument_spec = dict(
        vrf=dict(required=True),
        interface=dict(type='str', required=True),
        state=dict(default='present', choices=['present', 'absent'], required=False),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    warnings = list()
    results = {'changed': False, 'commands': [], 'warnings': warnings}
    vrf = module.params['vrf']
    interface = module.params['interface'].lower()
    state = module.params['state']
    device_info = get_capabilities(module)
    network_api = device_info.get('network_api', 'nxapi')
    current_vrfs = get_vrf_list(module)
    # The VRF must already exist globally; warn (not fail) when it does not.
    if vrf not in current_vrfs:
        warnings.append("The VRF is not present/active on the device. "
                        "Use nxos_vrf to fix this.")
    intf_type = get_interface_type(interface)
    # Non-ethernet (logical) interfaces must already exist before VRF
    # membership can be configured over the CLI transport.
    if (intf_type != 'ethernet' and network_api == 'cliconf'):
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg="interface does not exist on switch. Verify "
                                 "switch platform or create it first with "
                                 "nxos_interface if it's a logical interface")
    mode = get_interface_mode(interface, intf_type, module)
    # VRF membership is a Layer 3 construct; refuse switchports.
    if mode == 'layer2':
        module.fail_json(msg='Ensure interface is a Layer 3 port before '
                             'configuring a VRF on an interface. You can '
                             'use nxos_interface')
    proposed = dict(interface=interface, vrf=vrf)
    current_vrf = get_interface_info(interface, module)
    existing = dict(interface=interface, vrf=current_vrf)
    changed = False
    end_state = existing
    if not existing['vrf']:
        pass
    elif vrf != existing['vrf'] and state == 'absent':
        # Refuse to remove a VRF other than the one currently configured.
        module.fail_json(msg='The VRF you are trying to remove '
                             'from the interface does not exist '
                             'on that interface.',
                         interface=interface, proposed_vrf=vrf,
                         existing_vrf=existing['vrf'])
    commands = []
    if existing:
        if state == 'absent':
            if existing and vrf == existing['vrf']:
                command = 'no vrf member {0}'.format(vrf)
                commands.append(command)
        elif state == 'present':
            if existing['vrf'] != vrf:
                command = 'vrf member {0}'.format(vrf)
                commands.append(command)
    if commands:
        # All generated commands run in the interface configuration context.
        commands.insert(0, 'interface {0}'.format(interface))
    if commands:
        if module.check_mode:
            module.exit_json(changed=True, commands=commands)
        else:
            load_config(module, commands)
            changed = True
            changed_vrf = get_interface_info(interface, module)
            end_state = dict(interface=interface, vrf=changed_vrf)
            if 'configure' in commands:
                commands.pop(0)
    results['commands'] = commands
    results['changed'] = changed
    module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
Khan/python-phabricator | phabricator/tests/test_phabricator.py | 1 | 4151 | try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest.mock as mock
except ImportError:
import mock
from pkg_resources import resource_string
import json
import phabricator
# Canned conduit responses keyed by API method name, loaded from a fixture.
RESPONSES = json.loads(
    resource_string(
        'phabricator.tests.resources',
        'responses.json'
    ).decode('utf8')
)
# Conduit certificate fixture used to authenticate the test client.
CERTIFICATE = resource_string(
    'phabricator.tests.resources',
    'certificate.txt'
).decode('utf8').strip()
# Protect against local user's .arcrc interference.
phabricator.ARCRC = {}
class PhabricatorTest(unittest.TestCase):
    """Tests for the Phabricator conduit client; all HTTP traffic is mocked."""
    def setUp(self):
        # A fresh client per test; the fixture certificate replaces the dummy
        # one so generate_hash works on deterministic input.
        self.api = phabricator.Phabricator(
            username='test',
            certificate='test',
            host='http://localhost'
        )
        self.api.certificate = CERTIFICATE
    def test_generate_hash(self):
        token = '12345678'
        hashed = self.api.generate_hash(token)
        self.assertEqual(hashed, 'f8d3bea4e58a2b2967d93d5b307bfa7c693b2e7f')
    @mock.patch('phabricator.httplib.HTTPConnection')
    def test_connect(self, mock_connection):
        # Serve the canned conduit.connect response with HTTP 200.
        mock_obj = mock_connection.return_value = mock.Mock()
        mock_obj.getresponse.return_value = StringIO(
            RESPONSES['conduit.connect']
        )
        mock_obj.getresponse.return_value.status = 200
        api = phabricator.Phabricator(
            username='test',
            certificate='test',
            host='http://localhost'
        )
        api.connect()
        keys = api._conduit.keys()
        # A successful handshake stores the session credentials.
        self.assertIn('sessionKey', keys)
        self.assertIn('connectionID', keys)
    @mock.patch('phabricator.httplib.HTTPConnection')
    def test_user_whoami(self, mock_connection):
        mock_obj = mock_connection.return_value = mock.Mock()
        mock_obj.getresponse.return_value = StringIO(RESPONSES['user.whoami'])
        mock_obj.getresponse.return_value.status = 200
        api = phabricator.Phabricator(
            username='test',
            certificate='test',
            host='http://localhost'
        )
        # Pretend a conduit session already exists to skip the handshake.
        api._conduit = True
        self.assertEqual(api.user.whoami()['userName'], 'testaccount')
    @mock.patch('phabricator.httplib.HTTPConnection')
    def test_bad_status(self, mock_connection):
        # Non-200 responses must surface as an HTTPException.
        mock_obj = mock_connection.return_value = mock.Mock()
        mock_obj.getresponse.return_value = mock.Mock()
        mock_obj.getresponse.return_value.status = 400
        api = phabricator.Phabricator(
            username='test',
            certificate='test',
            host='http://localhost'
        )
        api._conduit = True
        with self.assertRaises(phabricator.httplib.HTTPException):
            api.user.whoami()
    @mock.patch('phabricator.httplib.HTTPConnection')
    def test_maniphest_find(self, mock_connection):
        mock_obj = mock_connection.return_value = mock.Mock()
        mock_obj.getresponse.return_value = StringIO(
            RESPONSES['maniphest.find']
        )
        mock_obj.getresponse.return_value.status = 200
        api = phabricator.Phabricator(
            username='test',
            certificate='test',
            host='http://localhost'
        )
        api._conduit = True
        result = api.maniphest.find(
            ownerphids=['PHID-USER-5022a9389121884ab9db']
        )
        self.assertEqual(len(result), 1)
        # Test iteration
        self.assertIsInstance([x for x in result], list)
        # Test getattr
        self.assertEqual(
            result['PHID-TASK-4cgpskv6zzys6rp5rvrc']['status'],
            '3'
        )
    def test_validation(self):
        # Parameter validation fires before any network activity happens.
        self.api._conduit = True
        self.assertRaises(ValueError, self.api.differential.find)
        with self.assertRaises(ValueError):
            self.api.differential.find(query=1)
        with self.assertRaises(ValueError):
            self.api.differential.find(query='1')
        with self.assertRaises(ValueError):
            self.api.differential.find(query='1', guids='1')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
konstruktoid/ansible-upstream | lib/ansible/modules/system/mount.py | 16 | 20727 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, inc
# Written by Seth Vidal
# based on the mount modules from salt and puppet
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: mount
short_description: Control active and configured mount points
description:
- This module controls active and configured mount points in C(/etc/fstab).
author:
- Ansible Core Team
- Seth Vidal
version_added: "0.6"
options:
path:
description:
- Path to the mount point (e.g. C(/mnt/files)).
- Before 2.3 this option was only usable as I(dest), I(destfile) and
I(name).
required: true
aliases: [ name ]
src:
description:
- Device to be mounted on I(path). Required when I(state) set to
C(present) or C(mounted).
fstype:
description:
- Filesystem type. Required when I(state) is C(present) or C(mounted).
opts:
description:
- Mount options (see fstab(5), or vfstab(4) on Solaris).
dump:
description:
- Dump (see fstab(5)). Note that if set to C(null) and I(state) set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Has no effect on Solaris systems.
default: 0
passno:
description:
- Passno (see fstab(5)). Note that if set to C(null) and I(state) set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Deprecated on Solaris systems.
default: 0
state:
description:
- If C(mounted), the device will be actively mounted and appropriately
configured in I(fstab). If the mount point is not present, the mount
point will be created.
- If C(unmounted), the device will be unmounted without changing I(fstab).
- C(present) only specifies that the device is to be configured in
I(fstab) and does not trigger or require a mount.
- C(absent) specifies that the device mount's entry will be removed from
I(fstab) and will also unmount the device and remove the mount
point.
required: true
choices: [ absent, mounted, present, unmounted ]
fstab:
description:
- File to use instead of C(/etc/fstab). You shouldn't use this option
unless you really know what you are doing. This might be useful if
you need to configure mountpoints in a chroot environment. OpenBSD
does not allow specifying alternate fstab files with mount so do not
use this on OpenBSD with any state that operates on the live
filesystem.
default: /etc/fstab (/etc/vfstab on Solaris)
boot:
description:
- Determines if the filesystem should be mounted on boot.
- Only applies to Solaris systems.
type: bool
default: 'yes'
version_added: '2.2'
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
type: bool
default: "no"
version_added: '2.5'
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as
default, but I(name) still works as well.
'''
EXAMPLES = '''
# Before 2.3, option 'name' was used instead of 'path'
- name: Mount DVD read-only
mount:
path: /mnt/dvd
src: /dev/sr0
fstype: iso9660
opts: ro,noauto
state: present
- name: Mount up device by label
mount:
path: /srv/disk
src: LABEL=SOME_LABEL
fstype: ext4
state: present
- name: Mount up device by UUID
mount:
path: /home
src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077
fstype: xfs
opts: noatime
state: present
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.ismount import ismount
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
def write_fstab(module, lines, path):
    """Write *lines* to the fstab file at *path*.

    A timestamped backup is created first when the module's ``backup``
    parameter is set.  The file handle is managed with a context manager so
    it is flushed and closed even if a write fails (the original leaked the
    handle on error).
    """
    if module.params['backup']:
        module.backup_local(path)
    with open(path, 'w') as fs_w:
        for l in lines:
            fs_w.write(l)
def _escape_fstab(v):
"""Escape invalid characters in fstab fields.
space (040)
ampersand (046)
backslash (134)
"""
if isinstance(v, int):
return v
else:
return(
v.
replace('\\', '\\134').
replace(' ', '\\040').
replace('&', '\\046'))
def set_mount(module, args):
    """Set/change a mount point location in fstab.

    :param module: AnsibleModule (check_mode and backup handling)
    :param args: dict with the fstab fields ('src', 'name', 'fstype', ...)
    :returns: tuple of (mount point name, changed flag)
    """
    to_write = []
    exists = False
    changed = False
    escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])
    new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
    if get_platform() == 'SunOS':
        # Solaris vfstab uses a different column layout (see vfstab(4)).
        new_line = (
            '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n')
    # Fix: read the fstab through a context manager so the file handle is
    # closed deterministically (the original leaked an open file object).
    with open(args['fstab'], 'r') as fstab_file:
        fstab_lines = fstab_file.readlines()
    for line in fstab_lines:
        # Blank lines and comments pass through untouched.
        if not line.strip():
            to_write.append(line)
            continue
        if line.strip().startswith('#'):
            to_write.append(line)
            continue
        # Check if we got a valid line for splitting: 7 fields on Solaris,
        # 6 elsewhere; anything else is preserved as-is.
        if (
                get_platform() == 'SunOS' and len(line.split()) != 7 or
                get_platform() != 'SunOS' and len(line.split()) != 6):
            to_write.append(line)
            continue
        ld = {}
        if get_platform() == 'SunOS':
            (
                ld['src'],
                dash,
                ld['name'],
                ld['fstype'],
                ld['passno'],
                ld['boot'],
                ld['opts']
            ) = line.split()
        else:
            (
                ld['src'],
                ld['name'],
                ld['fstype'],
                ld['opts'],
                ld['dump'],
                ld['passno']
            ) = line.split()
        # Check if we found the correct line (matched on the mount point).
        if ld['name'] != escaped_args['name']:
            to_write.append(line)
            continue
        # If we got here we found a match - let's check if there is any
        # difference between the existing entry and the requested one.
        exists = True
        args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno')
        if get_platform() == 'SunOS':
            args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts')
        for t in args_to_check:
            if ld[t] != escaped_args[t]:
                ld[t] = escaped_args[t]
                changed = True
        if changed:
            to_write.append(new_line % ld)
        else:
            to_write.append(line)
    if not exists:
        # No entry for this mount point yet: append a brand new line.
        to_write.append(new_line % escaped_args)
        changed = True
    if changed and not module.check_mode:
        write_fstab(module, to_write, args['fstab'])
    return (args['name'], changed)
def unset_mount(module, args):
    """Remove a mount point from fstab.

    :param module: AnsibleModule (check_mode and backup handling)
    :param args: dict with at least 'name' and 'fstab' keys
    :returns: tuple of (mount point name, changed flag)
    """
    to_write = []
    changed = False
    escaped_name = _escape_fstab(args['name'])
    # Fix: read the fstab through a context manager so the file handle is
    # closed deterministically (the original leaked an open file object).
    with open(args['fstab'], 'r') as fstab_file:
        fstab_lines = fstab_file.readlines()
    for line in fstab_lines:
        # Blank lines and comments pass through untouched.
        if not line.strip():
            to_write.append(line)
            continue
        if line.strip().startswith('#'):
            to_write.append(line)
            continue
        # Check if we got a valid line for splitting: 7 fields on Solaris,
        # 6 elsewhere; anything else is preserved as-is.
        if (
                get_platform() == 'SunOS' and len(line.split()) != 7 or
                get_platform() != 'SunOS' and len(line.split()) != 6):
            to_write.append(line)
            continue
        ld = {}
        if get_platform() == 'SunOS':
            (
                ld['src'],
                dash,
                ld['name'],
                ld['fstype'],
                ld['passno'],
                ld['boot'],
                ld['opts']
            ) = line.split()
        else:
            (
                ld['src'],
                ld['name'],
                ld['fstype'],
                ld['opts'],
                ld['dump'],
                ld['passno']
            ) = line.split()
        if ld['name'] != escaped_name:
            to_write.append(line)
            continue
        # If we got here we found a match - drop the line and mark changed.
        changed = True
    if changed and not module.check_mode:
        write_fstab(module, to_write, args['fstab'])
    return (args['name'], changed)
def _set_fstab_args(fstab_file):
    """Return mount(8) arguments selecting a non-default fstab file."""
    use_alternate = (
        fstab_file and
        fstab_file != '/etc/fstab' and
        get_platform().lower() != 'sunos')
    if not use_alternate:
        return []
    # BSD mount spells the alternate-fstab option -F; Linux spells it -T.
    flag = '-F' if get_platform().lower().endswith('bsd') else '-T'
    return [flag, fstab_file]
def mount(module, args):
    """Mount the configured path; return (rc, error text)."""
    cmd = [module.get_bin_path('mount', required=True)]
    if get_platform().lower() == 'openbsd':
        # Use module.params['fstab'] here as args['fstab'] has been set to the
        # default value.
        if module.params['fstab'] is not None:
            module.fail_json(
                msg=(
                    'OpenBSD does not support alternate fstab files. Do not '
                    'specify the fstab parameter for OpenBSD hosts'))
    else:
        cmd.extend(_set_fstab_args(args['fstab']))
    cmd.append(args['name'])
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        return rc, out + err
    return 0, ''
def umount(module, path):
    """Unmount *path*; return (rc, error text)."""
    umount_bin = module.get_bin_path('umount', required=True)
    rc, out, err = module.run_command([umount_bin, path])
    # Combine stdout and stderr only when the command failed.
    return (0, '') if rc == 0 else (rc, out + err)
def remount(module, args):
    """Try to use 'remount' first and fallback to (u)mount if unsupported.

    :returns: tuple of (rc, message) from the last operation attempted.
    """
    mount_bin = module.get_bin_path('mount', required=True)
    cmd = [mount_bin]
    # Multiplatform remount opts
    if get_platform().lower().endswith('bsd'):
        cmd += ['-u']
    else:
        cmd += ['-o', 'remount']
    if get_platform().lower() == 'openbsd':
        # Use module.params['fstab'] here as args['fstab'] has been set to the
        # default value.
        if module.params['fstab'] is not None:
            module.fail_json(
                msg=(
                    'OpenBSD does not support alternate fstab files. Do not '
                    'specify the fstab parameter for OpenBSD hosts'))
    else:
        cmd += _set_fstab_args(args['fstab'])
    cmd += [args['name']]
    out = err = ''
    try:
        if get_platform().lower().endswith('bsd'):
            # Note: Forcing BSDs to do umount/mount due to BSD remount not
            # working as expected (suspect bug in the BSD mount command)
            # Interested contributor could rework this to use mount options on
            # the CLI instead of relying on fstab
            # https://github.com/ansible/ansible-modules-core/issues/5591
            rc = 1
        else:
            rc, out, err = module.run_command(cmd)
    except Exception:
        # Bug fix: this was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; catch only real errors and fall
        # through to the umount/mount fallback below.
        rc = 1
    msg = ''
    if rc != 0:
        msg = out + err
        # Remount failed (or was skipped on BSD): unmount and mount fresh.
        rc, msg = umount(module, args['name'])
        if rc == 0:
            rc, msg = mount(module, args)
    return rc, msg
# Note if we wanted to put this into module_utils we'd have to get permission
# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923
# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439
# and @abadger to relicense from GPLv3+
def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None):
    """Return whether the dest is bind mounted
    :arg module: The AnsibleModule (used for helper functions)
    :arg dest: The directory to be mounted under. This is the primary means
        of identifying whether the destination is mounted.
    :kwarg src: The source directory. If specified, this is used to help
        ensure that we are detecting that the correct source is mounted there.
    :kwarg fstype: The filesystem type. If specified this is also used to
        help ensure that we are detecting the right mount.
    :kwarg linux_mounts: Cached list of mounts for Linux.
    :returns: True if the dest is mounted with src otherwise False.
    """
    is_mounted = False
    if get_platform() == 'Linux' and linux_mounts is not None:
        # Fast path: consult the cached mountinfo parse instead of shelling out.
        if src is None:
            # That's for unmounted/absent
            if dest in linux_mounts:
                is_mounted = True
        else:
            if dest in linux_mounts:
                is_mounted = linux_mounts[dest]['src'] == src
    else:
        # Fallback: parse `mount -l` output (non-Linux, or no cache available).
        bin_path = module.get_bin_path('mount', required=True)
        cmd = '%s -l' % bin_path
        rc, out, err = module.run_command(cmd)
        mounts = []
        if len(out):
            mounts = to_native(out).strip().split('\n')
        # Each line looks like: "<src> on <dest> type <fstype> (<opts>)".
        for mnt in mounts:
            arguments = mnt.split()
            if (
                    (arguments[0] == src or src is None) and
                    arguments[2] == dest and
                    (arguments[4] == fstype or fstype is None)):
                is_mounted = True
            if is_mounted:
                break
    return is_mounted
def get_linux_mounts(module, mntinfo_file="/proc/self/mountinfo"):
    """Gather mount information.

    Parses mntinfo_file (proc(5) mountinfo format) and returns a dict keyed
    by mount point, each value holding 'dst', 'src', 'opts' and 'fs'.  For
    bind mounts the effective source path is reconstructed from the parent
    mount entry.  Returns None if the file cannot be opened.
    """
    try:
        f = open(mntinfo_file)
    except IOError:
        # Caller treats None as "mount information unavailable".
        return

    # f.readlines() is eager, so closing the file before iterating the
    # (possibly lazy) map result is safe.
    lines = map(str.strip, f.readlines())

    try:
        f.close()
    except IOError:
        module.fail_json(msg="Cannot close file %s" % mntinfo_file)

    # First pass: index every mountinfo record by its mount ID.
    mntinfo = {}

    for line in lines:
        fields = line.split()

        record = {
            'id': int(fields[0]),          # unique mount ID
            'parent_id': int(fields[1]),   # ID of the parent mount
            'root': fields[3],             # root of the mount within its filesystem (see proc(5))
            'dst': fields[4],              # mount point relative to process root
            'opts': fields[5],             # per-mount options
            'fs': fields[-3],              # filesystem type
            'src': fields[-2]              # filesystem-specific source
        }

        mntinfo[record['id']] = record

    # Second pass: resolve bind-mount sources using the parent entries.
    mounts = {}

    for mnt in mntinfo.values():
        if mnt['parent_id'] != 1 and mnt['parent_id'] in mntinfo:
            m = mntinfo[mnt['parent_id']]

            if (
                    len(m['root']) > 1 and
                    mnt['root'].startswith("%s/" % m['root'])):
                # Omit the parent's root in the child's root
                # == Example:
                # 140 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw
                # 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw
                # == Expected result:
                # src=/tmp/aaa
                mnt['root'] = mnt['root'][len(m['root']):]

            # Prepend the parent's dst to the child's root
            # == Example:
            # 42 60 0:35 / /tmp rw - tmpfs tmpfs rw
            # 78 42 0:35 /aaa /tmp/bbb rw - tmpfs tmpfs rw
            # == Expected result:
            # src=/tmp/aaa
            if m['dst'] != '/':
                mnt['root'] = "%s%s" % (m['dst'], mnt['root'])

            src = mnt['root']
        else:
            src = mnt['src']

        record = {
            'dst': mnt['dst'],
            'src': src,
            'opts': mnt['opts'],
            'fs': mnt['fs']
        }

        mounts[mnt['dst']] = record

    return mounts
def main():
    """Module entry point: drive the mount/fstab state machine.

    Builds platform-specific default args, caches Linux mount info for
    bind-mount detection, ensures the fstab file exists, then applies the
    requested state (absent / unmounted / present / mounted).
    """
    module = AnsibleModule(
        argument_spec=dict(
            boot=dict(type='bool', default=True),
            dump=dict(type='str'),
            fstab=dict(type='str'),
            fstype=dict(type='str'),
            path=dict(type='path', required=True, aliases=['name']),
            opts=dict(type='str'),
            passno=dict(type='str'),
            src=dict(type='path'),
            backup=dict(default=False, type='bool'),
            state=dict(type='str', required=True, choices=['absent', 'mounted', 'present', 'unmounted']),
        ),
        supports_check_mode=True,
        required_if=(
            ['state', 'mounted', ['src', 'fstype']],
            ['state', 'present', ['src', 'fstype']],
        ),
    )

    # solaris args:
    #   name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab
    # linux args:
    #   name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
    # Note: Do not modify module.params['fstab'] as we need to know if the user
    # explicitly specified it in mount() and remount()
    if get_platform().lower() == 'sunos':
        args = dict(
            name=module.params['path'],
            opts='-',
            passno='-',
            fstab=module.params['fstab'],
            boot='yes'
        )
        if args['fstab'] is None:
            args['fstab'] = '/etc/vfstab'
    else:
        args = dict(
            name=module.params['path'],
            opts='defaults',
            dump='0',
            passno='0',
            fstab=module.params['fstab']
        )
        if args['fstab'] is None:
            args['fstab'] = '/etc/fstab'

        # FreeBSD doesn't have any 'default' so set 'rw' instead
        if get_platform() == 'FreeBSD':
            args['opts'] = 'rw'

    linux_mounts = []

    # Cache all mounts here in order we have consistent results if we need to
    # call is_bind_mounted() multiple times
    if get_platform() == 'Linux':
        linux_mounts = get_linux_mounts(module)

        if linux_mounts is None:
            args['warnings'] = (
                'Cannot open file /proc/self/mountinfo. '
                'Bind mounts might be misinterpreted.')

    # Override defaults with user specified params
    for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'):
        if module.params[key] is not None:
            args[key] = module.params[key]

    # If fstab file does not exist, we first need to create it. This mainly
    # happens when fstab option is passed to the module.
    if not os.path.exists(args['fstab']):
        if not os.path.exists(os.path.dirname(args['fstab'])):
            os.makedirs(os.path.dirname(args['fstab']))

        open(args['fstab'], 'a').close()

    # absent:
    #   Remove from fstab and unmounted.
    # unmounted:
    #   Do not change fstab state, but unmount.
    # present:
    #   Add to fstab, do not change mount state.
    # mounted:
    #   Add to fstab if not there and make sure it is mounted. If it has
    #   changed in fstab then remount it.
    state = module.params['state']
    name = module.params['path']
    changed = False

    if state == 'absent':
        name, changed = unset_mount(module, args)

        if changed and not module.check_mode:
            if ismount(name) or is_bind_mounted(module, linux_mounts, name):
                res, msg = umount(module, name)

                if res:
                    module.fail_json(
                        msg="Error unmounting %s: %s" % (name, msg))

            # Also remove the (now unused) mount-point directory.
            if os.path.exists(name):
                try:
                    os.rmdir(name)
                except (OSError, IOError) as e:
                    module.fail_json(msg="Error rmdir %s: %s" % (name, to_native(e)))
    elif state == 'unmounted':
        if ismount(name) or is_bind_mounted(module, linux_mounts, name):
            if not module.check_mode:
                res, msg = umount(module, name)

                if res:
                    module.fail_json(
                        msg="Error unmounting %s: %s" % (name, msg))

            changed = True
    elif state == 'mounted':
        # Create the mount point first if it is missing.
        if not os.path.exists(name) and not module.check_mode:
            try:
                os.makedirs(name)
            except (OSError, IOError) as e:
                module.fail_json(
                    msg="Error making dir %s: %s" % (name, to_native(e)))

        name, changed = set_mount(module, args)
        res = 0

        if (
                ismount(name) or
                is_bind_mounted(
                    module, linux_mounts, name, args['src'], args['fstype'])):
            # Already mounted: remount only if the fstab entry changed.
            if changed and not module.check_mode:
                res, msg = remount(module, args)
                changed = True
        else:
            changed = True

            if not module.check_mode:
                res, msg = mount(module, args)

        if res:
            module.fail_json(msg="Error mounting %s: %s" % (name, msg))
    elif state == 'present':
        name, changed = set_mount(module, args)
    else:
        module.fail_json(msg='Unexpected position reached')

    module.exit_json(changed=changed, **args)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
binhqnguyen/lena | plotting/timeout/second_retran_sequence.py | 1 | 1935 | #!/usr/bin/python
import os
import sys
import FileHandle
import re
if __name__ == "__main__":
    # Per-UE post-processing of tshark/wireshark "send sequence" dumps:
    # extracts retransmitted sequence numbers, then separates first
    # retransmissions from duplicate ("second") ones.
    if (len(sys.argv) < 2):
        print("Usage retrans_count.py <number of Ues>")
        exit(1)

    UE = int(str(sys.argv[1]))

    for i in range(0, UE):
        # Filter retransmissions out of the send sequence with grep.
        grep_cmd = 'grep "TCP Retransmission" sequence_send-700' + str(i + 2) + '.all > sequence_retrans-700' + str(i + 2) + '.dat'
        os.system(grep_cmd)

        INPUT_FILE = 'sequence_retrans-700' + str(i + 2) + '.dat'
        OUTPUT_FILE_1 = "sequence_second_retrans-700" + str(i + 2) + ".dat"
        OUTPUT_FILE_2 = "sequence_first_retrans-700" + str(i + 2) + ".dat"

        if not os.path.exists(INPUT_FILE):
            continue

        file = open(INPUT_FILE)
        line = file.readline()
        MOD = 1000  # wrap sequence-number indices for plotting

        # BUG FIX: 'flag' was read here before ever being assigned, raising a
        # NameError whenever the first line contained 'TSval'.  Initialize it
        # to 0 so the intended logic (TSval present => timestamp option in
        # use => 1448-byte segments, otherwise 1460) is preserved.
        flag = 0
        if re.search('TSval', line) and flag == 0:
            SEG_SIZE = 1448
        else:
            SEG_SIZE = 1460

        # Mode 'w+' truncates, so no separate pre-truncation pass is needed.
        outfile_1 = open(OUTPUT_FILE_1, 'w+')
        outfile_2 = open(OUTPUT_FILE_2, 'w+')

        # Map each retransmission timestamp to its TCP sequence number.
        # Assumes the timestamp (column 2) is unique per retransmission.
        retran_seq = {}
        while (line):
            tokens = line.split()
            time_stamp = tokens[1]
            seq_i = 0
            for j in range(0, len(tokens)):
                if re.match(r'Seq=', tokens[j]):
                    seq_i = j
                    break
            retran_seq[time_stamp] = int(tokens[seq_i].split("=")[1])
            line = file.readline()
        file.close()
        print(len(retran_seq))

        # A retransmission is a duplicate ("second") one if an earlier
        # timestamp already carried the same sequence number.  A single pass
        # over chronologically sorted timestamps replaces the original
        # O(n^2) double loop.
        dup_retran = {}
        seen_seqs = set()
        for key in sorted(retran_seq):
            seq = retran_seq[key]
            if seq in seen_seqs:
                dup_retran[key] = seq
            else:
                seen_seqs.add(seq)
        print(len(dup_retran))

        for key in sorted(retran_seq):
            outfile_2.write(str(key) + "\t" + str(retran_seq[key] / SEG_SIZE % MOD) + "\n")
        for key in sorted(dup_retran):
            outfile_1.write(str(key) + "\t" + str(retran_seq[key] / SEG_SIZE % MOD) + "\n")

        outfile_1.close()
        outfile_2.close()
| gpl-2.0 |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/pam-0.1.3-py2.7.egg/pam.py | 55 | 3769 | # (c) 2007 Chris AtLee <chris@atlee.ca>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
PAM module for python
Provides an authenticate function that will allow the caller to authenticate
a user against the Pluggable Authentication Modules (PAM) on the system.
Implemented using ctypes, so no compilation is necessary.
"""
__all__ = ['authenticate']
from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
from ctypes.util import find_library
# Handles to the system PAM and C shared libraries.
LIBPAM = CDLL(find_library("pam"))
LIBC = CDLL(find_library("c"))

# calloc(3): allocates the pam_response array handed back to PAM; PAM takes
# ownership and free()s it.
CALLOC = LIBC.calloc
CALLOC.restype = c_void_p
CALLOC.argtypes = [c_uint, c_uint]

# strdup(3): copies the password into C-allocated memory so PAM can free it.
STRDUP = LIBC.strdup
# BUG FIX: this attribute was misspelled 'argstypes', which ctypes silently
# ignores, so the argument type declaration never took effect.
STRDUP.argtypes = [c_char_p]
# restype must NOT be c_char_p: ctypes would auto-convert the result to a
# Python string and the raw pointer (needed for the later cast) would be lost.
STRDUP.restype = POINTER(c_char)

# Conversation message style constants (from <security/pam_appl.h>).
PAM_PROMPT_ECHO_OFF = 1
PAM_PROMPT_ECHO_ON = 2
PAM_ERROR_MSG = 3
PAM_TEXT_INFO = 4
class PamHandle(Structure):
    """wrapper class for pam_handle_t (opaque handle returned by pam_start)"""
    _fields_ = [
        ("handle", c_void_p)
    ]

    def __init__(self):
        Structure.__init__(self)
        # NULL until pam_start() fills it in.
        self.handle = 0
class PamMessage(Structure):
    """wrapper class for struct pam_message (a prompt sent by PAM to the app)"""
    _fields_ = [
        ("msg_style", c_int),  # one of the PAM_PROMPT_*/PAM_*_MSG constants
        ("msg", c_char_p),     # the prompt text itself
    ]

    def __repr__(self):
        return "<PamMessage %i '%s'>" % (self.msg_style, self.msg)
class PamResponse(Structure):
    """wrapper class for struct pam_response (the app's answer to a prompt)"""
    _fields_ = [
        ("resp", c_char_p),      # C-allocated answer string; PAM free()s it
        ("resp_retcode", c_int), # unused, should be zero
    ]

    def __repr__(self):
        return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp)
# Prototype of the PAM conversation callback:
#   int conv(int num_msg, const struct pam_message **msg,
#            struct pam_response **resp, void *appdata_ptr)
CONV_FUNC = CFUNCTYPE(c_int,
                      c_int, POINTER(POINTER(PamMessage)),
                      POINTER(POINTER(PamResponse)), c_void_p)
class PamConv(Structure):
    """wrapper class for struct pam_conv (conversation callback + user data)"""
    _fields_ = [
        ("conv", CONV_FUNC),
        ("appdata_ptr", c_void_p)
    ]
# int pam_start(const char *service, const char *user,
#               const struct pam_conv *conv, pam_handle_t **pamh)
PAM_START = LIBPAM.pam_start
PAM_START.restype = c_int
PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv),
                      POINTER(PamHandle)]

# int pam_authenticate(pam_handle_t *pamh, int flags)
PAM_AUTHENTICATE = LIBPAM.pam_authenticate
PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
def authenticate(username, password, service='login'):
    """Returns True if the given username and password authenticate for the
    given service.  Returns False otherwise.

    ``username``: the username to authenticate
    ``password``: the password in plain text
    ``service``: the PAM service to authenticate against.
                 Defaults to 'login'"""
    @CONV_FUNC
    def my_conv(n_messages, messages, p_response, app_data):
        """Simple conversation function that responds to any
        prompt where the echo is off with the supplied password"""
        # Create an array of n_messages response objects with calloc() so
        # that PAM can free() the array when it is done with it.
        addr = CALLOC(n_messages, sizeof(PamResponse))
        p_response[0] = cast(addr, POINTER(PamResponse))
        for i in range(n_messages):
            if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
                # strdup() the password so PAM owns (and frees) the copy.
                pw_copy = STRDUP(str(password))
                p_response.contents[i].resp = cast(pw_copy, c_char_p)
                p_response.contents[i].resp_retcode = 0
        return 0

    handle = PamHandle()
    conv = PamConv(my_conv, 0)
    retval = PAM_START(service, username, pointer(conv), pointer(handle))

    if retval != 0:
        # TODO: This is not an authentication error, something
        # has gone wrong starting up PAM
        return False

    retval = PAM_AUTHENTICATE(handle, 0)
    # BUG FIX: the handle returned by pam_start() was never released, leaking
    # a PAM session on every call.  pam_end(3) must be called to free it.
    LIBPAM.pam_end(handle, retval)
    return retval == 0
if __name__ == "__main__":
    # Ad-hoc manual check: interactively authenticate the current user.
    # (Python 2 print statement; this module predates Python 3.)
    import getpass
    print authenticate(getpass.getuser(), getpass.getpass())
| apache-2.0 |
intfrr/shaka-player | third_party/gjslint/closure_linter-2.3.13/closure_linter/closurizednamespacesinfo.py | 107 | 19655 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
import re
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType

# Namespaces that may be goog.require'd without ever being referenced:
# they register global side effects (test assertion / jsunit bootstrapping).
DEFAULT_EXTRA_NAMESPACES = [
    'goog.testing.asserts',
    'goog.testing.jsunit',
]
class ClosurizedNamespacesInfo(object):
"""Dependency information for closurized JavaScript files.
Processes token streams for dependency creation or usage and provides logic
for determining if a given require or provide statement is unnecessary or if
there are missing require or provide statements.
"""
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
    """Initializes an instance the ClosurizedNamespacesInfo class.

    Args:
      closurized_namespaces: A list of namespace prefixes that should be
          processed for dependency information. Non-matching namespaces are
          ignored.
      ignored_extra_namespaces: A list of namespaces that should not be reported
          as extra regardless of whether they are actually used.
    """
    self._closurized_namespaces = closurized_namespaces
    # Always tolerate the default testing-bootstrap namespaces in addition
    # to the caller-supplied ones.
    self._ignored_extra_namespaces = (ignored_extra_namespaces +
                                      DEFAULT_EXTRA_NAMESPACES)
    self.Reset()
def Reset(self):
"""Resets the internal state to prepare for processing a new file."""
# A list of goog.provide tokens in the order they appeared in the file.
self._provide_tokens = []
# A list of goog.require tokens in the order they appeared in the file.
self._require_tokens = []
# Namespaces that are already goog.provided.
self._provided_namespaces = []
# Namespaces that are already goog.required.
self._required_namespaces = []
# Note that created_namespaces and used_namespaces contain both namespaces
# and identifiers because there are many existing cases where a method or
# constant is provided directly instead of its namespace. Ideally, these
# two lists would only have to contain namespaces.
# A list of tuples where the first element is the namespace of an identifier
# created in the file, the second is the identifier itself and the third is
# the line number where it's created.
self._created_namespaces = []
# A list of tuples where the first element is the namespace of an identifier
# used in the file, the second is the identifier itself and the third is the
# line number where it's used.
self._used_namespaces = []
# A list of seemingly-unnecessary namespaces that are goog.required() and
# annotated with @suppress {extraRequire}.
self._suppressed_requires = []
# A list of goog.provide tokens which are duplicates.
self._duplicate_provide_tokens = []
# A list of goog.require tokens which are duplicates.
self._duplicate_require_tokens = []
# Whether this file is in a goog.scope. Someday, we may add support
# for checking scopified namespaces, but for now let's just fail
# in a more reasonable way.
self._scopified_file = False
# TODO(user): Handle the case where there are 2 different requires
# that can satisfy the same dependency, but only one is necessary.
def GetProvidedNamespaces(self):
    """Returns the namespaces which are already provided by this file.

    Returns:
      A set of strings where each string is a 'namespace' corresponding to an
      existing goog.provide statement in the file being checked.
    """
    return set(self._provided_namespaces)
def GetRequiredNamespaces(self):
    """Returns the namespaces which are already required by this file.

    Returns:
      A set of strings where each string is a 'namespace' corresponding to an
      existing goog.require statement in the file being checked.
    """
    return set(self._required_namespaces)
def IsExtraProvide(self, token):
    """Returns whether the given goog.provide token is unnecessary.

    Args:
      token: A goog.provide token.

    Returns:
      True if the given token corresponds to an unnecessary goog.provide
      statement, otherwise False.
    """
    namespace = tokenutil.GetStringAfterToken(token)

    # Only namespaces under the configured closurized roots are checked.
    if namespace.split('.', 1)[0] not in self._closurized_namespaces:
        return False

    if token in self._duplicate_provide_tokens:
        return True

    # The provide is justified if anything matching the namespace or the
    # exact identifier is created in this file.
    # TODO(user): There's probably a faster way to compute this.
    for created_namespace, created_identifier, _ in self._created_namespaces:
        if namespace in (created_namespace, created_identifier):
            return False

    return True
def IsExtraRequire(self, token):
    """Returns whether the given goog.require token is unnecessary.

    Args:
      token: A goog.require token.

    Returns:
      True if the given token corresponds to an unnecessary goog.require
      statement, otherwise False.
    """
    namespace = tokenutil.GetStringAfterToken(token)

    # Only namespaces under the configured closurized roots are checked.
    if namespace.split('.', 1)[0] not in self._closurized_namespaces:
        return False

    # Some namespaces are required purely for their side effects.
    if namespace in self._ignored_extra_namespaces:
        return False

    if token in self._duplicate_require_tokens:
        return True

    if namespace in self._suppressed_requires:
        return False

    # If the namespace contains a component that is initial caps, then that
    # must be the last component of the namespace.
    parts = namespace.split('.')
    if len(parts) > 1 and parts[-2][0].isupper():
        return True

    # The require is justified by any usage of the namespace or the exact
    # identifier in this file.
    # TODO(user): There's probably a faster way to compute this.
    for used_namespace, used_identifier, _ in self._used_namespaces:
        if namespace in (used_namespace, used_identifier):
            return False

    return True
def GetMissingProvides(self):
"""Returns the dict of missing provided namespaces for the current file.
Returns:
Returns a dictionary of key as string and value as integer where each
string(key) is a namespace that should be provided by this file, but is
not and integer(value) is first line number where it's defined.
"""
missing_provides = dict()
for namespace, identifier, line_number in self._created_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in self._provided_namespaces and
identifier not in self._provided_namespaces and
namespace not in self._required_namespaces and
namespace not in missing_provides):
missing_provides[namespace] = line_number
return missing_provides
def GetMissingRequires(self):
"""Returns the dict of missing required namespaces for the current file.
For each non-private identifier used in the file, find either a
goog.require, goog.provide or a created identifier that satisfies it.
goog.require statements can satisfy the identifier by requiring either the
namespace of the identifier or the identifier itself. goog.provide
statements can satisfy the identifier by providing the namespace of the
identifier. A created identifier can only satisfy the used identifier if
it matches it exactly (necessary since things can be defined on a
namespace in more than one file). Note that provided namespaces should be
a subset of created namespaces, but we check both because in some cases we
can't always detect the creation of the namespace.
Returns:
Returns a dictionary of key as string and value integer where each
string(key) is a namespace that should be required by this file, but is
not and integer(value) is first line number where it's used.
"""
external_dependencies = set(self._required_namespaces)
# Assume goog namespace is always available.
external_dependencies.add('goog')
created_identifiers = set()
for namespace, identifier, line_number in self._created_namespaces:
created_identifiers.add(identifier)
missing_requires = dict()
for namespace, identifier, line_number in self._used_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in external_dependencies and
namespace not in self._provided_namespaces and
identifier not in external_dependencies and
identifier not in created_identifiers and
namespace not in missing_requires):
missing_requires[namespace] = line_number
return missing_requires
def _IsPrivateIdentifier(self, identifier):
"""Returns whether the given identifer is private."""
pieces = identifier.split('.')
for piece in pieces:
if piece.endswith('_'):
return True
return False
def IsFirstProvide(self, token):
"""Returns whether token is the first provide token."""
return self._provide_tokens and token == self._provide_tokens[0]
def IsFirstRequire(self, token):
"""Returns whether token is the first require token."""
return self._require_tokens and token == self._require_tokens[0]
def IsLastProvide(self, token):
"""Returns whether token is the last provide token."""
return self._provide_tokens and token == self._provide_tokens[-1]
def IsLastRequire(self, token):
"""Returns whether token is the last require token."""
return self._require_tokens and token == self._require_tokens[-1]
def ProcessToken(self, token, state_tracker):
    """Processes the given token for dependency information.

    Args:
      token: The token to process.
      state_tracker: The JavaScript state tracker.
    """
    # Note that this method is in the critical path for the linter and has been
    # optimized for performance in the following ways:
    # - Tokens are checked by type first to minimize the number of function
    #   calls necessary to determine if action needs to be taken for the token.
    # - The most common tokens types are checked for first.
    # - The number of function calls has been minimized (thus the length of this
    #   function.

    if token.type == TokenType.IDENTIFIER:
        # TODO(user): Consider saving the whole identifier in metadata.
        whole_identifier_string = tokenutil.GetIdentifierForToken(token)
        if whole_identifier_string is None:
            # We only want to process the identifier one time. If the whole string
            # identifier is None, that means this token was part of a multi-token
            # identifier, but it was not the first token of the identifier.
            return

        # In the odd case that a goog.require is encountered inside a function,
        # just ignore it (e.g. dynamic loading in test runners).
        if token.string == 'goog.require' and not state_tracker.InFunction():
            self._require_tokens.append(token)
            namespace = tokenutil.GetStringAfterToken(token)
            if namespace in self._required_namespaces:
                self._duplicate_require_tokens.append(token)
            else:
                self._required_namespaces.append(namespace)

            # If there is a suppression for the require, add a usage for it so it
            # gets treated as a regular goog.require (i.e. still gets sorted).
            jsdoc = state_tracker.GetDocComment()
            if jsdoc and ('extraRequire' in jsdoc.suppressions):
                self._suppressed_requires.append(namespace)
                self._AddUsedNamespace(state_tracker, namespace, token.line_number)

        elif token.string == 'goog.provide':
            self._provide_tokens.append(token)
            namespace = tokenutil.GetStringAfterToken(token)
            if namespace in self._provided_namespaces:
                self._duplicate_provide_tokens.append(token)
            else:
                self._provided_namespaces.append(namespace)

            # If there is a suppression for the provide, add a creation for it so it
            # gets treated as a regular goog.provide (i.e. still gets sorted).
            jsdoc = state_tracker.GetDocComment()
            if jsdoc and ('extraProvide' in jsdoc.suppressions):
                self._AddCreatedNamespace(state_tracker, namespace, token.line_number)

        elif token.string == 'goog.scope':
            self._scopified_file = True

        elif token.string == 'goog.setTestOnly':
            # Since the message is optional, we don't want to scan to later lines.
            for t in tokenutil.GetAllTokensInSameLine(token):
                if t.type == TokenType.STRING_TEXT:
                    message = t.string

                    if re.match(r'^\w+(\.\w+)+$', message):
                        # This looks like a namespace. If it's a Closurized namespace,
                        # consider it created.
                        base_namespace = message.split('.', 1)[0]
                        if base_namespace in self._closurized_namespaces:
                            self._AddCreatedNamespace(state_tracker, message,
                                                      token.line_number)

                    break
        else:
            jsdoc = state_tracker.GetDocComment()

            # A registered alias takes precedence over the literal identifier.
            if token.metadata and token.metadata.aliased_symbol:
                whole_identifier_string = token.metadata.aliased_symbol

            if jsdoc and jsdoc.HasFlag('typedef'):
                # A typedef creates its identifier rather than using it.
                self._AddCreatedNamespace(state_tracker, whole_identifier_string,
                                          token.line_number,
                                          namespace=self.GetClosurizedNamespace(
                                              whole_identifier_string))
            else:
                if not (token.metadata and token.metadata.is_alias_definition):
                    self._AddUsedNamespace(state_tracker, whole_identifier_string,
                                           token.line_number)

    elif token.type == TokenType.SIMPLE_LVALUE:
        identifier = token.values['identifier']
        start_token = tokenutil.GetIdentifierStart(token)
        if start_token and start_token != token:
            # Multi-line identifier being assigned. Get the whole identifier.
            identifier = tokenutil.GetIdentifierForToken(start_token)
        else:
            start_token = token
        # If an alias is defined on the start_token, use it instead.
        if (start_token and
            start_token.metadata and
            start_token.metadata.aliased_symbol and
            not start_token.metadata.is_alias_definition):
            identifier = start_token.metadata.aliased_symbol

        if identifier:
            namespace = self.GetClosurizedNamespace(identifier)
            if state_tracker.InFunction():
                # Assignments inside functions are usages, not creations.
                self._AddUsedNamespace(state_tracker, identifier, token.line_number)
            elif namespace and namespace != 'goog':
                self._AddCreatedNamespace(state_tracker, identifier,
                                          token.line_number, namespace=namespace)

    elif token.type == TokenType.DOC_FLAG:
        flag_type = token.attached_object.flag_type
        is_interface = state_tracker.GetDocComment().HasFlag('interface')
        if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
            # Interfaces should be goog.require'd.
            doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
            interface = tokenutil.Search(doc_start, TokenType.COMMENT)
            self._AddUsedNamespace(state_tracker, interface.string,
                                   token.line_number)
def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
namespace=None):
"""Adds the namespace of an identifier to the list of created namespaces.
If the identifier is annotated with a 'missingProvide' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: The identifier to add.
line_number: Line number where namespace is created.
namespace: The namespace of the identifier or None if the identifier is
also the namespace.
"""
if not namespace:
namespace = identifier
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingProvide' in jsdoc.suppressions:
return
self._created_namespaces.append([namespace, identifier, line_number])
def _AddUsedNamespace(self, state_tracker, identifier, line_number):
    """Adds the namespace of an identifier to the list of used namespaces.

    If the identifier is annotated with a 'missingRequire' suppression, it is
    not added.

    Args:
      state_tracker: The JavaScriptStateTracker instance.
      identifier: An identifier which has been used.
      line_number: Line number where namespace is used.
    """
    jsdoc = state_tracker.GetDocComment()
    if jsdoc and 'missingRequire' in jsdoc.suppressions:
        return

    namespace = self.GetClosurizedNamespace(identifier)
    # b/5362203 If its a variable in scope then its not a required namespace.
    if namespace and not state_tracker.IsVariableInScope(namespace):
        self._used_namespaces.append([namespace, identifier, line_number])
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
parts = identifier.split('.')
for namespace in self._closurized_namespaces:
if not identifier.startswith(namespace + '.'):
continue
last_part = parts[-1]
if not last_part:
# TODO(robbyw): Handle this: it's a multi-line identifier.
return None
# The namespace for a class is the shortest prefix ending in a class
# name, which starts with a capital letter but is not a capitalized word.
#
# We ultimately do not want to allow requiring or providing of inner
# classes/enums. Instead, a file should provide only the top-level class
# and users should require only that.
namespace = []
for part in parts:
if part == 'prototype' or part.isupper():
return '.'.join(namespace)
namespace.append(part)
if part[0].isupper():
return '.'.join(namespace)
# At this point, we know there's no class or enum, so the namespace is
# just the identifier with the last part removed. With the exception of
# apply, inherits, and call, which should also be stripped.
if parts[-1] in ('apply', 'inherits', 'call'):
parts.pop()
parts.pop()
# If the last part ends with an underscore, it is a private variable,
# method, or enum. The namespace is whatever is before it.
if parts and parts[-1].endswith('_'):
parts.pop()
return '.'.join(parts)
return None
| apache-2.0 |
elaird/supy-tflat | look.py | 1 | 7705 | import supy
import configuration
import displayer
import steps
import calculables
import calculables.sv
import ROOT as r
ss = supy.steps
sshv = ss.histos.value
class look(supy.analysis):
def listOfSteps(self, pars):
mMax = 500.0
return [ss.printer.progressPrinter(),
# sshv("t1ByCombinedIsolationDeltaBetaCorrRaw3Hits", 100, 0.0, 10.0, xtitle="isolation (GeV)"),
# sshv("t2ByCombinedIsolationDeltaBetaCorrRaw3Hits", 100, 0.0, 10.0, xtitle="isolation (GeV)"),
# ss.filters.value("t1GenMass", min=0.0),
# ss.filters.value("t2GenMass", min=0.0),
ss.histos.pt("rv1", 100, 0.0, 100.0, xtitle="reco vis 1 p_{T}"),
ss.histos.pt("rv2", 100, 0.0, 100.0, xtitle="reco vis 2 p_{T}"),
# ss.filters.pt("rv1", min=35.0),
# ss.filters.pt("rv2", min=35.0),
ss.filters.value("DR1", max=0.4),
ss.filters.value("DR2", max=0.4),
sshv("CDT1", 150, -1.0, 2.0, xtitle="cos(#Delta#theta_{1})"),
sshv("CDT2", 150, -1.0, 2.0, xtitle="cos(#Delta#theta_{2})"),
sshv("DR1", 100, 0.0, 10.0, xtitle="#DeltaR_{1} (gen,reco)"),
sshv("DR2", 100, 0.0, 10.0, xtitle="#DeltaR_{2} (gen,reco)"),
sshv("gDPhi12", 20, -r.TMath.Pi(), r.TMath.Pi(), xtitle="gen. taus' #Delta#phi"),
sshv("rDPhi12", 20, -r.TMath.Pi(), r.TMath.Pi(), xtitle="reco. taus' #Delta#phi"),
sshv("gv1_gt1", 60, 0.0, 2.0, xtitle="gen. vis. p_{T} 1 (#mu/e/h_{1}) / gen. tau p_{T}"),
sshv("gv2_gt2", 60, 0.0, 2.0, xtitle="gen. vis. p_{T} 2 (h_{2}/h/e) / gen. tau p_{T}"),
steps.visHistos(),
ss.histos.pt("nus", 100, 0.0, 100.0, xtitle="(sum nu) pT (GeV)"),
sshv("pfmetrho_xy", 100, -1.0, 1.0, xtitle="correlation of METx and METy"),
sshv("genmetgauspfmet", 100, 0.0, 5.0, xtitle="Gaus(PF MET, genmet, cov.) (#sigma)"),
sshv("nusgauspfmet", 100, 0.0, 5.0, xtitle="Gaus(PF MET, nus, cov.) (#sigma)"),
sshv(("gvMass", "rvMass"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="gen visible mass (GeV);reco visible mass (GeV)"),
sshv(("gtMass", "rvMass"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="gen mass (GeV);reco visible mass (GeV)"),
sshv(("genMetEt", "pt_two_nu"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="gen met (GeV);pt two nu (GeV)"),
sshv(("nus_pt", "pt_two_nu"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="(sum nu) pT (GeV);pt two nu (GeV)"),
sshv(("genMetEt", "nus_pt"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="gen met (GeV);(sum nu) pT (GeV)"),
sshv(("genMetEt", "pfMetEt"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="gen met (GeV);pf met (GeV)"),
sshv(("nus_pt", "pfMetEt"), (100, 100), (0.0, 0.0), (mMax, mMax), xtitle="(sum nu) pT (GeV);pf met (GeV)"),
# ss.filters.multiplicity("measured_tau_leptons", min=2, max=2),
# ss.other.touchstuff(["nSelected"]),
# ss.other.touchstuff(["pfmetsvfitter"]),
# ss.other.touchstuff(["pfmetsvs"]),
steps.svHistos(met="pfmet", svs=["mc", "vg", "pl"]),
# #displayer.displayer(),
]
def listOfCalculables(self, pars):
    """Return all calculables (derived event quantities) the steps rely on."""
    out = supy.calculables.zeroArgs(supy.calculables)
    # out += supy.calculables.zeroArgs(calculables) # fixme
    out += [calculables.nus(),
            calculables.nus_pt(),
            calculables.pt_two_nu(),
            calculables.DR1(),
            calculables.DR2(),
            calculables.CDT1(),
            calculables.CDT2(),
            calculables.gDPhi12(),
            calculables.rDPhi12(),
            calculables.gt1(),
            calculables.gt2(),
            calculables.gv1(),
            calculables.gv2(),
            calculables.gv1_gt1(),
            calculables.gv2_gt2(),
            calculables.rv1_gv1(),
            calculables.rv2_gv2(),
            calculables.rv1(),
            calculables.rv2(),
            calculables.gtMass(),
            calculables.gvMass(),
            calculables.rvMass(),
            calculables.genmet(),
            calculables.pfmet(),
            calculables.nSelected(),
            # MET covariance matrix in both symmetric and full form.
            calculables.cov("pfmet", sym=True),
            calculables.cov("pfmet", sym=False),
            calculables.rho_xy("pfmet"),
            calculables.gaus(("genmet", "pfmet")),
            calculables.gaus(("nus", "pfmet")),
            calculables.sv.measured_tau_leptons(),
            calculables.sv.has_hadronic_taus(),
            calculables.sv.svfitter(met="pfmet", verbosity=2),
            calculables.sv.svs(met="pfmet", mc=True, vg=False, pl=False),
            ]
    return out
def listOfSampleDictionaries(self):
    """Register the four DY->tautau decay-channel skims and return the holder."""
    holder = supy.samples.SampleHolder()
    # xs in pb
    # holder.add('dy_ll', 'utils.fileListFromDisk("/home/elaird/v3/DY_all_SYNC_tt.root", pruneList=False, isDirectory=False)', xs=3504.)
    # template = 'utils.fileListFromDisk("/user_data/zmao/13TeV_samples_25ns/%s_inclusive.root", pruneList=False, isDirectory=False)'
    template = 'utils.fileListFromDisk("/user_data/elaird/svSkim-sep21/%s_inclusive.root", pruneList=False, isDirectory=False)'
    for channel in ("tt", "mt", "et", "em"):
        # fake xs; to be updated with skim eff.
        holder.add('dy_%s' % channel,
                   template % ('DY_all_ZTT_SYNC_%s' % channel),
                   xs=3504.)
    return [holder]
def listOfSamples(self, pars):
    """Return the samples to process, each capped at 1000 events."""
    from supy.samples import specify
    max_events = 1000
    samples = []
    for sample_name in ("dy_tt", "dy_mt", "dy_et", "dy_em"):
        samples += specify(names=sample_name, nEventsMax=max_events)
    return samples
def conclude(self, pars):
    """Merge the per-channel samples, scale to luminosity and plot everything."""
    org = self.organizer(pars, verbose=True)

    def gopts(name="", color=1):
        # Common graphics options applied to each merged sample.
        return {"name": name, "color": color, "markerStyle": 1, "lineWidth": 2, "goptions": "ehist"}
    # One merged spec per decay channel, each with its own color.
    for new, old, color in [("DY->tt", "dy_tt", r.kBlue),
                            ("DY->mt", "dy_mt", r.kRed),
                            ("DY->et", "dy_et", r.kOrange + 3),
                            ("DY->em", "dy_em", r.kGreen),
                            ]:
        org.mergeSamples(targetSpec=gopts(new, color), sources=[old])
    # org.scale()  # to data
    org.scale(lumiToUseInAbsenceOfData=4.0e3)  # /pb
    # org.scale(toPdf=True)

    def yx(h):
        # Overlay the y=x diagonal on profile plots (gen-vs-reco comparisons).
        if "_prof" in h.GetName():
            ax = h.GetXaxis()
            f = r.TF1("yx", "x", ax.GetXmin(), ax.GetXmax())
            f.SetLineWidth(1)
            f.Draw("same")
            return f
    supy.plotter(org,
                 pdfFileName=self.pdfFileName(org.tag),
                 printImperfectCalcPageIfEmpty=False,
                 printXs=True,
                 blackList=["lumiHisto", "xsHisto", "nJobsHisto"],
                 rowColors=[r.kBlack, r.kViolet+4],
                 doLog=False,
                 # pegMinimum=0.1,
                 fitFunc=yx,
                 ).plotAll()
| gpl-3.0 |
xavierwu/scikit-learn | sklearn/manifold/setup.py | 99 | 1243 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """Build the distutils configuration for the sklearn.manifold extensions."""
    config = Configuration("manifold", parent_package, top_path)

    # Link against libm on POSIX platforms.
    libraries = ['m'] if os.name == 'posix' else []

    config.add_extension("_utils",
                         sources=["_utils.c"],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries,
                         extra_compile_args=["-O3"])

    cblas_libs, blas_info = get_blas_info()
    compile_args = blas_info.pop('extra_compile_args', [])
    compile_args.append("-O4")
    tsne_include_dirs = [join('..', 'src', 'cblas'),
                         numpy.get_include(),
                         blas_info.pop('include_dirs', [])]
    config.add_extension("_barnes_hut_tsne",
                         libraries=cblas_libs,
                         sources=["_barnes_hut_tsne.c"],
                         include_dirs=tsne_include_dirs,
                         extra_compile_args=compile_args,
                         **blas_info)

    return config
# Standalone entry point: allows building this subpackage in isolation.
if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
| bsd-3-clause |
pilou-/ansible | lib/ansible/modules/cloud/rackspace/rax_dns_record.py | 57 | 11401 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
type: bool
version_added: 2.1
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import (rax_argument_spec,
rax_find_loadbalancer,
rax_find_server,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
                       name=None, server=None, state='present', ttl=7200):
    """Create or delete a PTR record attached to a server or load balancer.

    Exits through module.exit_json/fail_json; never returns normally.

    :param module: the AnsibleModule instance
    :param data: IP address the PTR record points from
    :param comment: optional record comment
    :param loadbalancer: load balancer ID owning the record (or None)
    :param name: FQDN the PTR record resolves to
    :param server: server ID owning the record (or None)
    :param state: 'present' or 'absent'
    :param ttl: record time-to-live in seconds
    """
    changed = False
    results = []

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Resolve the device the PTR record belongs to.
    item = None
    if loadbalancer:
        item = rax_find_loadbalancer(module, pyrax, loadbalancer)
    elif server:
        item = rax_find_server(module, pyrax, server)
    if item is None:
        # Defensive: main() already enforces this, but fail cleanly rather
        # than raising UnboundLocalError if called directly.
        module.fail_json(msg='one of the following is required: '
                             'server,loadbalancer')

    if state == 'present':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                if record.ttl != ttl or record.name != name:
                    try:
                        dns.update_ptr_record(item, record, name, data, ttl)
                        changed = True
                    except Exception as e:
                        # '%s' % e, not e.message: Exception.message was
                        # removed in Python 3 and would raise AttributeError.
                        module.fail_json(msg='%s' % e)
                    record.ttl = ttl
                    record.name = name
                    results.append(rax_to_dict(record))
                    break
                else:
                    results.append(rax_to_dict(record))
                    break

        if not results:
            record = dict(name=name, type='PTR', data=data, ttl=ttl,
                          comment=comment)
            try:
                results = dns.add_ptr_records(item, [record])
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        module.exit_json(changed=changed, records=results)

    elif state == 'absent':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                results.append(rax_to_dict(record))
                break

        if results:
            try:
                dns.delete_ptr_records(item, data)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
                   overwrite=True, priority=None, record_type='A',
                   state='present', ttl=7200):
    """Function for manipulating record types other than PTR.

    Creates, updates or deletes a record inside *domain* and exits through
    module.exit_json/fail_json; never returns normally.
    """
    changed = False

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        if not priority and record_type in ['MX', 'SRV']:
            module.fail_json(msg='A "priority" attribute is required for '
                                 'creating a MX or SRV record')

        try:
            domain = dns.find(name=domain)
        except Exception as e:
            # '%s' % e, not e.message: Exception.message was removed in
            # Python 3 and would raise AttributeError (same fix throughout).
            module.fail_json(msg='%s' % e)

        try:
            if overwrite:
                record = domain.find_record(record_type, name=name)
            else:
                record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='overwrite=true and there are multiple matching records')
        except pyrax.exceptions.DomainRecordNotFound as e:
            # No existing record: create one.
            try:
                record_data = {
                    'type': record_type,
                    'name': name,
                    'data': data,
                    'ttl': ttl
                }
                if comment:
                    record_data.update(dict(comment=comment))
                if priority and record_type.upper() in ['MX', 'SRV']:
                    record_data.update(dict(priority=priority))

                record = domain.add_records([record_data])[0]
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        # Bring the (found or freshly created) record in line with the
        # requested attributes.
        update = {}
        if comment != getattr(record, 'comment', None):
            update['comment'] = comment
        if ttl != getattr(record, 'ttl', None):
            update['ttl'] = ttl
        if priority != getattr(record, 'priority', None):
            update['priority'] = priority
        if data != getattr(record, 'data', None):
            update['data'] = data

        if update:
            try:
                record.update(**update)
                changed = True
                record.get()
            except Exception as e:
                module.fail_json(msg='%s' % e)

    elif state == 'absent':
        try:
            domain = dns.find(name=domain)
        except Exception as e:
            module.fail_json(msg='%s' % e)

        try:
            record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotFound as e:
            record = {}
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='%s' % e)

        if record:
            try:
                record.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

    module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
    """Module entry point: parse arguments and dispatch to the record handlers."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            comment=dict(),
            data=dict(required=True),
            domain=dict(),
            loadbalancer=dict(),
            name=dict(required=True),
            overwrite=dict(type='bool', default=True),
            priority=dict(type='int'),
            server=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            ttl=dict(type='int', default=3600),
            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
                                              'SRV', 'TXT', 'PTR'])
        )
    )

    # 'server'/'loadbalancer' (PTR) and 'domain' (all other types) are
    # mutually exclusive, and exactly one of them must be supplied.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
        mutually_exclusive=[
            ['server', 'loadbalancer', 'domain'],
        ],
        required_one_of=[
            ['server', 'loadbalancer', 'domain'],
        ],
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    comment = module.params.get('comment')
    data = module.params.get('data')
    domain = module.params.get('domain')
    loadbalancer = module.params.get('loadbalancer')
    name = module.params.get('name')
    overwrite = module.params.get('overwrite')
    priority = module.params.get('priority')
    server = module.params.get('server')
    state = module.params.get('state')
    ttl = module.params.get('ttl')
    record_type = module.params.get('type')

    setup_rax_module(module, pyrax, False)

    # PTR records hang off a device (server or load balancer); every other
    # record type is managed within a domain.
    if record_type.upper() == 'PTR':
        if not server and not loadbalancer:
            module.fail_json(msg='one of the following is required: '
                                 'server,loadbalancer')
        rax_dns_record_ptr(module, data=data, comment=comment,
                           loadbalancer=loadbalancer, name=name, server=server,
                           state=state, ttl=ttl)
    else:
        rax_dns_record(module, comment=comment, data=data, domain=domain,
                       name=name, overwrite=overwrite, priority=priority,
                       record_type=record_type, state=state, ttl=ttl)


if __name__ == '__main__':
    main()
| gpl-3.0 |
keli/furion | furion/socks5.py | 1 | 14581 | import struct
import select
import socket
import ssl
try:
import socketserver
except ImportError:
import SocketServer as socketserver
import logging
from .helpers import make_connection, my_inet_aton, hexstring, trigger_upstream_check
# https://github.com/gevent/gevent/issues/477
# Re-add sslwrap to Python 2.7.9
# Python 2.7.9 removed the private _ssl.sslwrap helper that older gevent
# versions monkey-patch; restore it from helpers.new_sslwrap when missing.
__ssl__ = __import__('ssl')

try:
    _ssl = __ssl__._ssl
except AttributeError:
    # Some interpreters expose the C accelerator under _ssl2 instead.
    _ssl = __ssl__._ssl2

if not hasattr(_ssl, 'sslwrap'):
    from .helpers import new_sslwrap
    _ssl.sslwrap = new_sslwrap
# ###################################
# Constants
####################################

BUF_SIZE = 1024  # socket read chunk size in bytes
TIME_OUT = 30    # select() timeout in seconds while forwarding

# Socks5 stages (state machine of the request handler)
INIT_STAGE = 0     # expecting the client's method-selection message
AUTH_STAGE = 1     # expecting the username/password sub-negotiation
FINAL_STAGE = 2    # expecting the CONNECT request
CONN_ACCEPTED = 3  # connection established; start forwarding

# Socks5 auth codes (single status byte sent in the auth reply)
AUTH_SUCCESSFUL = b'\x00'
AUTH_ERR_SERVER = b'\x01'
AUTH_ERR_BANDWIDTH = b'\x02'
AUTH_ERR_NOPLANFOUND = b'\x03'
AUTH_ERR_USERNOTFOUND = b'\x04'
####################################
# Exceptions
####################################
class Socks5Exception(Exception):
    """Base socks5 exception class.

    All protocol-level errors raised by this module derive from this class,
    so callers can catch Socks5Exception to handle any SOCKS5 failure.
    """
    pass
class Socks5NoAuthMethodAccepted(Socks5Exception):
    """Raised when client and server share no acceptable auth method."""

    def __init__(self):
        message = "No auth method accepted."
        Exception.__init__(self, message)
class Socks5AuthFailed(Socks5Exception):
    """Raised when SOCKS5 username/password authentication fails."""

    def __init__(self, reason=None):
        message = "Authentication failed: %s." % reason if reason else "Authentication failed."
        Exception.__init__(self, message)
class Socks5DnsFailed(Socks5Exception):
    """Raised when a DNS resolution attempt fails."""

    def __init__(self):
        Exception.__init__(self, "DNS resolve failed.")
class Socks5ConnectionFailed(Socks5Exception):
    """Raised when connecting to the upstream proxy or destination fails."""

    def __init__(self):
        Exception.__init__(self, "Connection to upstream/destination failed.")
class Socks5RemoteConnectionClosed(Socks5Exception):
    """Raised when the remote end closes its connection."""

    def __init__(self):
        Exception.__init__(self, "Remote connection closed.")
class Socks5SocketError(Socks5Exception):
    """Raised on a socket error during data forwarding."""

    def __init__(self):
        Exception.__init__(self, "A socket error occurred when forwarding.")
class Socks5ConnectionClosed(Socks5Exception):
    """Raised when the client closes the SOCKS5 connection."""

    def __init__(self):
        Exception.__init__(self, "Socks5 connection closed.")
class Socks5NotImplemented(Socks5Exception):
    """Raised for protocol features this server does not implement."""

    def __init__(self):
        Exception.__init__(self, "Protocol not implemented yet.")
class Socks5PortForbidden(Socks5Exception):
    """Raised when the requested destination port is not in the allowed list."""

    def __init__(self, port):
        message = "Port %d is not allowed" % port
        Exception.__init__(self, message)
####################################
# Socks5 handlers
####################################
class Socks5RequestHandler(socketserver.StreamRequestHandler):
    """Socks5 request handler.

    Implements the server side of the SOCKS5 handshake as a small state
    machine (INIT -> [AUTH] -> FINAL -> CONN_ACCEPTED), then forwards bytes
    between client and destination.

    NOTE(review): attributes such as local_auth, authority, upstream_addr,
    upstream_username, upstream_password, upstream_ssl and allowed_ports are
    expected to be set on the handler/server by the embedding application --
    confirm against the server setup code.
    """

    def setup(self):
        """Initialize per-connection accounting state."""
        socketserver.StreamRequestHandler.setup(self)
        self.bytes_in = 0       # bytes forwarded server -> client
        self.bytes_out = 0      # bytes forwarded client -> server
        self.member_id = 0      # set by authority.auth() when local auth is on
        self.client_name = None
        self.server_name = None

    def handle(self):
        """Main handler: run the SOCKS5 handshake, then forward traffic."""
        stage = INIT_STAGE
        leftover = b''  # partial message carried over between recv() calls
        dest = None

        try:
            while stage < CONN_ACCEPTED:
                data = self.request.recv(BUF_SIZE)
                # Client closed connection
                if not data:
                    raise Socks5ConnectionClosed
                data = leftover + data
                if len(data) < 3:
                    leftover = data
                    continue
                # Init stage
                if stage == INIT_STAGE:
                    # If no auth required
                    if not self.local_auth and data == b'\x05\x01\x00':
                        self.request.sendall(b'\x05\x00')
                        stage = FINAL_STAGE
                        continue
                    # if username/password auth required
                    elif self.local_auth and data == b'\x05\x01\x02':
                        self.request.sendall(b'\x05\x02')
                        stage = AUTH_STAGE
                        continue
                    # no auth method accepted
                    else:
                        self.request.sendall(b'\x05\xFF')
                        #print(hexstring(data))
                        raise Socks5NoAuthMethodAccepted
                # Auth stage
                elif stage == AUTH_STAGE:
                    # Username/password sub-negotiation: ver, ulen, uname,
                    # plen, passwd.  Wait for more data until complete.
                    name_length, = struct.unpack('B', data[1:2])
                    if len(data[2:]) < name_length + 1:
                        leftover = data
                        continue
                    pass_length, = struct.unpack('B', data[2 + name_length:2 + name_length + 1])
                    if len(data[2 + name_length + 1:]) < pass_length:
                        leftover = data
                        continue
                    username = data[2:2 + name_length].decode('utf-8')
                    password = data[2 + name_length + 1:].decode('utf-8')
                    self.member_id, error_code = self.authority.auth(username, password)
                    if error_code != AUTH_SUCCESSFUL:
                        self.request.sendall(b'\x01' + error_code)
                        logging.info('Auth failed for user: %s', username)
                        raise Socks5AuthFailed
                    else:
                        self.request.sendall(b'\x01\x00')
                        logging.info('Auth succeeded for user: %s', username)
                        stage = FINAL_STAGE
                # Final stage
                elif stage == FINAL_STAGE:
                    if len(data) < 10:
                        leftover = data
                        continue
                    # Only TCP connections and IPV4 are allowed
                    if data[:2] != b'\x05\x01' or data[3:4] == b'\x04':
                        # Protocol error
                        self.request.sendall(b'\x05\x07')
                        raise Socks5NotImplemented
                    else:
                        domain = port = None
                        # Connect by domain name (0x03), or resolve-only
                        # request (0x02, a furion-specific extension)
                        if data[3:4] == b'\x03' or data[3:4] == b'\x02':
                            length, = struct.unpack('B', data[4:5])
                            domain = data[5:5 + length]
                            port, = struct.unpack('!H', data[5 + length:])
                        # Connect by ip address
                        elif data[3:4] == b'\x01':
                            domain = socket.inet_ntoa(data[4:8])
                            port, = struct.unpack('!H', data[8:])

                        try:
                            # Resolve domain to ip
                            if data[3:4] == b'\x02':
                                # list(...) is required: filter() returns an
                                # iterator on Python 3 and is not subscriptable.
                                # Keep only IPv4 results (family == AF_INET == 2).
                                _, _, _, _, sa = \
                                    list(filter(lambda x: x[0] == 2,
                                                socket.getaddrinfo(domain, port, 0, socket.SOCK_STREAM)))[0]
                                ip, _ = sa
                                ip_bytes = my_inet_aton(ip)
                                port_bytes = struct.pack('!H', port)
                                self.request.sendall(b'\x05\x00\x00\x02' + ip_bytes + port_bytes)
                                # Return without actually connecting to domain
                                break
                            # Connect to destination
                            else:
                                dest = self.connect(domain, port, data)
                                # If connected to upstream/destination, let client know
                                dsockname = dest.getsockname()
                                client_ip = dsockname[0]
                                client_port = dsockname[1]
                                ip_bytes = my_inet_aton(client_ip)
                                port_bytes = struct.pack('!H', client_port)
                                self.request.sendall(b'\x05\x00\x00\x01' + ip_bytes + port_bytes)
                                stage = CONN_ACCEPTED
                        except:
                            logging.exception('Error when trying to resolve/connect to: %s', (domain, port))
                            self.request.sendall(b'\x05\x01')
                            raise

            # Starting to forward data
            try:
                if dest:
                    result = self.forward(self.request, dest)
                    if result:
                        logging.debug("Forwarding finished")
                    else:
                        logging.debug('Exception/timeout when forwarding')
            except:
                logging.exception('Error when forwarding')
            finally:
                if dest:
                    dest.close()
                logging.info("%d bytes out, %d bytes in. Socks5 session finished %s <-> %s.", self.bytes_out,
                             self.bytes_in, self.client_name, self.server_name)
                # Report traffic usage for accounting when local auth is on.
                if self.local_auth and (self.bytes_in or self.bytes_out):
                    self.authority.usage(self.member_id, self.bytes_in + self.bytes_out)
        except Socks5Exception:
            logging.exception('Connection closed')
        except:
            logging.exception('Error when proxying')
            #traceback.print_exc()
        finally:
            try:
                self.request.shutdown(socket.SHUT_RDWR)
            except:
                self.request.close()
            return

    def connect(self, domain, port, data):
        """Open the outgoing connection, via the upstream proxy if configured."""
        # Connect to upstream instead of destination
        if self.upstream_addr:
            sc = Socks5Client(self.upstream_addr, self.upstream_username, self.upstream_password,
                              data, enable_ssl=self.upstream_ssl)
            logging.info("Connecting to %s via upstream %s.", domain, self.upstream_addr)
            return sc.connect()
        else:
            # Connect to destination directly
            if len(self.allowed_ports) > 0 and port not in self.allowed_ports:
                raise Socks5PortForbidden(port)
            my_ip = self.request.getsockname()[0]
            logging.info("Connecting to %s.", domain)
            return make_connection((domain, port), my_ip)

    def forward(self, client, server):
        """forward data between sockets

        Returns True on a clean EOF from either side, False on a select()
        exception or timeout.
        """
        self.client_name = client.getpeername()
        self.server_name = server.getpeername()
        while True:
            readables, writeables, exceptions = select.select(
                [client, server], [], [], TIME_OUT)
            # exception or timeout
            if exceptions or (readables, writeables, exceptions) == ([], [], []):
                return False
            data = ''
            for readable in readables:
                data = readable.recv(BUF_SIZE)
                if data:
                    if readable == client:
                        self.bytes_out += len(data)
                        # NOTE(review): send() may write fewer bytes than
                        # given; sendall() would guarantee delivery -- TODO confirm
                        server.send(data)
                    else:
                        self.bytes_in += len(data)
                        client.send(data)
                else:
                    return True
class Socks5Client:
    """A socks5 client with optional SSL support."""

    def __init__(self, addr, username='', password='', data='',
                 enable_ssl=True, bind_to=None, to_upstream=True, dns_only=False):
        """
        :param addr: socket server address tuple
        :param username: username
        :param password: password
        :param data: a tuple of remote address you plan to connect to, or packed data of it.
        :param enable_ssl: if ssl should be enabled
        :param bind_to: ip to bind to for the local socket
        :param to_upstream: if an upstream is used
        :param dns_only: ask the server to resolve the name only (furion extension)
        :return: established socket or resolved address when dns_only is True
        """
        self.addr = addr
        self.enable_ssl = enable_ssl
        self.username = username.encode('utf-8')
        self.password = password.encode('utf-8')
        self.data = data
        self.bind_to = bind_to
        self.to_upstream = to_upstream
        self.dns_only = dns_only

    def connect(self):
        """Perform the SOCKS5 handshake and return the connected socket
        (or the resolved IP string when dns_only is set)."""
        dest = make_connection(self.addr, self.bind_to, self.to_upstream)

        # SSL enabled
        # NOTE(review): ssl.wrap_socket was removed in Python 3.12; an
        # SSLContext-based replacement may be needed there -- TODO confirm.
        if dest and self.enable_ssl:
            dest = ssl.wrap_socket(dest)

        if not dest:
            trigger_upstream_check()
            raise Socks5ConnectionFailed()

        # Server needs authentication
        if self.username and self.password:
            # Send auth method (username/password auth)
            dest.sendall(b'\x05\x01\x02')
            ans = dest.recv(BUF_SIZE)
            # Method accepted
            if ans == b'\x05\x02':
                name_length = struct.pack('B', len(self.username))
                pass_length = struct.pack('B', len(self.password))
                # Start auth
                dest.sendall(b'\x01' + name_length + self.username + pass_length + self.password)
                ans = dest.recv(BUF_SIZE)
                # Auth failed
                if ans != b'\x01\x00':
                    # Compare one-byte *slices*, not ans[1]: indexing a bytes
                    # object yields an int on Python 3, so the previous
                    # `ans[1] == AUTH_ERR_*` comparisons were always False and
                    # the specific error branches below were unreachable.
                    status = ans[1:2]
                    if not ans or status == AUTH_ERR_SERVER:
                        raise Socks5AuthFailed("An error occurred on server")
                    elif status == AUTH_ERR_BANDWIDTH:
                        raise Socks5AuthFailed("Bandwidth usage exceeded quota")
                    elif status == AUTH_ERR_NOPLANFOUND:
                        raise Socks5AuthFailed("Can't find a subscribed plan for user")
                    elif status == AUTH_ERR_USERNOTFOUND:
                        raise Socks5AuthFailed("User not found or wrong password")
                    else:
                        raise Socks5AuthFailed
            else:
                raise Socks5AuthFailed("No accepted authentication method")
        # No auth needed
        else:
            dest.sendall(b'\x05\x01\x00')
            ans = dest.recv(BUF_SIZE)
            if ans != b'\x05\x00':
                raise Socks5AuthFailed

        # Build the CONNECT (or resolve-only) request.
        if isinstance(self.data, tuple):
            domain, port = self.data
            domain = domain.encode('utf-8')
            port_str = struct.pack('!H', port)
            len_str = struct.pack('B', len(domain))
            if self.dns_only:
                addr_type = b'\x02'  # furion-specific "resolve only" type
            else:
                addr_type = b'\x03'  # standard domain-name address type
            data = b'\x05\x01\x00' + addr_type + len_str + domain + port_str
        else:
            data = self.data

        dest.sendall(data)
        ans = dest.recv(BUF_SIZE)
        if ans.startswith(b'\x05\x00'):
            # Slice, not ans[3]: bytes indexing returns an int on Python 3,
            # which made the dns_only reply branch unreachable before.
            if ans[3:4] == b'\x02':
                return socket.inet_ntoa(ans[4:8])
            else:
                return dest
        else:
            raise Socks5ConnectionFailed
| bsd-2-clause |
Jack301/toolkit | handleExcel/exportor/scripts/xlrd-1.0.0/xlrd/formula.py | 77 | 94301 | # -*- coding: cp1252 -*-
##
# Module for parsing/evaluating Microsoft Excel formulas.
#
# <p>Copyright © 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under
# a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
from __future__ import print_function
import copy
from struct import unpack
from .timemachine import *
from .biffh import unpack_unicode_update_pos, unpack_string_update_pos, \
XLRDError, hex_char_dump, error_text_from_code, BaseObject
# Public API of this module: operand-kind constants, the main entry points,
# the cell/range naming helpers and the formula-type flags.
__all__ = [
    'oBOOL', 'oERR', 'oNUM', 'oREF', 'oREL', 'oSTRG', 'oUNK',
    'decompile_formula',
    'dump_formula',
    'evaluate_name_formula',
    'okind_dict',
    'rangename3d', 'rangename3drel', 'cellname', 'cellnameabs', 'colname',
    'FMLA_TYPE_CELL',
    'FMLA_TYPE_SHARED',
    'FMLA_TYPE_ARRAY',
    'FMLA_TYPE_COND_FMT',
    'FMLA_TYPE_DATA_VAL',
    'FMLA_TYPE_NAME',
    ]
# Bit flags identifying the context in which a formula occurs.
FMLA_TYPE_CELL = 1
FMLA_TYPE_SHARED = 2
FMLA_TYPE_ARRAY = 4
FMLA_TYPE_COND_FMT = 8
FMLA_TYPE_DATA_VAL = 16
FMLA_TYPE_NAME = 32
ALL_FMLA_TYPES = 63  # bitwise OR of all formula-type flags above

# Human-readable name for each formula-type flag (used in dumps/messages).
FMLA_TYPEDESCR_MAP = {
    1 : 'CELL',
    2 : 'SHARED',
    4 : 'ARRAY',
    8 : 'COND-FMT',
    16: 'DATA-VAL',
    32: 'NAME',
    }

# Maps a token opcode to the set of formula-type flags in which that token
# is NOT allowed; bound to .get so a missing opcode yields None (allowed).
_TOKEN_NOT_ALLOWED = {
    0x01: ALL_FMLA_TYPES - FMLA_TYPE_CELL, # tExp
    0x02: ALL_FMLA_TYPES - FMLA_TYPE_CELL, # tTbl
    0x0F: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tIsect
    0x10: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tUnion/List
    0x11: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tRange
    0x20: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tArray
    0x23: FMLA_TYPE_SHARED, # tName
    0x39: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tNameX
    0x3A: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tRef3d
    0x3B: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tArea3d
    0x2C: FMLA_TYPE_CELL + FMLA_TYPE_ARRAY, # tRefN
    0x2D: FMLA_TYPE_CELL + FMLA_TYPE_ARRAY, # tAreaN
    # plus weird stuff like tMem*
    }.get
# Operand-kind codes for evaluated formula results.
oBOOL = 3
oERR = 4
oMSNG = 5 # tMissArg
oNUM = 2
oREF = -1  # absolute reference
oREL = -2  # relative reference
oSTRG = 1
oUNK = 0

# Reverse map: operand-kind code -> its symbolic name (for debugging output).
okind_dict = {
    -2: "oREL",
    -1: "oREF",
    0 : "oUNK",
    1 : "oSTRG",
    2 : "oNUM",
    3 : "oBOOL",
    4 : "oERR",
    5 : "oMSNG",
    }

listsep = ',' #### probably should depend on locale
# sztabN[opcode] -> the number of bytes to consume.
# -1 means variable
# -2 means this opcode not implemented in this version.
# Which N to use? Depends on biff_version; see szdict.
sztab0 = [-2, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 8, 4, 2, 2, 3, 9, 8, 2, 3, 8, 4, 7, 5, 5, 5, 2, 4, 7, 4, 7, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, 3, -2, -2, -2, -2, -2, -2, -2]
sztab1 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 11, 5, 2, 2, 3, 9, 9, 2, 3, 11, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, 3, -2, -2, -2, -2, -2, -2, -2]
sztab2 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 11, 5, 2, 2, 3, 9, 9, 3, 4, 11, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2]
sztab3 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, -2, -2, 2, 2, 3, 9, 9, 3, 4, 15, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, 25, 18, 21, 18, 21, -2, -2]
sztab4 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -2, -2, 2, 2, 3, 9, 9, 3, 4, 5, 5, 9, 7, 7, 7, 3, 5, 9, 5, 9, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, 7, 7, 11, 7, 11, -2, -2]
szdict = {
20 : sztab0,
21 : sztab0,
30 : sztab1,
40 : sztab2,
45 : sztab2,
50 : sztab3,
70 : sztab3,
80 : sztab4,
}
# For debugging purposes ... the name for each opcode
# (without the prefix "t" used on OOo docs)
onames = ['Unk00', 'Exp', 'Tbl', 'Add', 'Sub', 'Mul', 'Div', 'Power', 'Concat', 'LT', 'LE', 'EQ', 'GE', 'GT', 'NE', 'Isect', 'List', 'Range', 'Uplus', 'Uminus', 'Percent', 'Paren', 'MissArg', 'Str', 'Extended', 'Attr', 'Sheet', 'EndSheet', 'Err', 'Bool', 'Int', 'Num', 'Array', 'Func', 'FuncVar', 'Name', 'Ref', 'Area', 'MemArea', 'MemErr', 'MemNoMem', 'MemFunc', 'RefErr', 'AreaErr', 'RefN', 'AreaN', 'MemAreaN', 'MemNoMemN', '', '', '', '', '', '', '', '', 'FuncCE', 'NameX', 'Ref3d', 'Area3d', 'RefErr3d', 'AreaErr3d', '', '']
func_defs = {
# index: (name, min#args, max#args, flags, #known_args, return_type, kargs)
0 : ('COUNT', 0, 30, 0x04, 1, 'V', 'R'),
1 : ('IF', 2, 3, 0x04, 3, 'V', 'VRR'),
2 : ('ISNA', 1, 1, 0x02, 1, 'V', 'V'),
3 : ('ISERROR', 1, 1, 0x02, 1, 'V', 'V'),
4 : ('SUM', 0, 30, 0x04, 1, 'V', 'R'),
5 : ('AVERAGE', 1, 30, 0x04, 1, 'V', 'R'),
6 : ('MIN', 1, 30, 0x04, 1, 'V', 'R'),
7 : ('MAX', 1, 30, 0x04, 1, 'V', 'R'),
8 : ('ROW', 0, 1, 0x04, 1, 'V', 'R'),
9 : ('COLUMN', 0, 1, 0x04, 1, 'V', 'R'),
10 : ('NA', 0, 0, 0x02, 0, 'V', ''),
11 : ('NPV', 2, 30, 0x04, 2, 'V', 'VR'),
12 : ('STDEV', 1, 30, 0x04, 1, 'V', 'R'),
13 : ('DOLLAR', 1, 2, 0x04, 1, 'V', 'V'),
14 : ('FIXED', 2, 3, 0x04, 3, 'V', 'VVV'),
15 : ('SIN', 1, 1, 0x02, 1, 'V', 'V'),
16 : ('COS', 1, 1, 0x02, 1, 'V', 'V'),
17 : ('TAN', 1, 1, 0x02, 1, 'V', 'V'),
18 : ('ATAN', 1, 1, 0x02, 1, 'V', 'V'),
19 : ('PI', 0, 0, 0x02, 0, 'V', ''),
20 : ('SQRT', 1, 1, 0x02, 1, 'V', 'V'),
21 : ('EXP', 1, 1, 0x02, 1, 'V', 'V'),
22 : ('LN', 1, 1, 0x02, 1, 'V', 'V'),
23 : ('LOG10', 1, 1, 0x02, 1, 'V', 'V'),
24 : ('ABS', 1, 1, 0x02, 1, 'V', 'V'),
25 : ('INT', 1, 1, 0x02, 1, 'V', 'V'),
26 : ('SIGN', 1, 1, 0x02, 1, 'V', 'V'),
27 : ('ROUND', 2, 2, 0x02, 2, 'V', 'VV'),
28 : ('LOOKUP', 2, 3, 0x04, 2, 'V', 'VR'),
29 : ('INDEX', 2, 4, 0x0c, 4, 'R', 'RVVV'),
30 : ('REPT', 2, 2, 0x02, 2, 'V', 'VV'),
31 : ('MID', 3, 3, 0x02, 3, 'V', 'VVV'),
32 : ('LEN', 1, 1, 0x02, 1, 'V', 'V'),
33 : ('VALUE', 1, 1, 0x02, 1, 'V', 'V'),
34 : ('TRUE', 0, 0, 0x02, 0, 'V', ''),
35 : ('FALSE', 0, 0, 0x02, 0, 'V', ''),
36 : ('AND', 1, 30, 0x04, 1, 'V', 'R'),
37 : ('OR', 1, 30, 0x04, 1, 'V', 'R'),
38 : ('NOT', 1, 1, 0x02, 1, 'V', 'V'),
39 : ('MOD', 2, 2, 0x02, 2, 'V', 'VV'),
40 : ('DCOUNT', 3, 3, 0x02, 3, 'V', 'RRR'),
41 : ('DSUM', 3, 3, 0x02, 3, 'V', 'RRR'),
42 : ('DAVERAGE', 3, 3, 0x02, 3, 'V', 'RRR'),
43 : ('DMIN', 3, 3, 0x02, 3, 'V', 'RRR'),
44 : ('DMAX', 3, 3, 0x02, 3, 'V', 'RRR'),
45 : ('DSTDEV', 3, 3, 0x02, 3, 'V', 'RRR'),
46 : ('VAR', 1, 30, 0x04, 1, 'V', 'R'),
47 : ('DVAR', 3, 3, 0x02, 3, 'V', 'RRR'),
48 : ('TEXT', 2, 2, 0x02, 2, 'V', 'VV'),
49 : ('LINEST', 1, 4, 0x04, 4, 'A', 'RRVV'),
50 : ('TREND', 1, 4, 0x04, 4, 'A', 'RRRV'),
51 : ('LOGEST', 1, 4, 0x04, 4, 'A', 'RRVV'),
52 : ('GROWTH', 1, 4, 0x04, 4, 'A', 'RRRV'),
56 : ('PV', 3, 5, 0x04, 5, 'V', 'VVVVV'),
57 : ('FV', 3, 5, 0x04, 5, 'V', 'VVVVV'),
58 : ('NPER', 3, 5, 0x04, 5, 'V', 'VVVVV'),
59 : ('PMT', 3, 5, 0x04, 5, 'V', 'VVVVV'),
60 : ('RATE', 3, 6, 0x04, 6, 'V', 'VVVVVV'),
61 : ('MIRR', 3, 3, 0x02, 3, 'V', 'RVV'),
62 : ('IRR', 1, 2, 0x04, 2, 'V', 'RV'),
63 : ('RAND', 0, 0, 0x0a, 0, 'V', ''),
64 : ('MATCH', 2, 3, 0x04, 3, 'V', 'VRR'),
65 : ('DATE', 3, 3, 0x02, 3, 'V', 'VVV'),
66 : ('TIME', 3, 3, 0x02, 3, 'V', 'VVV'),
67 : ('DAY', 1, 1, 0x02, 1, 'V', 'V'),
68 : ('MONTH', 1, 1, 0x02, 1, 'V', 'V'),
69 : ('YEAR', 1, 1, 0x02, 1, 'V', 'V'),
70 : ('WEEKDAY', 1, 2, 0x04, 2, 'V', 'VV'),
71 : ('HOUR', 1, 1, 0x02, 1, 'V', 'V'),
72 : ('MINUTE', 1, 1, 0x02, 1, 'V', 'V'),
73 : ('SECOND', 1, 1, 0x02, 1, 'V', 'V'),
74 : ('NOW', 0, 0, 0x0a, 0, 'V', ''),
75 : ('AREAS', 1, 1, 0x02, 1, 'V', 'R'),
76 : ('ROWS', 1, 1, 0x02, 1, 'V', 'R'),
77 : ('COLUMNS', 1, 1, 0x02, 1, 'V', 'R'),
78 : ('OFFSET', 3, 5, 0x04, 5, 'R', 'RVVVV'),
82 : ('SEARCH', 2, 3, 0x04, 3, 'V', 'VVV'),
83 : ('TRANSPOSE', 1, 1, 0x02, 1, 'A', 'A'),
86 : ('TYPE', 1, 1, 0x02, 1, 'V', 'V'),
92 : ('SERIESSUM', 4, 4, 0x02, 4, 'V', 'VVVA'),
97 : ('ATAN2', 2, 2, 0x02, 2, 'V', 'VV'),
98 : ('ASIN', 1, 1, 0x02, 1, 'V', 'V'),
99 : ('ACOS', 1, 1, 0x02, 1, 'V', 'V'),
100: ('CHOOSE', 2, 30, 0x04, 2, 'V', 'VR'),
101: ('HLOOKUP', 3, 4, 0x04, 4, 'V', 'VRRV'),
102: ('VLOOKUP', 3, 4, 0x04, 4, 'V', 'VRRV'),
105: ('ISREF', 1, 1, 0x02, 1, 'V', 'R'),
109: ('LOG', 1, 2, 0x04, 2, 'V', 'VV'),
111: ('CHAR', 1, 1, 0x02, 1, 'V', 'V'),
112: ('LOWER', 1, 1, 0x02, 1, 'V', 'V'),
113: ('UPPER', 1, 1, 0x02, 1, 'V', 'V'),
114: ('PROPER', 1, 1, 0x02, 1, 'V', 'V'),
115: ('LEFT', 1, 2, 0x04, 2, 'V', 'VV'),
116: ('RIGHT', 1, 2, 0x04, 2, 'V', 'VV'),
117: ('EXACT', 2, 2, 0x02, 2, 'V', 'VV'),
118: ('TRIM', 1, 1, 0x02, 1, 'V', 'V'),
119: ('REPLACE', 4, 4, 0x02, 4, 'V', 'VVVV'),
120: ('SUBSTITUTE', 3, 4, 0x04, 4, 'V', 'VVVV'),
121: ('CODE', 1, 1, 0x02, 1, 'V', 'V'),
124: ('FIND', 2, 3, 0x04, 3, 'V', 'VVV'),
125: ('CELL', 1, 2, 0x0c, 2, 'V', 'VR'),
126: ('ISERR', 1, 1, 0x02, 1, 'V', 'V'),
127: ('ISTEXT', 1, 1, 0x02, 1, 'V', 'V'),
128: ('ISNUMBER', 1, 1, 0x02, 1, 'V', 'V'),
129: ('ISBLANK', 1, 1, 0x02, 1, 'V', 'V'),
130: ('T', 1, 1, 0x02, 1, 'V', 'R'),
131: ('N', 1, 1, 0x02, 1, 'V', 'R'),
140: ('DATEVALUE', 1, 1, 0x02, 1, 'V', 'V'),
141: ('TIMEVALUE', 1, 1, 0x02, 1, 'V', 'V'),
142: ('SLN', 3, 3, 0x02, 3, 'V', 'VVV'),
143: ('SYD', 4, 4, 0x02, 4, 'V', 'VVVV'),
144: ('DDB', 4, 5, 0x04, 5, 'V', 'VVVVV'),
148: ('INDIRECT', 1, 2, 0x0c, 2, 'R', 'VV'),
162: ('CLEAN', 1, 1, 0x02, 1, 'V', 'V'),
163: ('MDETERM', 1, 1, 0x02, 1, 'V', 'A'),
164: ('MINVERSE', 1, 1, 0x02, 1, 'A', 'A'),
165: ('MMULT', 2, 2, 0x02, 2, 'A', 'AA'),
167: ('IPMT', 4, 6, 0x04, 6, 'V', 'VVVVVV'),
168: ('PPMT', 4, 6, 0x04, 6, 'V', 'VVVVVV'),
169: ('COUNTA', 0, 30, 0x04, 1, 'V', 'R'),
183: ('PRODUCT', 0, 30, 0x04, 1, 'V', 'R'),
184: ('FACT', 1, 1, 0x02, 1, 'V', 'V'),
189: ('DPRODUCT', 3, 3, 0x02, 3, 'V', 'RRR'),
190: ('ISNONTEXT', 1, 1, 0x02, 1, 'V', 'V'),
193: ('STDEVP', 1, 30, 0x04, 1, 'V', 'R'),
194: ('VARP', 1, 30, 0x04, 1, 'V', 'R'),
195: ('DSTDEVP', 3, 3, 0x02, 3, 'V', 'RRR'),
196: ('DVARP', 3, 3, 0x02, 3, 'V', 'RRR'),
197: ('TRUNC', 1, 2, 0x04, 2, 'V', 'VV'),
198: ('ISLOGICAL', 1, 1, 0x02, 1, 'V', 'V'),
199: ('DCOUNTA', 3, 3, 0x02, 3, 'V', 'RRR'),
204: ('USDOLLAR', 1, 2, 0x04, 2, 'V', 'VV'),
205: ('FINDB', 2, 3, 0x04, 3, 'V', 'VVV'),
206: ('SEARCHB', 2, 3, 0x04, 3, 'V', 'VVV'),
207: ('REPLACEB', 4, 4, 0x02, 4, 'V', 'VVVV'),
208: ('LEFTB', 1, 2, 0x04, 2, 'V', 'VV'),
209: ('RIGHTB', 1, 2, 0x04, 2, 'V', 'VV'),
210: ('MIDB', 3, 3, 0x02, 3, 'V', 'VVV'),
211: ('LENB', 1, 1, 0x02, 1, 'V', 'V'),
212: ('ROUNDUP', 2, 2, 0x02, 2, 'V', 'VV'),
213: ('ROUNDDOWN', 2, 2, 0x02, 2, 'V', 'VV'),
214: ('ASC', 1, 1, 0x02, 1, 'V', 'V'),
215: ('DBCS', 1, 1, 0x02, 1, 'V', 'V'),
216: ('RANK', 2, 3, 0x04, 3, 'V', 'VRV'),
219: ('ADDRESS', 2, 5, 0x04, 5, 'V', 'VVVVV'),
220: ('DAYS360', 2, 3, 0x04, 3, 'V', 'VVV'),
221: ('TODAY', 0, 0, 0x0a, 0, 'V', ''),
222: ('VDB', 5, 7, 0x04, 7, 'V', 'VVVVVVV'),
227: ('MEDIAN', 1, 30, 0x04, 1, 'V', 'R'),
228: ('SUMPRODUCT', 1, 30, 0x04, 1, 'V', 'A'),
229: ('SINH', 1, 1, 0x02, 1, 'V', 'V'),
230: ('COSH', 1, 1, 0x02, 1, 'V', 'V'),
231: ('TANH', 1, 1, 0x02, 1, 'V', 'V'),
232: ('ASINH', 1, 1, 0x02, 1, 'V', 'V'),
233: ('ACOSH', 1, 1, 0x02, 1, 'V', 'V'),
234: ('ATANH', 1, 1, 0x02, 1, 'V', 'V'),
235: ('DGET', 3, 3, 0x02, 3, 'V', 'RRR'),
244: ('INFO', 1, 1, 0x02, 1, 'V', 'V'),
247: ('DB', 4, 5, 0x04, 5, 'V', 'VVVVV'),
252: ('FREQUENCY', 2, 2, 0x02, 2, 'A', 'RR'),
261: ('ERROR.TYPE', 1, 1, 0x02, 1, 'V', 'V'),
269: ('AVEDEV', 1, 30, 0x04, 1, 'V', 'R'),
270: ('BETADIST', 3, 5, 0x04, 1, 'V', 'V'),
271: ('GAMMALN', 1, 1, 0x02, 1, 'V', 'V'),
272: ('BETAINV', 3, 5, 0x04, 1, 'V', 'V'),
273: ('BINOMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
274: ('CHIDIST', 2, 2, 0x02, 2, 'V', 'VV'),
275: ('CHIINV', 2, 2, 0x02, 2, 'V', 'VV'),
276: ('COMBIN', 2, 2, 0x02, 2, 'V', 'VV'),
277: ('CONFIDENCE', 3, 3, 0x02, 3, 'V', 'VVV'),
278: ('CRITBINOM', 3, 3, 0x02, 3, 'V', 'VVV'),
279: ('EVEN', 1, 1, 0x02, 1, 'V', 'V'),
280: ('EXPONDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
281: ('FDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
282: ('FINV', 3, 3, 0x02, 3, 'V', 'VVV'),
283: ('FISHER', 1, 1, 0x02, 1, 'V', 'V'),
284: ('FISHERINV', 1, 1, 0x02, 1, 'V', 'V'),
285: ('FLOOR', 2, 2, 0x02, 2, 'V', 'VV'),
286: ('GAMMADIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
287: ('GAMMAINV', 3, 3, 0x02, 3, 'V', 'VVV'),
288: ('CEILING', 2, 2, 0x02, 2, 'V', 'VV'),
289: ('HYPGEOMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
290: ('LOGNORMDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
291: ('LOGINV', 3, 3, 0x02, 3, 'V', 'VVV'),
292: ('NEGBINOMDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
293: ('NORMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
294: ('NORMSDIST', 1, 1, 0x02, 1, 'V', 'V'),
295: ('NORMINV', 3, 3, 0x02, 3, 'V', 'VVV'),
296: ('NORMSINV', 1, 1, 0x02, 1, 'V', 'V'),
297: ('STANDARDIZE', 3, 3, 0x02, 3, 'V', 'VVV'),
298: ('ODD', 1, 1, 0x02, 1, 'V', 'V'),
299: ('PERMUT', 2, 2, 0x02, 2, 'V', 'VV'),
300: ('POISSON', 3, 3, 0x02, 3, 'V', 'VVV'),
301: ('TDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
302: ('WEIBULL', 4, 4, 0x02, 4, 'V', 'VVVV'),
303: ('SUMXMY2', 2, 2, 0x02, 2, 'V', 'AA'),
304: ('SUMX2MY2', 2, 2, 0x02, 2, 'V', 'AA'),
305: ('SUMX2PY2', 2, 2, 0x02, 2, 'V', 'AA'),
306: ('CHITEST', 2, 2, 0x02, 2, 'V', 'AA'),
307: ('CORREL', 2, 2, 0x02, 2, 'V', 'AA'),
308: ('COVAR', 2, 2, 0x02, 2, 'V', 'AA'),
309: ('FORECAST', 3, 3, 0x02, 3, 'V', 'VAA'),
310: ('FTEST', 2, 2, 0x02, 2, 'V', 'AA'),
311: ('INTERCEPT', 2, 2, 0x02, 2, 'V', 'AA'),
312: ('PEARSON', 2, 2, 0x02, 2, 'V', 'AA'),
313: ('RSQ', 2, 2, 0x02, 2, 'V', 'AA'),
314: ('STEYX', 2, 2, 0x02, 2, 'V', 'AA'),
315: ('SLOPE', 2, 2, 0x02, 2, 'V', 'AA'),
316: ('TTEST', 4, 4, 0x02, 4, 'V', 'AAVV'),
317: ('PROB', 3, 4, 0x04, 3, 'V', 'AAV'),
318: ('DEVSQ', 1, 30, 0x04, 1, 'V', 'R'),
319: ('GEOMEAN', 1, 30, 0x04, 1, 'V', 'R'),
320: ('HARMEAN', 1, 30, 0x04, 1, 'V', 'R'),
321: ('SUMSQ', 0, 30, 0x04, 1, 'V', 'R'),
322: ('KURT', 1, 30, 0x04, 1, 'V', 'R'),
323: ('SKEW', 1, 30, 0x04, 1, 'V', 'R'),
324: ('ZTEST', 2, 3, 0x04, 2, 'V', 'RV'),
325: ('LARGE', 2, 2, 0x02, 2, 'V', 'RV'),
326: ('SMALL', 2, 2, 0x02, 2, 'V', 'RV'),
327: ('QUARTILE', 2, 2, 0x02, 2, 'V', 'RV'),
328: ('PERCENTILE', 2, 2, 0x02, 2, 'V', 'RV'),
329: ('PERCENTRANK', 2, 3, 0x04, 2, 'V', 'RV'),
330: ('MODE', 1, 30, 0x04, 1, 'V', 'A'),
331: ('TRIMMEAN', 2, 2, 0x02, 2, 'V', 'RV'),
332: ('TINV', 2, 2, 0x02, 2, 'V', 'VV'),
336: ('CONCATENATE', 0, 30, 0x04, 1, 'V', 'V'),
337: ('POWER', 2, 2, 0x02, 2, 'V', 'VV'),
342: ('RADIANS', 1, 1, 0x02, 1, 'V', 'V'),
343: ('DEGREES', 1, 1, 0x02, 1, 'V', 'V'),
344: ('SUBTOTAL', 2, 30, 0x04, 2, 'V', 'VR'),
345: ('SUMIF', 2, 3, 0x04, 3, 'V', 'RVR'),
346: ('COUNTIF', 2, 2, 0x02, 2, 'V', 'RV'),
347: ('COUNTBLANK', 1, 1, 0x02, 1, 'V', 'R'),
350: ('ISPMT', 4, 4, 0x02, 4, 'V', 'VVVV'),
351: ('DATEDIF', 3, 3, 0x02, 3, 'V', 'VVV'),
352: ('DATESTRING', 1, 1, 0x02, 1, 'V', 'V'),
353: ('NUMBERSTRING', 2, 2, 0x02, 2, 'V', 'VV'),
354: ('ROMAN', 1, 2, 0x04, 2, 'V', 'VV'),
358: ('GETPIVOTDATA', 2, 2, 0x02, 2, 'V', 'RV'),
359: ('HYPERLINK', 1, 2, 0x04, 2, 'V', 'VV'),
360: ('PHONETIC', 1, 1, 0x02, 1, 'V', 'V'),
361: ('AVERAGEA', 1, 30, 0x04, 1, 'V', 'R'),
362: ('MAXA', 1, 30, 0x04, 1, 'V', 'R'),
363: ('MINA', 1, 30, 0x04, 1, 'V', 'R'),
364: ('STDEVPA', 1, 30, 0x04, 1, 'V', 'R'),
365: ('VARPA', 1, 30, 0x04, 1, 'V', 'R'),
366: ('STDEVA', 1, 30, 0x04, 1, 'V', 'R'),
367: ('VARA', 1, 30, 0x04, 1, 'V', 'R'),
368: ('BAHTTEXT', 1, 1, 0x02, 1, 'V', 'V'),
369: ('THAIDAYOFWEEK', 1, 1, 0x02, 1, 'V', 'V'),
370: ('THAIDIGIT', 1, 1, 0x02, 1, 'V', 'V'),
371: ('THAIMONTHOFYEAR', 1, 1, 0x02, 1, 'V', 'V'),
372: ('THAINUMSOUND', 1, 1, 0x02, 1, 'V', 'V'),
373: ('THAINUMSTRING', 1, 1, 0x02, 1, 'V', 'V'),
374: ('THAISTRINGLENGTH', 1, 1, 0x02, 1, 'V', 'V'),
375: ('ISTHAIDIGIT', 1, 1, 0x02, 1, 'V', 'V'),
376: ('ROUNDBAHTDOWN', 1, 1, 0x02, 1, 'V', 'V'),
377: ('ROUNDBAHTUP', 1, 1, 0x02, 1, 'V', 'V'),
378: ('THAIYEAR', 1, 1, 0x02, 1, 'V', 'V'),
379: ('RTD', 2, 5, 0x04, 1, 'V', 'V'),
}
# Human-readable names for the sub-opcodes of the tAttr (0x19) token.
tAttrNames = {
    0x00: "Skip??", # seen in SAMPLES.XLS which shipped with Excel 5.0
    0x01: "Volatile",
    0x02: "If",
    0x04: "Choose",
    0x08: "Skip",
    0x10: "Sum",
    0x20: "Assign",
    0x40: "Space",
    0x41: "SpaceVolatile",
    }
# Token opcodes that push an error operand onto the evaluation stack.
error_opcodes = set([0x07, 0x08, 0x0A, 0x0B, 0x1C, 0x1D, 0x2F])
# Coordinate combiners for the tRange (union of extents) and tIsect
# (intersection) operators; applied pairwise to the six box coordinates
# (lo bounds vs hi bounds) by do_box_funcs below.
tRangeFuncs = (min, max, min, max, min, max)
tIsectFuncs = (max, min, max, min, max, min)
def do_box_funcs(box_funcs, boxa, boxb):
    """Combine two cell boxes coordinate-wise.

    box_funcs is a 6-tuple of 2-argument callables (e.g. tRangeFuncs or
    tIsectFuncs); each is applied to the corresponding pair taken from
    boxa.coords and boxb.coords.  Returns the combined 6-tuple.
    """
    return tuple(
        combine(coord_a, coord_b)
        for combine, coord_a, coord_b
        in zip(box_funcs, boxa.coords, boxb.coords)
    )
def adjust_cell_addr_biff8(rowval, colval, reldelta, browx=None, bcolx=None):
    """Decode a BIFF8 cell address.

    In BIFF8 the relative-reference flags live in the top two bits of
    the *column* field.  Returns (rowx, colx, row_rel, col_rel).
    When reldelta is true, relative components are interpreted as
    signed offsets (sign-extended from 16/8 bits); otherwise they are
    rebased against the supplied anchor cell (browx, bcolx).
    """
    row_rel = (colval >> 15) & 1
    col_rel = (colval >> 14) & 1
    row_index = rowval
    col_index = colval & 0xff
    if reldelta:
        # Sign-extend relative components.
        if row_rel and row_index >= 32768:
            row_index -= 65536
        if col_rel and col_index >= 128:
            col_index -= 256
    else:
        # Rebase relative components against the anchor cell.
        if row_rel:
            row_index -= browx
        if col_rel:
            col_index -= bcolx
    return row_index, col_index, row_rel, col_rel
def adjust_cell_addr_biff_le7(
        rowval, colval, reldelta, browx=None, bcolx=None):
    """Decode a BIFF <= 7 cell address.

    Here the relative-reference flags live in the top two bits of the
    *row* field and the row index is only 14 bits wide.  Returns
    (rowx, colx, row_rel, col_rel); see adjust_cell_addr_biff8 for the
    reldelta semantics.
    """
    row_rel = (rowval >> 15) & 1
    col_rel = (rowval >> 14) & 1
    row_index = rowval & 0x3fff
    col_index = colval
    if reldelta:
        # Sign-extend relative components (14-bit row, 8-bit column).
        if row_rel and row_index >= 8192:
            row_index -= 16384
        if col_rel and col_index >= 128:
            col_index -= 256
    else:
        # Rebase relative components against the anchor cell.
        if row_rel:
            row_index -= browx
        if col_rel:
            col_index -= bcolx
    return row_index, col_index, row_rel, col_rel
def get_cell_addr(data, pos, bv, reldelta, browx=None, bcolx=None):
    """Unpack one cell address starting at data[pos].

    bv is the BIFF version: BIFF8+ stores <row:u16><col:u16>, earlier
    versions store <row:u16><col:u8>.  Returns the decoded
    (rowx, colx, row_rel, col_rel) tuple.
    """
    if bv >= 80:
        rowval, colval = unpack("<HH", data[pos:pos + 4])
        return adjust_cell_addr_biff8(rowval, colval, reldelta, browx, bcolx)
    rowval, colval = unpack("<HB", data[pos:pos + 3])
    return adjust_cell_addr_biff_le7(rowval, colval, reldelta, browx, bcolx)
def get_cell_range_addr(data, pos, bv, reldelta, browx=None, bcolx=None):
    """Unpack a cell-range (area) address starting at data[pos].

    BIFF8+ layout is <row1><row2><col1><col2> all u16; earlier versions
    use u8 columns.  Returns a pair of decoded corners, each a
    (rowx, colx, row_rel, col_rel) tuple.
    """
    if bv >= 80:
        row1val, row2val, col1val, col2val = unpack("<HHHH", data[pos:pos + 8])
        first = adjust_cell_addr_biff8(row1val, col1val, reldelta, browx, bcolx)
        second = adjust_cell_addr_biff8(row2val, col2val, reldelta, browx, bcolx)
    else:
        row1val, row2val, col1val, col2val = unpack("<HHBB", data[pos:pos + 6])
        first = adjust_cell_addr_biff_le7(
            row1val, col1val, reldelta, browx, bcolx)
        second = adjust_cell_addr_biff_le7(
            row2val, col2val, reldelta, browx, bcolx)
    return first, second
def get_externsheet_local_range(bk, refx, blah=0):
    """Translate EXTERNSHEET entry refx into a local sheet-index range.

    Returns (shx1, shx2): actual xlrd sheet indexes for a resolvable
    internal reference, or a pair of equal negative codes otherwise:
    -1 unspecified sheet, -2 deleted sheet(s), -3 macro sheet,
    -4 external workbook, -5 add-in functions,
    -101/-102 malformed input data.
    """
    try:
        entry = bk._externsheet_info[refx]
    except IndexError:
        print("!!! get_externsheet_local_range: refx=%d, not in range(%d)" \
            % (refx, len(bk._externsheet_info)), file=bk.logfile)
        return (-101, -101)
    recx, first_shx, last_shx = entry
    if recx == bk._supbook_addins_inx:
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> addins %r" % (refx, entry), file=bk.logfile)
        assert first_shx == 0xFFFE == last_shx
        return (-5, -5)
    if recx != bk._supbook_locals_inx:
        # Reference into another workbook.
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> external %r" % (refx, entry), file=bk.logfile)
        return (-4, -4)
    if first_shx == 0xFFFE == last_shx:
        # Internal reference, sheet not specified.
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> unspecified sheet %r" % (refx, entry), file=bk.logfile)
        return (-1, -1)
    if first_shx == 0xFFFF == last_shx:
        # Internal reference to sheet(s) that have been deleted.
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> deleted sheet(s)" % (refx, ), file=bk.logfile)
        return (-2, -2)
    sheet_count = len(bk._all_sheets_map)
    if not (0 <= first_shx <= last_shx < sheet_count):
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> %r" % (refx, entry), file=bk.logfile)
            print("--- first/last sheet not in range(%d)" % sheet_count, file=bk.logfile)
        return (-102, -102)
    lo = bk._all_sheets_map[first_shx]
    hi = bk._all_sheets_map[last_shx]
    if not (0 <= lo <= hi):
        # Maps to a macro/VBA module, not a worksheet.
        return (-3, -3)
    return lo, hi
def get_externsheet_local_range_b57(
        bk, raw_extshtx, ref_first_sheetx, ref_last_sheetx, blah=0):
    """BIFF5/7 analogue of get_externsheet_local_range.

    A positive raw_extshtx marks a reference into another workbook (-4);
    first/last both -1 mark deleted sheet(s) (-2).  Otherwise the raw
    sheet indexes are mapped through bk._all_sheets_map, yielding
    -103 for out-of-range input and -3 for a macro sheet.
    """
    if raw_extshtx > 0:
        if blah:
            print("/// get_externsheet_local_range_b57(raw_extshtx=%d) -> external" % raw_extshtx, file=bk.logfile)
        return (-4, -4)
    if ref_first_sheetx == -1 and ref_last_sheetx == -1:
        return (-2, -2)
    sheet_count = len(bk._all_sheets_map)
    if not (0 <= ref_first_sheetx <= ref_last_sheetx < sheet_count):
        if blah:
            print("/// get_externsheet_local_range_b57(%d, %d, %d) -> ???" \
                % (raw_extshtx, ref_first_sheetx, ref_last_sheetx), file=bk.logfile)
            print("--- first/last sheet not in range(%d)" % sheet_count, file=bk.logfile)
        return (-103, -103)
    lo = bk._all_sheets_map[ref_first_sheetx]
    hi = bk._all_sheets_map[ref_last_sheetx]
    if not (0 <= lo <= hi):
        # Maps to a macro/VBA module, not a worksheet.
        return (-3, -3)
    return lo, hi
class FormulaError(Exception):
    """Raised when a formula's token stream is malformed or contains a
    token that is invalid in the current context (e.g. a cell-only
    token inside a NAME formula)."""
    pass
##
# <p>Used in evaluating formulas.
# The following table describes the kinds and how their values
# are represented.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Kind symbol</th>
# <th>Kind number</th>
# <th>Value representation</th>
# </tr>
# <tr>
# <td>oBOOL</td>
# <td align="center">3</td>
# <td>integer: 0 => False; 1 => True</td>
# </tr>
# <tr>
# <td>oERR</td>
# <td align="center">4</td>
# <td>None, or an int error code (same as XL_CELL_ERROR in the Cell class).
# </td>
# </tr>
# <tr>
# <td>oMSNG</td>
# <td align="center">5</td>
# <td>Used by Excel as a placeholder for a missing (not supplied) function
# argument. Should *not* appear as a final formula result. Value is None.</td>
# </tr>
# <tr>
# <td>oNUM</td>
# <td align="center">2</td>
# <td>A float. Note that there is no way of distinguishing dates.</td>
# </tr>
# <tr>
# <td>oREF</td>
# <td align="center">-1</td>
# <td>The value is either None or a non-empty list of
# absolute Ref3D instances.<br>
# </td>
# </tr>
# <tr>
# <td>oREL</td>
# <td align="center">-2</td>
# <td>The value is None or a non-empty list of
# fully or partially relative Ref3D instances.
# </td>
# </tr>
# <tr>
# <td>oSTRG</td>
# <td align="center">1</td>
# <td>A Unicode string.</td>
# </tr>
# <tr>
# <td>oUNK</td>
# <td align="center">0</td>
# <td>The kind is unknown or ambiguous. The value is None</td>
# </tr>
# </table>
#<p></p>
class Operand(object):
    """One operand on the formula evaluator's stack.

    The class-level attributes double as defaults:

    value -- None means the operand's actual value is variable
        (depends on cell data), not a constant.
    kind -- one of the oXXX kind codes; oUNK means the kind is not
        known unambiguously.
    text -- reconstituted text of the original formula.  Function
        names are in English irrespective of the original language
        (which doesn't seem to be recorded anywhere), and the argument
        separator is "," rather than any locale-specific alternative.
    """
    value = None
    kind = oUNK
    text = '?'

    def __init__(self, akind=None, avalue=None, arank=0, atext='?'):
        # Override the class-level defaults only when the caller
        # actually supplied a value.
        if akind is not None:
            self.kind = akind
        if avalue is not None:
            self.value = avalue
        # rank is an internal gizmo (operator precedence); it's used
        # when reconstructing formula text.
        self.rank = arank
        self.text = atext

    def __repr__(self):
        kind_label = okind_dict.get(self.kind, "?Unknown kind?")
        return "Operand(kind=%s, value=%r, text=%r)" \
            % (kind_label, self.value, self.text)
##
# <p>Represents an absolute or relative 3-dimensional reference to a box
# of one or more cells.<br />
# -- New in version 0.6.0
# </p>
#
# <p>The <i>coords</i> attribute is a tuple of the form:<br />
# (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi)<br />
# where 0 <= thingxlo <= thingx < thingxhi.<br />
# Note that it is quite possible to have thingx > nthings; for example
# Print_Titles could have colxhi == 256 and/or rowxhi == 65536
# irrespective of how many columns/rows are actually used in the worksheet.
# The caller will need to decide how to handle this situation.
# Keyword: IndexError :-)
# </p>
#
# <p>The components of the coords attribute are also available as individual
# attributes: shtxlo, shtxhi, rowxlo, rowxhi, colxlo, and colxhi.</p>
#
# <p>The <i>relflags</i> attribute is a 6-tuple of flags which indicate whether
# the corresponding (sheet|row|col)(lo|hi) is relative (1) or absolute (0).<br>
# Note that there is necessarily no information available as to what cell(s)
# the reference could possibly be relative to. The caller must decide what if
# any use to make of oREL operands. Note also that a partially relative
# reference may well be a typo.
# For example, define name A1Z10 as $a$1:$z10 (missing $ after z)
# while the cursor is on cell Sheet3!A27.<br>
# The resulting Ref3D instance will have coords = (2, 3, 0, -16, 0, 26)
# and relflags = (0, 0, 0, 1, 0, 0).<br>
# So far, only one possibility of a sheet-relative component in
# a reference has been noticed: a 2D reference located in the "current sheet".
# <br /> This will appear as coords = (0, 1, ...) and relflags = (1, 1, ...).
class Ref3D(tuple):
    """An absolute or relative 3-dimensional reference to a box of one
    or more cells.

    coords is (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi) with
    half-open hi bounds; the components are also exposed as individual
    attributes.  relflags is a parallel 6-tuple in which 1 marks the
    corresponding component as relative and 0 as absolute.
    """

    def __init__(self, atuple):
        self.coords = atuple[0:6]
        # A plain 6-element coordinate tuple (no relflags) means a
        # fully absolute reference.
        self.relflags = atuple[6:12] or (0, 0, 0, 0, 0, 0)
        (self.shtxlo, self.shtxhi,
         self.rowxlo, self.rowxhi,
         self.colxlo, self.colxhi) = self.coords

    def __repr__(self):
        if not self.relflags or self.relflags == (0, 0, 0, 0, 0, 0):
            return "Ref3D(coords=%r)" % (self.coords, )
        return "Ref3D(coords=%r, relflags=%r)" \
            % (self.coords, self.relflags)
# Binary-operator token codes (BIFF "ptg" values).
tAdd = 0x03
tSub = 0x04
tMul = 0x05
tDiv = 0x06
tPower = 0x07
tConcat = 0x08
# The six comparison operators occupy the contiguous range 0x09..0x0E.
tLT, tLE, tEQ, tGE, tGT, tNE = range(0x09, 0x0F)
import operator as opr
def nop(x):
return x
def _opr_pow(x, y): return x ** y
def _opr_lt(x, y): return x < y
def _opr_le(x, y): return x <= y
def _opr_eq(x, y): return x == y
def _opr_ge(x, y): return x >= y
def _opr_gt(x, y): return x > y
def _opr_ne(x, y): return x != y
def num2strg(num):
    """Attempt to emulate Excel's default conversion from number to
    string: integral floats lose their trailing ".0"."""
    text = str(num)
    return text[:-2] if text.endswith(".0") else text
# Per-operand-kind coercion maps: given an operand's kind, yield the
# callable that converts its value for the operator class in question.
_arith_argdict = {oNUM: nop, oSTRG: float}
_cmp_argdict = {oNUM: nop, oSTRG: nop}
# Seems no conversions done on relops; in Excel, "1" > 9 produces TRUE.
_strg_argdict = {oNUM:num2strg, oSTRG:nop}
# Binary-operator dispatch: token code -> (argument-coercion map,
# result kind, implementation, precedence rank, display symbol).
binop_rules = {
    tAdd: (_arith_argdict, oNUM, opr.add, 30, '+'),
    tSub: (_arith_argdict, oNUM, opr.sub, 30, '-'),
    tMul: (_arith_argdict, oNUM, opr.mul, 40, '*'),
    tDiv: (_arith_argdict, oNUM, opr.truediv, 40, '/'),
    tPower: (_arith_argdict, oNUM, _opr_pow, 50, '^',),
    tConcat:(_strg_argdict, oSTRG, opr.add, 20, '&'),
    tLT: (_cmp_argdict, oBOOL, _opr_lt, 10, '<'),
    tLE: (_cmp_argdict, oBOOL, _opr_le, 10, '<='),
    tEQ: (_cmp_argdict, oBOOL, _opr_eq, 10, '='),
    tGE: (_cmp_argdict, oBOOL, _opr_ge, 10, '>='),
    tGT: (_cmp_argdict, oBOOL, _opr_gt, 10, '>'),
    tNE: (_cmp_argdict, oBOOL, _opr_ne, 10, '<>'),
    }
# Unary-operator dispatch: token code -> (implementation, precedence
# rank, prefix symbol, suffix symbol).
unop_rules = {
    0x13: (lambda x: -x, 70, '-', ''), # unary minus
    0x12: (lambda x: x, 70, '+', ''), # unary plus
    0x14: (lambda x: x / 100.0, 60, '', '%'),# percent
    }
# Precedence ranks assigned to leaf operands and function-call results.
LEAF_RANK = 90
FUNC_RANK = 90
# Recursion-depth guards for indirect NAME references: above ALARM the
# debug trace switches on; above PANIC evaluation aborts.
STACK_ALARM_LEVEL = 5
STACK_PANIC_LEVEL = 10
def evaluate_name_formula(bk, nobj, namex, blah=0, level=0):
    """Evaluate the formula attached to a defined NAME record.

    Decodes nobj.raw_formula (length nobj.basic_formula_len) as far as
    possible without cell data, then records the outcome on nobj:
    stack, result, any_rel, any_err, any_external and evaluated.

    bk -- Book object (supplies biff_version, name_obj_list, logfile, ...)
    nobj -- the Name object whose formula is evaluated
    namex -- index of nobj in bk.name_obj_list; used to detect
        self-referential names
    blah -- debug verbosity flag
    level -- recursion depth for indirect (tName/tNameX) references,
        guarded by STACK_ALARM_LEVEL and STACK_PANIC_LEVEL

    Raises FormulaError for malformed or contextually-invalid token
    streams, XLRDError for excessive indirection.
    """
    if level > STACK_ALARM_LEVEL:
        blah = 1
    data = nobj.raw_formula
    fmlalen = nobj.basic_formula_len
    bv = bk.biff_version
    reldelta = 1 # All defined name formulas use "Method B" [OOo docs]
    if blah:
        print("::: evaluate_name_formula %r %r %d %d %r level=%d" \
            % (namex, nobj.name, fmlalen, bv, data, level), file=bk.logfile)
        hex_char_dump(data, 0, fmlalen, fout=bk.logfile)
    if level > STACK_PANIC_LEVEL:
        raise XLRDError("Excessive indirect references in NAME formula")
    sztab = szdict[bv]
    pos = 0
    stack = []
    any_rel = 0
    any_err = 0
    any_external = 0
    unk_opnd = Operand(oUNK, None)
    error_opnd = Operand(oERR, None)
    spush = stack.append

    def do_binop(opcd, stk):
        # Pop two operands, apply the binary operator, push the result.
        assert len(stk) >= 2
        bop = stk.pop()
        aop = stk.pop()
        argdict, result_kind, func, rank, sym = binop_rules[opcd]
        otext = ''.join([
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym,
            '('[:bop.rank < rank],
            bop.text,
            ')'[:bop.rank < rank],
            ])
        resop = Operand(result_kind, None, rank, otext)
        try:
            bconv = argdict[bop.kind]
            aconv = argdict[aop.kind]
        except KeyError:
            # Operand kind unsupported for this operator: value unknown.
            stk.append(resop)
            return
        if bop.value is None or aop.value is None:
            # Non-constant operand: only the text can be reconstructed.
            stk.append(resop)
            return
        bval = bconv(bop.value)
        aval = aconv(aop.value)
        result = func(aval, bval)
        if result_kind == oBOOL:
            result = 1 if result else 0
        resop.value = result
        stk.append(resop)

    def do_unaryop(opcode, result_kind, stk):
        # Pop one operand, apply the unary operator, push the result.
        assert len(stk) >= 1
        aop = stk.pop()
        val = aop.value
        func, rank, sym1, sym2 = unop_rules[opcode]
        otext = ''.join([
            sym1,
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym2,
            ])
        if val is not None:
            val = func(val)
        stk.append(Operand(result_kind, val, rank, otext))

    def not_in_name_formula(op_arg, oname_arg):
        # Token is only valid in cell formulas, never in NAME formulas.
        msg = "ERROR *** Token 0x%02x (%s) found in NAME formula" \
            % (op_arg, oname_arg)
        raise FormulaError(msg)

    if fmlalen == 0:
        stack = [unk_opnd]
    while 0 <= pos < fmlalen:
        op = BYTES_ORD(data[pos])
        opcode = op & 0x1f
        optype = (op & 0x60) >> 5
        if optype:
            opx = opcode + 32
        else:
            opx = opcode
        oname = onames[opx] # + [" RVA"][optype]
        sz = sztab[opx]
        if blah:
            print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \
                % (pos, op, oname, sz, opcode, optype), file=bk.logfile)
            print("Stack =", stack, file=bk.logfile)
        if sz == -2:
            msg = 'ERROR *** Unexpected token 0x%02x ("%s"); biff_version=%d' \
                % (op, oname, bv)
            raise FormulaError(msg)
        if not optype:
            # --- Base tokens (operand-class bits clear) ---
            if 0x00 <= opcode <= 0x02: # unk_opnd, tExp, tTbl
                not_in_name_formula(op, oname)
            elif 0x03 <= opcode <= 0x0E:
                # Add, Sub, Mul, Div, Power
                # tConcat
                # tLT, ..., tNE
                do_binop(opcode, stack)
            elif opcode == 0x0F: # tIsect
                if blah: print("tIsect pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ' '
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF)
                res.text = otext
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind == oUNK or aop.kind == oUNK:
                    # This can happen with undefined
                    # (go search in the current sheet) labels.
                    # For example =Bob Sales
                    # Each label gets a NAME record with an empty formula (!)
                    # Evaluation of the tName token classifies it as oUNK
                    # res.kind = oREF
                    pass
                elif bop.kind == oREF == aop.kind:
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tIsectFuncs, aop.value[0], bop.value[0])
                        res.value = [Ref3D(coords)]
                elif bop.kind == oREL == aop.kind:
                    res.kind = oREL
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tIsectFuncs, aop.value[0], bop.value[0])
                        relfa = aop.value[0].relflags
                        relfb = bop.value[0].relflags
                        if relfa == relfb:
                            res.value = [Ref3D(coords + relfa)]
                else:
                    pass
                spush(res)
                if blah: print("tIsect post", stack, file=bk.logfile)
            elif opcode == 0x10: # tList
                if blah: print("tList pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ','
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind in (oREF, oREL) and aop.kind in (oREF, oREL):
                    res.kind = oREF
                    if aop.kind == oREL or bop.kind == oREL:
                        res.kind = oREL
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) >= 1
                        assert len(bop.value) == 1
                        res.value = aop.value + bop.value
                else:
                    pass
                spush(res)
                if blah: print("tList post", stack, file=bk.logfile)
            elif opcode == 0x11: # tRange
                if blah: print("tRange pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ':'
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    # BUGFIX: was `res = oERR`, which pushed a bare int
                    # onto the stack instead of an Operand; set the kind
                    # as the tIsect and tList handlers do.
                    res.kind = oERR
                elif bop.kind == oREF == aop.kind:
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tRangeFuncs, aop.value[0], bop.value[0])
                        res.value = [Ref3D(coords)]
                elif bop.kind == oREL == aop.kind:
                    res.kind = oREL
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tRangeFuncs, aop.value[0], bop.value[0])
                        relfa = aop.value[0].relflags
                        relfb = bop.value[0].relflags
                        if relfa == relfb:
                            res.value = [Ref3D(coords + relfa)]
                else:
                    pass
                spush(res)
                if blah: print("tRange post", stack, file=bk.logfile)
            elif 0x12 <= opcode <= 0x14: # tUplus, tUminus, tPercent
                do_unaryop(opcode, oNUM, stack)
            elif opcode == 0x15: # tParen
                # source cosmetics
                pass
            elif opcode == 0x16: # tMissArg
                spush(Operand(oMSNG, None, LEAF_RANK, ''))
            elif opcode == 0x17: # tStr
                if bv <= 70:
                    strg, newpos = unpack_string_update_pos(
                        data, pos+1, bk.encoding, lenlen=1)
                else:
                    strg, newpos = unpack_unicode_update_pos(
                        data, pos+1, lenlen=1)
                sz = newpos - pos
                if blah: print("  sz=%d strg=%r" % (sz, strg), file=bk.logfile)
                text = '"' + strg.replace('"', '""') + '"'
                spush(Operand(oSTRG, strg, LEAF_RANK, text))
            elif opcode == 0x18: # tExtended
                # new with BIFF 8
                assert bv >= 80
                # not in OOo docs
                raise FormulaError("tExtended token not implemented")
            elif opcode == 0x19: # tAttr
                subop, nc = unpack("<BH", data[pos+1:pos+4])
                subname = tAttrNames.get(subop, "??Unknown??")
                if subop == 0x04: # Choose
                    sz = nc * 2 + 6
                elif subop == 0x10: # Sum (single arg)
                    sz = 4
                    if blah: print("tAttrSum", stack, file=bk.logfile)
                    assert len(stack) >= 1
                    aop = stack[-1]
                    otext = 'SUM(%s)' % aop.text
                    stack[-1] = Operand(oNUM, None, FUNC_RANK, otext)
                else:
                    sz = 4
                if blah:
                    print("  subop=%02xh subname=t%s sz=%d nc=%02xh" \
                        % (subop, subname, sz, nc), file=bk.logfile)
            elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet
                assert bv < 50
                raise FormulaError("tSheet & tEndsheet tokens not implemented")
            elif 0x1C <= opcode <= 0x1F: # tErr, tBool, tInt, tNum
                inx = opcode - 0x1C
                nb = [1, 1, 2, 8][inx]
                kind = [oERR, oBOOL, oNUM, oNUM][inx]
                value, = unpack("<" + "BBHd"[inx], data[pos+1:pos+1+nb])
                if inx == 2: # tInt
                    value = float(value)
                    text = str(value)
                elif inx == 3: # tNum
                    text = str(value)
                elif inx == 1: # tBool
                    text = ('FALSE', 'TRUE')[value]
                else:
                    text = '"' + error_text_from_code[value] + '"'
                spush(Operand(kind, value, LEAF_RANK, text))
            else:
                raise FormulaError("Unhandled opcode: 0x%02x" % opcode)
            if sz <= 0:
                raise FormulaError("Size not set for opcode 0x%02x" % opcode)
            pos += sz
            continue
        # --- Classified tokens (operand-class bits set) ---
        if opcode == 0x00: # tArray
            spush(unk_opnd)
        elif opcode == 0x01: # tFunc
            nb = 1 + int(bv >= 40)
            funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
            func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFunc unknown FuncID:%d" \
                      % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, nargs = func_attrs[:2]
                if blah:
                    print("    FuncID=%d name=%s nargs=%d" \
                          % (funcx, func_name, nargs), file=bk.logfile)
                assert len(stack) >= nargs
                if nargs:
                    argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                    otext = "%s(%s)" % (func_name, argtext)
                    del stack[-nargs:]
                else:
                    otext = func_name + "()"
                res = Operand(oUNK, None, FUNC_RANK, otext)
                spush(res)
        elif opcode == 0x02: #tFuncVar
            nb = 1 + int(bv >= 40)
            nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
            prompt, nargs = divmod(nargs, 128)
            macro, funcx = divmod(funcx, 32768)
            if blah:
                print("   FuncID=%d nargs=%d macro=%d prompt=%d" \
                      % (funcx, nargs, macro, prompt), file=bk.logfile)
            func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFuncVar unknown FuncID:%d" \
                      % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, minargs, maxargs = func_attrs[:3]
                if blah:
                    print("    name: %r, min~max args: %d~%d" \
                        % (func_name, minargs, maxargs), file=bk.logfile)
                assert minargs <= nargs <= maxargs
                assert len(stack) >= nargs
                argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                otext = "%s(%s)" % (func_name, argtext)
                res = Operand(oUNK, None, FUNC_RANK, otext)
                if funcx == 1: # IF
                    # Constant-fold IF when the test is a known 0/1.
                    testarg = stack[-nargs]
                    if testarg.kind not in (oNUM, oBOOL):
                        if blah and testarg.kind != oUNK:
                            print("IF testarg kind?", file=bk.logfile)
                    elif testarg.value not in (0, 1):
                        if blah and testarg.value is not None:
                            print("IF testarg value?", file=bk.logfile)
                    else:
                        if nargs == 2 and not testarg.value:
                            # IF(FALSE, tv) => FALSE
                            res.kind, res.value = oBOOL, 0
                        else:
                            respos = -nargs + 2 - int(testarg.value)
                            chosen = stack[respos]
                            if chosen.kind == oMSNG:
                                res.kind, res.value = oNUM, 0
                            else:
                                res.kind, res.value = chosen.kind, chosen.value
                        if blah:
                            print("$$$$$$ IF => constant", file=bk.logfile)
                elif funcx == 100: # CHOOSE
                    # Constant-fold CHOOSE when the index is a known number.
                    testarg = stack[-nargs]
                    if testarg.kind == oNUM:
                        if 1 <= testarg.value < nargs:
                            chosen = stack[-nargs + int(testarg.value)]
                            if chosen.kind == oMSNG:
                                res.kind, res.value = oNUM, 0
                            else:
                                res.kind, res.value = chosen.kind, chosen.value
                del stack[-nargs:]
                spush(res)
        elif opcode == 0x03: #tName
            tgtnamex = unpack("<H", data[pos+1:pos+3])[0] - 1
            # Only change with BIFF version is number of trailing UNUSED bytes!
            if blah: print("   tgtnamex=%d" % tgtnamex, file=bk.logfile)
            tgtobj = bk.name_obj_list[tgtnamex]
            if not tgtobj.evaluated:
                ### recursive ###
                evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1)
            if tgtobj.macro or tgtobj.binary \
            or tgtobj.any_err:
                if blah:
                    tgtobj.dump(
                        bk.logfile,
                        header="!!! tgtobj has problems!!!",
                        footer="----------- --------",
                        )
                res = Operand(oUNK, None)
                any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err
                any_rel = any_rel or tgtobj.any_rel
            else:
                assert len(tgtobj.stack) == 1
                res = copy.deepcopy(tgtobj.stack[0])
            res.rank = LEAF_RANK
            if tgtobj.scope == -1:
                res.text = tgtobj.name
            else:
                res.text = "%s!%s" \
                           % (bk._sheet_names[tgtobj.scope], tgtobj.name)
            if blah:
                print("    tName: setting text to", repr(res.text), file=bk.logfile)
            spush(res)
        elif opcode == 0x04: # tRef
            # not_in_name_formula(op, oname)
            res = get_cell_addr(data, pos+1, bv, reldelta)
            if blah: print("  ", res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            shx1 = shx2 = 0 ####### N.B. relative to the CURRENT SHEET
            any_rel = 1
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if optype == 1:
                relflags = (1, 1, row_rel, row_rel, col_rel, col_rel)
                res = Operand(oREL, [Ref3D(coords + relflags)])
            spush(res)
        elif opcode == 0x05: # tArea
            # not_in_name_formula(op, oname)
            res1, res2 = get_cell_range_addr(data, pos+1, bv, reldelta)
            if blah: print("  ", res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            shx1 = shx2 = 0 ####### N.B. relative to the CURRENT SHEET
            any_rel = 1
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if optype == 1:
                relflags = (1, 1, row_rel1, row_rel2, col_rel1, col_rel2)
                res = Operand(oREL, [Ref3D(coords + relflags)])
            spush(res)
        elif opcode == 0x06: # tMemArea
            not_in_name_formula(op, oname)
        elif opcode == 0x09: # tMemFunc
            nb = unpack("<H", data[pos+1:pos+3])[0]
            if blah: print("  %d bytes of cell ref formula" % nb, file=bk.logfile)
            # no effect on stack
        elif opcode == 0x0C: #tRefN
            not_in_name_formula(op, oname)
            # res = get_cell_addr(data, pos+1, bv, reldelta=1)
            # # note *ALL* tRefN usage has signed offset for relative addresses
            # any_rel = 1
            # if blah: print >> bk.logfile, "  ", res
            # spush(res)
        elif opcode == 0x0D: #tAreaN
            not_in_name_formula(op, oname)
            # res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
            # # note *ALL* tAreaN usage has signed offset for relative addresses
            # any_rel = 1
            # if blah: print >> bk.logfile, "  ", res
        elif opcode == 0x1A: # tRef3d
            if bv >= 80:
                res = get_cell_addr(data, pos+3, bv, reldelta)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res = get_cell_addr(data, pos+15, bv, reldelta)
                raw_extshtx, raw_shx1, raw_shx2 = \
                    unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tRef3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                    bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            any_err |= shx1 < -1
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel, row_rel, col_rel, col_rel)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, r1c1=1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            if optype == 1:
                res.value = [ref3d]
            spush(res)
        elif opcode == 0x1B: # tArea3d
            if bv >= 80:
                res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res1, res2 = get_cell_range_addr(data, pos+15, bv, reldelta)
                raw_extshtx, raw_shx1, raw_shx2 = \
                    unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tArea3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                    bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            any_err |= shx1 < -1
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            is_rel = row_rel1 or col_rel1 or row_rel2 or col_rel2
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel1, row_rel2, col_rel1, col_rel2)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, r1c1=1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            if optype == 1:
                res.value = [ref3d]
            spush(res)
        elif opcode == 0x19: # tNameX
            dodgy = 0
            res = Operand(oUNK, None)
            if bv >= 80:
                refx, tgtnamex = unpack("<HH", data[pos+1:pos+5])
                tgtnamex -= 1
                origrefx = refx
            else:
                refx, tgtnamex = unpack("<hxxxxxxxxH", data[pos+1:pos+13])
                tgtnamex -= 1
                origrefx = refx
                if refx > 0:
                    refx -= 1
                elif refx < 0:
                    refx = -refx - 1
                else:
                    dodgy = 1
            if blah:
                print("   origrefx=%d refx=%d tgtnamex=%d dodgy=%d" \
                    % (origrefx, refx, tgtnamex, dodgy), file=bk.logfile)
            if tgtnamex == namex:
                if blah: print("!!!! Self-referential !!!!", file=bk.logfile)
                dodgy = any_err = 1
            if not dodgy:
                if bv >= 80:
                    shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
                elif origrefx > 0:
                    shx1, shx2 = (-4, -4) # external ref
                else:
                    exty = bk._externsheet_type_b57[refx]
                    if exty == 4: # non-specific sheet in own doc't
                        shx1, shx2 = (-1, -1) # internal, any sheet
                    else:
                        shx1, shx2 = (-666, -666)
            if dodgy or shx1 < -1:
                otext = "<<Name #%d in external(?) file #%d>>" \
                        % (tgtnamex, origrefx)
                res = Operand(oUNK, None, LEAF_RANK, otext)
            else:
                tgtobj = bk.name_obj_list[tgtnamex]
                if not tgtobj.evaluated:
                    ### recursive ###
                    evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1)
                if tgtobj.macro or tgtobj.binary \
                or tgtobj.any_err:
                    if blah:
                        tgtobj.dump(
                            bk.logfile,
                            header="!!! bad tgtobj !!!",
                            footer="------------------",
                            )
                    res = Operand(oUNK, None)
                    any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err
                    any_rel = any_rel or tgtobj.any_rel
                else:
                    assert len(tgtobj.stack) == 1
                    res = copy.deepcopy(tgtobj.stack[0])
                res.rank = LEAF_RANK
                if tgtobj.scope == -1:
                    res.text = tgtobj.name
                else:
                    res.text = "%s!%s" \
                               % (bk._sheet_names[tgtobj.scope], tgtobj.name)
                if blah:
                    print("    tNameX: setting text to", repr(res.text), file=bk.logfile)
            spush(res)
        elif opcode in error_opcodes:
            any_err = 1
            spush(error_opnd)
        else:
            if blah:
                print("FORMULA: /// Not handled yet: t" + oname, file=bk.logfile)
            any_err = 1
        if sz <= 0:
            raise FormulaError("Fatal: token size is not positive")
        pos += sz
    any_rel = not not any_rel
    if blah:
        fprintf(bk.logfile, "End of formula. level=%d any_rel=%d any_err=%d stack=%r\n",
            level, not not any_rel, any_err, stack)
        if len(stack) >= 2:
            print("*** Stack has unprocessed args", file=bk.logfile)
        print(file=bk.logfile)
    nobj.stack = stack
    if len(stack) != 1:
        nobj.result = None
    else:
        nobj.result = stack[0]
    nobj.any_rel = any_rel
    nobj.any_err = any_err
    nobj.any_external = any_external
    nobj.evaluated = 1
#### under construction #############################################################################
def decompile_formula(bk, fmla, fmlalen,
    fmlatype=None, browx=None, bcolx=None,
    blah=0, level=0, r1c1=0):
    """
    Decompile a BIFF RPN formula token stream into formula text.

    bk       -- Book object; supplies biff_version, logfile, name lists, etc.
    fmla     -- raw bytes of the formula.
    fmlalen  -- number of bytes of fmla to process.
    fmlatype -- one of the FMLA_TYPE_* constants; controls which tokens are
                legal and whether relative references use delta encoding.
    browx, bcolx -- base row / column for resolving relative references
                (None when there is no base cell, e.g. for defined names).
    blah     -- debug verbosity flag.
    level    -- recursion depth; guards against runaway indirect references.
    r1c1     -- non-zero to render references in R1C1 notation.

    Returns the formula text (str), or None when the token stream could not
    be reduced to a single result operand.
    """
    if level > STACK_ALARM_LEVEL:
        blah = 1
    reldelta = fmlatype in (FMLA_TYPE_SHARED, FMLA_TYPE_NAME, FMLA_TYPE_COND_FMT, FMLA_TYPE_DATA_VAL)
    data = fmla
    bv = bk.biff_version
    if blah:
        print("::: decompile_formula len=%d fmlatype=%r browx=%r bcolx=%r reldelta=%d %r level=%d" \
            % (fmlalen, fmlatype, browx, bcolx, reldelta, data, level), file=bk.logfile)
        hex_char_dump(data, 0, fmlalen, fout=bk.logfile)
    if level > STACK_PANIC_LEVEL:
        raise XLRDError("Excessive indirect references in formula")
    sztab = szdict[bv]
    pos = 0
    stack = []
    any_rel = 0
    any_err = 0
    any_external = 0
    unk_opnd = Operand(oUNK, None)
    error_opnd = Operand(oERR, None)
    spush = stack.append

    def do_binop(opcd, stk):
        # Pop two operands; emit "a <sym> b", parenthesising an operand only
        # when its rank is lower than the operator's rank.
        # NB: '('[:cond] yields '(' when cond is true (1) and '' when false (0).
        assert len(stk) >= 2
        bop = stk.pop()
        aop = stk.pop()
        argdict, result_kind, func, rank, sym = binop_rules[opcd]
        otext = ''.join([
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym,
            '('[:bop.rank < rank],
            bop.text,
            ')'[:bop.rank < rank],
            ])
        resop = Operand(result_kind, None, rank, otext)
        stk.append(resop)

    def do_unaryop(opcode, result_kind, stk):
        # Pop one operand; wrap it with the operator's prefix/suffix symbols.
        assert len(stk) >= 1
        aop = stk.pop()
        func, rank, sym1, sym2 = unop_rules[opcode]
        otext = ''.join([
            sym1,
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym2,
            ])
        stk.append(Operand(result_kind, None, rank, otext))

    def unexpected_opcode(op_arg, oname_arg):
        # Report (but do not raise on) a token that is illegal for fmlatype.
        msg = "ERROR *** Unexpected token 0x%02x (%s) found in formula type %s" \
            % (op_arg, oname_arg, FMLA_TYPEDESCR_MAP[fmlatype])
        print(msg, file=bk.logfile)
        # raise FormulaError(msg)

    if fmlalen == 0:
        stack = [unk_opnd]

    while 0 <= pos < fmlalen:
        op = BYTES_ORD(data[pos])
        opcode = op & 0x1f
        optype = (op & 0x60) >> 5
        if optype:
            opx = opcode + 32
        else:
            opx = opcode
        oname = onames[opx] # + [" RVA"][optype]
        sz = sztab[opx]
        if blah:
            print("Pos:%d Op:0x%02x opname:t%s Sz:%d opcode:%02xh optype:%02xh" \
                % (pos, op, oname, sz, opcode, optype), file=bk.logfile)
            print("Stack =", stack, file=bk.logfile)
        if sz == -2:
            msg = 'ERROR *** Unexpected token 0x%02x ("%s"); biff_version=%d' \
                % (op, oname, bv)
            raise FormulaError(msg)
        if _TOKEN_NOT_ALLOWED(opx, 0) & fmlatype:
            unexpected_opcode(op, oname)
        if not optype:
            # ---- control / base tokens (optype == 0) ----
            if opcode <= 0x01: # tExp
                if bv >= 30:
                    fmt = '<x2H'
                else:
                    fmt = '<xHB'
                assert pos == 0 and fmlalen == sz and not stack
                rowx, colx = unpack(fmt, data)
                text = "SHARED FMLA at rowx=%d colx=%d" % (rowx, colx)
                spush(Operand(oUNK, None, LEAF_RANK, text))
                if not fmlatype & (FMLA_TYPE_CELL | FMLA_TYPE_ARRAY):
                    unexpected_opcode(op, oname)
            elif 0x03 <= opcode <= 0x0E:
                # Add, Sub, Mul, Div, Power
                # tConcat
                # tLT, ..., tNE
                do_binop(opcode, stack)
            elif opcode == 0x0F: # tIsect
                if blah: print("tIsect pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ' '
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF)
                res.text = otext
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind == oUNK or aop.kind == oUNK:
                    # This can happen with undefined
                    # (go search in the current sheet) labels.
                    # For example =Bob Sales
                    # Each label gets a NAME record with an empty formula (!)
                    # Evaluation of the tName token classifies it as oUNK
                    # res.kind = oREF
                    pass
                elif bop.kind == oREF == aop.kind:
                    pass
                elif bop.kind == oREL == aop.kind:
                    res.kind = oREL
                else:
                    pass
                spush(res)
                if blah: print("tIsect post", stack, file=bk.logfile)
            elif opcode == 0x10: # tList
                if blah: print("tList pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ','
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind in (oREF, oREL) and aop.kind in (oREF, oREL):
                    res.kind = oREF
                    if aop.kind == oREL or bop.kind == oREL:
                        res.kind = oREL
                else:
                    pass
                spush(res)
                if blah: print("tList post", stack, file=bk.logfile)
            elif opcode == 0x11: # tRange
                if blah: print("tRange pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ':'
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    # FIX: was `res = oERR`, which pushed the bare int
                    # constant onto the stack; `stack[0].text` at the end of
                    # this function would then raise AttributeError.  Mark
                    # the operand's kind instead (as tIsect/tList do).
                    res.kind = oERR
                elif bop.kind == oREF == aop.kind:
                    pass
                else:
                    pass
                spush(res)
                if blah: print("tRange post", stack, file=bk.logfile)
            elif 0x12 <= opcode <= 0x14: # tUplus, tUminus, tPercent
                do_unaryop(opcode, oNUM, stack)
            elif opcode == 0x15: # tParen
                # source cosmetics
                pass
            elif opcode == 0x16: # tMissArg
                spush(Operand(oMSNG, None, LEAF_RANK, ''))
            elif opcode == 0x17: # tStr
                if bv <= 70:
                    strg, newpos = unpack_string_update_pos(
                        data, pos+1, bk.encoding, lenlen=1)
                else:
                    strg, newpos = unpack_unicode_update_pos(
                        data, pos+1, lenlen=1)
                sz = newpos - pos
                if blah: print(" sz=%d strg=%r" % (sz, strg), file=bk.logfile)
                # Excel-escape embedded double quotes.
                text = '"' + strg.replace('"', '""') + '"'
                spush(Operand(oSTRG, None, LEAF_RANK, text))
            elif opcode == 0x18: # tExtended
                # new with BIFF 8
                assert bv >= 80
                # not in OOo docs, don't even know how to determine its length
                raise FormulaError("tExtended token not implemented")
            elif opcode == 0x19: # tAttr
                subop, nc = unpack("<BH", data[pos+1:pos+4])
                subname = tAttrNames.get(subop, "??Unknown??")
                if subop == 0x04: # Choose
                    sz = nc * 2 + 6
                elif subop == 0x10: # Sum (single arg)
                    sz = 4
                    if blah: print("tAttrSum", stack, file=bk.logfile)
                    assert len(stack) >= 1
                    aop = stack[-1]
                    otext = 'SUM(%s)' % aop.text
                    stack[-1] = Operand(oNUM, None, FUNC_RANK, otext)
                else:
                    sz = 4
                if blah:
                    print(" subop=%02xh subname=t%s sz=%d nc=%02xh" \
                        % (subop, subname, sz, nc), file=bk.logfile)
            elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet
                assert bv < 50
                raise FormulaError("tSheet & tEndsheet tokens not implemented")
            elif 0x1C <= opcode <= 0x1F: # tErr, tBool, tInt, tNum
                inx = opcode - 0x1C
                nb = [1, 1, 2, 8][inx]
                kind = [oERR, oBOOL, oNUM, oNUM][inx]
                value, = unpack("<" + "BBHd"[inx], data[pos+1:pos+1+nb])
                if inx == 2: # tInt
                    value = float(value)
                    text = str(value)
                elif inx == 3: # tNum
                    text = str(value)
                elif inx == 1: # tBool
                    text = ('FALSE', 'TRUE')[value]
                else:
                    text = '"' +error_text_from_code[value] + '"'
                spush(Operand(kind, None, LEAF_RANK, text))
            else:
                raise FormulaError("Unhandled opcode: 0x%02x" % opcode)
            if sz <= 0:
                raise FormulaError("Size not set for opcode 0x%02x" % opcode)
            pos += sz
            continue
        # ---- operand-class tokens (optype != 0) ----
        if opcode == 0x00: # tArray
            spush(unk_opnd)
        elif opcode == 0x01: # tFunc
            nb = 1 + int(bv >= 40)
            funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
            func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFunc unknown FuncID:%d" % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, nargs = func_attrs[:2]
                if blah:
                    print(" FuncID=%d name=%s nargs=%d" \
                          % (funcx, func_name, nargs), file=bk.logfile)
                assert len(stack) >= nargs
                if nargs:
                    argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                    otext = "%s(%s)" % (func_name, argtext)
                    del stack[-nargs:]
                else:
                    otext = func_name + "()"
                res = Operand(oUNK, None, FUNC_RANK, otext)
                spush(res)
        elif opcode == 0x02: #tFuncVar
            nb = 1 + int(bv >= 40)
            nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
            prompt, nargs = divmod(nargs, 128)
            macro, funcx = divmod(funcx, 32768)
            if blah:
                print(" FuncID=%d nargs=%d macro=%d prompt=%d" \
                      % (funcx, nargs, macro, prompt), file=bk.logfile)
            #### TODO #### if funcx == 255: # call add-in function
            if funcx == 255:
                func_attrs = ("CALL_ADDIN", 1, 30)
            else:
                func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFuncVar unknown FuncID:%d" \
                      % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, minargs, maxargs = func_attrs[:3]
                if blah:
                    print(" name: %r, min~max args: %d~%d" \
                        % (func_name, minargs, maxargs), file=bk.logfile)
                assert minargs <= nargs <= maxargs
                # FIX: removed a duplicated `assert len(stack) >= nargs`.
                assert len(stack) >= nargs
                argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                otext = "%s(%s)" % (func_name, argtext)
                res = Operand(oUNK, None, FUNC_RANK, otext)
                del stack[-nargs:]
                spush(res)
        elif opcode == 0x03: #tName
            tgtnamex = unpack("<H", data[pos+1:pos+3])[0] - 1
            # Only change with BIFF version is number of trailing UNUSED bytes!
            if blah: print(" tgtnamex=%d" % tgtnamex, file=bk.logfile)
            tgtobj = bk.name_obj_list[tgtnamex]
            if tgtobj.scope == -1:
                otext = tgtobj.name
            else:
                otext = "%s!%s" % (bk._sheet_names[tgtobj.scope], tgtobj.name)
            if blah:
                print(" tName: setting text to", repr(otext), file=bk.logfile)
            res = Operand(oUNK, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x04: # tRef
            res = get_cell_addr(data, pos+1, bv, reldelta, browx, bcolx)
            if blah: print(" ", res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            if is_rel:
                okind = oREL
            else:
                okind = oREF
            otext = cellnamerel(rowx, colx, row_rel, col_rel, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x05: # tArea
            res1, res2 = get_cell_range_addr(
                data, pos+1, bv, reldelta, browx, bcolx)
            if blah: print(" ", res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            coords = (rowx1, rowx2+1, colx1, colx2+1)
            relflags = (row_rel1, row_rel2, col_rel1, col_rel2)
            if sum(relflags): # relative
                okind = oREL
            else:
                okind = oREF
            if blah: print(" ", coords, relflags, file=bk.logfile)
            otext = rangename2drel(coords, relflags, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x06: # tMemArea
            not_in_name_formula(op, oname)
        elif opcode == 0x09: # tMemFunc
            nb = unpack("<H", data[pos+1:pos+3])[0]
            if blah: print(" %d bytes of cell ref formula" % nb, file=bk.logfile)
            # no effect on stack
        elif opcode == 0x0C: #tRefN
            res = get_cell_addr(data, pos+1, bv, reldelta, browx, bcolx)
            # note *ALL* tRefN usage has signed offset for relative addresses
            any_rel = 1
            if blah: print(" ", res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            if is_rel:
                okind = oREL
            else:
                okind = oREF
            otext = cellnamerel(rowx, colx, row_rel, col_rel, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x0D: #tAreaN
            # res = get_cell_range_addr(data, pos+1, bv, reldelta, browx, bcolx)
            # # note *ALL* tAreaN usage has signed offset for relative addresses
            # any_rel = 1
            # if blah: print >> bk.logfile, " ", res
            res1, res2 = get_cell_range_addr(
                data, pos+1, bv, reldelta, browx, bcolx)
            if blah: print(" ", res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            coords = (rowx1, rowx2+1, colx1, colx2+1)
            relflags = (row_rel1, row_rel2, col_rel1, col_rel2)
            if sum(relflags): # relative
                okind = oREL
            else:
                okind = oREF
            if blah: print(" ", coords, relflags, file=bk.logfile)
            otext = rangename2drel(coords, relflags, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x1A: # tRef3d
            if bv >= 80:
                res = get_cell_addr(data, pos+3, bv, reldelta, browx, bcolx)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res = get_cell_addr(data, pos+15, bv, reldelta, browx, bcolx)
                raw_extshtx, raw_shx1, raw_shx2 = \
                    unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tRef3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                    bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            any_err |= shx1 < -1
            if blah: print(" ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel, row_rel, col_rel, col_rel)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, browx, bcolx, r1c1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            res.value = None
            spush(res)
        elif opcode == 0x1B: # tArea3d
            if bv >= 80:
                res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res1, res2 = get_cell_range_addr(data, pos+15, bv, reldelta)
                raw_extshtx, raw_shx1, raw_shx2 = \
                    unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tArea3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                    bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            any_err |= shx1 < -1
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            is_rel = row_rel1 or col_rel1 or row_rel2 or col_rel2
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print(" ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel1, row_rel2, col_rel1, col_rel2)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, browx, bcolx, r1c1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            spush(res)
        elif opcode == 0x19: # tNameX
            dodgy = 0
            res = Operand(oUNK, None)
            if bv >= 80:
                refx, tgtnamex = unpack("<HH", data[pos+1:pos+5])
                tgtnamex -= 1
                origrefx = refx
            else:
                refx, tgtnamex = unpack("<hxxxxxxxxH", data[pos+1:pos+13])
                tgtnamex -= 1
                origrefx = refx
                if refx > 0:
                    refx -= 1
                elif refx < 0:
                    refx = -refx - 1
                else:
                    dodgy = 1
            if blah:
                print(" origrefx=%d refx=%d tgtnamex=%d dodgy=%d" \
                    % (origrefx, refx, tgtnamex, dodgy), file=bk.logfile)
            # if tgtnamex == namex:
            #     if blah: print >> bk.logfile, "!!!! Self-referential !!!!"
            #     dodgy = any_err = 1
            # FIX: shx1/shx2 were previously left unbound when dodgy was set,
            # so the `shx1 == -5` test below could raise NameError (or read a
            # stale value from an earlier token).  Default to the "unknown"
            # sentinel; the `elif dodgy` branch then handles that case.
            shx1, shx2 = (-666, -666)
            if not dodgy:
                if bv >= 80:
                    shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
                elif origrefx > 0:
                    shx1, shx2 = (-4, -4) # external ref
                else:
                    exty = bk._externsheet_type_b57[refx]
                    if exty == 4: # non-specific sheet in own doc't
                        shx1, shx2 = (-1, -1) # internal, any sheet
                    else:
                        shx1, shx2 = (-666, -666)
            okind = oUNK
            ovalue = None
            if shx1 == -5: # addin func name
                okind = oSTRG
                ovalue = bk.addin_func_names[tgtnamex]
                otext = '"' + ovalue.replace('"', '""') + '"'
            elif dodgy or shx1 < -1:
                otext = "<<Name #%d in external(?) file #%d>>" \
                    % (tgtnamex, origrefx)
            else:
                tgtobj = bk.name_obj_list[tgtnamex]
                if tgtobj.scope == -1:
                    otext = tgtobj.name
                else:
                    otext = "%s!%s" \
                        % (bk._sheet_names[tgtobj.scope], tgtobj.name)
            if blah:
                # FIX: was repr(res.text) -- that printed the stale text of
                # the placeholder operand, not the text being set (cf. tName).
                print(" tNameX: setting text to", repr(otext), file=bk.logfile)
            res = Operand(okind, ovalue, LEAF_RANK, otext)
            spush(res)
        elif opcode in error_opcodes:
            any_err = 1
            spush(error_opnd)
        else:
            if blah:
                print("FORMULA: /// Not handled yet: t" + oname, file=bk.logfile)
            any_err = 1
        if sz <= 0:
            raise FormulaError("Fatal: token size is not positive")
        pos += sz
    any_rel = not not any_rel
    if blah:
        print("End of formula. level=%d any_rel=%d any_err=%d stack=%r" % \
            (level, not not any_rel, any_err, stack), file=bk.logfile)
        if len(stack) >= 2:
            print("*** Stack has unprocessed args", file=bk.logfile)
        print(file=bk.logfile)
    if len(stack) != 1:
        result = None
    else:
        result = stack[0].text
    return result
#### under deconstruction ###
def dump_formula(bk, data, fmlalen, bv, reldelta, blah=0, isname=0):
    """
    Debug helper: walk a BIFF8 RPN formula token stream, logging each token
    to bk.logfile.  Unlike decompile_formula it does not build formula text;
    it only tracks reference coordinates (for optype==1 tokens) on a stack.
    """
    if blah:
        print("dump_formula", fmlalen, bv, len(data), file=bk.logfile)
        hex_char_dump(data, 0, fmlalen, fout=bk.logfile)
    assert bv >= 80 #### this function needs updating ####
    sztab = szdict[bv]
    pos = 0
    stack = []
    any_rel = 0
    any_err = 0
    spush = stack.append
    while 0 <= pos < fmlalen:
        op = BYTES_ORD(data[pos])
        opcode = op & 0x1f
        optype = (op & 0x60) >> 5
        if optype:
            opx = opcode + 32
        else:
            opx = opcode
        oname = onames[opx] # + [" RVA"][optype]
        sz = sztab[opx]
        if blah:
            print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \
                % (pos, op, oname, sz, opcode, optype), file=bk.logfile)
        if not optype:
            # ---- control / base tokens ----
            if 0x01 <= opcode <= 0x02: # tExp, tTbl
                # reference to a shared formula or table record
                rowx, colx = unpack("<HH", data[pos+1:pos+5])
                if blah: print(" ", (rowx, colx), file=bk.logfile)
            elif opcode == 0x10: # tList
                if blah: print("tList pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                spush(aop + bop)
                if blah: print("tlist post", stack, file=bk.logfile)
            elif opcode == 0x11: # tRange
                if blah: print("tRange pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                assert len(aop) == 1
                assert len(bop) == 1
                result = do_box_funcs(tRangeFuncs, aop[0], bop[0])
                spush(result)
                if blah: print("tRange post", stack, file=bk.logfile)
            elif opcode == 0x0F: # tIsect
                if blah: print("tIsect pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                assert len(aop) == 1
                assert len(bop) == 1
                result = do_box_funcs(tIsectFuncs, aop[0], bop[0])
                spush(result)
                if blah: print("tIsect post", stack, file=bk.logfile)
            elif opcode == 0x19: # tAttr
                subop, nc = unpack("<BH", data[pos+1:pos+4])
                subname = tAttrNames.get(subop, "??Unknown??")
                if subop == 0x04: # Choose
                    sz = nc * 2 + 6
                else:
                    sz = 4
                if blah: print(" subop=%02xh subname=t%s sz=%d nc=%02xh" % (subop, subname, sz, nc), file=bk.logfile)
            elif opcode == 0x17: # tStr
                if bv <= 70:
                    nc = BYTES_ORD(data[pos+1])
                    strg = data[pos+2:pos+2+nc] # left in 8-bit encoding
                    sz = nc + 2
                else:
                    strg, newpos = unpack_unicode_update_pos(data, pos+1, lenlen=1)
                    sz = newpos - pos
                if blah: print(" sz=%d strg=%r" % (sz, strg), file=bk.logfile)
            else:
                if sz <= 0:
                    print("**** Dud size; exiting ****", file=bk.logfile)
                    return
            pos += sz
            continue
        # ---- operand-class tokens ----
        if opcode == 0x00: # tArray
            pass
        elif opcode == 0x01: # tFunc
            nb = 1 + int(bv >= 40)
            # FIX: extract the single value from the unpack() tuple, as
            # decompile_formula does; previously funcx was a 1-tuple that
            # only formatted correctly by accident of %-formatting.
            funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
            if blah: print(" FuncID=%d" % funcx, file=bk.logfile)
        elif opcode == 0x02: #tFuncVar
            nb = 1 + int(bv >= 40)
            nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
            prompt, nargs = divmod(nargs, 128)
            macro, funcx = divmod(funcx, 32768)
            if blah: print(" FuncID=%d nargs=%d macro=%d prompt=%d" % (funcx, nargs, macro, prompt), file=bk.logfile)
        elif opcode == 0x03: #tName
            # FIX: extract the value from the unpack() tuple (see tFunc).
            namex = unpack("<H", data[pos+1:pos+3])[0]
            # Only change with BIFF version is the number of trailing UNUSED bytes!!!
            if blah: print(" namex=%d" % namex, file=bk.logfile)
        elif opcode == 0x04: # tRef
            res = get_cell_addr(data, pos+1, bv, reldelta)
            if blah: print(" ", res, file=bk.logfile)
        elif opcode == 0x05: # tArea
            res = get_cell_range_addr(data, pos+1, bv, reldelta)
            if blah: print(" ", res, file=bk.logfile)
        elif opcode == 0x09: # tMemFunc
            nb = unpack("<H", data[pos+1:pos+3])[0]
            if blah: print(" %d bytes of cell ref formula" % nb, file=bk.logfile)
        elif opcode == 0x0C: #tRefN
            res = get_cell_addr(data, pos+1, bv, reldelta=1)
            # note *ALL* tRefN usage has signed offset for relative addresses
            any_rel = 1
            if blah: print(" ", res, file=bk.logfile)
        elif opcode == 0x0D: #tAreaN
            res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
            # note *ALL* tAreaN usage has signed offset for relative addresses
            any_rel = 1
            if blah: print(" ", res, file=bk.logfile)
        elif opcode == 0x1A: # tRef3d
            refx = unpack("<H", data[pos+1:pos+3])[0]
            res = get_cell_addr(data, pos+3, bv, reldelta)
            if blah: print(" ", refx, res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            any_rel = any_rel or row_rel or col_rel
            shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            any_err |= shx1 < -1
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            if blah: print(" ", coords, file=bk.logfile)
            if optype == 1: spush([coords])
        elif opcode == 0x1B: # tArea3d
            refx = unpack("<H", data[pos+1:pos+3])[0]
            res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
            if blah: print(" ", refx, res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            any_rel = any_rel or row_rel1 or col_rel1 or row_rel2 or col_rel2
            shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            any_err |= shx1 < -1
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print(" ", coords, file=bk.logfile)
            if optype == 1: spush([coords])
        elif opcode == 0x19: # tNameX
            refx, namex = unpack("<HH", data[pos+1:pos+5])
            if blah: print(" refx=%d namex=%d" % (refx, namex), file=bk.logfile)
        elif opcode in error_opcodes:
            any_err = 1
        else:
            if blah: print("FORMULA: /// Not handled yet: t" + oname, file=bk.logfile)
            any_err = 1
        if sz <= 0:
            print("**** Dud size; exiting ****", file=bk.logfile)
            return
        pos += sz
    if blah:
        print("End of formula. any_rel=%d any_err=%d stack=%r" % \
            (not not any_rel, any_err, stack), file=bk.logfile)
        if len(stack) >= 2:
            print("*** Stack has unprocessed args", file=bk.logfile)
# === Some helper functions for displaying cell references ===
# I'm aware of only one possibility of a sheet-relative component in
# a reference: a 2D reference located in the "current sheet".
# xlrd stores this internally with bounds of (0, 1, ...) and
# relative flags of (1, 1, ...). These functions display the
# sheet component as empty, just like Excel etc.
def rownamerel(rowx, rowxrel, browx=None, r1c1=0):
    """Render the row component of a cell reference.

    rowx is either an absolute 0-based row (rowxrel false) or an offset from
    the base row browx.  Without a base row a relative offset cannot be
    resolved in A1 style, so R1C1 notation is forced.
    """
    if browx is None:
        r1c1 = True
    if not rowxrel:
        return ("R%d" if r1c1 else "$%d") % (rowx + 1)
    if r1c1:
        return "R[%d]" % rowx if rowx else "R"
    # A1 style: resolve the offset against the base row, wrapping at the
    # 65536-row sheet limit, and convert to a 1-based row number.
    return "%d" % ((browx + rowx) % 65536 + 1)
def colnamerel(colx, colxrel, bcolx=None, r1c1=0):
    """Render the column component of a cell reference.

    colx is either an absolute 0-based column (colxrel false) or an offset
    from the base column bcolx.  Without a base column a relative offset
    cannot be resolved in A1 style, so R1C1 notation is forced.
    """
    if bcolx is None:
        r1c1 = True
    if not colxrel:
        if r1c1:
            return "C%d" % (colx + 1)
        return "$" + colname(colx)
    if r1c1:
        return "C[%d]" % colx if colx else "C"
    # A1 style: resolve the offset against the base column, wrapping at the
    # 256-column sheet limit.
    return colname((bcolx + colx) % 256)
##
# Utility function: (5, 7) => 'H6'
def cellname(rowx, colx):
    """ (5, 7) => 'H6' -- plain A1-style name with no $ markers. """
    return colname(colx) + str(rowx + 1)
##
# Utility function: (5, 7) => '$H$6'
def cellnameabs(rowx, colx, r1c1=0):
    """ (5, 7) => '$H$6' in A1 style, or 'R6C8' in R1C1 style. """
    if r1c1:
        return "R{0}C{1}".format(rowx + 1, colx + 1)
    return "$" + colname(colx) + "$" + str(rowx + 1)
def cellnamerel(rowx, colx, rowxrel, colxrel, browx=None, bcolx=None, r1c1=0):
    """Render a cell reference whose row and/or column may be relative."""
    # Fully absolute reference: no base cell needed.
    if not (rowxrel or colxrel):
        return cellnameabs(rowx, colx, r1c1)
    # A relative component with no corresponding base forces R1C1 mode.
    if (rowxrel and browx is None) or (colxrel and bcolx is None):
        r1c1 = True
    row_part = rownamerel(rowx, rowxrel, browx, r1c1)
    col_part = colnamerel(colx, colxrel, bcolx, r1c1)
    # R1C1 puts the row first ('R..C..'); A1 puts the column first ('H6').
    return row_part + col_part if r1c1 else col_part + row_part
##
# Utility function: 7 => 'H', 27 => 'AB'
def colname(colx):
    """ 7 => 'H', 27 => 'AB', 702 => 'AAA'

    Convert a 0-based column index to its Excel letter name.

    FIX/generalization: the previous two-branch implementation only handled
    up to two letters (colx <= 701, 'ZZ') and raised IndexError beyond that.
    This bijective base-26 loop produces identical results for 0..701 and
    extends naturally to wider sheets (XLSX allows up to 'XFD').
    """
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    name = ""
    colx += 1  # work in 1-based bijective base-26
    while colx > 0:
        colx, rem = divmod(colx - 1, 26)
        name = alphabet[rem] + name
    return name
def rangename2d(rlo, rhi, clo, chi, r1c1=0):
    """ (5, 20, 7, 10) => '$H$6:$J$20'

    Render a 2-D range with 0-based, half-open row/column bounds
    (rlo inclusive, rhi exclusive; likewise clo/chi).

    FIX: the r1c1 branch previously executed a bare `return`, yielding None;
    callers interpolate the result into strings (e.g. rangename3d's
    "%s!%s"), which produced the literal text 'None'.  Return a proper
    R1C1-style range instead.
    """
    if r1c1:
        # 1-based inclusive bounds in R1C1 notation; rhi/chi are already
        # one past the last 0-based index, i.e. equal to the 1-based last.
        return "R%dC%d:R%dC%d" % (rlo + 1, clo + 1, rhi, chi)
    if rhi == rlo+1 and chi == clo+1:
        # Single cell: collapse to one name.
        return cellnameabs(rlo, clo, r1c1)
    return "%s:%s" % (cellnameabs(rlo, clo, r1c1), cellnameabs(rhi-1, chi-1, r1c1))
def rangename2drel(rlo_rhi_clo_chi, rlorel_rhirel_clorel_chirel, browx=None, bcolx=None, r1c1=0):
    """Render a 2-D range whose corners may carry relative flags."""
    rlo, rhi, clo, chi = rlo_rhi_clo_chi
    rlorel, rhirel, clorel, chirel = rlorel_rhirel_clorel_chirel
    # Any relative component without a matching base cell forces R1C1 mode.
    if browx is None and (rlorel or rhirel):
        r1c1 = True
    if bcolx is None and (clorel or chirel):
        r1c1 = True
    start = cellnamerel(rlo, clo, rlorel, clorel, browx, bcolx, r1c1)
    end = cellnamerel(rhi - 1, chi - 1, rhirel, chirel, browx, bcolx, r1c1)
    return "%s:%s" % (start, end)
##
# Utility function:
# <br /> Ref3D((1, 4, 5, 20, 7, 10)) => 'Sheet2:Sheet3!$H$6:$J$20'
def rangename3d(book, ref3d):
    """ Ref3D(1, 4, 5, 20, 7, 10) => 'Sheet2:Sheet3!$H$6:$J$20'
    (assuming Excel's default sheetnames) """
    coords = ref3d.coords
    # coords layout: (sheet_lo, sheet_hi, row_lo, row_hi, col_lo, col_hi),
    # all half-open upper bounds.
    sheet_part = sheetrange(book, coords[0], coords[1])
    range_part = rangename2d(*coords[2:6])
    return "%s!%s" % (sheet_part, range_part)
##
# Utility function:
# <br /> Ref3D(coords=(0, 1, -32, -22, -13, 13), relflags=(0, 0, 1, 1, 1, 1))
# R1C1 mode => 'Sheet1!R[-32]C[-13]:R[-23]C[12]'
# A1 mode => depends on base cell (browx, bcolx)
def rangename3drel(book, ref3d, browx=None, bcolx=None, r1c1=0):
    """Render a 3-D range that may carry relative flags.

    The sheet component is omitted entirely when it is sheet-relative
    (xlrd's encoding for "current sheet": bounds (0, 1) with rel flags set).
    """
    coords = ref3d.coords
    relflags = ref3d.relflags
    sheet_part = sheetrangerel(book, coords[:2], relflags[:2])
    range_part = rangename2drel(coords[2:6], relflags[2:6], browx, bcolx, r1c1)
    if sheet_part:
        return "%s!%s" % (sheet_part, range_part)
    return range_part
def quotedsheetname(shnames, shx):
if shx >= 0:
shname = shnames[shx]
else:
shname = {
-1: "?internal; any sheet?",
-2: "internal; deleted sheet",
-3: "internal; macro sheet",
-4: "<<external>>",
}.get(shx, "?error %d?" % shx)
if "'" in shname:
return "'" + shname.replace("'", "''") + "'"
if " " in shname:
return "'" + shname + "'"
return shname
def sheetrange(book, slo, shi):
    """Render a (half-open) sheet index range as 'Sheet1' or 'Sheet1:Sheet3'."""
    names = book.sheet_names()
    desc = quotedsheetname(names, slo)
    # Collapse a single-sheet span to just one name.
    if shi - 1 != slo:
        desc = "%s:%s" % (desc, quotedsheetname(names, shi - 1))
    return desc
def sheetrangerel(book, srange, srangerel):
    """Render a possibly-relative sheet range; a sheet-relative range
    (the "current sheet" encoding) renders as the empty string."""
    slo, shi = srange
    slorel, shirel = srangerel
    if not (slorel or shirel):
        return sheetrange(book, slo, shi)
    # Only the (0, 1) / (1, 1) "current sheet" encoding is expected here.
    assert slorel and shirel and slo == 0 and shi == 1
    return ""
# ==============================================================
| mit |
cliqz/socorro | socorro/unittest/external/filesystem/create_json_dump_store.py | 20 | 5435 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import errno
import json
import os
import time
import socorro.external.filesystem.json_dump_storage as JDS
from socorro.lib.datetimeutil import utc_now, UTC
# Test fixture data for JsonDumpStorage tests.  Each entry maps a crash uuid
# to a 4-tuple whose fields appear to be:
#   [0] timestamp as 'YYYY-MM-DD-HH-MM' (or '+N' for "N minutes from now" --
#       see createTestSet below),
#   [1] webhead host name,
#   [2] radix directory path derived from the uuid's leading hex digits,
#   [3] expected date-branch path (date components + slot + webhead).
# NOTE(review): field meanings inferred from how createTestSet consumes them;
# confirm against socorro's JsonDumpStorage layout documentation.
jsonFileData = {
    '0bba61c5-dfc3-43e7-dead-8afd20071025': ('2007-10-25-05-04','webhead02','0b/ba/61/c5','2007/10/25/05/00/webhead02_0'),
    '0bba929f-8721-460c-dead-a43c20071025': ('2007-10-25-05-04','webhead02','0b/ba/92/9f','2007/10/25/05/00/webhead02_0'),
    '0b9ff107-8672-4aac-dead-b2bd20081225': ('2008-12-25-05-00','webhead01','0b/9f/f1/07','2008/12/25/05/00/webhead01_0'),
    '22adfb61-f75b-11dc-dead-001320081225': ('2008-12-25-05-01','webhead01','22/ad/fb/61','2008/12/25/05/00/webhead01_0'),
    'b965de73-ae90-a935-dead-03ae20081225': ('2008-12-25-05-04','webhead01','b9/65/de/73','2008/12/25/05/00/webhead01_0'),
    '0b781b88-ecbe-4cc4-dead-6bbb20081225': ('2008-12-25-05-05','webhead01','0b/78/1b/88','2008/12/25/05/05/webhead01_0'),
    '0b8344d6-9021-4db9-dead-a15320081225': ('2008-12-25-05-06','webhead01','0b/83/44/d6','2008/12/25/05/05/webhead01_0'),
    '0b94199b-b90b-4683-dead-411420081226': ('2008-12-26-05-21','webhead01','0b/94/19/9b','2008/12/26/05/20/webhead01_0'),
    '0b9eedc3-9a79-4ce2-dead-155920081226': ('2008-12-26-05-24','webhead01','0b/9e/ed/c3','2008/12/26/05/20/webhead01_0'),
    '0b9fd6da-27e4-46aa-dead-3deb20081226': ('2008-12-26-05-25','webhead02','0b/9f/d6/da','2008/12/26/05/25/webhead02_0'),
    '0ba32a30-2476-4724-dead-de17e3081125': ('2008-11-25-05-00','webhead02','0b/a3/2a', '2008/11/25/05/00/webhead02_0'),
    '0bad640f-5825-4d42-dead-21b8e3081125': ('2008-11-25-05-04','webhead02','0b/ad/64', '2008/11/25/05/00/webhead02_0'),
    '0bae7049-bbff-49f2-dead-7e9fe2081125': ('2008-11-25-05-05','webhead02','0b/ae', '2008/11/25/05/05/webhead02_0'),
    '0baf1b4d-dad3-4d35-dead-b9dce2081125': ('2008-11-25-05-06','webhead02','0b/af', '2008/11/25/05/05/webhead02_0'),
    }
# A smaller supplementary data set (same tuple shape as jsonFileData).
jsonMoreData = {
    '28adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-01','webhead01','28/ad/fb/61','2008/12/25/05/00'),
    '29adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-00','webhead01','29/ad/fb/61','2008/12/25/05/00'),
    }
# Extra entries used by tests that deliberately overfill a slot.
jsonTooMany = {
    '23adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-01','webhead01','23/ad/fb/61','2008/12/25/05/00'),
    '24adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-01','webhead01','24/ad/fb/61','2008/12/25/05/00'),
    '25adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-02','webhead01','25/ad/fb/61','2008/12/25/05/00'),
    '26adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-02','webhead01','26/ad/fb/61','2008/12/25/05/00'),
    '27adfb61-f75b-11dc-b6be-001320081225': ('2008-12-25-05-03','webhead01','27/ad/fb/61','2008/12/25/05/00'),
    }
# A uuid deliberately absent from all maps above, for negative-lookup tests.
jsonBadUuid = '66666666-6666-6666-6666-666620081225'
def getSlot(minsperslot, minute):
    """Return the beginning minute of the slot of length minsperslot that contains minute"""
    slot_index = int(minute / minsperslot)
    return slot_index * minsperslot
def minimalJsonFileContents(dataMap = None):
    """
    Generate minimal json file contents encoding by default:
    a map of 'ProductName', 'Version' and 'BuildID'
    or if dataMap is provided the contents of the map. Note that values in
    that map MUST be strings that can be formatted to contain a
    distinguishing integer
    """
    if not dataMap:
        dataMap = {'ProductName':'bogusName-%02d',
                   'Version':'bogusVersion-%02d',
                   'BuildID':'bogusBuildID-%02d',
                   }
    cookie = 0
    # Infinite generator: each yield stamps the templates with the next
    # integer, so successive files are distinguishable.
    while True:
        yield json.dumps(dict((k, v % cookie) for k, v in dataMap.items()))
        cookie += 1
def createTestSet(testData,jsonKwargs,rootDir):
    # Build a JsonDumpStorage tree under rootDir and populate it with one
    # (json, dump) file pair per uuid in testData.
    #   testData: uuid -> [timestamp, webhead, radix path, date path]; a
    #     timestamp starting with '+' means "offset from now in minutes".
    #   jsonKwargs: passed through to JsonDumpStorage; also consulted for
    #     the jsonIsEmpty / jsonIsBogus / jsonFileGenerator test knobs.
    # NOTE: this module is Python 2 code (print statements, `except E,x`,
    # generator .next()).
    try:
        os.makedirs(rootDir)
    except OSError,x:
        # A pre-existing rootDir is fine; anything else is a real failure.
        if errno.EEXIST != x.errno: raise
    storage = JDS.JsonDumpStorage(rootDir, **jsonKwargs)
    jsonIsEmpty = jsonKwargs.get('jsonIsEmpty', False)
    jsonIsBogus = jsonKwargs.get('jsonIsBogus', True)
    jsonFileGenerator = jsonKwargs.get('jsonFileGenerator',None)
    if 'default' == jsonFileGenerator:
        jsonFileGenerator = minimalJsonFileContents()
    thedt = utc_now()
    for uuid,data in testData.items():
        if data[0].startswith('+'):
            # Relative timestamp: if we are too close to the next minute,
            # wait it out so the computed slot cannot roll over mid-test.
            if thedt.second >= 58:
                print "\nSleeping for %d seconds" %(61-thedt.second)
                time.sleep(61-thedt.second)
                thedt = utc_now()
            slot = {
                '+0': getSlot(storage.minutesPerSlot,thedt.minute),
                '+5': getSlot(storage.minutesPerSlot,thedt.minute+5),
                '+10':getSlot(storage.minutesPerSlot,thedt.minute+10),
                }
            # Rewrite data[3] into the full expected date-branch path.
            d3h = '%d/%02d/%02d/%02d/%s' %(thedt.year,thedt.month,thedt.day,thedt.hour,slot[data[0]])
            data[3] = "%s/%s" % (d3h,data[3])
        else:
            # Absolute timestamp encoded as 'YYYY-MM-DD-HH-MM'.
            thedt = datetime.datetime(*[int(x) for x in data[0].split('-')], tzinfo=UTC)
        fj,fd = storage.newEntry(uuid,webheadHostName=data[1],timestamp = thedt)
        try:
            if jsonIsEmpty:
                pass
            elif jsonIsBogus:
                # Deliberately invalid JSON content.
                fj.write('json test of %s\n' % uuid)
            else:
                if jsonFileGenerator:
                    fileContents = jsonFileGenerator.next()
                else:
                    # Well-formed JSON with intentionally wrong contents.
                    fileContents = '{"what": "legal json, bad contents", "uuid": "%s\"}\n'% uuid
                fj.write(fileContents)
        finally:
            if fj: fj.close()
        try:
            fd.write('dump test of %s\n' % uuid)
        finally:
            if fd: fd.close()
| mpl-2.0 |
adrian-ionescu/apache-spark | python/pyspark/mllib/tree.py | 46 | 24110 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import random
from pyspark import SparkContext, RDD, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc, JavaModelWrapper
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['DecisionTreeModel', 'DecisionTree', 'RandomForestModel',
'RandomForest', 'GradientBoostedTreesModel', 'GradientBoostedTrees']
class TreeEnsembleModel(JavaModelWrapper, JavaSaveable):
    """TreeEnsembleModel

    .. versionadded:: 1.3.0
    """

    @since("1.3.0")
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points using
        the model trained.

        .. note:: In Python, predict cannot currently be used within an RDD
            transformation or action.
            Call predict directly on the RDD instead.
        """
        # Distributed input: convert each point on the executors.
        if isinstance(x, RDD):
            return self.call("predict", x.map(_convert_to_vector))
        # Single point: convert locally and delegate to the JVM model.
        return self.call("predict", _convert_to_vector(x))

    @since("1.3.0")
    def numTrees(self):
        """
        Get number of trees in ensemble.
        """
        return self.call("numTrees")

    @since("1.3.0")
    def totalNumNodes(self):
        """
        Get total number of nodes, summed over all trees in the ensemble.
        """
        return self.call("totalNumNodes")

    def __repr__(self):
        """ Summary of model """
        return self._java_model.toString()

    @since("1.3.0")
    def toDebugString(self):
        """ Full model """
        return self._java_model.toDebugString()
class DecisionTreeModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    A decision tree model for classification or regression.
    .. versionadded:: 1.1.0
    """
    @since("1.1.0")
    def predict(self, x):
        """
        Predict the label of one or more examples.
        .. note:: In Python, predict cannot currently be used within an RDD
            transformation or action.
            Call predict directly on the RDD instead.
        :param x:
            Data point (feature vector), or an RDD of data points (feature
            vectors).
        """
        # RDDs are predicted in bulk on the JVM side; single points are
        # converted and predicted directly.
        if isinstance(x, RDD):
            return self.call("predict", x.map(_convert_to_vector))
        else:
            return self.call("predict", _convert_to_vector(x))
    @since("1.1.0")
    def numNodes(self):
        """Get number of nodes in tree, including leaf nodes."""
        return self._java_model.numNodes()
    @since("1.1.0")
    def depth(self):
        """
        Get depth of tree (e.g. depth 0 means 1 leaf node, depth 1
        means 1 internal node + 2 leaf nodes).
        """
        return self._java_model.depth()
    def __repr__(self):
        """ summary of model. """
        return self._java_model.toString()
    @since("1.2.0")
    def toDebugString(self):
        """ full model. """
        return self._java_model.toDebugString()
    @classmethod
    def _java_loader_class(cls):
        # Fully-qualified Scala class used by JavaLoader.load() to
        # deserialize saved models.
        return "org.apache.spark.mllib.tree.model.DecisionTreeModel"
class DecisionTree(object):
    """
    Learning algorithm for a decision tree model for classification or
    regression.
    .. versionadded:: 1.1.0
    """
    @classmethod
    def _train(cls, data, type, numClasses, features, impurity="gini", maxDepth=5, maxBins=32,
               minInstancesPerNode=1, minInfoGain=0.0):
        # Shared backend for trainClassifier/trainRegressor.
        # NOTE(review): ``type`` shadows the builtin and ``features`` is the
        # categoricalFeaturesInfo mapping -- names kept as-is for stability.
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        model = callMLlibFunc("trainDecisionTreeModel", data, type, numClasses, features,
                              impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
        return DecisionTreeModel(model)
    @classmethod
    @since("1.1.0")
    def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo,
                        impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1,
                        minInfoGain=0.0):
        """
        Train a decision tree model for classification.
        :param data:
          Training data: RDD of LabeledPoint. Labels should take values
          {0, 1, ..., numClasses-1}.
        :param numClasses:
          Number of classes for classification.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param impurity:
          Criterion used for information gain calculation.
          Supported values: "gini" or "entropy".
          (default: "gini")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 5)
        :param maxBins:
          Number of bins used for finding splits at each node.
          (default: 32)
        :param minInstancesPerNode:
          Minimum number of instances required at child nodes to create
          the parent split.
          (default: 1)
        :param minInfoGain:
          Minimum info gain required to create a split.
          (default: 0.0)
        :return:
          DecisionTreeModel.
        Example usage:
        >>> from numpy import array
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import DecisionTree
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(1.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {})
        >>> print(model)
        DecisionTreeModel classifier of depth 1 with 3 nodes
        >>> print(model.toDebugString())
        DecisionTreeModel classifier of depth 1 with 3 nodes
          If (feature 0 <= 0.5)
           Predict: 0.0
          Else (feature 0 > 0.5)
           Predict: 1.0
        <BLANKLINE>
        >>> model.predict(array([1.0]))
        1.0
        >>> model.predict(array([0.0]))
        0.0
        >>> rdd = sc.parallelize([[1.0], [0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", numClasses, categoricalFeaturesInfo,
                          impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
    @classmethod
    @since("1.1.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo,
                       impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
                       minInfoGain=0.0):
        """
        Train a decision tree model for regression.
        :param data:
          Training data: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param impurity:
          Criterion used for information gain calculation.
          The only supported value for regression is "variance".
          (default: "variance")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 5)
        :param maxBins:
          Number of bins used for finding splits at each node.
          (default: 32)
        :param minInstancesPerNode:
          Minimum number of instances required at child nodes to create
          the parent split.
          (default: 1)
        :param minInfoGain:
          Minimum info gain required to create a split.
          (default: 0.0)
        :return:
          DecisionTreeModel.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import DecisionTree
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {1: 0.0}))
        0.0
        >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        # Regression has no class count; 0 is passed as a placeholder.
        return cls._train(data, "regression", 0, categoricalFeaturesInfo,
                          impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
@inherit_doc
class RandomForestModel(TreeEnsembleModel, JavaLoader):
    """
    Represents a random forest model.
    .. versionadded:: 1.2.0
    """
    @classmethod
    def _java_loader_class(cls):
        # Fully-qualified Scala class used by JavaLoader.load() to
        # deserialize saved models.
        return "org.apache.spark.mllib.tree.model.RandomForestModel"
class RandomForest(object):
    """
    Learning algorithm for a random forest model for classification or
    regression.
    .. versionadded:: 1.2.0
    """
    # Accepted values for the featureSubsetStrategy argument below.
    supportedFeatureSubsetStrategies = ("auto", "all", "sqrt", "log2", "onethird")
    @classmethod
    def _train(cls, data, algo, numClasses, categoricalFeaturesInfo, numTrees,
               featureSubsetStrategy, impurity, maxDepth, maxBins, seed):
        # Shared backend for trainClassifier/trainRegressor.
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        if featureSubsetStrategy not in cls.supportedFeatureSubsetStrategies:
            raise ValueError("unsupported featureSubsetStrategy: %s" % featureSubsetStrategy)
        if seed is None:
            # Random seed bounded to 2**30 -- presumably to stay within a
            # JVM Int; TODO confirm.
            seed = random.randint(0, 1 << 30)
        model = callMLlibFunc("trainRandomForestModel", data, algo, numClasses,
                              categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
                              maxDepth, maxBins, seed)
        return RandomForestModel(model)
    @classmethod
    @since("1.2.0")
    def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
                        featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32,
                        seed=None):
        """
        Train a random forest model for binary or multiclass
        classification.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels should take values
          {0, 1, ..., numClasses-1}.
        :param numClasses:
          Number of classes for classification.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param numTrees:
          Number of trees in the random forest.
        :param featureSubsetStrategy:
          Number of features to consider for splits at each node.
          Supported values: "auto", "all", "sqrt", "log2", "onethird".
          If "auto" is set, this parameter is set based on numTrees:
          if numTrees == 1, set to "all";
          if numTrees > 1 (forest) set to "sqrt".
          (default: "auto")
        :param impurity:
          Criterion used for information gain calculation.
          Supported values: "gini" or "entropy".
          (default: "gini")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 4)
        :param maxBins:
          Maximum number of bins used for splitting features.
          (default: 32)
        :param seed:
          Random seed for bootstrapping and choosing feature subsets.
          Set as None to generate seed based on system time.
          (default: None)
        :return:
          RandomForestModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import RandomForest
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(0.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
        >>> model.numTrees()
        3
        >>> model.totalNumNodes()
        7
        >>> print(model)
        TreeEnsembleModel classifier with 3 trees
        <BLANKLINE>
        >>> print(model.toDebugString())
        TreeEnsembleModel classifier with 3 trees
        <BLANKLINE>
          Tree 0:
            Predict: 1.0
          Tree 1:
            If (feature 0 <= 1.5)
             Predict: 0.0
            Else (feature 0 > 1.5)
             Predict: 1.0
          Tree 2:
            If (feature 0 <= 1.5)
             Predict: 0.0
            Else (feature 0 > 1.5)
             Predict: 1.0
        <BLANKLINE>
        >>> model.predict([2.0])
        1.0
        >>> model.predict([0.0])
        0.0
        >>> rdd = sc.parallelize([[3.0], [1.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", numClasses,
                          categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
                          maxDepth, maxBins, seed)
    @classmethod
    @since("1.2.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto",
                       impurity="variance", maxDepth=4, maxBins=32, seed=None):
        """
        Train a random forest model for regression.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param numTrees:
          Number of trees in the random forest.
        :param featureSubsetStrategy:
          Number of features to consider for splits at each node.
          Supported values: "auto", "all", "sqrt", "log2", "onethird".
          If "auto" is set, this parameter is set based on numTrees:
          if numTrees == 1, set to "all";
          if numTrees > 1 (forest) set to "onethird" for regression.
          (default: "auto")
        :param impurity:
          Criterion used for information gain calculation.
          The only supported value for regression is "variance".
          (default: "variance")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 4)
        :param maxBins:
          Maximum number of bins used for splitting features.
          (default: 32)
        :param seed:
          Random seed for bootstrapping and choosing feature subsets.
          Set as None to generate seed based on system time.
          (default: None)
        :return:
          RandomForestModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import RandomForest
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42)
        >>> model.numTrees()
        2
        >>> model.totalNumNodes()
        4
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {0: 1.0}))
        0.5
        >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.5]
        """
        # Regression has no class count; 0 is passed as a placeholder.
        return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees,
                          featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
@inherit_doc
class GradientBoostedTreesModel(TreeEnsembleModel, JavaLoader):
    """
    Represents a gradient-boosted tree model.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _java_loader_class(cls):
        # Fully-qualified Scala class used by JavaLoader.load() to
        # deserialize saved models.
        return "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
class GradientBoostedTrees(object):
    """
    Learning algorithm for a gradient boosted trees model for
    classification or regression.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _train(cls, data, algo, categoricalFeaturesInfo,
               loss, numIterations, learningRate, maxDepth, maxBins):
        # Shared backend for trainClassifier/trainRegressor.
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        model = callMLlibFunc("trainGradientBoostedTreesModel", data, algo, categoricalFeaturesInfo,
                              loss, numIterations, learningRate, maxDepth, maxBins)
        return GradientBoostedTreesModel(model)
    @classmethod
    @since("1.3.0")
    def trainClassifier(cls, data, categoricalFeaturesInfo,
                        loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
                        maxBins=32):
        """
        Train a gradient-boosted trees model for classification.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels should take values
          {0, 1}.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param loss:
          Loss function used for minimization during gradient boosting.
          Supported values: "logLoss", "leastSquaresError",
          "leastAbsoluteError".
          (default: "logLoss")
        :param numIterations:
          Number of iterations of boosting.
          (default: 100)
        :param learningRate:
          Learning rate for shrinking the contribution of each estimator.
          The learning rate should be between in the interval (0, 1].
          (default: 0.1)
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 3)
        :param maxBins:
          Maximum number of bins used for splitting features. DecisionTree
          requires maxBins >= max categories.
          (default: 32)
        :return:
          GradientBoostedTreesModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import GradientBoostedTrees
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(0.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>>
        >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10)
        >>> model.numTrees()
        10
        >>> model.totalNumNodes()
        30
        >>> print(model)  # it already has newline
        TreeEnsembleModel classifier with 10 trees
        <BLANKLINE>
        >>> model.predict([2.0])
        1.0
        >>> model.predict([0.0])
        0.0
        >>> rdd = sc.parallelize([[2.0], [0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", categoricalFeaturesInfo,
                          loss, numIterations, learningRate, maxDepth, maxBins)
    @classmethod
    @since("1.3.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo,
                       loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
                       maxBins=32):
        """
        Train a gradient-boosted trees model for regression.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param loss:
          Loss function used for minimization during gradient boosting.
          Supported values: "logLoss", "leastSquaresError",
          "leastAbsoluteError".
          (default: "leastSquaresError")
        :param numIterations:
          Number of iterations of boosting.
          (default: 100)
        :param learningRate:
          Learning rate for shrinking the contribution of each estimator.
          The learning rate should be between in the interval (0, 1].
          (default: 0.1)
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 3)
        :param maxBins:
          Maximum number of bins used for splitting features. DecisionTree
          requires maxBins >= max categories.
          (default: 32)
        :return:
          GradientBoostedTreesModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import GradientBoostedTrees
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> data = sc.parallelize(sparse_data)
        >>> model = GradientBoostedTrees.trainRegressor(data, {}, numIterations=10)
        >>> model.numTrees()
        10
        >>> model.totalNumNodes()
        12
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {0: 1.0}))
        0.0
        >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "regression", categoricalFeaturesInfo,
                          loss, numIterations, learningRate, maxDepth, maxBins)
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    import sys
    globs = globals().copy()
    from pyspark.sql import SparkSession
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.tree tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # Bug fix: the builtin exit() is only installed by the ``site``
        # module (absent under ``python -S``); sys.exit is always available.
        sys.exit(-1)
if __name__ == "__main__":
    _test()
| apache-2.0 |
adamend/django-oscar | src/oscar/apps/shipping/methods.py | 40 | 5061 | from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.core import prices
class Base(object):
    """
    Shipping method interface class

    This is the superclass to the classes in methods.py, and a de-facto
    superclass to the classes in models.py. This allows using all
    shipping methods interchangeably (aka polymorphism).
    The interface is all properties.
    """
    #: Used to store this method in the session. Each shipping method should
    # have a unique code.
    code = '__default__'
    #: The name of the shipping method, shown to the customer during checkout
    name = 'Default shipping'
    #: A more detailed description of the shipping method shown to the customer
    # during checkout. Can contain HTML.
    description = ''
    #: Whether the charge includes a discount
    is_discounted = False

    def calculate(self, basket):
        """
        Return the shipping charge for the given basket
        """
        # Bug fix: ``raise NotImplemented()`` raised a TypeError (the
        # NotImplemented singleton is not callable) rather than signalling
        # an unimplemented method; NotImplementedError is the correct
        # exception for abstract methods.
        raise NotImplementedError()

    def discount(self, basket):
        """
        Return the discount on the standard shipping charge
        """
        # Plain methods are never discounted; subclasses wrap this.
        return D('0.00')
class Free(Base):
    """
    Shipping method for orders that ship at no cost to the customer.
    """
    code = 'free-shipping'
    name = _('Free shipping')

    def calculate(self, basket):
        # A free charge implies the tax on it is zero too, so both
        # components are pinned to zero up front.
        zero = D('0.00')
        return prices.Price(
            currency=basket.currency, excl_tax=zero, tax=zero)
class NoShippingRequired(Free):
    """
    This is a special shipping method that indicates that no shipping is
    actually required (eg for digital goods).
    """
    # Charge behaviour is inherited from Free: no shipping, no charge.
    code = 'no-shipping-required'
    name = _('No shipping required')
class FixedPrice(Base):
    """
    Shipping method with a flat charge that needs no per-basket
    calculation.
    """
    code = 'fixed-price-shipping'
    name = _('Fixed price shipping')

    # The charges may be supplied either by subclassing and overriding
    # these class attributes, or by passing values to the constructor.
    charge_excl_tax = None
    charge_incl_tax = None

    def __init__(self, charge_excl_tax=None, charge_incl_tax=None):
        # Only create instance attributes for charges that were actually
        # passed; otherwise the class-level values remain in effect.
        for attr, value in (('charge_excl_tax', charge_excl_tax),
                            ('charge_incl_tax', charge_incl_tax)):
            if value is not None:
                setattr(self, attr, value)

    def calculate(self, basket):
        return prices.Price(
            currency=basket.currency,
            excl_tax=self.charge_excl_tax,
            incl_tax=self.charge_incl_tax)
class OfferDiscount(Base):
    """
    Wrapper class that applies a discount to an existing shipping
    method's charges.
    """
    is_discounted = True
    def __init__(self, method, offer):
        # method: the shipping method being wrapped;
        # offer: the offer that supplies the shipping discount.
        self.method = method
        self.offer = offer
    # Forwarded properties
    @property
    def code(self):
        # Keep the wrapped method's identity so it is stored in the
        # session under its original code.
        return self.method.code
    @property
    def name(self):
        return self.method.name
    @property
    def discount_name(self):
        return self.offer.name
    @property
    def description(self):
        return self.method.description
    def calculate_excl_discount(self, basket):
        """Return the wrapped method's undiscounted charge for the basket."""
        return self.method.calculate(basket)
class TaxExclusiveOfferDiscount(OfferDiscount):
    """
    OfferDiscount variant whose discount is computed on, and applied to,
    the tax-exclusive charge.
    """

    def calculate(self, basket):
        undiscounted = self.method.calculate(basket)
        rebate = self.offer.shipping_discount(undiscounted.excl_tax)
        return prices.Price(
            currency=undiscounted.currency,
            excl_tax=undiscounted.excl_tax - rebate)

    def discount(self, basket):
        undiscounted = self.method.calculate(basket)
        return self.offer.shipping_discount(undiscounted.excl_tax)
class TaxInclusiveOfferDiscount(OfferDiscount):
    """
    OfferDiscount variant whose discount is computed on, and applied to,
    the tax-inclusive charge.
    """

    def calculate(self, basket):
        undiscounted = self.method.calculate(basket)
        rebate = self.offer.shipping_discount(undiscounted.incl_tax)
        discounted_incl_tax = undiscounted.incl_tax - rebate
        return prices.Price(
            currency=undiscounted.currency,
            excl_tax=self.calculate_excl_tax(undiscounted, discounted_incl_tax),
            incl_tax=discounted_incl_tax)

    def calculate_excl_tax(self, base_charge, incl_tax):
        """
        Return the charge excluding tax (but including discount).
        """
        zero = D('0.00')
        if incl_tax == zero:
            return zero
        # Assume the tax-exclusive charge scales down linearly by the same
        # ratio as the tax-inclusive charge (same Decimal evaluation order
        # as before: excl * (incl / base_incl)).
        scaled = base_charge.excl_tax * (
            incl_tax / base_charge.incl_tax)
        return scaled.quantize(D('0.01'))

    def discount(self, basket):
        undiscounted = self.method.calculate(basket)
        return self.offer.shipping_discount(undiscounted.incl_tax)
| bsd-3-clause |
ericdill/PyXRF | pyxrf/model/fileio.py | 1 | 9404 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
__author__ = 'Li Li'
import six
import h5py
import numpy as np
import os
from collections import OrderedDict
from atom.api import Atom, Str, observe, Typed, Dict, List, Int, Enum
import logging
logger = logging.getLogger(__name__)
class FileIOModel(Atom):
    """
    This class focuses on file input and output.
    Attributes
    ----------
    working_directory : str
    file_names : list
        list of loaded files
    data_file : str
    file_path : str
    data : array
        Experiment data.
    load_status : str
        Description of file loading status
    data_dict : dict
        Dict has filename as key and group data as value.
    """
    working_directory = Str()
    data_file = Str()
    file_names = List()
    file_path = Str()
    data = Typed(np.ndarray)
    load_status = Str()
    data_dict = Dict()
    img_dict = Dict()
    img_dict_flat = Dict()
    # NOTE(review): unlike the Atom members above, this is a plain
    # OrderedDict created at class-definition time, so it is shared by
    # all FileIOModel instances -- confirm a single instance is intended.
    data_sets = OrderedDict()
    def __init__(self,
                 working_directory=None,
                 data_file=None, *args, **kwargs):
        if working_directory is None:
            working_directory = os.path.expanduser('~')
        # Install the initial values without firing Atom observers.
        with self.suppress_notifications():
            self.working_directory = working_directory
            self.data_file = data_file
    @observe('file_names')
    def update_more_data(self, change):
        # Fired when the selected file list changes: rebuild data_sets
        # and data_dict by (re)loading every listed HDF5 file.
        self.data_sets.clear()
        self.file_names.sort()
        logger.info('Loaded files : {}'.format(self.file_names))
        #detID = 'det1'
        detID = 'detector'
        for fname in self.file_names:
            try:
                self.file_path = os.path.join(self.working_directory, fname)
                # NOTE(review): the file handle is never closed; data_dict
                # stores live h5py groups that closing would invalidate,
                # but the handles accumulate across reloads -- confirm.
                f = h5py.File(self.file_path, 'r+')
                #data = f['MAPS']
                #data = f['xrfmap']
                data = f['entry/instrument']
                exp_data = np.asarray(data[detID]['data'])
                logger.info('File : {} with total counts {}'.format(fname, np.sum(exp_data)))
                #exp_data = np.reshape(exp_data, [2, 4, 4096])
                # dict has filename as key and group data as value
                self.data_dict.update({fname: data})
                DS = DataSelection(filename=fname,
                                   raw_data=exp_data)
                self.data_sets.update({fname: DS})
                # get roi sum data
                #roi_result = get_roi_sum(data[detID]['roi_name'].value,
                #                         data[detID]['roi_limits'].value,
                #                         data[detID]['counts'])
                #self.img_dict_flat.update({fname.split('.')[0]+'_roi': roi_result})
                # read fitting results
                if 'xrf_fit' in data[detID]:
                    fit_result = get_fit_data(data[detID]['xrf_fit_name'].value,
                                              data[detID]['xrf_fit'].value)
                    self.img_dict_flat.update({fname.split('.')[0]+'_fit': fit_result})
            except ValueError:
                # Skip files missing the expected HDF5 layout.
                continue
    def get_roi_data(self):
        """
        Get roi sum data from data_dict.
        """
        # for k, v in six.iteritems(self.data_dict):
        #     roi_dict = {d[0]: d[1] for d in zip(v['channel_names'], v['XRF_roi'])}
        #     self.img_dict.update({str(k): {'roi_sum': roi_dict}})
        #
        #     self.img_dict_flat.update({str(k).split('.')[0]+'_roi_sum': roi_dict})
        pass
def get_roi_sum(namelist, data_range, data):
    """
    Sum counts over each named ROI's channel window.

    Parameters
    ----------
    namelist : list
        ROI names; spaces are replaced with underscores in the result keys.
    data_range : array
        N x 2 array of (low, high) channel bounds, one row per name.
    data : array
        3D array whose last axis is the channel axis.

    Returns
    -------
    dict
        Sanitized ROI name -> 2D array of counts summed over the window.
    """
    # Idiom fix: enumerate + dict comprehension instead of an index loop
    # that mutates a dict; behaviour (including later duplicate names
    # overwriting earlier ones) is unchanged.
    return {
        name.replace(' ', '_'): np.sum(data[:, :, data_range[i, 0]:data_range[i, 1]], axis=2)
        for i, name in enumerate(namelist)
    }
def get_fit_data(namelist, data):
    """
    Read fit data from h5 file. This is to be moved to filestore part.

    Parameters
    ----------
    namelist : list
        list of str for element lines
    data : array
        3D array of fitting results; data[i] is the 2D map for namelist[i]

    Returns
    -------
    dict
        element-line name -> 2D fit map
    """
    # Idiom fix: enumerate + dict comprehension instead of an index loop;
    # an IndexError is still raised (as before) when namelist is longer
    # than data's first axis.
    return {name: data[i, :, :] for i, name in enumerate(namelist)}
#self.img_dict_flat.update({fname.split('.')[0]: data_temp})
# Plot-mode labels offered in the GUI; consumed by DataSelection.plot_choice.
plot_as = ['Sum', 'Point', 'Roi']
class DataSelection(Atom):
    """
    Attributes
    ----------
    filename : str
    plot_choice : enum
        methods to plot
    point1 : str
        starting position
    point2 : str
        ending position
    roi : list
    raw_data : array
        experiment 3D data
    data : array
    plot_index : int
    """
    filename = Str()
    plot_choice = Enum(*plot_as)
    point1 = Str('0, 0')
    point2 = Str('0, 0')
    #roi = List()
    raw_data = Typed(np.ndarray)
    data = Typed(np.ndarray)
    plot_index = Int(0)
    fit_name = Str()
    fit_data = Typed(np.ndarray)
    @observe('plot_index', 'point1', 'point2')
    def _update_roi(self, change):
        # Recompute the displayed spectrum whenever the mode or selection
        # changes. plot_index: 0 = nothing, 1 = whole-map sum,
        # 2 = single point (point1), otherwise = sum over [point1, point2).
        if self.plot_index == 0:
            return
        elif self.plot_index == 1:
            self.data = self.get_sum()
        elif self.plot_index == 2:
            SC = SpectrumCalculator(self.raw_data, pos1=self.point1)
            self.data = SC.get_spectrum()
        else:
            SC = SpectrumCalculator(self.raw_data,
                                    pos1=self.point1,
                                    pos2=self.point2)
            self.data = SC.get_spectrum()
    def get_sum(self):
        # No positions supplied -> calculator sums over the whole map.
        SC = SpectrumCalculator(self.raw_data)
        return SC.get_spectrum()
class SpectrumCalculator(object):
    """
    Calculate summed spectrum according to starting and ending positions.

    Attributes
    ----------
    data : array
        3D array of experiment data; the first two axes are indexed by the
        positions and the last axis is the spectrum axis.
    pos1 : list or None
        starting position as [row, col]
    pos2 : list or None
        ending position as [row, col]
    """
    def __init__(self, data,
                 pos1=None, pos2=None):
        self.data = data
        if pos1:
            self.pos1 = self._parse_pos(pos1)
        else:
            self.pos1 = None
        if pos2:
            self.pos2 = self._parse_pos(pos2)
        else:
            self.pos2 = None

    def _parse_pos(self, pos):
        """Accept either a [row, col] list or a 'row, col' string."""
        if isinstance(pos, list):
            return pos
        return [int(v) for v in pos.split(',')]

    def get_spectrum(self):
        """
        Return a 1D spectrum: the whole-map sum when no position is set,
        a single pixel's spectrum when only pos1 is set, and the sum over
        the rectangle [pos1, pos2) when both are set.
        """
        if not self.pos1 and not self.pos2:
            return np.sum(self.data, axis=(0, 1))
        elif self.pos1 and not self.pos2:
            # Bug fix: removed stray debug print() calls -- library code
            # should not write to stdout; the returned value is unchanged.
            return self.data[self.pos1[0], self.pos1[1], :]
        else:
            return np.sum(self.data[self.pos1[0]:self.pos2[0], self.pos1[1]:self.pos2[1], :],
                          axis=(0, 1))
| bsd-3-clause |
jakesyl/pychess | lib/pychess/Utils/lutils/attack.py | 20 | 11103 | from __future__ import absolute_import
from .bitboard import *
from .ldata import *
from pychess.Utils.const import *
#
# Caveat: Many functions in this module has very similar code. If you fix a
# bug, or write a perforance enchace, please update all functions. Apologies
# for the inconvenience
#
def isAttacked (board, cord, color, ischecked=False):
    """ To determine if cord is attacked by any pieces from color. """
    # Local alias of the module-level table, presumably to speed up the
    # repeated lookups below.
    _moveArray = moveArray
    pboards = board.boards[color]
    # Knights
    if pboards[KNIGHT] & _moveArray[KNIGHT][cord]:
        return True
    rayto = fromToRay[cord]
    blocker = board.blocker
    # Bishops & Queens
    bitboard = (pboards[BISHOP] | pboards[QUEEN]) & _moveArray[BISHOP][cord]
    if bitboard:
        others = ~bitboard & blocker
        # inlined iterBits()
        while bitboard:
            # Isolate the lowest set bit, i.e. one candidate attacker.
            bit = bitboard & -bitboard
            ray = rayto[lsb[bit]]
            # If there is a path and no other piece stand in our way
            if ray and not ray & others:
                return True
            bitboard -= bit
    # Rooks & Queens
    bitboard = (pboards[ROOK] | pboards[QUEEN]) & _moveArray[ROOK][cord]
    if bitboard:
        others = ~bitboard & blocker
        # inlined iterBits()
        while bitboard:
            bit = bitboard & -bitboard
            ray = rayto[lsb[bit]]
            # If there is a path and no other piece stand in our way
            if ray and not ray & others:
                return True
            bitboard -= bit
    # Pawns
    # Would a pawn of the opposite color, standing at out kings cord, be able
    # to attack any of our pawns?
    ptype = color == WHITE and BPAWN or PAWN
    if pboards[PAWN] & _moveArray[ptype][cord]:
        return True
    # King
    if pboards[KING] & _moveArray[KING][cord]:
        # In atomic chess an adjacent king is not treated as an attacker
        # when we are testing for check -- presumably because kings cannot
        # capture there; TODO confirm against the variant rules.
        if board.variant == ATOMICCHESS and ischecked:
            return False
        else:
            return True
    return False
def getAttacks (board, cord, color):
    """ To create a bitboard of pieces of color, which attacks cord """
    _moveArray = moveArray
    pieces = board.boards[color]
    # Knights
    bits = pieces[KNIGHT] & _moveArray[KNIGHT][cord]
    # Kings
    bits |= pieces[KING] & _moveArray[KING][cord]
    # Pawns
    bits |= pieces[PAWN] & _moveArray[color == WHITE and BPAWN or PAWN][cord]
    rayto = fromToRay[cord]
    blocker = board.blocker
    # Bishops and Queens
    bitboard = (pieces[BISHOP] | pieces[QUEEN]) & _moveArray[BISHOP][cord]
    # inlined iterBits()
    while bitboard:
        # Isolate the lowest set bit, i.e. one candidate attacker.
        bit = bitboard & -bitboard
        c = lsb[bit]
        ray = rayto[c]
        # Include the slider only if the ray to cord is otherwise empty
        # (the attacker's own square is cleared from the blocker test).
        if ray and not clearBit(ray & blocker, c):
            bits |= bitPosArray[c]
        bitboard -= bit
    # Rooks and queens
    bitboard = (pieces[ROOK] | pieces[QUEEN]) & _moveArray[ROOK][cord]
    # inlined iterBits()
    while bitboard:
        bit = bitboard & -bitboard
        c = lsb[bit]
        ray = rayto[c]
        if ray and not clearBit(ray & blocker, c):
            bits |= bitPosArray[c]
        bitboard -= bit
    return bits
def getPieceMoves (board, cord, color, piece):
    """ To create a bitboard of specified pieces of color, which can move to
        cord """
    _moveArray = moveArray
    # NOTE(review): the ``color`` argument is immediately overwritten with
    # the side to move -- confirm callers never pass a color other than
    # board.color.
    color = board.color
    pieces = board.boards[color]
    if piece == KNIGHT or piece == KING:
        return pieces[piece] & _moveArray[piece][cord]
    rayto = fromToRay[cord]
    blocker = board.blocker
    if sliders[piece]:
        cords = pieces[piece] & _moveArray[piece][cord]
        bits = 0
        for c in iterBits(cords):
            ray = rayto[c]
            # Keep the slider only if its path to cord is otherwise empty.
            if ray and not clearBit(ray & blocker, c):
                bits |= bitPosArray[c]
        return bits
    if piece == PAWN:
        pawns = pieces[PAWN]
        # Captures: pawns on squares from which cord is a pawn-capture
        # target (opposite-color pawn table gives the reverse attack map).
        bits = pawns & _moveArray[color == WHITE and BPAWN or PAWN][cord]
        # Single push onto cord.
        bits |= pawns & bitPosArray[cord + (color == WHITE and -8 or 8)]
        # Double push from the start rank when the intermediate square is free.
        if not blocker & bitPosArray[cord + (color == WHITE and -8 or 8)]:
            bits |= pawns & rankBits[color == WHITE and 1 or 6]
        return bits
def pinnedOnKing (board, cord, color):
    """ Determine if the piece on `cord` is pinned against its color's king.

        In chess, a pin is a situation in which a piece is forced to stay
        put because moving it would expose a more valuable piece behind it
        to capture.

        Caveat: pinnedOnKing should only be called by genCheckEvasions(). """
    kingCord = board.kings[color]
    # Direction index from the king towards cord; -1 means the two squares
    # share no rank, file or diagonal, so no pin is possible.
    dir = directions[kingCord][cord]
    if dir == -1: return False
    opcolor = 1 - color
    blocker = board.blocker
    # Path from piece to king is blocked, so no pin
    if clearBit(fromToRay[kingCord][cord], cord) & blocker:
        return False
    # Occupied squares on the king->cord ray, beyond the piece itself.
    b = (rays[kingCord][dir] ^ fromToRay[kingCord][cord]) & blocker
    if not b: return False
    # Nearest occupied square behind the candidate pinned piece.
    cord1 = cord > kingCord and firstBit (b) or lastBit (b)
    # If diagonal (dir <= 3): an enemy queen or bishop behind pins it.
    if dir <= 3 and bitPosArray[cord1] & \
            (board.boards[opcolor][QUEEN] | board.boards[opcolor][BISHOP]):
        return True
    # Rank / file (dir >= 4): an enemy queen or rook behind pins it.
    if dir >= 4 and bitPosArray[cord1] & \
            (board.boards[opcolor][QUEEN] | board.boards[opcolor][ROOK]):
        return True
    return False
def staticExchangeEvaluate (board, moveOrTcord, color=None):
    """ The GnuChess Static Exchange Evaluator (or SEE for short).
        First determine the target square. Create a bitboard of all squares
        attacking the target square for both sides. Using these 2 bitboards,
        we take turn making captures from smallest piece to largest piece.
        When a sliding piece makes a capture, we check behind it to see if
        another attacker piece has been exposed. If so, add this to the bitboard
        as well. When performing the "captures", we stop if one side is ahead
        and doesn't need to capture, a form of pseudo-minimaxing. """
    #
    # Notice: If you use the tcord version, the color is the color attacked, and
    # the color to which the score is relative.
    #
    swaplist = [0]
    if color == None:
        # Move form: unpack flag / from-square / to-square from the
        # 18-bit encoded move.
        move = moveOrTcord
        flag = move >> 12
        fcord = (move >> 6) & 63
        tcord = move & 63
        # The moving side is whoever owns the from-square.
        color = board.friends[BLACK] & bitPosArray[fcord] and BLACK or WHITE
        opcolor = 1-color
        boards = board.boards[color]
        opboards = board.boards[opcolor]
        # Our attackers on the target square, minus the moving piece itself.
        ours = getAttacks (board, tcord, color)
        ours = clearBit (ours, fcord)
        theirs = getAttacks (board, tcord, opcolor)
        # Moving an x-ray-capable piece may have uncovered another
        # attacker standing behind it.
        if xray[board.arBoard[fcord]]:
            ours, theirs = addXrayPiece (board, tcord, fcord, color, ours, theirs)
        if flag in PROMOTIONS:
            # Initial gain: promoted piece value minus the pawn given up.
            swaplist = [PIECE_VALUES[flag-3] - PAWN_VALUE]
            lastval = -PIECE_VALUES[flag-3]
        else:
            if flag == ENPASSANT:
                swaplist = [PAWN_VALUE]
            else: swaplist = [PIECE_VALUES[board.arBoard[tcord]]]
            lastval = -PIECE_VALUES[board.arBoard[fcord]]
    else:
        # Target-square form: score relative to `color`, the side attacked.
        tcord = moveOrTcord
        opcolor = 1-color
        boards = board.boards[color]
        opboards = board.boards[opcolor]
        ours = getAttacks (board, tcord, color)
        theirs = getAttacks (board, tcord, opcolor)
        lastval = -PIECE_VALUES[board.arBoard[tcord]]
    # Alternate captures, each side always capturing with its cheapest
    # available attacker first (PAWN up to KING).
    while theirs:
        for piece in range(PAWN, KING+1):
            r = theirs & opboards[piece]
            if r:
                cord = firstBit(r)
                theirs = clearBit(theirs, cord)
                # Capturing with an x-ray-capable piece may expose a
                # further attacker behind it.
                if xray[piece]:
                    ours, theirs = addXrayPiece (board, tcord, cord,
                                                 color, ours, theirs)
                swaplist.append(swaplist[-1] + lastval)
                lastval = PIECE_VALUES[piece]
                break
        if not ours:
            break
        for piece in range(PAWN, KING+1):
            r = ours & boards[piece]
            if r:
                cord = firstBit(r)
                ours = clearBit(ours, cord)
                if xray[piece]:
                    ours, theirs = addXrayPiece (board, tcord, cord,
                                                 color, ours, theirs)
                swaplist.append(swaplist[-1] + lastval)
                lastval = -PIECE_VALUES[piece]
                break
    # At this stage, we have the swap scores in a list. We just need to
    # mini-max the scores from the bottom up to the top of the list.
    for n in range(len(swaplist)-1, 0, -1):
        if n & 1:
            if swaplist[n] <= swaplist[n-1]:
                swaplist[n-1] = swaplist[n]
        else:
            if swaplist[n] >= swaplist[n-1]:
                swaplist[n-1] = swaplist[n]
    return swaplist[0]
# Per piece type: True when the piece can attack "through" another piece
# along a line (presumably indexed EMPTY, PAWN, KNIGHT, BISHOP, ROOK,
# QUEEN, KING — confirm the constant ordering).  Knights and kings never
# take part in x-ray attacks.
xray = (False, True, False, True, True, True, False)

def addXrayPiece (board, tcord, fcord, color, ours, theirs):
    """ This is used by swapOff.
        The purpose of this routine is to find a piece which attack through
        another piece (e.g. two rooks, Q+B, B+P, etc.) Color is the side attacking
        the square where the swapping is to be done. """
    dir = directions[tcord][fcord]
    # Occupied squares further out along the tcord->fcord direction.
    a = rays[fcord][dir] & board.blocker
    if not a: return ours, theirs
    # Nearest occupied square behind fcord (which end of the ray depends
    # on square ordering).
    if tcord < fcord:
        ncord = firstBit(a)
    else: ncord = lastBit(a)
    piece = board.arBoard[ncord]
    # Only a slider moving along this direction attacks through: queen
    # always, rook on ranks/files (dir > 3), bishop on diagonals (dir < 4).
    if piece == QUEEN or (piece == ROOK and dir > 3) or \
            (piece == BISHOP and dir < 4):
        bit = bitPosArray[ncord]
        # Credit the uncovered attacker to whichever side owns it.
        if bit & board.friends[color]:
            ours |= bit
        else:
            theirs |= bit
    return ours, theirs
def defends (board, fcord, tcord):
    """ Could fcord attack tcord if the piece on tcord wasn't on the team of
        fcord?
        Doesn't test check. """
    # Work on a board copy, as we are going to change some stuff
    board = board.clone()
    # The defender's color is whoever owns the from-square.
    if board.friends[WHITE] & bitPosArray[fcord]:
        color = WHITE
    else: color = BLACK
    opcolor = 1-color
    boards = board.boards[color]
    opboards = board.boards[opcolor]
    # To see if we now defend the piece, we have to "give" it to the other team
    piece = board.arBoard[tcord]
    #backup = boards[piece]
    #opbackup = opboards[piece]
    # Flip ownership of the target piece in both the per-piece bitboards
    # and the per-color occupancy ("friends") bitboards.
    boards[piece] &= notBitPosArray[tcord]
    opboards[piece] |= bitPosArray[tcord]
    board.friends[color] &= notBitPosArray[tcord]
    board.friends[opcolor] |= bitPosArray[tcord]
    # Can we "attack" the piece now?  Temporarily switch side to move so
    # the validator checks the move from the defender's perspective.
    backupColor = board.color
    board.setColor(color)
    from .lmovegen import newMove
    from .validator import validateMove
    islegal = validateMove (board, newMove(fcord, tcord))
    board.setColor(backupColor)
    # We don't need to set the board back, as we work on a copy
    #boards[piece] = backup
    #opboards[piece] = opbackup
    #board.friends[color] |= bitPosArray[tcord]
    #board.friends[opcolor] &= notBitPosArray[tcord]
    return islegal
| gpl-3.0 |
JosephLutz/serialCommTest | tests/packetGenerator_test.py | 1 | 4467 | # packetGenerator_test
import traceback
import threading
import unittest
import Queue
import time
import sys
import os
if __name__ == '__main__':
importDirectory = os.getcwd()
if os.path.basename(importDirectory) in ['tests']:
importDirectory = os.path.dirname(importDirectory)
sys.path = [importDirectory] + sys.path
# Module to test
import packetGenerator
def get_exception_info():
    """
    Return a human-readable description of the exception currently being
    handled, or 'No Exception Encountered' when called while no exception
    is in flight.

    This is used when I cause other exceptions in an except clause.

    :rtype : string
    """
    exc_type, exc_obj, exc_tb = sys.exc_info()
    # Check for "no active exception" BEFORE touching the traceback; the
    # original computed fname from exc_tb.tb_frame first, which raised
    # AttributeError whenever there was no exception being handled.
    if exc_type is None or exc_obj is None or exc_tb is None:
        return 'No Exception Encountered'
    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    # Header line now ends with a newline so the '=' rule sits on its own
    # line (the original glued them together).
    error_out = 'Exception Encountered\n'
    error_out += '{0}\n'.format('=' * 80)
    error_out += 'lineno:{lineno}, fname:{fname}'.format(fname=fname, lineno=exc_tb.tb_lineno)
    for line in traceback.format_tb(exc_tb):
        error_out += '{0}\n'.format(line)
    return '\n{line:80}\n{out}\n{line:80}'.format(line='#' * 80, out=error_out)
class TestPacketGenerator(unittest.TestCase):
    """Unit tests for packetGenerator.PacketGenerator."""

    def test_object_creation(self):
        """PacketGenerator builds with minimal and with full arguments."""
        msgQueue = Queue.Queue()
        # test that the object is created with minimal arguments
        pktGen = packetGenerator.PacketGenerator('unitTest', 1)
        self.assertTrue(isinstance(pktGen, packetGenerator.PacketGenerator))
        pktGen = None
        # test that the object is created with all arguments
        pktGen = packetGenerator.PacketGenerator('unitTest', 1, msgQueue, 2, True, 'Seed String')
        self.assertTrue(isinstance(pktGen, packetGenerator.PacketGenerator))
        # test the msgQueue gets a message (a message is a tuple of three
        # items).  Compare ints with ==, not `is`: small-int identity is a
        # CPython implementation detail.
        msg = msgQueue.get()
        self.assertTrue(isinstance(msg, tuple) and len(msg) == 3)

    def test_make_packets(self):
        """makePackets() puts exactly one 4-tuple on the queue."""
        pktGen = packetGenerator.PacketGenerator('unitTest', 1, numBytes=2)
        self.assertTrue(pktGen.queue.empty())
        pktGen.makePackets(1)
        self.assertFalse(pktGen.queue.empty())
        packet = pktGen.queue.get()
        self.assertTrue(isinstance(packet, tuple) and len(packet) == 4)
        self.assertTrue(pktGen.queue.empty())

    def test_thread(self):
        """Start the generator thread, consume packets for a few seconds,
        then stop and join it (cleanup runs even if an assertion fails)."""
        testAssert = True
        pktGen = packetGenerator.PacketGenerator('unitTest', 1, numBytes=2)
        try:
            pktGen.start()
            self.assertTrue(pktGen.queue.empty())
            # tell thread to create some packets
            pktGen.runLock.acquire()
            pktGen.number = 1
            pktGen.runLock.release()
            # give time to create first packets
            time.sleep(2.0)
            startTime = time.time()
            # run the thread for 3 seconds
            while (time.time() - startTime) < 3.0:
                pktGen.runLock.acquire()  # lock around calls that need to be atomic
                self.assertFalse(pktGen.queue.empty())
                queueSize = pktGen.queue.qsize()
                pktGen.runLock.release()
                # pull packet off of Queue
                pktGen.runLock.acquire()  # lock around calls that need to be atomic
                pktGen.queue.get()
                newQueueSize = pktGen.queue.qsize()
                self.assertTrue((newQueueSize + 1) == queueSize)
                pktGen.packetUsed.set()
                pktGen.runLock.release()
                # wait some time for the new packet to be generated
                time.sleep(0.3)
                pktGen.runLock.acquire()  # lock around calls that need to be atomic
                newQueueSize = pktGen.queue.qsize()
                self.assertTrue(newQueueSize == queueSize)
                pktGen.runLock.release()
            # Stop the thread
            pktGen.runLock.acquire()
            pktGen.running = False
            pktGen.runLock.release()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not swallowed; print() form works on py2 too.
            print('\n' + get_exception_info())
            testAssert = False
        # clean up the thread: always clear the flag and stop the loop so
        # join() cannot hang, regardless of how the try block exited.
        if not pktGen.packetUsed.is_set():
            pktGen.packetUsed.set()
        pktGen.running = False
        try:
            pktGen.runLock.release()
        except threading.ThreadError:
            pass
        pktGen.join()
        self.assertTrue(pktGen.queue.empty())
        self.assertTrue(testAssert)
def runtests():
    # Discover and run every TestCase defined in this module.
    unittest.main()

if __name__ == '__main__':
    runtests()
| mit |
pztrn/spicecm-prototype | src/lib/edit_server.py | 1 | 1953 | # -*- coding: utf-8 -*-
# Edit server dialog module
import os
from PyQt4.QtGui import QDialog
from PyQt4 import uic
class Edit_Server_Dialog(QDialog):
    """Dialog for editing an existing SPICE server entry.

    Reuses the "add server" form (ui/server.ui), pre-filling its fields
    with the stored record and relabelling the accept button to "Edit".
    """

    def __init__(self, servers_list_instance, core_instance, server_name, item_index):
        # servers_list_instance: object holding the saved server records
        # core_instance: main application object, used to refresh the list
        # server_name: key of the record being edited
        # item_index: position of the item in the servers list widget
        QDialog.__init__(self)
        self.servers_list = servers_list_instance
        self.core = core_instance
        self.servers_data = self.servers_list.get_servers()
        self.item_index = item_index
        self.ui = uic.loadUi("ui/server.ui", self)
        self.ui.setWindowTitle("Edit SPICE server")
        self.ui.show()
        # Same form as "add", but the button applies the edit instead.
        self.ui.add_button.setText("Edit")
        # Pre-populate the fields from the stored record.
        self.ui.server_name.setText(self.servers_data[server_name]["name"])
        self.ui.server_address.setText(self.servers_data[server_name]["address"])
        self.ui.server_port.setText(self.servers_data[server_name]["port"])
        self.ui.server_password.setText(self.servers_data[server_name]["password"])
        self.ui.add_button.clicked.connect(self.edit_server)
        self.ui.cancel_button.clicked.connect(self.close)

    def edit_server(self):
        # Collect the form values, store them and refresh the server list.
        server_name = self.ui.server_name.text()
        server_address = self.ui.server_address.text()
        server_port = self.ui.server_port.text()
        server_password = self.ui.server_password.text()
        color_depth = self.ui.color_depth.itemText(self.ui.color_depth.currentIndex())
        # NOTE(review): checkState() returns a Qt.CheckState value (0/1/2),
        # not a bool, and it is stored stringified — confirm readers of the
        # "fullscreen" key expect that.
        fullscreen = self.ui.fullscreen_checkbox.checkState()
        server_data = {
            "name" : str(server_name),
            "address" : str(server_address),
            "port" : str(server_port),
            "password" : str(server_password),
            "depth" : str(color_depth),
            "fullscreen" : str(fullscreen)
        }
        # NOTE(review): presumably add_server() upserts by name, making it
        # usable for "edit" as well — verify against servers_list.
        self.servers_list.add_server(server_data)
        self.core.populate_servers_list()
        self.core.set_current_item(self.item_index)
        self.close()
| gpl-3.0 |
FUNSEPA/fsp-paypal | pagos/migrations/0003_auto_20161014_1550.py | 1 | 1150 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 15:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes the raw card fields (number,
    # cvv2, expiry, cardholder name) from Donation and adds an opaque
    # `payment_ref` instead — presumably so raw card data is no longer
    # stored locally.  Applied migrations should not be hand-edited.

    dependencies = [
        ('pagos', '0002_auto_20161014_1514'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='donation',
            name='cvv2',
        ),
        migrations.RemoveField(
            model_name='donation',
            name='expire_month',
        ),
        migrations.RemoveField(
            model_name='donation',
            name='expire_year',
        ),
        migrations.RemoveField(
            model_name='donation',
            name='first_name',
        ),
        migrations.RemoveField(
            model_name='donation',
            name='last_name',
        ),
        migrations.RemoveField(
            model_name='donation',
            name='number',
        ),
        migrations.AddField(
            model_name='donation',
            name='payment_ref',
            field=models.CharField(default=' ', max_length=225),
            preserve_default=False,
        ),
    ]
| mit |
yoelk/kivy | kivy/tests/visual_test_label.py | 37 | 8999 | from kivy.app import runTouchApp
from kivy.uix.gridlayout import GridLayout
from kivy.properties import StringProperty
from kivy.lang import Builder
from kivy.utils import get_hex_from_color, get_random_color
import timeit
import re
import random
from functools import partial
def layout_perf(label, repeat):
    """Time `label._label.render` with timeit and return the repeat()
    result as a string, or 'None' when no repeat count was given."""
    if not repeat:
        return 'None'
    count = int(repeat)
    timer = timeit.Timer(label._label.render)
    return str(timer.repeat(1, count))
def layout_real_perf(label, repeat):
    """Time the real render pass (render(real=True)) against a 1px
    texture, restoring the original texture afterwards.  Returns the
    timeit repeat() result as a string, or 'None' when no count given."""
    if not repeat:
        return 'None'
    count = int(repeat)
    saved_texture = label._label.texture
    label._label.texture = label._label.texture_1px
    timer = timeit.Timer(partial(label._label.render, True))
    result = str(timer.repeat(1, count))
    label._label.texture = saved_texture
    return result
kv = '''
#:import tlp visual_test_label.layout_perf
#:import tlrp visual_test_label.layout_real_perf
<TSliderButton@ToggleButton>:
size_hint: None, None
size: 100, 50
group: 'slider'
on_press: self.parent.slider.name = self.text if self.state =='down' else\
'dummy'
<TSpinner@Spinner>:
size_hint: None, None
size: 100, 50
name: ''
on_text: setattr(self.parent.label, self.name, self.text)
<TBoolButton@ToggleButton>:
size_hint: None, None
size: 100, 50
on_state: setattr(self.parent.label, self.text, self.state == 'down')
<TLabel@Label>:
size_hint: None, None
size: 100, 50
<LabelTest>:
cols: 1
spacing: 10
padding: 20
TabbedPanel:
do_default_tab: False
tab_width: self.width / 11 * 3
TabbedPanelItem:
text: 'Label'
BoxLayout:
ScrollView:
id: scrollview
Label:
size_hint: None, None
size: self.texture_size
id: label
text: record.text
dummy: 0
canvas:
Color:
rgba: 0, 1, 0, 0.5
Rectangle:
pos: self.pos
size: self.width, self.padding_y
Rectangle:
pos: self.x, self.y + self.height -\
self.padding_y
size: self.width, self.padding_y
Color:
rgba: 0, 0, 1, 0.5
Rectangle:
pos: self.pos
size: self.padding_x, self.height
Rectangle:
pos: self.x + self.width - self.padding_x,\
self.y
size: self.padding_x, self.height
Splitter:
sizable_from: 'left'
TextInput:
id: record
text: label.text
text: root.text
TabbedPanelItem:
text: 'Test performance'
BoxLayout:
orientation: 'vertical'
Label:
text: 'Test timeit performance with current label settings'
BoxLayout:
size_hint_y: None
height: 40
padding: [20, 0]
Label:
text: 'Repeat count: '
TextInput:
id: repeat
text: '1000'
Button:
text: 'Go (render - layout)'
on_press: results.text = tlp(label, repeat.text)
Button:
text: 'Go (render_real)'
on_press: results.text = tlrp(label, repeat.text)
Label:
id: results
text: 'Results:'
StackLayout:
id: slider_ctrl
size_hint_y: None
height: self.minimum_height
slider: slider
label: label
TLabel:
text: 'halign: '
TSpinner:
name: 'halign'
values: ['left', 'center', 'right', 'justify']
text: 'left'
TLabel:
text: 'valign: '
TSpinner:
name: 'valign'
values: ['top', 'middle', 'bottom']
text: 'bottom'
TBoolButton:
text: 'markup'
TBoolButton:
text: 'shorten'
TextInput:
size_hint: None, None
size: 100, 50
hint_text: 'split_str'
on_text_validate: label.split_str = self.text
multiline: False
TLabel:
text: 'shorten_from: '
TSpinner:
name: 'shorten_from'
values: ['left', 'center', 'right']
text: 'right'
TBoolButton:
text: 'strip'
state: 'down'
ToggleButton:
size_hint: None, None
size: 100, 50
text: 'random size'
on_state: label.text = root.sized_text if self.state == 'down'\
else root.text
TLabel:
text: 'Slider control:'
TSliderButton:
text: 'font_size'
TSliderButton:
text: 'line_height'
TSliderButton:
text: 'max_lines'
TSliderButton:
text: 'padding_x'
TSliderButton:
text: 'padding_y'
TextInput:
size_hint: None, None
size: 100, 50
hint_text: 'text_size[0]'
on_text_validate: label.text_size = (int(self.text) if self.text\
else None), label.text_size[1]
multiline: False
TextInput:
size_hint: None, None
size: 100, 50
hint_text: 'text_size[1]'
on_text_validate: label.text_size = label.text_size[0],\
(int(self.text) if self.text else None)
multiline: False
TLabel:
text: '<-- w/ validate'
Label:
size_hint_y: None
height: 40
color: [0, 1, 0, 1]
text_size: self.size
text: 'scrollview size: {}, label size: {}, text_size: {}, '\
'texture_size: {}, padding: {}'.format(scrollview.size, label.size,\
label.text_size, label.texture_size, label.padding)
BoxLayout:
size_hint_y: None
height: 40
Slider:
id: slider
range: -10, 200
value: 15
name: 'dummy'
on_value: setattr(label, self.name, self.value)
Label:
size_hint_x: None
width: 50
text: str(int(slider.value))
'''
text = '''
Because it would spare your Majesty all fear of future \
annoyance. If the lady loves her husband, she does not love your \
Majesty. If she does not love your Majesty, there is no reason \
why she should interfere with your Majesty's plan.
"It is true. And yet--Well! I wish she had been of my own \
station! What a queen she would have made!" He relapsed into a \
moody silence, which was not broken until we drew up in \
Serpentine Avenue.
The door of Briony Lodge was open, and an elderly woman stood \
upon the steps. She watched us with a sardonic eye as we stepped \
from the brougham.
Mr. Sherlock Holmes, I believe?" said she.
I am Mr. Holmes," answered my companion, looking at her with a \
questioning and rather startled gaze.
Indeed! My mistress told me that you were likely to call. She \
left this morning with her husband by the 5:15 train from Charing \
Cross for the Continent."
"What!" Sherlock Holmes staggered back, white with chagrin and \
surprise. "Do you mean that she has left England?"
Never to return.
"And the papers?" asked the King hoarsely. "All is lost."
'''
# Split the sample text into words, keeping the whitespace separators as
# their own list items (the capturing group makes re.split return them).
words = re.split('( +|\\n+)', text)
def annotate(pre, post, callable, words):
    """Wrap randomly chosen words of `words` (in place) with markup.

    Alternates between opening (`pre`, formatted with callable() and the
    word) and closing (`post`, formatted with the word) tags, skipping
    whitespace items and jumping ahead a random 1-7 positions each time.
    """
    closing = False
    idx = random.randint(0, 4)
    while idx < len(words):
        token = words[idx]
        if ' ' in token or '\n' in token:  # whitespace items: skip
            idx += 1
            continue
        if closing:
            words[idx] = post.format(token)
        else:
            words[idx] = pre.format(callable(), token)
        closing = not closing
        idx += random.randint(1, 7)
# Randomly decorate the word list with kivy markup (sizes, bold, italic,
# random colors) so the label test can exercise mixed-style text.
annotate('[size={0}]{1}', '{0}[/size]', partial(random.randint, 8, 24), words)
annotate('[b]{1}', '{0}[/b]', str, words)
annotate('[i]{1}', '{0}[/i]', str, words)
annotate('[color={0}]{1}', '{0}[/color]',
         lambda: get_hex_from_color(get_random_color()), words)

annotated_text = ''.join(words)
class LabelTest(GridLayout):
    """Root widget for the visual label test (layout defined in `kv`)."""
    # Plain sample paragraph shown by default.
    text = StringProperty(text)
    # Same paragraph with random size/bold/italic/color markup applied.
    sized_text = StringProperty(annotated_text)

if __name__ in ('__main__', ):
    Builder.load_string(kv)
    runTouchApp(LabelTest())
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/scipy/linalg/flinalg.py | 138 | 1727 | #
# Author: Pearu Peterson, March 2002
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_flinalg_funcs']
# The following ensures that possibly missing flavor (C or Fortran) is
# replaced with the available one. If none is available, exception
# is raised at the first attempt to use the resources.
try:
from . import _flinalg
except ImportError:
_flinalg = None
# from numpy.distutils.misc_util import PostponedException
# _flinalg = PostponedException()
# print _flinalg.__doc__
def has_column_major_storage(arr):
    """Return True when `arr` is stored in Fortran (column-major) order.

    (The dead ``lambda a: 0`` placeholder that this def immediately
    shadowed has been dropped.)
    """
    return arr.flags['FORTRAN']
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',..
def get_flinalg_funcs(names, arrays=(), debug=0):
    """Return optimal available _flinalg function objects with
    names. arrays are used to determine optimal prefix."""
    # Rank the arrays by type character to pick the best type prefix.
    preference = []
    for idx, arr in enumerate(arrays):
        char = arr.dtype.char
        if char not in _type_conv:
            char = 'd'  # default: double precision
        preference.append((char, idx))
    preference.sort()
    if preference:
        required_prefix = _type_conv[preference[0][0]]
    else:
        required_prefix = 'd'
    # Some routines may require special treatment.
    # Handle them here before the default lookup.

    # Default lookup: prefer the column-major variant when the leading
    # array is Fortran-ordered, otherwise the row-major one.
    if preference and has_column_major_storage(arrays[preference[0][1]]):
        first_suffix, second_suffix = '_c', '_r'
    else:
        first_suffix, second_suffix = '_r', '_c'
    funcs = []
    for name in names:
        base = required_prefix + name
        func = getattr(_flinalg, base + first_suffix,
                       getattr(_flinalg, base + second_suffix, None))
        funcs.append(func)
    return tuple(funcs)
| agpl-3.0 |
BigBrother-International/gst-cerbero | setup.py | 19 | 2428 | import os
import sys
import shutil
from setuptools import setup, find_packages
from cerbero.utils import shell
sys.path.insert(0, './cerbero')
# Utility function to read the README file.
# Utility function to read the README file.
def read(fname):
    """Return the contents of `fname`, resolved next to this file.

    Uses a context manager so the file handle is closed promptly
    (the original leaked the handle via ``open(...).read()``).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Utility function to parse directories
def parse_dir(dirpath, extension=None):
    """List files under `dirpath`: via `git ls-files` when run from a git
    checkout, otherwise via `find`.  Optionally filter by suffix.

    NOTE(review): assumes the command output ends with a trailing newline
    so split('\\n') always yields one '' entry; remove('') would raise
    ValueError otherwise — confirm shell.check_call guarantees this.
    """
    if os.path.exists('.git'):
        files = shell.check_call('git ls-files %s' % dirpath).split('\n')
        files.remove('')
    else:
        files = shell.check_call('find %s -type f' % dirpath).split('\n')
        files.remove('')
    if extension is None:
        return files
    return [f for f in files if f.endswith(extension)]
# Utility function to create the list of data files
# Utility function to create the list of data files
def datafiles(prefix):
    """Build the setup() data_files list: recipes, packages, config and
    data payloads installed under <prefix>/share/cerbero."""
    entries = []
    datadir = os.path.join(prefix, 'share', 'cerbero')
    # Recipe and package definitions, filtered by their extension.
    for subdir, ext in (('recipes', '.recipe'), ('packages', '.package')):
        for path in parse_dir(subdir, ext):
            entries.append((os.path.join(datadir, subdir), [path]))
    # Config files go straight into share/cerbero/config.
    for path in parse_dir('config'):
        entries.append((os.path.join(datadir, 'config'), [path]))
    # Data files keep their internal layout (strip the leading 'data/').
    for path in parse_dir('data'):
        inner = os.path.split(path.split('/', 1)[1])[0]
        entries.append((os.path.join(datadir, inner), [path]))
    return entries
# Fill manifest: regenerate MANIFEST.in from its template, appending an
# `include` line for every tracked recipe/package/data/config/tools file.
shutil.copy('MANIFEST.in.in', 'MANIFEST.in')
with open('MANIFEST.in', 'a+') as f:
    for dirname in ['recipes', 'packages', 'data', 'config', 'tools']:
        f.write('\n'.join(['include %s' % x for x in parse_dir(dirname)]))
        f.write('\n')

# Intercept prefix: peek at --prefix before setup() parses argv so the
# data_files list can be rooted at the right install location.
prefix = [x for x in sys.argv if x.startswith('--prefix=')]
if len(prefix) == 1:
    prefix = prefix[0].split('--prefix=')[1]
else:
    prefix = '/usr/local'

setup(
    name = "cerbero",
    version = "0.1.0",
    author = "Andoni Morales",
    author_email = "amorales@fluendo.com",
    description = ("Multi platform build system for Open Source projects"),
    license = "LGPL",
    url = "http://gstreamer.com",
    packages = find_packages(exclude=['tests']),
    long_description=read('README'),
    zip_safe = False,
    include_package_data=True,
    data_files = datafiles(prefix),
    entry_points = """
    [console_scripts]
    cerbero = cerbero.main:main""",
    classifiers=[
        "License :: OSI Approved :: LGPL License",
    ],
)
| lgpl-2.1 |
sikmir/QGIS | python/plugins/processing/algs/gdal/GridNearestNeighbor.py | 15 | 8685 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GridNearestNeighbor.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridNearestNeighbor(GdalAlgorithm):
    """Processing algorithm wrapping ``gdal_grid -a nearest``: interpolate
    a raster from a point layer by nearest-neighbour search inside a
    (possibly rotated) search ellipse."""

    # Parameter identifiers shared with the Processing framework.
    INPUT = 'INPUT'
    Z_FIELD = 'Z_FIELD'
    RADIUS_1 = 'RADIUS_1'
    RADIUS_2 = 'RADIUS_2'
    ANGLE = 'ANGLE'
    NODATA = 'NODATA'
    OPTIONS = 'OPTIONS'
    EXTRA = 'EXTRA'
    DATA_TYPE = 'DATA_TYPE'
    OUTPUT = 'OUTPUT'

    # GDAL data type names, indexed by the DATA_TYPE enum value.
    TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's inputs, advanced options and output."""
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Point layer'),
                                                              [QgsProcessing.TypeVectorPoint]))

        # Optional numeric field supplying the Z value (advanced).
        z_field_param = QgsProcessingParameterField(self.Z_FIELD,
                                                    self.tr('Z value from field'),
                                                    None,
                                                    self.INPUT,
                                                    QgsProcessingParameterField.Numeric,
                                                    optional=True)
        z_field_param.setFlags(z_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(z_field_param)

        self.addParameter(QgsProcessingParameterNumber(self.RADIUS_1,
                                                       self.tr('The first radius of search ellipse'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       minValue=0.0,
                                                       defaultValue=0.0))
        self.addParameter(QgsProcessingParameterNumber(self.RADIUS_2,
                                                       self.tr('The second radius of search ellipse'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       minValue=0.0,
                                                       defaultValue=0.0))
        self.addParameter(QgsProcessingParameterNumber(self.ANGLE,
                                                       self.tr('Angle of search ellipse rotation in degrees (counter clockwise)'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       minValue=0.0,
                                                       maxValue=360.0,
                                                       defaultValue=0.0))
        self.addParameter(QgsProcessingParameterNumber(self.NODATA,
                                                       self.tr('NODATA marker to fill empty points'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       defaultValue=0.0))

        # Raster creation options (advanced, custom widget).
        options_param = QgsProcessingParameterString(self.OPTIONS,
                                                     self.tr('Additional creation options'),
                                                     defaultValue='',
                                                     optional=True)
        options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        options_param.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        self.addParameter(options_param)

        # Free-form extra gdal_grid command-line parameters (advanced).
        extra_param = QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True)
        extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(extra_param)

        dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
                                                    self.tr('Output data type'),
                                                    self.TYPES,
                                                    allowMultiple=False,
                                                    defaultValue=5)
        dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(dataType_param)

        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
                                                                  self.tr('Interpolated (Nearest neighbor)')))

    def name(self):
        """Unique internal algorithm id."""
        return 'gridnearestneighbor'

    def displayName(self):
        """User-visible algorithm name."""
        return self.tr('Grid (Nearest neighbor)')

    def group(self):
        """User-visible group name."""
        return self.tr('Raster analysis')

    def groupId(self):
        """Internal group id."""
        return 'rasteranalysis'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))

    def commandName(self):
        """Name of the underlying GDAL executable."""
        return 'gdal_grid'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Translate the parameter values into the gdal_grid command line."""
        ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)

        arguments = ['-l']
        arguments.append(layerName)

        fieldName = self.parameterAsString(parameters, self.Z_FIELD, context)
        if fieldName:
            arguments.append('-zfield')
            arguments.append(fieldName)

        # Build the '-a' algorithm spec: nearest[:opt=val:...]
        params = 'nearest'
        params += ':radius1={}'.format(self.parameterAsDouble(parameters, self.RADIUS_1, context))
        params += ':radius2={}'.format(self.parameterAsDouble(parameters, self.RADIUS_2, context))
        params += ':angle={}'.format(self.parameterAsDouble(parameters, self.ANGLE, context))
        params += ':nodata={}'.format(self.parameterAsDouble(parameters, self.NODATA, context))

        arguments.append('-a')
        arguments.append(params)
        arguments.append('-ot')
        arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])

        # Output driver is derived from the destination file extension.
        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)
        arguments.append('-of')
        arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))

        options = self.parameterAsString(parameters, self.OPTIONS, context)
        if options:
            arguments.extend(GdalUtils.parseCreationOptions(options))

        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)

        arguments.append(ogrLayer)
        arguments.append(out)

        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/requests/__init__.py | 28 | 1855 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.0.0'
__build__ = 0x020000
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        """Fallback no-op handler for Python < 2.7, where
        logging.NullHandler does not exist."""
        def emit(self, record):
            # Swallow the record: library code must not trigger
            # "No handler found" warnings when the application has not
            # configured logging.
            pass
logging.getLogger(__name__).addHandler(NullHandler())
| apache-2.0 |
danieljaouen/ansible | lib/ansible/modules/network/ios/ios_facts.py | 10 | 17003 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running Cisco IOS
description:
- Collects a base set of device facts from a remote device that
is running IOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- ios_facts:
gather_subset: all
# Collect only the config and default facts
- ios_facts:
gather_subset:
- config
# Do not collect hardware facts
- ios_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: string
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: string
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: string
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: always
type: string
ansible_net_stacked_models:
description: The model names of each device in the stack
returned: when multiple devices are configured in a stack
type: list
ansible_net_stacked_serialnums:
description: The serial numbers of each device in the stack
returned: when multiple devices are configured in a stack
type: list
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_filesystems_info:
description: A hash of all file systems containing info about each file system (e.g. free and total space)
returned: when hardware is configured
type: dict
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: string
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.ios.ios import run_commands
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False)
def run(self, cmd):
return run_commands(self.module, commands=cmd, check_rc=False)
class Default(FactsBase):
COMMANDS = ['show version']
def populate(self):
super(Default, self).populate()
data = self.responses[0]
if data:
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['image'] = self.parse_image(data)
self.facts['hostname'] = self.parse_hostname(data)
self.parse_stacks(data)
def parse_version(self, data):
match = re.search(r'Version (\S+?)(?:,\s|\s)', data)
if match:
return match.group(1)
def parse_hostname(self, data):
match = re.search(r'^(.+) uptime', data, re.M)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'^[Cc]isco (\S+).+bytes of .*memory', data, re.M)
if match:
return match.group(1)
def parse_image(self, data):
match = re.search(r'image file is "(.+)"', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'board ID (\S+)', data)
if match:
return match.group(1)
def parse_stacks(self, data):
match = re.findall(r'^Model [Nn]umber\s+: (\S+)', data, re.M)
if match:
self.facts['stacked_models'] = match
match = re.findall(r'^System [Ss]erial [Nn]umber\s+: (\S+)', data, re.M)
if match:
self.facts['stacked_serialnums'] = match
class Hardware(FactsBase):
COMMANDS = [
'dir',
'show memory statistics'
]
def populate(self):
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.facts['filesystems'] = self.parse_filesystems(data)
self.facts['filesystems_info'] = self.parse_filesystems_info(data)
data = self.responses[1]
if data:
if 'Invalid input detected' in data:
warnings.append('Unable to gather memory statistics')
else:
processor_line = [l for l in data.splitlines()
if 'Processor' in l].pop()
match = re.findall(r'\s(\d+)\s', processor_line)
if match:
self.facts['memtotal_mb'] = int(match[0]) / 1024
self.facts['memfree_mb'] = int(match[3]) / 1024
def parse_filesystems(self, data):
return re.findall(r'^Directory of (\S+)/', data, re.M)
def parse_filesystems_info(self, data):
facts = dict()
fs = ''
for line in data.split('\n'):
match = re.match(r'^Directory of (\S+)/', line)
if match:
fs = match.group(1)
facts[fs] = dict()
continue
match = re.match(r'^(\d+) bytes total \((\d+) bytes free\)', line)
if match:
facts[fs]['spacetotal_kb'] = int(match.group(1)) / 1024
facts[fs]['spacefree_kb'] = int(match.group(2)) / 1024
return facts
class Config(FactsBase):
COMMANDS = ['show running-config']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = [
'show interfaces',
'show ip interface',
'show ipv6 interface',
'show lldp'
]
def populate(self):
super(Interfaces, self).populate()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.responses[0]
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
data = self.responses[1]
if data:
data = self.parse_interfaces(data)
self.populate_ipv4_interfaces(data)
data = self.responses[2]
if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
data = self.responses[3]
lldp_errs = ['Invalid input', 'LLDP is not enabled']
if data and not any(err in data for err in lldp_errs):
neighbors = self.run(['show lldp neighbors detail'])
if neighbors:
self.facts['neighbors'] = self.parse_neighbors(neighbors[0])
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
intf['mtu'] = self.parse_mtu(value)
intf['bandwidth'] = self.parse_bandwidth(value)
intf['mediatype'] = self.parse_mediatype(value)
intf['duplex'] = self.parse_duplex(value)
intf['lineprotocol'] = self.parse_lineprotocol(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv4_interfaces(self, data):
for key, value in data.items():
self.facts['interfaces'][key]['ipv4'] = list()
primary_address = addresses = []
primary_address = re.findall(r'Internet address is (.+)$', value, re.M)
addresses = re.findall(r'Secondary address (.+)$', value, re.M)
if len(primary_address) == 0:
continue
addresses.append(primary_address[0])
for address in addresses:
addr, subnet = address.split("/")
ipv4 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv4')
self.facts['interfaces'][key]['ipv4'].append(ipv4)
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
try:
self.facts['interfaces'][key]['ipv6'] = list()
except KeyError:
self.facts['interfaces'][key] = dict()
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_neighbors(self, neighbors):
facts = dict()
for entry in neighbors.split('------------------------------------------------'):
if entry == '':
continue
intf = self.parse_lldp_intf(entry)
if intf not in facts:
facts[intf] = list()
fact = dict()
fact['host'] = self.parse_lldp_host(entry)
fact['port'] = self.parse_lldp_port(entry)
facts[intf].append(fact)
return facts
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'Hardware is (?:.*), address is (\S+)', data)
if match:
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_mtu(self, data):
match = re.search(r'MTU (\d+)', data)
if match:
return int(match.group(1))
def parse_bandwidth(self, data):
match = re.search(r'BW (\d+)', data)
if match:
return int(match.group(1))
def parse_duplex(self, data):
match = re.search(r'(\w+) Duplex', data, re.M)
if match:
return match.group(1)
def parse_mediatype(self, data):
match = re.search(r'media type is (.+)$', data, re.M)
if match:
return match.group(1)
def parse_type(self, data):
match = re.search(r'Hardware is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lineprotocol(self, data):
match = re.search(r'line protocol is (.+)$', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lldp_intf(self, data):
match = re.search(r'^Local Intf: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_host(self, data):
match = re.search(r'System Name: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_port(self, data):
match = re.search(r'Port id: (.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
global warnings
warnings = list()
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
check_args(module, warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 |
Changaco/oh-mainline | vendor/packages/twisted/twisted/internet/test/test_inotify.py | 18 | 16501 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the inotify wrapper in L{twisted.internet.inotify}.
"""
from twisted.internet import defer, reactor
from twisted.python import filepath, runtime
from twisted.trial import unittest
try:
from twisted.python import _inotify
except ImportError:
inotify = None
else:
from twisted.internet import inotify
class TestINotify(unittest.TestCase):
"""
Define all the tests for the basic functionality exposed by
L{inotify.INotify}.
"""
if not runtime.platform.supportsINotify():
skip = "This platform doesn't support INotify."
def setUp(self):
self.dirname = filepath.FilePath(self.mktemp())
self.dirname.createDirectory()
self.inotify = inotify.INotify()
self.inotify.startReading()
self.addCleanup(self.inotify.loseConnection)
def test_initializationErrors(self):
"""
L{inotify.INotify} emits a C{RuntimeError} when initialized
in an environment that doesn't support inotify as we expect it.
We just try to raise an exception for every possible case in
the for loop in L{inotify.INotify._inotify__init__}.
"""
class FakeINotify:
def init(self):
raise inotify.INotifyError()
self.patch(inotify.INotify, '_inotify', FakeINotify())
self.assertRaises(inotify.INotifyError, inotify.INotify)
def _notificationTest(self, mask, operation, expectedPath=None):
"""
Test notification from some filesystem operation.
@param mask: The event mask to use when setting up the watch.
@param operation: A function which will be called with the
name of a file in the watched directory and which should
trigger the event.
@param expectedPath: Optionally, the name of the path which is
expected to come back in the notification event; this will
also be passed to C{operation} (primarily useful when the
operation is being done to the directory itself, not a
file in it).
@return: A L{Deferred} which fires successfully when the
expected event has been received or fails otherwise.
"""
if expectedPath is None:
expectedPath = self.dirname.child("foo.bar")
notified = defer.Deferred()
def cbNotified((watch, filename, events)):
self.assertEquals(filename, expectedPath)
self.assertTrue(events & mask)
notified.addCallback(cbNotified)
self.inotify.watch(
self.dirname, mask=mask,
callbacks=[lambda *args: notified.callback(args)])
operation(expectedPath)
return notified
def test_access(self):
"""
Reading from a file in a monitored directory sends an
C{inotify.IN_ACCESS} event to the callback.
"""
def operation(path):
path.setContent("foo")
path.getContent()
return self._notificationTest(inotify.IN_ACCESS, operation)
def test_modify(self):
"""
Writing to a file in a monitored directory sends an
C{inotify.IN_MODIFY} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.write('foo')
fObj.close()
return self._notificationTest(inotify.IN_MODIFY, operation)
def test_attrib(self):
"""
Changing the metadata of a a file in a monitored directory
sends an C{inotify.IN_ATTRIB} event to the callback.
"""
def operation(path):
path.touch()
path.touch()
return self._notificationTest(inotify.IN_ATTRIB, operation)
def test_closeWrite(self):
"""
Closing a file which was open for writing in a monitored
directory sends an C{inotify.IN_CLOSE_WRITE} event to the
callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
return self._notificationTest(inotify.IN_CLOSE_WRITE, operation)
def test_closeNoWrite(self):
"""
Closing a file which was open for reading but not writing in a
monitored directory sends an C{inotify.IN_CLOSE_NOWRITE} event
to the callback.
"""
def operation(path):
path.touch()
fObj = path.open("r")
fObj.close()
return self._notificationTest(inotify.IN_CLOSE_NOWRITE, operation)
def test_open(self):
"""
Opening a file in a monitored directory sends an
C{inotify.IN_OPEN} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
return self._notificationTest(inotify.IN_OPEN, operation)
def test_movedFrom(self):
"""
Moving a file out of a monitored directory sends an
C{inotify.IN_MOVED_FROM} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
path.moveTo(filepath.FilePath(self.mktemp()))
return self._notificationTest(inotify.IN_MOVED_FROM, operation)
def test_movedTo(self):
"""
Moving a file into a monitored directory sends an
C{inotify.IN_MOVED_TO} event to the callback.
"""
def operation(path):
p = filepath.FilePath(self.mktemp())
p.touch()
p.moveTo(path)
return self._notificationTest(inotify.IN_MOVED_TO, operation)
def test_create(self):
"""
Creating a file in a monitored directory sends an
C{inotify.IN_CREATE} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
return self._notificationTest(inotify.IN_CREATE, operation)
def test_delete(self):
"""
Deleting a file in a monitored directory sends an
C{inotify.IN_DELETE} event to the callback.
"""
def operation(path):
path.touch()
path.remove()
return self._notificationTest(inotify.IN_DELETE, operation)
def test_deleteSelf(self):
"""
Deleting the monitored directory itself sends an
C{inotify.IN_DELETE_SELF} event to the callback.
"""
def operation(path):
path.remove()
return self._notificationTest(
inotify.IN_DELETE_SELF, operation, expectedPath=self.dirname)
def test_moveSelf(self):
"""
Renaming the monitored directory itself sends an
C{inotify.IN_MOVE_SELF} event to the callback.
"""
def operation(path):
path.moveTo(filepath.FilePath(self.mktemp()))
return self._notificationTest(
inotify.IN_MOVE_SELF, operation, expectedPath=self.dirname)
def test_simpleSubdirectoryAutoAdd(self):
"""
L{inotify.INotify} when initialized with autoAdd==True adds
also adds the created subdirectories to the watchlist.
"""
def _callback(wp, filename, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertTrue(self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_simpleDeleteDirectory(self):
"""
L{inotify.INotify} removes a directory from the watchlist when
it's removed from the filesystem.
"""
calls = []
def _callback(wp, filename, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertTrue(self.inotify._isWatched(subdir))
subdir.remove()
except Exception:
d.errback()
def _eb():
# second call, we have just removed the subdir
try:
self.assertTrue(not self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
if not calls:
# first call, it's the create subdir
calls.append(filename)
reactor.callLater(0, _)
else:
reactor.callLater(0, _eb)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_ignoreDirectory(self):
"""
L{inotify.INotify.ignore} removes a directory from the watchlist
"""
self.inotify.watch(self.dirname, autoAdd=True)
self.assertTrue(self.inotify._isWatched(self.dirname))
self.inotify.ignore(self.dirname)
self.assertFalse(self.inotify._isWatched(self.dirname))
def test_humanReadableMask(self):
"""
L{inotify.humaReadableMask} translates all the possible event
masks to a human readable string.
"""
for mask, value in inotify._FLAG_TO_HUMAN:
self.assertEquals(inotify.humanReadableMask(mask)[0], value)
checkMask = (
inotify.IN_CLOSE_WRITE | inotify.IN_ACCESS | inotify.IN_OPEN)
self.assertEquals(
set(inotify.humanReadableMask(checkMask)),
set(['close_write', 'access', 'open']))
def test_recursiveWatch(self):
"""
L{inotify.INotify.watch} with recursive==True will add all the
subdirectories under the given path to the watchlist.
"""
subdir = self.dirname.child('test')
subdir2 = subdir.child('test2')
subdir3 = subdir2.child('test3')
subdir3.makedirs()
dirs = [subdir, subdir2, subdir3]
self.inotify.watch(self.dirname, recursive=True)
# let's even call this twice so that we test that nothing breaks
self.inotify.watch(self.dirname, recursive=True)
for d in dirs:
self.assertTrue(self.inotify._isWatched(d))
def test_connectionLostError(self):
"""
L{inotify.INotify.connectionLost} if there's a problem while closing
the fd shouldn't raise the exception but should log the error
"""
import os
in_ = inotify.INotify()
os.close(in_._fd)
in_.loseConnection()
self.flushLoggedErrors()
def test_noAutoAddSubdirectory(self):
"""
L{inotify.INotify.watch} with autoAdd==False will stop inotify
from watching subdirectories created under the watched one.
"""
def _callback(wp, fp, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertFalse(self.inotify._isWatched(subdir.path))
d.callback(None)
except Exception:
d.errback()
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=False,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_seriesOfWatchAndIgnore(self):
"""
L{inotify.INotify} will watch a filepath for events even if the same
path is repeatedly added/removed/re-added to the watchpoints.
"""
expectedPath = self.dirname.child("foo.bar2")
expectedPath.touch()
notified = defer.Deferred()
def cbNotified((ignored, filename, events)):
self.assertEquals(filename, expectedPath)
self.assertTrue(events & inotify.IN_DELETE_SELF)
def callIt(*args):
notified.callback(args)
# Watch, ignore, watch again to get into the state being tested.
self.assertTrue(self.inotify.watch(expectedPath, callbacks=[callIt]))
self.inotify.ignore(expectedPath)
self.assertTrue(
self.inotify.watch(
expectedPath, mask=inotify.IN_DELETE_SELF, callbacks=[callIt]))
notified.addCallback(cbNotified)
# Apparently in kernel version < 2.6.25, inofify has a bug in the way
# similar events are coalesced. So, be sure to generate a different
# event here than the touch() at the top of this method might have
# generated.
expectedPath.remove()
return notified
def test_ignoreFilePath(self):
"""
L{inotify.INotify} will ignore a filepath after it has been removed from
the watch list.
"""
expectedPath = self.dirname.child("foo.bar2")
expectedPath.touch()
expectedPath2 = self.dirname.child("foo.bar3")
expectedPath2.touch()
notified = defer.Deferred()
def cbNotified((ignored, filename, events)):
self.assertEquals(filename, expectedPath2)
self.assertTrue(events & inotify.IN_DELETE_SELF)
def callIt(*args):
notified.callback(args)
self.assertTrue(
self.inotify.watch(
expectedPath, inotify.IN_DELETE_SELF, callbacks=[callIt]))
notified.addCallback(cbNotified)
self.assertTrue(
self.inotify.watch(
expectedPath2, inotify.IN_DELETE_SELF, callbacks=[callIt]))
self.inotify.ignore(expectedPath)
expectedPath.remove()
expectedPath2.remove()
return notified
def test_ignoreNonWatchedFile(self):
"""
L{inotify.INotify} will raise KeyError if a non-watched filepath is
ignored.
"""
expectedPath = self.dirname.child("foo.ignored")
expectedPath.touch()
self.assertRaises(KeyError, self.inotify.ignore, expectedPath)
def test_complexSubdirectoryAutoAdd(self):
"""
L{inotify.INotify} with autoAdd==True for a watched path
generates events for every file or directory already present
in a newly created subdirectory under the watched one.
This tests that we solve a race condition in inotify even though
we may generate duplicate events.
"""
calls = set()
def _callback(wp, filename, mask):
calls.add(filename)
if len(calls) == 6:
try:
self.assertTrue(self.inotify._isWatched(subdir))
self.assertTrue(self.inotify._isWatched(subdir2))
self.assertTrue(self.inotify._isWatched(subdir3))
created = someFiles + [subdir, subdir2, subdir3]
self.assertEquals(len(calls), len(created))
self.assertEquals(calls, set(created))
except Exception:
d.errback()
else:
d.callback(None)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
subdir2 = subdir.child('test2')
subdir3 = subdir2.child('test3')
d = defer.Deferred()
subdir3.makedirs()
someFiles = [subdir.child('file1.dat'),
subdir2.child('file2.dat'),
subdir3.child('file3.dat')]
# Add some files in pretty much all the directories so that we
# see that we process all of them.
for i, filename in enumerate(someFiles):
filename.setContent(filename.path)
return d
| agpl-3.0 |
chrisidefix/devide.johannes | extra/soappy-cvp/SOAPpy/wstools/XMLSchema.py | 10 | 101410 | # Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id: XMLSchema.py,v 1.53 2005/02/18 13:50:14 warnes Exp $"
import types, weakref, urllib, sys
from threading import RLock
from Namespaces import XMLNS
from Utility import DOM, DOMException, Collection, SplitQName
from StringIO import StringIO
def GetSchema(component):
"""convience function for finding the parent XMLSchema instance.
"""
parent = component
while not isinstance(parent, XMLSchema):
parent = parent._parent()
return parent
class SchemaReader:
"""A SchemaReader creates XMLSchema objects from urls and xml data.
"""
def __init__(self, domReader=None, base_url=None):
"""domReader -- class must implement DOMAdapterInterface
base_url -- base url string
"""
self.__base_url = base_url
self.__readerClass = domReader
if not self.__readerClass:
self.__readerClass = DOMAdapter
self._includes = {}
self._imports = {}
def __setImports(self, schema):
"""Add dictionary of imports to schema instance.
schema -- XMLSchema instance
"""
for ns,val in schema.imports.items():
if self._imports.has_key(ns):
schema.addImportSchema(self._imports[ns])
def __setIncludes(self, schema):
"""Add dictionary of includes to schema instance.
schema -- XMLSchema instance
"""
for schemaLocation, val in schema.includes.items():
if self._includes.has_key(schemaLocation):
schema.addIncludeSchema(self._imports[schemaLocation])
def addSchemaByLocation(self, location, schema):
"""provide reader with schema document for a location.
"""
self._includes[location] = schema
def addSchemaByNamespace(self, schema):
"""provide reader with schema document for a targetNamespace.
"""
self._imports[schema.targetNamespace] = schema
def loadFromNode(self, parent, element):
"""element -- DOM node or document
parent -- WSDLAdapter instance
"""
reader = self.__readerClass(element)
schema = XMLSchema(parent)
#HACK to keep a reference
schema.wsdl = parent
schema.setBaseUrl(self.__base_url)
schema.load(reader)
return schema
def loadFromStream(self, file, url=None):
"""Return an XMLSchema instance loaded from a file object.
file -- file object
url -- base location for resolving imports/includes.
"""
reader = self.__readerClass()
reader.loadDocument(file)
schema = XMLSchema()
if url is not None:
schema.setBaseUrl(url)
schema.load(reader)
self.__setIncludes(schema)
self.__setImports(schema)
return schema
def loadFromString(self, data):
"""Return an XMLSchema instance loaded from an XML string.
data -- XML string
"""
return self.loadFromStream(StringIO(data))
def loadFromURL(self, url):
"""Return an XMLSchema instance loaded from the given url.
url -- URL to dereference
"""
reader = self.__readerClass()
if self.__base_url:
url = urllib.basejoin(self.__base_url,url)
reader.loadFromURL(url)
schema = XMLSchema()
schema.setBaseUrl(url)
schema.load(reader)
self.__setIncludes(schema)
self.__setImports(schema)
return schema
def loadFromFile(self, filename):
"""Return an XMLSchema instance loaded from the given file.
filename -- name of file to open
"""
if self.__base_url:
filename = urllib.basejoin(self.__base_url,filename)
file = open(filename, 'rb')
try:
schema = self.loadFromStream(file, filename)
finally:
file.close()
return schema
class SchemaError(Exception):
pass
###########################
# DOM Utility Adapters
##########################
class DOMAdapterInterface:
def hasattr(self, attr, ns=None):
"""return true if node has attribute
attr -- attribute to check for
ns -- namespace of attribute, by default None
"""
raise NotImplementedError, 'adapter method not implemented'
def getContentList(self, *contents):
"""returns an ordered list of child nodes
*contents -- list of node names to return
"""
raise NotImplementedError, 'adapter method not implemented'
def setAttributeDictionary(self, attributes):
"""set attribute dictionary
"""
raise NotImplementedError, 'adapter method not implemented'
def getAttributeDictionary(self):
"""returns a dict of node's attributes
"""
raise NotImplementedError, 'adapter method not implemented'
def getNamespace(self, prefix):
"""returns namespace referenced by prefix.
"""
raise NotImplementedError, 'adapter method not implemented'
def getTagName(self):
"""returns tagName of node
"""
raise NotImplementedError, 'adapter method not implemented'
def getParentNode(self):
"""returns parent element in DOMAdapter or None
"""
raise NotImplementedError, 'adapter method not implemented'
def loadDocument(self, file):
"""load a Document from a file object
file --
"""
raise NotImplementedError, 'adapter method not implemented'
def loadFromURL(self, url):
"""load a Document from an url
url -- URL to dereference
"""
raise NotImplementedError, 'adapter method not implemented'
class DOMAdapter(DOMAdapterInterface):
    """Adapter for ZSI.Utility.DOM.

    Wraps a DOM node (or Document) and implements the
    DOMAdapterInterface contract on top of the DOM helper module.
    """
    def __init__(self, node=None):
        """Reset all instance variables.
        node -- DOM document, element node, or None
        """
        # if handed a Document, unwrap it to its root element
        if hasattr(node, 'documentElement'):
            self.__node = node.documentElement
        else:
            self.__node = node
        self.__attributes = None  # lazily built by setAttributeDictionary

    def hasattr(self, attr, ns=None):
        """attr -- attribute name
        ns -- optional namespace URI, None means unprefixed attribute.
        """
        if not self.__attributes:
            self.setAttributeDictionary()
        if ns:
            # membership test replaces the Python-2-only has_key()
            return attr in self.__attributes.get(ns, {})
        return attr in self.__attributes

    def getContentList(self, *contents):
        """Return child elements whose local tag name is listed in
        *contents*, each wrapped in a DOMAdapter."""
        nodes = []
        ELEMENT_NODE = self.__node.ELEMENT_NODE
        for child in DOM.getElements(self.__node, None):
            if child.nodeType == ELEMENT_NODE and\
               SplitQName(child.tagName)[1] in contents:
                nodes.append(child)
        # list comprehension keeps Python 2 semantics (map() would
        # return an iterator, not a list, on Python 3)
        return [self.__class__(n) for n in nodes]

    def setAttributeDictionary(self):
        """Build the {name: value} attribute cache from the DOM node."""
        self.__attributes = {}
        for v in self.__node._attrs.values():
            self.__attributes[v.nodeName] = v.nodeValue

    def getAttributeDictionary(self):
        """Return (building it on first use) the node's attribute dict."""
        if not self.__attributes:
            self.setAttributeDictionary()
        return self.__attributes

    def getTagName(self):
        """Return the tagName of the wrapped node."""
        return self.__node.tagName

    def getParentNode(self):
        """Return the parent element wrapped in a DOMAdapter, or None."""
        if self.__node.parentNode.nodeType == self.__node.ELEMENT_NODE:
            return DOMAdapter(self.__node.parentNode)
        return None

    def getNamespace(self, prefix):
        """prefix -- dereference namespace prefix in node's context.
        Ascends parent nodes until found.
        """
        namespace = None
        if prefix == 'xmlns':
            namespace = DOM.findDefaultNS(prefix, self.__node)
        else:
            try:
                namespace = DOM.findNamespaceURI(prefix, self.__node)
            except DOMException as ex:
                # the 'xml' prefix is implicitly bound and need not be
                # declared; anything else undeclared is an error
                if prefix != 'xml':
                    raise SchemaError('%s namespace not declared for %s'
                                      % (prefix, self.__node._get_tagName()))
                namespace = XMLNS.XML
        return namespace

    def loadDocument(self, file):
        """Load and wrap a Document parsed from a file object."""
        self.__node = DOM.loadDocument(file)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement

    def loadFromURL(self, url):
        """Load and wrap a Document dereferenced from a URL."""
        self.__node = DOM.loadFromURL(url)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
class XMLBase:
    """Base providing a thread-safe, indented __str__ dump of instance
    state.  The class variables below hold string-indentation state
    shared across all subclasses.
    """
    tag = None          # XML tag name; set by subclasses
    __indent = 0        # shared indentation depth for nested dumps
    __rlock = RLock()   # guards __indent across threads
    def __str__(self):
        # serialize access so the shared indent level stays consistent
        XMLBase.__rlock.acquire()
        XMLBase.__indent += 1
        tmp = "<" + str(self.__class__) + '>\n'
        for k,v in self.__dict__.items():
            tmp += "%s* %s = %s\n" %(XMLBase.__indent*' ', k, v)
        XMLBase.__indent -= 1
        XMLBase.__rlock.release()
        return tmp
"""Marker Interface: one can determine something about an instance's
properties by using the provided convenience functions.
"""
class DefinitionMarker:
    """Mixin marker identifying schema definition components."""
class DeclarationMarker:
    """Mixin marker identifying schema declaration components."""
class AttributeMarker:
    """Mixin marker identifying attribute components."""
class AttributeGroupMarker:
    """Mixin marker identifying attribute-group components."""
class WildCardMarker:
    """Mixin marker identifying wildcard components."""
class ElementMarker:
    """marker for elements
    """
    # (docstring previously said "marker for wildcards" -- a copy/paste
    # slip from WildCardMarker; this class marks element components)
    pass
class ReferenceMarker:
    """Mixin marker identifying reference components."""
class ModelGroupMarker:
    """Mixin marker identifying model-group components."""
class AllMarker(ModelGroupMarker):
    """Mixin marker identifying the <all> model group."""
class ChoiceMarker(ModelGroupMarker):
    """Mixin marker identifying the <choice> model group."""
class SequenceMarker(ModelGroupMarker):
    """Mixin marker identifying the <sequence> model group."""
class ExtensionMarker:
    """Mixin marker identifying extension components."""
class RestrictionMarker:
    """Mixin marker identifying restriction components.

    `facets` lists the XSD constraining-facet element names a
    restriction may contain.
    """
    facets = [
        'enumeration', 'length', 'maxExclusive', 'maxInclusive',
        'maxLength', 'minExclusive', 'minInclusive', 'minLength',
        'pattern', 'fractionDigits', 'totalDigits', 'whiteSpace',
    ]
class SimpleMarker:
    """Mixin marker identifying simple-type components."""
class ListMarker:
    """Mixin marker identifying simple-type <list> components."""
class UnionMarker:
    """Mixin marker identifying simple-type <union> components."""
class ComplexMarker:
    """Mixin marker identifying complex-type components."""
class LocalMarker:
    """marker for locally scoped components
    """
    # (docstring previously duplicated ComplexMarker's "marker for
    # complex type information"; this marker distinguishes local from
    # global declarations -- see LocalAttributeDeclaration)
    pass
class MarkerInterface:
    """Convenience predicates: each reports whether the instance mixes
    in the corresponding *Marker class."""
    def isDefinition(self):
        return isinstance(self, DefinitionMarker)
    def isDeclaration(self):
        return isinstance(self, DeclarationMarker)
    def isAttribute(self):
        return isinstance(self, AttributeMarker)
    def isAttributeGroup(self):
        return isinstance(self, AttributeGroupMarker)
    def isElement(self):
        return isinstance(self, ElementMarker)
    def isReference(self):
        return isinstance(self, ReferenceMarker)
    def isWildCard(self):
        return isinstance(self, WildCardMarker)
    def isModelGroup(self):
        return isinstance(self, ModelGroupMarker)
    def isAll(self):
        return isinstance(self, AllMarker)
    def isChoice(self):
        return isinstance(self, ChoiceMarker)
    def isSequence(self):
        return isinstance(self, SequenceMarker)
    def isExtension(self):
        return isinstance(self, ExtensionMarker)
    def isRestriction(self):
        return isinstance(self, RestrictionMarker)
    def isSimple(self):
        return isinstance(self, SimpleMarker)
    def isComplex(self):
        return isinstance(self, ComplexMarker)
    def isLocal(self):
        return isinstance(self, LocalMarker)
    def isList(self):
        return isinstance(self, ListMarker)
    def isUnion(self):
        return isinstance(self, UnionMarker)
##########################################################
# Schema Components
#########################################################
class XMLSchemaComponent(XMLBase, MarkerInterface):
    """Base class for all schema components.

    class variables:
        required -- list of required attributes
        attributes -- dict of default attribute values, including None.
            Value can be a function for runtime dependencies.
        contents -- dict of namespace keyed content lists.
            'xsd' content of xsd namespace.
        xmlns_key -- key for declared xmlns namespace.
        xmlns -- xmlns is special prefix for namespace dictionary
        xml -- special xml prefix for xml namespace.
    """
    required = []
    attributes = {}
    contents = {}
    xmlns_key = ''
    xmlns = 'xmlns'
    xml = 'xml'
def __init__(self, parent=None):
    """parent -- parent instance
    instance variables:
        attributes -- dictionary of node's attributes
    """
    self.attributes = None
    # hold the parent via weakref to avoid reference cycles up the tree
    self._parent = parent
    if self._parent:
        self._parent = weakref.ref(parent)
    # sanity-check that subclasses declared the class variables with the
    # same container types as this base (list/dict)
    if not self.__class__ == XMLSchemaComponent\
       and not (type(self.__class__.required) == type(XMLSchemaComponent.required)\
       and type(self.__class__.attributes) == type(XMLSchemaComponent.attributes)\
       and type(self.__class__.contents) == type(XMLSchemaComponent.contents)):
        raise RuntimeError, 'Bad type for a class variable in %s' %self.__class__
def getItemTrace(self):
    """Returns a node trace up to the <schema> item, rendered as a
    string of nested start tags (outermost first).
    """
    item, path, name, ref = self, [], 'name', 'ref'
    # walk parents until the enclosing schema (or WSDL adapter) is reached
    while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter):
        attr = item.getAttribute(name)
        if attr is None:
            # no name: fall back to ref, then to the bare tag
            attr = item.getAttribute(ref)
            if attr is None: path.append('<%s>' %(item.tag))
            else: path.append('<%s ref="%s">' %(item.tag, attr))
        else:
            path.append('<%s name="%s">' %(item.tag,attr))
        item = item._parent()
    try:
        tns = item.getTargetNamespace()
    except:
        # schema may legitimately lack a targetNamespace
        tns = ''
    path.append('<%s targetNamespace="%s">' %(item.tag, tns))
    path.reverse()
    return ''.join(path)
def getTargetNamespace(self):
    """Return targetNamespace, ascending the parent chain until an
    ancestor declares one."""
    parent = self
    targetNamespace = 'targetNamespace'
    tns = self.attributes.get(targetNamespace)
    while not tns:
        # _parent is a weakref; call it to obtain the referent
        parent = parent._parent()
        tns = parent.attributes.get(targetNamespace)
    return tns
# The following five methods delegate QName resolution to
# getQNameAttribute, each targeting a different global collection of
# the owning Schema instance.

def getAttributeDeclaration(self, attribute):
    """attribute -- attribute with a QName value (eg. type).
    Resolves against the attr_decl collection in the parent Schema.
    """
    return self.getQNameAttribute('attr_decl', attribute)

def getAttributeGroup(self, attribute):
    """attribute -- attribute with a QName value (eg. type).
    Resolves against the attr_groups collection in the parent Schema.
    """
    return self.getQNameAttribute('attr_groups', attribute)

def getTypeDefinition(self, attribute):
    """attribute -- attribute with a QName value (eg. type).
    Resolves against the types collection in the parent Schema.
    """
    return self.getQNameAttribute('types', attribute)

def getElementDeclaration(self, attribute):
    """attribute -- attribute with a QName value (eg. element).
    Resolves against the elements collection in the parent Schema.
    """
    return self.getQNameAttribute('elements', attribute)

def getModelGroup(self, attribute):
    """attribute -- attribute with a QName value (eg. ref).
    Resolves against the model_groups collection in the parent Schema.
    """
    return self.getQNameAttribute('model_groups', attribute)
def getQNameAttribute(self, collection, attribute):
    """returns object instance representing QName --> (namespace,name),
    or if does not exist return None.
    attribute -- an information item attribute, with a QName value.
    collection -- collection in parent Schema instance to search.
    """
    obj = None
    # attribute values holding QNames were turned into
    # TypeDescriptionComponent instances by setAttributes
    tdc = self.attributes.get(attribute)
    if tdc:
        parent = GetSchema(self)
        targetNamespace = tdc.getTargetNamespace()
        if parent.targetNamespace == targetNamespace:
            # same namespace: look in this schema's own collection
            item = tdc.getName()
            try:
                obj = getattr(parent, collection)[item]
            except KeyError, ex:
                raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                    %(targetNamespace, collection, item)
        elif parent.imports.has_key(targetNamespace):
            # foreign namespace: resolve via the imported schema
            schema = parent.imports[targetNamespace].getSchema()
            item = tdc.getName()
            try:
                obj = getattr(schema, collection)[item]
            except KeyError, ex:
                raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                    %(targetNamespace, collection, item)
    return obj
def getXMLNS(self, prefix=None):
    """dereference prefix (or by default xmlns), returns namespace.
    Ascends the parent chain until the prefix is found; raises
    SchemaError if the document root is reached without a match.
    """
    if prefix == XMLSchemaComponent.xml:
        # the 'xml' prefix is implicitly bound
        return XMLNS.XML
    parent = self
    ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
        XMLSchemaComponent.xmlns_key)
    while not ns:
        parent = parent._parent()
        ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
            XMLSchemaComponent.xmlns_key)
        if not ns and isinstance(parent, WSDLToolsAdapter):
            # reached the WSDL document root without finding the prefix
            raise SchemaError, 'unknown prefix %s' %prefix
    return ns

def getAttribute(self, attribute):
    """return requested attribute value or None
    """
    return self.attributes.get(attribute)
def setAttributes(self, node):
    """Sets up attribute dictionary, checks for required attributes and
    sets default attribute values. attr is for default attribute values
    determined at runtime.
    structure of attributes dictionary
        ['xmlns'][xmlns_key] -- xmlns namespace
        ['xmlns'][prefix] -- declared namespace prefix
        [namespace][prefix] -- attributes declared in a namespace
        [attribute] -- attributes w/o prefix, default namespaces do
            not directly apply to attributes, ie Name can't collide
            with QName.
    """
    self.attributes = {XMLSchemaComponent.xmlns:{}}
    for k,v in node.getAttributeDictionary().items():
        prefix,value = SplitQName(k)
        if value == XMLSchemaComponent.xmlns:
            # xmlns declaration; filed under '' when no prefix given
            self.attributes[value][prefix or XMLSchemaComponent.xmlns_key] = v
        elif prefix:
            # namespace-qualified attribute
            ns = node.getNamespace(prefix)
            if not ns:
                raise SchemaError, 'no namespace for attribute prefix %s'\
                    %prefix
            if not self.attributes.has_key(ns):
                self.attributes[ns] = {}
            elif self.attributes[ns].has_key(value):
                raise SchemaError, 'attribute %s declared multiple times in %s'\
                    %(value, ns)
            self.attributes[ns][value] = v
        elif not self.attributes.has_key(value):
            # plain, unprefixed attribute
            self.attributes[value] = v
        else:
            raise SchemaError, 'attribute %s declared multiple times' %value
    if not isinstance(self, WSDLToolsAdapter):
        self.__checkAttributes()
    self.__setAttributeDefaults()
    # resolve QName-valued attributes into TypeDescriptionComponent
    # (namespace, name) pairs
    for k in ['type', 'element', 'base', 'ref', 'substitutionGroup', 'itemType']:
        if self.attributes.has_key(k):
            prefix, value = SplitQName(self.attributes.get(k))
            self.attributes[k] = \
                TypeDescriptionComponent((self.getXMLNS(prefix), value))
    # Union, memberTypes is a whitespace separated list of QNames
    for k in ['memberTypes']:
        if self.attributes.has_key(k):
            qnames = self.attributes[k]
            self.attributes[k] = []
            for qname in qnames.split():
                prefix, value = SplitQName(qname)
                self.attributes['memberTypes'].append(\
                    TypeDescriptionComponent(\
                        (self.getXMLNS(prefix), value)))
def getContents(self, node):
    """retrieve the xsd child elements declared legal for this class
    """
    return node.getContentList(*self.__class__.contents['xsd'])

def __setAttributeDefaults(self):
    """Looks for default values for unset attributes.  If
    class variable representing attribute is None, then
    it must be defined as an instance variable.
    """
    for k,v in self.__class__.attributes.items():
        if v and not self.attributes.has_key(k):
            # a callable default is resolved at runtime against self
            if isinstance(v, types.FunctionType):
                self.attributes[k] = v(self)
            else:
                self.attributes[k] = v

def __checkAttributes(self):
    """Checks that required attributes have been defined,
    attributes w/default cannot be required.  Checks
    all defined attributes are legal, attribute
    references are not subject to this test.
    """
    for a in self.__class__.required:
        if not self.attributes.has_key(a):
            raise SchemaError,\
                'class instance %s, missing required attribute %s'\
                %(self.__class__, a)
    # any attribute not declared for the class is illegal, except the
    # xmlns/xml groups and attribute references
    for a in self.attributes.keys():
        if (a not in (XMLSchemaComponent.xmlns, XMLNS.XML)) and\
           (a not in self.__class__.attributes.keys()) and not\
           (self.isAttribute() and self.isReference()):
            raise SchemaError, '%s, unknown attribute(%s,%s)' \
                %(self.getItemTrace(), a, self.attributes[a])
class WSDLToolsAdapter(XMLSchemaComponent):
    """WSDL Adapter to grab the attributes from the wsdl document node.
    Acts as the root parent for schemas embedded in a WSDL <types>.
    """
    attributes = {'name':None, 'targetNamespace':None}
    tag = 'definitions'

    def __init__(self, wsdl):
        # wsdl -- WSDLTools.WSDL instance; its document supplies attributes
        XMLSchemaComponent.__init__(self, parent=wsdl)
        self.setAttributes(DOMAdapter(wsdl.document))

    def getImportSchemas(self):
        """returns WSDLTools.WSDL types Collection
        """
        return self._parent().types
class Notation(XMLSchemaComponent):
"""<notation>
parent:
schema
attributes:
id -- ID
name -- NCName, Required
public -- token, Required
system -- anyURI
contents:
annotation?
"""
required = ['name', 'public']
attributes = {'id':None, 'name':None, 'public':None, 'system':None}
contents = {'xsd':('annotation')}
tag = 'notation'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class Annotation(XMLSchemaComponent):
    """<annotation>
    parent:
        all,any,anyAttribute,attribute,attributeGroup,choice,complexContent,
        complexType,element,extension,field,group,import,include,key,keyref,
        list,notation,redefine,restriction,schema,selector,simpleContent,
        simpleType,union,unique
    attributes:
        id -- ID
    contents:
        (documentation | appinfo)*
    """
    attributes = {'id':None}
    contents = {'xsd':('documentation', 'appinfo')}
    tag = 'annotation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        # both child kinds are currently skipped, so self.content always
        # ends up as an empty tuple; kept for interface stability
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'documentation':
                # documentation skipped (not stored)
                continue
            elif component == 'appinfo':
                # appinfo skipped (not stored)
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Documentation(XMLSchemaComponent):
    """<documentation>
    parent:
        annotation
    attributes:
        source, anyURI
        xml:lang, language
    contents:
        mixed, any
    """
    attributes = {'source':None, 'xml:lang':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'documentation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        # both child kinds are currently skipped, so self.content always
        # ends up as an empty tuple; kept for interface stability
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'mixed':
                # mixed content skipped (not stored)
                continue
            elif component == 'any':
                # any content skipped (not stored)
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Appinfo(XMLSchemaComponent):
    """<appinfo>
    parent:
        annotation
    attributes:
        source, anyURI
    contents:
        mixed, any
    """
    # NOTE(review): 'anyURI' looks like a type name accidentally used as
    # an attribute key (XSD defines only 'source' on <appinfo>) --
    # confirm before changing, since __checkAttributes consults this dict.
    attributes = {'source':None, 'anyURI':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'appinfo'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        # both child kinds are currently skipped, so self.content always
        # ends up as an empty tuple; kept for interface stability
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'mixed':
                # mixed content skipped (not stored)
                continue
            elif component == 'any':
                # any content skipped (not stored)
                continue
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class XMLSchemaFake:
    """Temporary stand-in for a real XMLSchema, for the benefit of WSDL
    until the real thing works."""
    def __init__(self, element):
        self.element = element
        self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
class XMLSchema(XMLSchemaComponent):
    """A schema is a collection of schema components derived from one
    or more schema documents, that is, one or more <schema> element
    information items. It represents the abstract notion of a schema
    rather than a single schema document (or other representation).
    <schema>
    parent:
        ROOT
    attributes:
        id -- ID
        version -- token
        xml:lang -- language
        targetNamespace -- anyURI
        attributeFormDefault -- 'qualified' | 'unqualified', 'unqualified'
        elementFormDefault -- 'qualified' | 'unqualified', 'unqualified'
        blockDefault -- '#all' | list of
            ('substitution | 'extension' | 'restriction')
        finalDefault -- '#all' | list of
            ('extension' | 'restriction' | 'list' | 'union')
    contents:
        ((include | import | redefine | annotation)*,
         (attribute, attributeGroup, complexType, element, group,
          notation, simpleType)*, annotation*)*
        attributes -- schema attributes
        imports -- import statements
        includes -- include statements
        redefines --
        types -- global simpleType, complexType definitions
        elements -- global element declarations
        attr_decl -- global attribute declarations
        attr_groups -- attribute Groups
        model_groups -- model Groups
        notations -- global notations
    """
    attributes = {'id':None,
        'version':None,
        'xml:lang':None,
        'targetNamespace':None,
        'attributeFormDefault':'unqualified',
        'elementFormDefault':'unqualified',
        'blockDefault':None,
        'finalDefault':None}
    # 'annotation' appears twice: once for the leading group, once for
    # trailing annotations -- harmless for membership testing
    contents = {'xsd':('include', 'import', 'redefine', 'annotation', 'attribute',\
        'attributeGroup', 'complexType', 'element', 'group',\
        'notation', 'simpleType', 'annotation')}
    empty_namespace = ''
    tag = 'schema'
def __init__(self, parent=None):
    """parent --
    instance variables:
        targetNamespace -- schema's declared targetNamespace, or empty string.
        _imported_schemas -- namespace keyed dict of schema dependencies, if
            a schema is provided instance will not resolve import statement.
        _included_schemas -- schemaLocation keyed dict of component schemas,
            if schema is provided instance will not resolve include statement.
        _base_url -- needed for relative URLs support, only works with URLs
            relative to initial document.
        includes -- collection of include statements
        imports -- collection of import statements
        elements -- collection of global element declarations
        types -- collection of global type definitions
        attr_decl -- collection of global attribute declarations
        attr_groups -- collection of global attribute group definitions
        model_groups -- collection of model group definitions
        notations -- collection of notations
    """
    self.targetNamespace = None
    XMLSchemaComponent.__init__(self, parent)
    # key extractors for the Collections below
    f = lambda k: k.attributes['name']              # globals keyed by name
    ns = lambda k: k.attributes['namespace']        # imports keyed by namespace
    sl = lambda k: k.attributes['schemaLocation']   # includes keyed by location
    self.includes = Collection(self, key=sl)
    self.imports = Collection(self, key=ns)
    self.elements = Collection(self, key=f)
    self.types = Collection(self, key=f)
    self.attr_decl = Collection(self, key=f)
    self.attr_groups = Collection(self, key=f)
    self.model_groups = Collection(self, key=f)
    self.notations = Collection(self, key=f)
    self._imported_schemas = {}
    self._included_schemas = {}
    self._base_url = None
def addImportSchema(self, schema):
    """for resolving import statements in Schema instance
    schema -- schema instance
    _imported_schemas
    """
    if not isinstance(schema, XMLSchema):
        raise TypeError, 'expecting a Schema instance'
    # an imported schema must live in a different targetNamespace
    if schema.targetNamespace != self.targetNamespace:
        self._imported_schemas[schema.targetNamespace] = schema
    else:
        raise SchemaError, 'import schema bad targetNamespace'
def addIncludeSchema(self, schemaLocation, schema):
    """for resolving include statements in Schema instance
    schemaLocation -- schema location
    schema -- schema instance
    _included_schemas
    """
    if not isinstance(schema, XMLSchema):
        raise TypeError, 'expecting a Schema instance'
    # an included schema must share this schema's targetNamespace
    # (or declare none at all)
    if not schema.targetNamespace or\
       schema.targetNamespace == self.targetNamespace:
        self._included_schemas[schemaLocation] = schema
    else:
        raise SchemaError, 'include schema bad targetNamespace'
def setImportSchemas(self, schema_dict):
    """set the import schema dictionary, which is used to
    reference dependent schemas.
    """
    self._imported_schemas = schema_dict

def getImportSchemas(self):
    """get the import schema dictionary, which is used to
    reference dependent schemas.
    """
    return self._imported_schemas

def getSchemaNamespacesToImport(self):
    """returns tuple of namespaces the schema instance has declared
    itself to be dependent upon.
    """
    # NOTE(review): this returns the *include* keys, which are
    # schemaLocation URIs, not import namespaces -- surprising given
    # the method name; confirm against callers before changing.
    return tuple(self.includes.keys())
def setIncludeSchemas(self, schema_dict):
    """set the include schema dictionary, which is keyed with
    schemaLocation (uri).
    This is a means of providing
    schemas to the current schema for content inclusion.
    """
    self._included_schemas = schema_dict

def getIncludeSchemas(self):
    """get the include schema dictionary, which is keyed with
    schemaLocation (uri).
    """
    return self._included_schemas

def getBaseUrl(self):
    """get base url, used for normalizing all relative uri's
    """
    return self._base_url

def setBaseUrl(self, url):
    """set base url, used for normalizing all relative uri's
    """
    self._base_url = url
def getElementFormDefault(self):
    """return elementFormDefault attribute ('qualified'|'unqualified')
    """
    return self.attributes.get('elementFormDefault')

def isElementFormDefaultQualified(self):
    # convenience predicate over getElementFormDefault
    return self.attributes.get('elementFormDefault') == 'qualified'

def getAttributeFormDefault(self):
    """return attributeFormDefault attribute ('qualified'|'unqualified')
    """
    return self.attributes.get('attributeFormDefault')

def getBlockDefault(self):
    """return blockDefault attribute
    """
    return self.attributes.get('blockDefault')

def getFinalDefault(self):
    """return finalDefault attribute
    """
    return self.attributes.get('finalDefault')
def load(self, node):
    """Parse a DOMAdapter-wrapped <schema> node into this instance:
    attributes first, then contents in the three groups the XSD
    grammar allows (directives, global components, trailing
    annotations), repeated until the children are exhausted.
    """
    # when <schema> is embedded in a WSDL <types> element, inherit any
    # xmlns declarations from the parent node that the schema itself
    # does not redeclare
    pnode = node.getParentNode()
    if pnode:
        pname = SplitQName(pnode.getTagName())[1]
        if pname == 'types':
            attributes = {}
            self.setAttributes(pnode)
            attributes.update(self.attributes)
            self.setAttributes(node)
            for k,v in attributes['xmlns'].items():
                if not self.attributes['xmlns'].has_key(k):
                    self.attributes['xmlns'][k] = v
        else:
            self.setAttributes(node)
    else:
        self.setAttributes(node)
    self.targetNamespace = self.getTargetNamespace()
    contents = self.getContents(node)
    indx = 0
    num = len(contents)
    while indx < num:
        # group 1: (include | import | redefine | annotation)*
        while indx < num:
            node = contents[indx]
            component = SplitQName(node.getTagName())[1]
            if component == 'include':
                tp = self.__class__.Include(self)
                tp.fromDom(node)
                self.includes[tp.attributes['schemaLocation']] = tp
                schema = tp.getSchema()
                if schema.targetNamespace and \
                   schema.targetNamespace != self.targetNamespace:
                    raise SchemaError, 'included schema bad targetNamespace'
                # merge the included schema's globals into this schema,
                # reparenting each component via weakref
                for collection in ['imports','elements','types',\
                    'attr_decl','attr_groups','model_groups','notations']:
                    for k,v in getattr(schema,collection).items():
                        if not getattr(self,collection).has_key(k):
                            v._parent = weakref.ref(self)
                            getattr(self,collection)[k] = v
            elif component == 'import':
                tp = self.__class__.Import(self)
                tp.fromDom(node)
                import_ns = tp.getAttribute('namespace')
                if import_ns:
                    if import_ns == self.targetNamespace:
                        raise SchemaError,\
                            'import and schema have same targetNamespace'
                    self.imports[import_ns] = tp
                else:
                    # namespace-less import filed under the empty key
                    self.imports[self.__class__.empty_namespace] = tp
                if not self.getImportSchemas().has_key(import_ns) and\
                   tp.getAttribute('schemaLocation'):
                    self.addImportSchema(tp.getSchema())
            elif component == 'redefine':
                # redefine not supported; silently skipped
                pass
            elif component == 'annotation':
                # annotation not stored; silently skipped
                pass
            else:
                break
            indx += 1
        # group 2: (attribute, attributeGroup, complexType, element,
        #           group, notation, simpleType)*, annotation*)*
        while indx < num:
            node = contents[indx]
            component = SplitQName(node.getTagName())[1]
            if component == 'attribute':
                tp = AttributeDeclaration(self)
                tp.fromDom(node)
                self.attr_decl[tp.getAttribute('name')] = tp
            elif component == 'attributeGroup':
                tp = AttributeGroupDefinition(self)
                tp.fromDom(node)
                self.attr_groups[tp.getAttribute('name')] = tp
            elif component == 'complexType':
                tp = ComplexType(self)
                tp.fromDom(node)
                self.types[tp.getAttribute('name')] = tp
            elif component == 'element':
                tp = ElementDeclaration(self)
                tp.fromDom(node)
                self.elements[tp.getAttribute('name')] = tp
            elif component == 'group':
                tp = ModelGroupDefinition(self)
                tp.fromDom(node)
                self.model_groups[tp.getAttribute('name')] = tp
            elif component == 'notation':
                tp = Notation(self)
                tp.fromDom(node)
                self.notations[tp.getAttribute('name')] = tp
            elif component == 'simpleType':
                tp = SimpleType(self)
                tp.fromDom(node)
                self.types[tp.getAttribute('name')] = tp
            else:
                break
            indx += 1
        # group 3: trailing annotation*
        while indx < num:
            node = contents[indx]
            component = SplitQName(node.getTagName())[1]
            if component == 'annotation':
                # trailing annotations skipped
                pass
            else:
                break
            indx += 1
class Import(XMLSchemaComponent):
    """<import>
    parent:
        schema
    attributes:
        id -- ID
        namespace -- anyURI
        schemaLocation -- anyURI
    contents:
        annotation?
    """
    attributes = {'id':None,
        'namespace':None,
        'schemaLocation':None}
    contents = {'xsd':['annotation']}
    tag = 'import'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self._schema = None   # hard reference to the resolved schema, set lazily

    def fromDom(self, node):
        # populate attributes, reject self-imports, pick up annotation
        self.setAttributes(node)
        contents = self.getContents(node)
        if self.attributes['namespace'] == self.getTargetNamespace():
            raise SchemaError, 'namespace of schema and import match'
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

    def getSchema(self):
        """if schema is not defined, first look for a Schema class instance
        in parent Schema.  Else if not defined resolve schemaLocation
        and create a new Schema class instance, and keep a hard reference.
        """
        if not self._schema:
            ns = self.attributes['namespace']
            # consult the parent schema's cache first, then the
            # grandparent's (schema nested in WSDL types)
            schema = self._parent().getImportSchemas().get(ns)
            if not schema and self._parent()._parent:
                schema = self._parent()._parent().getImportSchemas().get(ns)
            if not schema:
                url = self.attributes.get('schemaLocation')
                if not url:
                    raise SchemaError, 'namespace(%s) is unknown' %ns
                base_url = self._parent().getBaseUrl()
                reader = SchemaReader(base_url=base_url)
                reader._imports = self._parent().getImportSchemas()
                reader._includes = self._parent().getIncludeSchemas()
                self._schema = reader.loadFromURL(url)
        # NOTE: a schema found in a parent cache is returned but not
        # cached in self._schema, so the lookup repeats on later calls
        return self._schema or schema
class Include(XMLSchemaComponent):
    """<include schemaLocation>
    parent:
        schema
    attributes:
        id -- ID
        schemaLocation -- anyURI, required
    contents:
        annotation?
    """
    required = ['schemaLocation']
    attributes = {'id':None,
        'schemaLocation':None}
    contents = {'xsd':['annotation']}
    tag = 'include'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self._schema = None   # hard reference to the resolved schema, set lazily

    def fromDom(self, node):
        # populate attributes and the optional annotation child
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

    def getSchema(self):
        """if schema is not defined, first look for a Schema class instance
        in parent Schema.  Else if not defined resolve schemaLocation
        and create a new Schema class instance.
        """
        if not self._schema:
            schema = self._parent()
            # consult the parent's include cache keyed by schemaLocation
            self._schema = schema.getIncludeSchemas().get(\
                self.attributes['schemaLocation']
                )
            if not self._schema:
                url = self.attributes['schemaLocation']
                reader = SchemaReader(base_url=schema.getBaseUrl())
                reader._imports = schema.getImportSchemas()
                reader._includes = schema.getIncludeSchemas()
                self._schema = reader.loadFromURL(url)
    return self._schema
class AttributeDeclaration(XMLSchemaComponent,\
                           AttributeMarker,\
                           DeclarationMarker):
    """<attribute name>  -- global attribute declaration
    parent:
        schema
    attributes:
        id -- ID
        name -- NCName, required
        type -- QName
        default -- string
        fixed -- string
    contents:
        annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None   # optional anonymous simpleType

    def fromDom(self, node):
        """ No list or union support
        """
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class LocalAttributeDeclaration(AttributeDeclaration,\
                                AttributeMarker,\
                                LocalMarker,\
                                DeclarationMarker):
    """<attribute name>  -- locally scoped attribute declaration
    parent:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        name -- NCName, required
        type -- QName
        form -- ('qualified' | 'unqualified'), schema.attributeFormDefault
        use -- ('optional' | 'prohibited' | 'required'), optional
        default -- string
        fixed -- string
    contents:
        annotation?, simpleType?
    """
    # no 'tag' override: inherits 'attribute' from AttributeDeclaration
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        # callable default: resolved at runtime from the owning schema's
        # attributeFormDefault by __setAttributeDefaults
        'form':lambda self: GetSchema(self).getAttributeFormDefault(),
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}

    def __init__(self, parent):
        AttributeDeclaration.__init__(self, parent)
        self.annotation = None
        self.content = None   # optional anonymous simpleType

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeWildCard(XMLSchemaComponent,\
                        AttributeMarker,\
                        DeclarationMarker,\
                        WildCardMarker):
    """<anyAttribute>
    parents:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        namespace -- '##any' | '##other' |
            (anyURI* | '##targetNamespace' | '##local'), ##any
        processContents -- 'lax' | 'skip' | 'strict', strict
    contents:
        annotation?
    """
    attributes = {'id':None,
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'anyAttribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        # populate attributes and the optional annotation child
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeReference(XMLSchemaComponent,\
                         AttributeMarker,\
                         ReferenceMarker):
    """<attribute ref>
    parents:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        ref -- QName, required
        use -- ('optional' | 'prohibited' | 'required'), optional
        default -- string
        fixed -- string
    contents:
        annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None,
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeDeclaration(self, attribute='ref'):
        # for a reference, the declaration is resolved from 'ref'
        # rather than the default attribute name
        return XMLSchemaComponent.getAttributeDeclaration(self, attribute)

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeGroupDefinition(XMLSchemaComponent,\
                               AttributeGroupMarker,\
                               DefinitionMarker):
    """<attributeGroup name>
    parents:
        schema, redefine
    attributes:
        id -- ID
        name -- NCName, required
    contents:
        annotation?, (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'attribute', 'attributeGroup', 'anyAttribute']}
    tag = 'attributeGroup'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.attr_content = None

    def getAttributeContent(self):
        # tuple of attribute declarations/references (plus an optional
        # trailing AttributeWildCard), built by fromDom
        return self.attr_content

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                # annotation is only legal as the first child
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif component == 'attribute':
                # 'name' => local declaration, 'ref' => reference
                if contents[indx].hasattr('name'):
                    content.append(LocalAttributeDeclaration(self))
                elif contents[indx].hasattr('ref'):
                    content.append(AttributeReference(self))
                else:
                    raise SchemaError, 'Unknown attribute type'
                content[-1].fromDom(contents[indx])
            elif component == 'attributeGroup':
                content.append(AttributeGroupReference(self))
                content[-1].fromDom(contents[indx])
            elif component == 'anyAttribute':
                # anyAttribute must be the last child
                if len(contents) != indx+1:
                    raise SchemaError, 'anyAttribute is out of order in %s' %self.getItemTrace()
                content.append(AttributeWildCard(self))
                content[-1].fromDom(contents[indx])
            else:
                raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())
        self.attr_content = tuple(content)
class AttributeGroupReference(XMLSchemaComponent,\
AttributeGroupMarker,\
ReferenceMarker):
"""<attributeGroup ref>
parents:
complexType, restriction, extension, attributeGroup
attributes:
id -- ID
ref -- QName, required
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None}
contents = {'xsd':['annotation']}
tag = 'attributeGroup'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getAttributeGroup(self, attribute='ref'):
"""attribute -- attribute with a QName value (eg. type).
collection -- check types collection in parent Schema instance
"""
return XMLSchemaComponent.getQNameAttribute(self, 'attr_groups', attribute)
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Elements
#####################################################
class IdentityConstrants(XMLSchemaComponent):
    """Allow one to uniquely identify nodes in a document and ensure the
    integrity of references between them.  Base class for Unique, Key
    and KeyRef; subclasses supply the 'contents' class attribute.
    (Note: the class name's misspelling is historical and kept for
    compatibility.)
    attributes -- dictionary of attributes
    selector -- XPath to selected nodes
    fields -- list of XPath to key field
    """
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.selector = None
        self.fields = None
        self.annotation = None

    def fromDom(self, node):
        # Populate from DOM: optional annotation, one <selector>, and
        # one or more <field> children (collected into a tuple).
        self.setAttributes(node)
        contents = self.getContents(node)
        fields = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'selector':
                    self.selector = self.Selector(self)
                    self.selector.fromDom(i)
                    continue
                elif component == 'field':
                    fields.append(self.Field(self))
                    fields[-1].fromDom(i)
                    continue
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.fields = tuple(fields)

    class Constraint(XMLSchemaComponent):
        # Shared base for Selector/Field: declared attributes plus an
        # optional <annotation> child.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                    else:
                        raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

    class Selector(Constraint):
        """<selector xpath>
        parent:
            unique, key, keyref
        attributes:
            id -- ID
            xpath -- XPath subset, required
        contents:
            annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'selector'

    class Field(Constraint):
        """<field xpath>
        parent:
            unique, key, keyref
        attributes:
            id -- ID
            xpath -- XPath subset, required
        contents:
            annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'field'
class Unique(IdentityConstrants):
    """<unique name> Enforce fields are unique w/i a specified scope.
    parent:
        element
    attributes:
        id -- ID
        name -- NCName, required
    contents:
        annotation?, selector, field+
    """
    # Parsing behavior is inherited from IdentityConstrants.fromDom;
    # only the declarations below differ between Unique/Key/KeyRef.
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'unique'
class Key(IdentityConstrants):
    """<key name> Enforce fields are unique w/i a specified scope, and all
    field values are present w/i document.  Fields cannot
    be nillable.
    parent:
        element
    attributes:
        id -- ID
        name -- NCName, required
    contents:
        annotation?, selector, field+
    """
    # Parsing behavior is inherited from IdentityConstrants.fromDom.
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'key'
class KeyRef(IdentityConstrants):
    """<keyref name refer> Ensure a match between two sets of values in an
    instance.
    parent:
        element
    attributes:
        id -- ID
        name -- NCName, required
        refer -- QName, required (names the key/unique being referenced)
    contents:
        annotation?, selector, field+
    """
    # Parsing behavior is inherited from IdentityConstrants.fromDom.
    required = ['name', 'refer']
    attributes = {'id':None,
        'name':None,
        'refer':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'keyref'
class ElementDeclaration(XMLSchemaComponent,\
                         ElementMarker,\
                         DeclarationMarker):
    """<element name> -- global element declaration.
    parents:
        schema
    attributes:
        id -- ID
        name -- NCName, required
        type -- QName
        default -- string
        fixed -- string
        nillable -- boolean, false
        abstract -- boolean, false
        substitutionGroup -- QName
        block -- ('#all' | ('substitution' | 'extension' | 'restriction')*),
            schema.blockDefault
        final -- ('#all' | ('extension' | 'restriction')*),
            schema.finalDefault
    contents:
        annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'substitutionGroup':None,
        'block':lambda self: self._parent().getBlockDefault(),
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}
    tag = 'element'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # Anonymous inline type (AnonymousSimpleType or LocalComplexType).
        self.content = None
        # Goes through the 'constraints' property below, so the tuple of
        # Key/KeyRef/Unique components lands in self._constraints.
        self.constraints = ()

    def isQualified(self):
        '''Global elements are always qualified.
        '''
        return True

    def getElementDeclaration(self, attribute):
        # A declaration cannot resolve another element declaration.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute=None):
        '''If attribute is None, "type" is assumed, return the corresponding
        representation of the global type definition (TypeDefinition),
        or the local definition if don't find "type". To maintain backwards
        compat, if attribute is provided call base class method.
        '''
        if attribute:
            return XMLSchemaComponent.getTypeDefinition(self, attribute)
        gt = XMLSchemaComponent.getTypeDefinition(self, 'type')
        if gt:
            return gt
        return self.content

    def getConstraints(self):
        return self._constraints

    def setConstraints(self, constraints):
        # Freeze the collection once assigned.
        self._constraints = tuple(constraints)

    constraints = property(getConstraints, setConstraints, None, "tuple of key, keyref, unique constraints")

    def fromDom(self, node):
        # Populate from DOM: declared attributes, optional annotation, at
        # most one anonymous type child, any number of identity constraints.
        self.setAttributes(node)
        contents = self.getContents(node)
        constraints = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'simpleType' and not self.content:
                    self.content = AnonymousSimpleType(self)
                    self.content.fromDom(i)
                elif component == 'complexType' and not self.content:
                    self.content = LocalComplexType(self)
                    self.content.fromDom(i)
                elif component == 'key':
                    constraints.append(Key(self))
                    constraints[-1].fromDom(i)
                elif component == 'keyref':
                    constraints.append(KeyRef(self))
                    constraints[-1].fromDom(i)
                elif component == 'unique':
                    constraints.append(Unique(self))
                    constraints[-1].fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.constraints = constraints
class LocalElementDeclaration(ElementDeclaration,\
                              LocalMarker):
    """<element> -- local element declaration within a model group.
    parents:
        all, choice, sequence
    attributes:
        id -- ID
        name -- NCName, required
        form -- ('qualified' | 'unqualified'), schema.elementFormDefault
        type -- QName
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1
        default -- string
        fixed -- string
        nillable -- boolean, false
        block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
    contents:
        annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    # fromDom and 'tag' are inherited from ElementDeclaration.
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'form':lambda self: GetSchema(self).getElementFormDefault(),
        'type':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'block':lambda self: GetSchema(self).getBlockDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}

    def isQualified(self):
        '''Local elements can be qualified or unqualified according
        to the attribute form, or the elementFormDefault. By default
        local elements are unqualified.
        '''
        form = self.getAttribute('form')
        if form == 'qualified':
            return True
        if form == 'unqualified':
            return False
        # Any other value means the schema (or defaults) are malformed.
        raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
class ElementReference(XMLSchemaComponent,\
ElementMarker,\
ReferenceMarker):
"""<element ref>
parents:
all, choice, sequence
attributes:
id -- ID
ref -- QName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'element'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getElementDeclaration(self, attribute=None):
'''If attribute is None, "ref" is assumed, return the corresponding
representation of the global element declaration (ElementDeclaration),
To maintain backwards compat, if attribute is provided call base class method.
'''
if attribute:
return XMLSchemaComponent.getElementDeclaration(self, attribute)
return XMLSchemaComponent.getElementDeclaration(self, 'ref')
def fromDom(self, node):
self.annotation = None
self.setAttributes(node)
for i in self.getContents(node):
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ElementWildCard(LocalElementDeclaration,\
                      WildCardMarker):
    """<any> -- element wildcard inside a content model.
    parents:
        choice, sequence
    attributes:
        id -- ID
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1
        namespace -- '##any' | '##other' |
            (anyURI* | '##targetNamespace' | '##local'), ##any
        processContents -- 'lax' | 'skip' | 'strict', strict
    contents:
        annotation?
    """
    required = []
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'any'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def isQualified(self):
        '''Wildcard qualification follows the schema's
        elementFormDefault (matched elements are unknown until
        instance time, so the schema-wide default applies).
        '''
        return GetSchema(self).isElementFormDefaultQualified()

    def getTypeDefinition(self, attribute):
        # A wildcard has no type attribute to resolve.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def fromDom(self, node):
        # Populate from DOM: declared attributes plus at most one
        # <annotation> child.
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Model Groups
#####################################################
class Sequence(XMLSchemaComponent,\
               SequenceMarker):
    """<sequence> -- the contained particles appear in order.
    parents:
        complexType, extension, restriction, group, choice, sequence
    attributes:
        id -- ID
        minOccurs -- Whole Number, 1
        maxOccurs -- (Whole Number | 'unbounded'), 1
    contents:
        annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
        'any']}
    tag = 'sequence'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # Tuple of particle components, assigned at the end of fromDom.
        self.content = None

    def fromDom(self, node):
        # Populate from DOM: optional annotation, then a list of particle
        # children, each instantiated by tag (element children split into
        # references vs. local declarations on presence of a 'ref' attr).
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                # Parse the particle just appended from the same DOM child.
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class All(XMLSchemaComponent,\
          AllMarker):
    """<all> -- the contained elements appear in any order.
    parents:
        complexType, extension, restriction, group
    attributes:
        id -- ID
        minOccurs -- '0' | '1', 1
        maxOccurs -- '1', 1
    contents:
        annotation?, element*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element']}
    tag = 'all'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # Tuple of element components, assigned at the end of fromDom.
        self.content = None

    def fromDom(self, node):
        # Populate from DOM: optional annotation plus element children
        # only (references vs. local declarations split on 'ref').
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Choice(XMLSchemaComponent,\
ChoiceMarker):
"""<choice>
parents:
complexType, extension, restriction, group, choice, sequence
attributes:
id -- ID
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?, (element | group | choice | sequence | any)*
"""
attributes = {'id':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
'any']}
tag = 'choice'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
continue
elif component == 'element':
if i.hasattr('ref'):
content.append(ElementReference(self))
else:
content.append(LocalElementDeclaration(self))
elif component == 'group':
content.append(ModelGroupReference(self))
elif component == 'choice':
content.append(Choice(self))
elif component == 'sequence':
content.append(Sequence(self))
elif component == 'any':
content.append(ElementWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
content[-1].fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class ModelGroupDefinition(XMLSchemaComponent,\
                           ModelGroupMarker,\
                           DefinitionMarker):
    """<group name> -- named, reusable model group.
    parents:
        redefine, schema
    attributes:
        id -- ID
        name -- NCName, required
    contents:
        annotation?, (all | choice | sequence)?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'all', 'choice', 'sequence']}
    tag = 'group'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # The single All/Choice/Sequence child, set by fromDom.
        self.content = None

    def fromDom(self, node):
        # Populate from DOM: optional annotation plus at most one model
        # group child (a second one falls through to the error branch).
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'all' and not self.content:
                    self.content = All(self)
                elif component == 'choice' and not self.content:
                    self.content = Choice(self)
                elif component == 'sequence' and not self.content:
                    self.content = Sequence(self)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ModelGroupReference(XMLSchemaComponent,\
ModelGroupMarker,\
ReferenceMarker):
"""<group ref>
parents:
choice, complexType, extension, restriction, sequence
attributes:
id -- ID
ref -- NCName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'group'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getModelGroupReference(self):
return self.getModelGroup('ref')
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ComplexType(XMLSchemaComponent,\
DefinitionMarker,\
ComplexMarker):
"""<complexType name>
parents:
redefine, schema
attributes:
id -- ID
name -- NCName, required
mixed -- boolean, false
abstract -- boolean, false
block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
final -- ('#all' | ('extension' | 'restriction')*), schema.finalDefault
contents:
annotation?, (simpleContent | complexContent |
((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
"""
required = ['name']
attributes = {'id':None,
'name':None,
'mixed':0,
'abstract':0,
'block':lambda self: self._parent().getBlockDefault(),
'final':lambda self: self._parent().getFinalDefault()}
contents = {'xsd':['annotation', 'simpleContent', 'complexContent',\
'group', 'all', 'choice', 'sequence', 'attribute', 'attributeGroup',\
'anyAttribute', 'any']}
tag = 'complexType'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def getElementDeclaration(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def getTypeDefinition(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
#XXX ugly
if not num:
return
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
self.content = None
if component == 'simpleContent':
self.content = self.__class__.SimpleContent(self)
self.content.fromDom(contents[indx])
elif component == 'complexContent':
self.content = self.__class__.ComplexContent(self)
self.content.fromDom(contents[indx])
else:
if component == 'all':
self.content = All(self)
elif component == 'choice':
self.content = Choice(self)
elif component == 'sequence':
self.content = Sequence(self)
elif component == 'group':
self.content = ModelGroupReference(self)
if self.content:
self.content.fromDom(contents[indx])
indx += 1
self.attr_content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeReference(self))
else:
self.attr_content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
self.attr_content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
self.attr_content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s): %s' \
%(contents[indx].getTagName(),self.getItemTrace())
self.attr_content[-1].fromDom(contents[indx])
indx += 1
class _DerivedType(XMLSchemaComponent):
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.derivation = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
continue
elif component == 'restriction' and not self.derivation:
self.derivation = self.__class__.Restriction(self)
elif component == 'extension' and not self.derivation:
self.derivation = self.__class__.Extension(self)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.derivation.fromDom(i)
class ComplexContent(_DerivedType,\
ComplexMarker):
"""<complexContent>
parents:
complexType
attributes:
id -- ID
mixed -- boolean, false
contents:
annotation?, (restriction | extension)
"""
attributes = {'id':None,
'mixed':0 }
contents = {'xsd':['annotation', 'restriction', 'extension']}
tag = 'complexContent'
class _DerivationBase(XMLSchemaComponent):
"""<extension>,<restriction>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'group', 'all', 'choice',\
'sequence', 'attribute', 'attributeGroup', 'anyAttribute']}
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
#XXX ugly
if not num:
return
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
if component == 'all':
self.content = All(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'choice':
self.content = Choice(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'sequence':
self.content = Sequence(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'group':
self.content = ModelGroupReference(self)
self.content.fromDom(contents[indx])
indx += 1
else:
self.content = None
self.attr_content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeReference(self))
else:
self.attr_content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeGroupReference(self))
else:
self.attr_content.append(AttributeGroupDefinition(self))
elif component == 'anyAttribute':
self.attr_content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())
self.attr_content[-1].fromDom(contents[indx])
indx += 1
class Extension(_DerivationBase,
ExtensionMarker):
"""<extension base>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
tag = 'extension'
class Restriction(_DerivationBase,\
RestrictionMarker):
"""<restriction base>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
tag = 'restriction'
class SimpleContent(_DerivedType,\
SimpleMarker):
"""<simpleContent>
parents:
complexType
attributes:
id -- ID
contents:
annotation?, (restriction | extension)
"""
attributes = {'id':None}
contents = {'xsd':['annotation', 'restriction', 'extension']}
tag = 'simpleContent'
class Extension(XMLSchemaComponent,\
ExtensionMarker):
"""<extension base>
parents:
simpleContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (attribute | attributeGroup)*, anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'attribute', 'attributeGroup',
'anyAttribute']}
tag = 'extension'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
if num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
content.append(AttributeReference(self))
else:
content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)'\
%(contents[indx].getTagName())
content[-1].fromDom(contents[indx])
indx += 1
self.attr_content = tuple(content)
class Restriction(XMLSchemaComponent,\
RestrictionMarker):
"""<restriction base>
parents:
simpleContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, simpleType?, (enumeration | length |
maxExclusive | maxInclusive | maxLength | minExclusive |
minInclusive | minLength | pattern | fractionDigits |
totalDigits | whiteSpace)*, (attribute | attributeGroup)*,
anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'simpleType', 'attribute',\
'attributeGroup', 'anyAttribute'] + RestrictionMarker.facets}
tag = 'restriction'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.content = []
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
content.append(AttributeReference(self))
else:
content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
content.append(AttributeWildCard(self))
elif component == 'simpleType':
self.content.append(LocalSimpleType(self))
self.content[-1].fromDom(contents[indx])
else:
raise SchemaError, 'Unknown component (%s)'\
%(contents[indx].getTagName())
content[-1].fromDom(contents[indx])
indx += 1
self.attr_content = tuple(content)
class LocalComplexType(ComplexType,\
                       LocalMarker):
    """<complexType> -- anonymous complex type defined inline inside an
    element declaration.
    parents:
        element
    attributes:
        id -- ID
        mixed -- boolean, false
    contents:
        annotation?, (simpleContent | complexContent |
        ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
    """
    # Anonymous types carry no 'name'; parsing is inherited from ComplexType.
    required = []
    attributes = {'id':None,
        'mixed':0}
    tag = 'complexType'
class SimpleType(XMLSchemaComponent,\
                 DefinitionMarker,\
                 SimpleMarker):
    """<simpleType name> -- named (global) simple type definition.
    parents:
        redefine, schema
    attributes:
        id -- ID
        name -- NCName, required
        final -- ('#all' | ('extension' | 'restriction' | 'list' | 'union')*),
            schema.finalDefault
    contents:
        annotation?, (restriction | list | union)
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'restriction', 'list', 'union']}
    tag = 'simpleType'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # The single Restriction/List/Union child, set by fromDom.
        self.content = None

    def getElementDeclaration(self, attribute):
        # A type definition has no element declaration to resolve.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute):
        # A type definition has no type attribute to resolve.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def fromDom(self, node):
        # Populate from DOM: consume leading annotation children, then
        # dispatch on the first non-annotation child.  The for/else
        # returns early when every child was an annotation (or there
        # were no children at all).
        self.setAttributes(node)
        contents = self.getContents(node)
        for child in contents:
            component = SplitQName(child.getTagName())[1]
            if component == 'annotation':
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
                continue
            break
        else:
            return
        if component == 'restriction':
            self.content = self.__class__.Restriction(self)
        elif component == 'list':
            self.content = self.__class__.List(self)
        elif component == 'union':
            self.content = self.__class__.Union(self)
        else:
            raise SchemaError, 'Unknown component (%s)' %(component)
        self.content.fromDom(child)
class Restriction(XMLSchemaComponent,\
RestrictionMarker):
"""<restriction base>
parents:
simpleType
attributes:
id -- ID
base -- QName, required or simpleType child
contents:
annotation?, simpleType?, (enumeration | length |
maxExclusive | maxInclusive | maxLength | minExclusive |
minInclusive | minLength | pattern | fractionDigits |
totalDigits | whiteSpace)*
"""
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'simpleType']+RestrictionMarker.facets}
tag = 'restriction'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getSimpleTypeContent(self):
for el in self.content:
if el.isSimple(): return el
return None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
continue
elif (component == 'simpleType') and (not indx or indx == 1):
content.append(AnonymousSimpleType(self))
content[-1].fromDom(contents[indx])
elif component in RestrictionMarker.facets:
#print_debug('%s class instance, skipping %s' %(self.__class__, component))
pass
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class Union(XMLSchemaComponent,
UnionMarker):
"""<union>
parents:
simpleType
attributes:
id -- ID
memberTypes -- list of QNames, required or simpleType child.
contents:
annotation?, simpleType*
"""
attributes = {'id':None,
'memberTypes':None }
contents = {'xsd':['annotation', 'simpleType']}
tag = 'union'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
elif (component == 'simpleType'):
content.append(AnonymousSimpleType(self))
content[-1].fromDom(contents[indx])
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class List(XMLSchemaComponent,
ListMarker):
"""<list>
parents:
simpleType
attributes:
id -- ID
itemType -- QName, required or simpleType child.
contents:
annotation?, simpleType?
"""
attributes = {'id':None,
'itemType':None }
contents = {'xsd':['annotation', 'simpleType']}
tag = 'list'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getItemType(self):
return self.attributes.get('itemType')
def getTypeDefinition(self, attribute='itemType'):
'''return the type refered to by itemType attribute or
the simpleType content. If returns None, then the
type refered to by itemType is primitive.
'''
tp = XMLSchemaComponent.getTypeDefinition(self, attribute)
return tp or self.content
def fromDom(self, node):
self.annotation = None
self.content = None
self.setAttributes(node)
contents = self.getContents(node)
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
elif (component == 'simpleType'):
self.content = AnonymousSimpleType(self)
self.content.fromDom(contents[indx])
break
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AnonymousSimpleType(SimpleType,\
                          SimpleMarker):
    """<simpleType>
       parents:
           attribute, element, list, restriction, union
       attributes:
           id -- ID
       contents:
           annotation?, (restriction | list | union)
    """
    # Anonymous (unnamed, inline) simpleType: same parsing behaviour as the
    # SimpleType base class, but with no required 'name' attribute.
    required = []
    attributes = {'id':None}
    tag = 'simpleType'
class Redefine:
    """<redefine>
       parents:
       attributes:
       contents:
    """
    # NOTE(review): placeholder only -- <redefine> support is not implemented
    # (no attributes/contents tables and no fromDom).
    tag = 'redefine'
###########################
###########################
# tuple could not be subclassed before Python 2.2; fall back to the
# pure-Python UserTuple shim on older interpreters.
if sys.version_info[:2] >= (2, 2):
    tupleClass = tuple
else:
    import UserTuple
    tupleClass = UserTuple.UserTuple
class TypeDescriptionComponent(tupleClass):
    """Tuple of length 2, consisting of
       a namespace and unprefixed name.
    """
    def __init__(self, args):
        """args -- (namespace, name)
           Remove the name's prefix, irrelevant.
        """
        if len(args) != 2:
            raise TypeError, 'expecting tuple (namespace, name), got %s' %args
        elif args[1].find(':') >= 0:
            # The name is a QName; keep only the local (unprefixed) part.
            args = (args[0], SplitQName(args[1])[1])
        # NOTE(review): for a real tuple subclass the contents are fixed in
        # __new__, so tuple.__init__ cannot alter them here -- presumably this
        # call only has an effect with the pre-2.2 UserTuple base. Confirm.
        tuple.__init__(self, args)
        return

    def getTargetNamespace(self):
        """Return the namespace component of the pair."""
        return self[0]

    def getName(self):
        """Return the unprefixed (local) name component of the pair."""
        return self[1]
| bsd-3-clause |
mwmuni/LIGGGHTS_GUI | OpenGL/GL/SGIS/texture_filter4.py | 9 | 1413 | '''OpenGL extension SGIS.texture_filter4
This module customises the behaviour of the
OpenGL.raw.GL.SGIS.texture_filter4 to provide a more
Python-friendly API
Overview (from the spec)
This extension allows 1D and 2D textures to be filtered using an
application-defined, four sample per dimension filter. (In addition to
the NEAREST and LINEAR filters defined in the original GL Specification.)
Such filtering results in higher image quality. It is defined only
for non-mipmapped filters. The filter that is specified must be
symmetric and separable (in the 2D case).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIS/texture_filter4.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIS.texture_filter4 import *
from OpenGL.raw.GL.SGIS.texture_filter4 import _EXTENSION_NAME
def glInitTextureFilter4SGIS():
    """Report whether the SGIS_texture_filter4 extension is available."""
    from OpenGL import extensions as _extensions
    available = _extensions.hasGLExtension(_EXTENSION_NAME)
    return available
# Re-wrap glTexFilterFuncSGIS so the 'weights' array argument is accepted as
# a Python sequence; size None means the length is not validated against n.
# OUTPUT glGetTexFilterFuncSGIS.weights COMPSIZE(target,filter)
# INPUT glTexFilterFuncSGIS.weights size not checked against n
glTexFilterFuncSGIS=wrapper.wrapper(glTexFilterFuncSGIS).setInputArraySize(
    'weights', None
)
### END AUTOGENERATED SECTION | gpl-3.0 |
vFense/vFenseAgent-nix | agent/deps/rpm/Python-2.7.5/lib/python2.7/ctypes/test/test_numbers.py | 35 | 9202 | from ctypes import *
import unittest
import struct
def valid_ranges(*types):
# given a sequence of numeric types, collect their _type_
# attribute, which is a single format character compatible with
# the struct module, use the struct module to calculate the
# minimum and maximum value allowed for this format.
# Returns a list of (min, max) values.
result = []
for t in types:
fmt = t._type_
size = struct.calcsize(fmt)
a = struct.unpack(fmt, ("\x00"*32)[:size])[0]
b = struct.unpack(fmt, ("\xFF"*32)[:size])[0]
c = struct.unpack(fmt, ("\x7F"+"\x00"*32)[:size])[0]
d = struct.unpack(fmt, ("\x80"+"\xFF"*32)[:size])[0]
result.append((min(a, b, c, d), max(a, b, c, d)))
return result
# Type of the object returned by byref(); used by the from_param/byref tests.
ArgType = type(byref(c_int(0)))

unsigned_types = [c_ubyte, c_ushort, c_uint, c_ulong]
# NOTE(review): c_longlong appears here AND is appended again in the
# else-branch below, so it ends up in signed_types twice -- harmless for the
# tests, but worth confirming against upstream.
signed_types = [c_byte, c_short, c_int, c_long, c_longlong]
bool_types = []
float_types = [c_double, c_float]

# Probe for optional types: (unsigned) long long and bool may be missing on
# older builds; extend the lists only when they exist.
try:
    c_ulonglong
    c_longlong
except NameError:
    pass
else:
    unsigned_types.append(c_ulonglong)
    signed_types.append(c_longlong)

try:
    c_bool
except NameError:
    pass
else:
    bool_types.append(c_bool)

unsigned_ranges = valid_ranges(*unsigned_types)
signed_ranges = valid_ranges(*signed_types)
# Candidate constructor inputs for c_bool; truthiness decides the stored value.
bool_values = [True, False, 0, 1, -1, 5000, 'test', [], [1]]
################################################################
class NumberTestCase(unittest.TestCase):
    # Exercises the ctypes numeric types: default construction, value
    # round-trips at the range boundaries, parameter conversion, sizes,
    # alignments and from_address() memory aliasing.  (Python 2 era test:
    # note the long literal 2L and the array typecode 'c' below.)

    def test_default_init(self):
        # default values are set to zero
        for t in signed_types + unsigned_types + float_types:
            self.assertEqual(t().value, 0)

    def test_unsigned_values(self):
        # the value given to the constructor is available
        # as the 'value' attribute
        for t, (l, h) in zip(unsigned_types, unsigned_ranges):
            self.assertEqual(t(l).value, l)
            self.assertEqual(t(h).value, h)

    def test_signed_values(self):
        # see above
        for t, (l, h) in zip(signed_types, signed_ranges):
            self.assertEqual(t(l).value, l)
            self.assertEqual(t(h).value, h)

    def test_bool_values(self):
        from operator import truth
        for t, v in zip(bool_types, bool_values):
            self.assertEqual(t(v).value, truth(v))

    def test_typeerror(self):
        # Only numbers are allowed in the constructor,
        # otherwise TypeError is raised
        for t in signed_types + unsigned_types + float_types:
            self.assertRaises(TypeError, t, "")
            self.assertRaises(TypeError, t, None)

##    def test_valid_ranges(self):
##        # invalid values of the correct type
##        # raise ValueError (not OverflowError)
##        for t, (l, h) in zip(unsigned_types, unsigned_ranges):
##            self.assertRaises(ValueError, t, l-1)
##            self.assertRaises(ValueError, t, h+1)

    def test_from_param(self):
        # the from_param class method attribute always
        # returns PyCArgObject instances
        for t in signed_types + unsigned_types + float_types:
            self.assertEqual(ArgType, type(t.from_param(0)))

    def test_byref(self):
        # calling byref returns also a PyCArgObject instance
        for t in signed_types + unsigned_types + float_types + bool_types:
            parm = byref(t())
            self.assertEqual(ArgType, type(parm))

    def test_floats(self):
        # c_float and c_double can be created from
        # Python int, long and float
        class FloatLike(object):
            def __float__(self):
                return 2.0
        f = FloatLike()
        for t in float_types:
            self.assertEqual(t(2.0).value, 2.0)
            self.assertEqual(t(2).value, 2.0)
            self.assertEqual(t(2L).value, 2.0)
            self.assertEqual(t(f).value, 2.0)

    def test_integers(self):
        class FloatLike(object):
            def __float__(self):
                return 2.0
        f = FloatLike()
        class IntLike(object):
            def __int__(self):
                return 2
        i = IntLike()
        # integers cannot be constructed from floats,
        # but from integer-like objects
        for t in signed_types + unsigned_types:
            self.assertRaises(TypeError, t, 3.14)
            self.assertRaises(TypeError, t, f)
            self.assertEqual(t(i).value, 2)

    def test_sizes(self):
        for t in signed_types + unsigned_types + float_types + bool_types:
            try:
                size = struct.calcsize(t._type_)
            except struct.error:
                continue
            # sizeof of the type...
            self.assertEqual(sizeof(t), size)
            # and sizeof of an instance
            self.assertEqual(sizeof(t()), size)

    def test_alignments(self):
        for t in signed_types + unsigned_types + float_types:
            code = t._type_ # the typecode
            # struct's "c%c" trick yields the padding the type needs after
            # a single char, i.e. its alignment requirement.
            align = struct.calcsize("c%c" % code) - struct.calcsize(code)

            # alignment of the type...
            self.assertEqual((code, alignment(t)),
                             (code, align))
            # and alignment of an instance
            self.assertEqual((code, alignment(t())),
                             (code, align))

    def test_int_from_address(self):
        from array import array
        for t in signed_types + unsigned_types:
            # the array module doesn't support all format codes
            # (no 'q' or 'Q')
            try:
                array(t._type_)
            except ValueError:
                continue
            a = array(t._type_, [100])

            # v now is an integer at an 'external' memory location
            v = t.from_address(a.buffer_info()[0])
            self.assertEqual(v.value, a[0])
            self.assertEqual(type(v), t)

            # changing the value at the memory location changes v's value also
            a[0] = 42
            self.assertEqual(v.value, a[0])

    def test_float_from_address(self):
        from array import array
        for t in float_types:
            a = array(t._type_, [3.14])
            v = t.from_address(a.buffer_info()[0])
            self.assertEqual(v.value, a[0])
            self.assertTrue(type(v) is t)
            a[0] = 2.3456e17
            self.assertEqual(v.value, a[0])
            self.assertTrue(type(v) is t)

    def test_char_from_address(self):
        from ctypes import c_char
        from array import array

        a = array('c', 'x')
        v = c_char.from_address(a.buffer_info()[0])
        self.assertEqual(v.value, a[0])
        self.assertTrue(type(v) is c_char)

        a[0] = '?'
        self.assertEqual(v.value, a[0])

    # array does not support c_bool / 't'
    # def test_bool_from_address(self):
    #     from ctypes import c_bool
    #     from array import array
    #     a = array(c_bool._type_, [True])
    #     v = t.from_address(a.buffer_info()[0])
    #     self.assertEqual(v.value, a[0])
    #     self.assertEqual(type(v) is t)
    #     a[0] = False
    #     self.assertEqual(v.value, a[0])
    #     self.assertEqual(type(v) is t)

    def test_init(self):
        # c_int() can be initialized from Python's int, and c_int.
        # Not from c_long or so, which seems strange, and should
        # probably be changed:
        self.assertRaises(TypeError, c_int, c_long(42))

    def test_float_overflow(self):
        import sys
        big_int = int(sys.float_info.max) * 2
        for t in float_types + [c_longdouble]:
            self.assertRaises(OverflowError, t, big_int)
            if (hasattr(t, "__ctype_be__")):
                self.assertRaises(OverflowError, t.__ctype_be__, big_int)
            if (hasattr(t, "__ctype_le__")):
                self.assertRaises(OverflowError, t.__ctype_le__, big_int)

##    def test_perf(self):
##        check_perf()
from ctypes import _SimpleCData
class c_int_S(_SimpleCData):
    # Minimal c_int clone defined at Python level; used by check_perf() below
    # to compare construction cost against the builtin c_int.
    _type_ = "i"
    __slots__ = []
def run_test(rep, msg, func, arg=None):
    """Call *func* 5*rep times (with *arg* when given) and print the mean
    time per call in microseconds, labelled with *msg*."""
##    items = [None] * rep
    items = range(rep)
    from time import clock
    if arg is not None:
        start = clock()
        for i in items:
            # Five calls per iteration to amortize the loop overhead.
            func(arg); func(arg); func(arg); func(arg); func(arg)
        stop = clock()
    else:
        start = clock()
        for i in items:
            func(); func(); func(); func(); func()
        stop = clock()
    print "%15s: %.2f us" % (msg, ((stop-start)*1e6/5/rep))
def check_perf():
    """Benchmark plain int construction against c_int and c_int_S."""
    from ctypes import c_int

    reps = 200000
    cases = (
        ("int()", int),
        ("int(999)", int),
        ("c_int()", c_int),
        ("c_int(999)", c_int),
        ("c_int_S()", c_int_S),
        ("c_int_S(999)", c_int_S),
    )
    for label, factory in cases:
        run_test(reps, label, factory)
# Python 2.3 -OO, win2k, P4 700 MHz:
#
# int(): 0.87 us
# int(999): 0.87 us
# c_int(): 3.35 us
# c_int(999): 3.34 us
# c_int_S(): 3.23 us
# c_int_S(999): 3.24 us
# Python 2.2 -OO, win2k, P4 700 MHz:
#
# int(): 0.89 us
# int(999): 0.89 us
# c_int(): 9.99 us
# c_int(999): 10.02 us
# c_int_S(): 9.87 us
# c_int_S(999): 9.85 us
if __name__ == '__main__':
    # Uncomment to run the construction benchmarks instead of the test suite.
##    check_perf()
    unittest.main()
| lgpl-3.0 |
gkbrk/WatchPeopleCode | wpc/utils.py | 2 | 2026 | from urlparse import urlparse, parse_qs
import requests
import re
# this functions is originally FROM: http://stackoverflow.com/questions/4356538/how-can-i-extract-video-id-from-youtubes-link-in-python
def youtube_video_id(url):
    """Extract the YouTube video id from *url*, or return None.

    Recognized forms (originally from
    http://stackoverflow.com/questions/4356538):
    - http://youtu.be/SA2iWivDJiE
    - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
    - http://www.youtube.com/embed/SA2iWivDJiE
    - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
    """
    query = urlparse(url)
    if query.hostname == 'youtu.be':
        return query.path[1:]
    if query.hostname in ('www.youtube.com', 'youtube.com'):
        if query.path == '/watch':
            p = parse_qs(query.query)
            # BUG FIX: a /watch URL without a 'v' query parameter used to
            # raise KeyError; treat it as unrecognized instead.
            v = p.get('v')
            return v[0] if v else None
        if query.path[:7] == '/embed/':
            return query.path.split('/')[2]
        if query.path[:3] == '/v/':
            return query.path.split('/')[2]
    # Not a URL shape we know how to parse.
    return None
def twitch_channel(url):
    """Extract the channel name from a twitch.tv *url*, or return None.

    A channel URL has a (sub)domain of twitch.tv and exactly one path
    segment (the channel name).
    """
    query = urlparse(url)
    path_elements = query.path.strip('/').split('/')
    if len(path_elements) != 1:
        # More than one path segment: not a channel URL.
        return None
    # BUG FIX: guard against a missing hostname (e.g. a relative URL) --
    # re.match() raises TypeError when passed None.
    if not query.hostname or not re.match(r'([\w-]+\.)?twitch\.tv', query.hostname):
        return None
    channel = path_elements[0]
    return channel if channel else None
def wpc_channel(url):
    """Return the streamer name for a watchpeoplecode.com /streamer/<name>
    URL, or None for any other URL."""
    parsed = urlparse(url)
    if parsed.hostname not in ('www.watchpeoplecode.com', 'watchpeoplecode.com'):
        return None
    segments = parsed.path.strip('/').split('/')
    if len(segments) == 2 and segments[0] == 'streamer':
        return segments[1]
    return None
def requests_get_with_retries(url, retries_num=5):
    """GET *url* using a session whose HTTP and HTTPS adapters retry failed
    connections up to *retries_num* times."""
    session = requests.Session()
    for scheme in ("http://", "https://"):
        adapter = requests.adapters.HTTPAdapter(max_retries=retries_num)
        session.mount(scheme, adapter)
    return session.get(url=url)
| mit |
galactics/space-api | beyond/utils/ltan.py | 2 | 2659 | """Utilities to compute the Local Time at Ascending Node (LTAN)
Both True and Mean LTAN are available, the difference between them being
the `equation of time <https://en.wikipedia.org/wiki/Equation_of_time>`__
"""
import numpy as np
from ..env.solarsystem import get_body
from ..frames.iau1980 import _sideral
def orb2ltan(orb, type="mean"):
    """Compute the Local Time at Ascending Node (LTAN) for a given orbit

    Args:
        orb (Orbit): Orbit
        type (str) : either "mean" or "true"
    Return
        float : LTAN in hours
    """
    keplerian = orb.copy(frame="EME2000", form="keplerian")
    return raan2ltan(orb.date, keplerian.raan, type)
def _mean_sun_raan(date):
    """Right ascension of the mean Sun in EME2000, in radians."""
    # NOTE(review): the UT1 fraction-of-day term mirrors the original
    # implementation, whose author flagged the time-scale choice as
    # uncertain ("Not so sure about the UT1 thing").
    day_fraction = date.change_scale("UT1").s / 86400
    sideral_angle = np.radians(_sideral(date))
    return sideral_angle + np.pi - 2 * np.pi * day_fraction
def raan2ltan(date, raan, type="mean"):
    """Conversion from RAAN to Local Time at Ascending Node (LTAN)

    Args:
        date (Date) : Date of the conversion
        raan (float) : RAAN in radians, in EME2000
        type (str) : either "mean" or "true"
    Return:
        float : LTAN in hours
    """
    if type == "mean":
        # Hour angle between the node and the mean Sun, mapped onto a
        # 24-hour clock (12h == node at the Sun's meridian).
        mean_solar_angle = raan - _mean_sun_raan(date)
        ltan = (12 + mean_solar_angle * 12 / np.pi) % 24
    elif type == "true":
        # Use the apparent (true) Sun position instead of the mean Sun;
        # the difference between the two is the equation of time.
        theta_sun = (
            get_body("Sun")
            .propagate(date)
            .copy(frame="EME2000", form="spherical")
            .theta
        )
        ltan = ((24 * (raan - theta_sun) / (2 * np.pi)) + 12) % 24
    else:  # pragma: no cover
        # BUG FIX: corrected the misspelled error message ("Unknwon").
        raise ValueError("Unknown Local Time type : {}".format(type))

    return ltan
def ltan2raan(date, ltan, type="mean"):
    """Conversion from Local Time at Ascending Node (LTAN) to RAAN
    (fixed doc title: this returns a RAAN, not a longitude)

    Args:
        date (Date) : Date of the conversion
        ltan (float) : LTAN in hours
        type (str) : either "mean" or "true"
    Return:
        float : RAAN in radians in EME2000
    """
    if type == "mean":
        # Inverse of raan2ltan's "mean" branch: hours back to radians,
        # offset by the mean Sun's right ascension.
        mean_solar_angle = (ltan - 12) * np.pi / 12
        raan = (mean_solar_angle + _mean_sun_raan(date)) % (2 * np.pi)
    elif type == "true":
        sun = get_body("Sun").propagate(date).copy(frame="EME2000", form="spherical")
        hour_angle = np.pi * (ltan - 12) / 12
        raan = (sun.theta + hour_angle) % (2 * np.pi)
    else:  # pragma: no cover
        # BUG FIX: corrected the misspelled error message ("Unknwon").
        raise ValueError("Unknown Local Time type : {}".format(type))

    return raan
| gpl-3.0 |
tebriel/dd-agent | checks.d/rabbitmq.py | 2 | 14759 | # (C) Datadog, Inc. 2013-2016
# (C) Brett Langdon <brett@blangdon.com> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import re
import time
import urllib
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
# Event/service-check source identifiers reported to Datadog.
EVENT_TYPE = SOURCE_TYPE_NAME = 'rabbitmq'

# Object types: double as API paths ("/api/queues", "/api/nodes") and as keys
# into the per-type tables below.
QUEUE_TYPE = 'queues'
NODE_TYPE = 'nodes'

# Hard caps on how many objects of each type are collected per run.
MAX_DETAILED_QUEUES = 200
MAX_DETAILED_NODES = 100

# Post an event in the stream when the number of queues or nodes to
# collect is above 90% of the limit:
ALERT_THRESHOLD = 0.9

# (JSON path within an API object, emitted metric name, cast) triples.
QUEUE_ATTRIBUTES = [
    # Path, Name, Operation
    ('active_consumers', 'active_consumers', float),
    ('consumers', 'consumers', float),
    ('consumer_utilisation', 'consumer_utilisation', float),

    ('memory', 'memory', float),

    ('messages', 'messages', float),
    ('messages_details/rate', 'messages.rate', float),

    ('messages_ready', 'messages_ready', float),
    ('messages_ready_details/rate', 'messages_ready.rate', float),

    ('messages_unacknowledged', 'messages_unacknowledged', float),
    ('messages_unacknowledged_details/rate', 'messages_unacknowledged.rate', float),

    ('message_stats/ack', 'messages.ack.count', float),
    ('message_stats/ack_details/rate', 'messages.ack.rate', float),

    ('message_stats/deliver', 'messages.deliver.count', float),
    ('message_stats/deliver_details/rate', 'messages.deliver.rate', float),

    ('message_stats/deliver_get', 'messages.deliver_get.count', float),
    ('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate', float),

    ('message_stats/publish', 'messages.publish.count', float),
    ('message_stats/publish_details/rate', 'messages.publish.rate', float),

    ('message_stats/redeliver', 'messages.redeliver.count', float),
    ('message_stats/redeliver_details/rate', 'messages.redeliver.rate', float),
]

NODE_ATTRIBUTES = [
    ('fd_used', 'fd_used', float),
    ('mem_used', 'mem_used', float),
    ('run_queue', 'run_queue', float),
    ('sockets_used', 'sockets_used', float),
    # 'partitions' is a list in the API; its length is the metric value.
    ('partitions', 'partitions', len)
]

ATTRIBUTES = {
    QUEUE_TYPE: QUEUE_ATTRIBUTES,
    NODE_TYPE: NODE_ATTRIBUTES,
}

# API-field -> tag-name mapping used to build metric tags per object type.
TAGS_MAP = {
    QUEUE_TYPE: {
        'node': 'node',
        'name': 'queue',
        'vhost': 'vhost',
        'policy': 'policy',
        'queue_family': 'queue_family',
    },
    NODE_TYPE: {
        'name': 'node',
    }
}

# Metric namespace segment: rabbitmq.queue.* / rabbitmq.node.*
METRIC_SUFFIX = {
    QUEUE_TYPE: "queue",
    NODE_TYPE: "node",
}
class RabbitMQ(AgentCheck):

    """This check is for gathering statistics from the RabbitMQ
    Management Plugin (http://www.rabbitmq.com/management.html)
    """

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # base_url+object_type keys for which a "limit approaching" event has
        # already been posted, so each alert fires at most once per process.
        self.already_alerted = []

    def _get_config(self, instance):
        """Extract and validate settings from *instance*.

        Returns (base_url, max_detailed, specified, auth) where
        ``max_detailed`` caps how many queues/nodes are collected per type
        and ``specified`` holds the explicit/regex object filters.
        """
        # make sure 'rabbitmq_api_url' is present
        if 'rabbitmq_api_url' not in instance:
            raise Exception('Missing "rabbitmq_api_url" in RabbitMQ config.')

        # get parameters
        base_url = instance['rabbitmq_api_url']
        if not base_url.endswith('/'):
            base_url += '/'
        username = instance.get('rabbitmq_user', 'guest')
        password = instance.get('rabbitmq_pass', 'guest')

        # Limit of queues/nodes to collect metrics from
        max_detailed = {
            QUEUE_TYPE: int(instance.get('max_detailed_queues', MAX_DETAILED_QUEUES)),
            NODE_TYPE: int(instance.get('max_detailed_nodes', MAX_DETAILED_NODES)),
        }

        # List of queues/nodes to collect metrics from
        specified = {
            QUEUE_TYPE: {
                'explicit': instance.get('queues', []),
                'regexes': instance.get('queues_regexes', []),
            },
            NODE_TYPE: {
                'explicit': instance.get('nodes', []),
                'regexes': instance.get('nodes_regexes', []),
            },
        }

        for object_type, filters in specified.iteritems():
            for filter_type, filter_objects in filters.iteritems():
                if type(filter_objects) != list:
                    raise TypeError(
                        "{0} / {0}_regexes parameter must be a list".format(object_type))

        auth = (username, password)

        return base_url, max_detailed, specified, auth

    def check(self, instance):
        """Entry point: collect queue and node metrics, then run the
        aliveness service check."""
        base_url, max_detailed, specified, auth = self._get_config(instance)

        # Generate metrics from the status API.
        self.get_stats(instance, base_url, QUEUE_TYPE, max_detailed[
                       QUEUE_TYPE], specified[QUEUE_TYPE], auth=auth)
        self.get_stats(instance, base_url, NODE_TYPE, max_detailed[
                       NODE_TYPE], specified[NODE_TYPE], auth=auth)

        # Generate a service check from the aliveness API.
        vhosts = instance.get('vhosts')
        self._check_aliveness(base_url, vhosts, auth=auth)

    def _get_data(self, url, auth=None):
        """GET *url* and return the decoded JSON body, wrapping HTTP and
        JSON-parse failures in a single Exception type."""
        try:
            r = requests.get(url, auth=auth)
            r.raise_for_status()
            data = r.json()
        except requests.exceptions.HTTPError as e:
            raise Exception(
                'Cannot open RabbitMQ API url: %s %s' % (url, str(e)))
        # CONSISTENCY FIX: use the 'except ... as e' form (as the HTTPError
        # handler above already does) instead of the legacy
        # 'except ValueError, e' syntax, which is invalid on Python 3.
        except ValueError as e:
            raise Exception(
                'Cannot parse JSON response from API url: %s %s' % (url, str(e)))
        return data

    def get_stats(self, instance, base_url, object_type, max_detailed, filters, auth=None):
        """
        instance: the check instance
        base_url: the url of the rabbitmq management api (e.g. http://localhost:15672/api)
        object_type: either QUEUE_TYPE or NODE_TYPE
        max_detailed: the limit of objects to collect for this type
        filters: explicit or regexes filters of specified queues or nodes (specified in the yaml file)
        """
        data = self._get_data(
            urlparse.urljoin(base_url, object_type), auth=auth)
        # Make a copy of this list as we will remove items from it at each
        # iteration
        explicit_filters = list(filters['explicit'])
        regex_filters = filters['regexes']

        """ data is a list of queue/node dicts straight from the API, e.g.:
        data = [
            {'status': 'running', 'node': 'rabbit@host', 'name': 'queue1',
             'consumers': 0, 'vhost': '/', 'memory': 10956, 'policy': '',
             'durable': True, ...},
            ...
        ]
        """
        if len(explicit_filters) > max_detailed:
            raise Exception(
                "The maximum number of %s you can specify is %d." % (object_type, max_detailed))

        # a list of queues/nodes is specified. We process only those
        if explicit_filters or regex_filters:
            matching_lines = []
            for data_line in data:
                name = data_line.get("name")
                if name in explicit_filters:
                    matching_lines.append(data_line)
                    explicit_filters.remove(name)
                    continue

                match_found = False
                for p in regex_filters:
                    match = re.search(p, name)
                    if match:
                        # Optionally tag the queue with its first regex group
                        # ("queue family") when tag_families is enabled.
                        if _is_affirmative(instance.get("tag_families", False)) and match.groups():
                            data_line["queue_family"] = match.groups()[0]
                        matching_lines.append(data_line)
                        match_found = True
                        break

                if match_found:
                    continue

                # Absolute names (vhost/name) work only for queues
                if object_type != QUEUE_TYPE:
                    continue
                absolute_name = '%s/%s' % (data_line.get("vhost"), name)
                if absolute_name in explicit_filters:
                    matching_lines.append(data_line)
                    explicit_filters.remove(absolute_name)
                    continue

                for p in regex_filters:
                    match = re.search(p, absolute_name)
                    if match:
                        if _is_affirmative(instance.get("tag_families", False)) and match.groups():
                            data_line["queue_family"] = match.groups()[0]
                        matching_lines.append(data_line)
                        match_found = True
                        break

                if match_found:
                    continue

            data = matching_lines

        # if no filters are specified, check everything according to the limits
        if len(data) > ALERT_THRESHOLD * max_detailed:
            # Post a message on the dogweb stream to warn
            self.alert(base_url, max_detailed, len(data), object_type)

        if len(data) > max_detailed:
            # Display a warning in the info page
            self.warning(
                "Too many queues to fetch. You must choose the %s you are interested in by editing the rabbitmq.yaml configuration file or get in touch with Datadog Support" % object_type)

        for data_line in data[:max_detailed]:
            # We truncate the list of nodes/queues if it's above the limit
            self._get_metrics(data_line, object_type)

    def _get_metrics(self, data, object_type):
        """Build tags for one queue/node dict and emit its gauges."""
        tags = []
        tag_list = TAGS_MAP[object_type]
        for t in tag_list:
            tag = data.get(t)
            if tag:
                # FIXME 6.x: remove this suffix or unify (sc doesn't have it)
                tags.append('rabbitmq_%s:%s' % (tag_list[t], tag))

        for attribute, metric_name, operation in ATTRIBUTES[object_type]:
            # Walk down through the data path, e.g. foo/bar => d['foo']['bar']
            root = data
            keys = attribute.split('/')
            for path in keys[:-1]:
                root = root.get(path, {})

            value = root.get(keys[-1], None)
            if value is not None:
                try:
                    self.gauge('rabbitmq.%s.%s' % (
                        METRIC_SUFFIX[object_type], metric_name), operation(value), tags=tags)
                except ValueError:
                    self.log.debug("Caught ValueError for %s %s = %s with tags: %s" % (
                        METRIC_SUFFIX[object_type], attribute, value, tags))

    def alert(self, base_url, max_detailed, size, object_type):
        """Post (once per base_url/object_type) a warning event saying the
        number of collectable objects is approaching the collection limit."""
        key = "%s%s" % (base_url, object_type)
        if key in self.already_alerted:
            # We have already posted an event
            return

        self.already_alerted.append(key)

        title = "RabbitMQ integration is approaching the limit on the number of %s that can be collected from on %s" % (
            object_type, self.hostname)
        msg = """%s %s are present. The limit is %s.
Please get in touch with Datadog support to increase the limit.""" % (size, object_type, max_detailed)

        event = {
            "timestamp": int(time.time()),
            "event_type": EVENT_TYPE,
            "msg_title": title,
            "msg_text": msg,
            "alert_type": 'warning',
            "source_type_name": SOURCE_TYPE_NAME,
            "host": self.hostname,
            "tags": ["base_url:%s" % base_url, "host:%s" % self.hostname],
            "event_object": "rabbitmq.limit.%s" % object_type,
        }

        self.event(event)

    def _check_aliveness(self, base_url, vhosts=None, auth=None):
        """ Check the aliveness API against all or a subset of vhosts. The API
            will return {"status": "ok"} and a 200 response code in the case
            that the check passes.

            In the case of an invalid response code or unparseable JSON the
            service check will be CRITICAL.
        """
        if not vhosts:
            # Fetch a list of _all_ vhosts from the API.
            vhosts_url = urlparse.urljoin(base_url, 'vhosts')
            vhosts_response = self._get_data(vhosts_url, auth=auth)
            vhosts = [v['name'] for v in vhosts_response]

        for vhost in vhosts:
            tags = ['vhost:%s' % vhost]
            # We need to urlencode the vhost because it can be '/'.
            path = u'aliveness-test/%s' % (urllib.quote_plus(vhost))
            aliveness_url = urlparse.urljoin(base_url, path)
            message = None
            try:
                aliveness_response = self._get_data(aliveness_url, auth=auth)
                message = u"Response from aliveness API: %s" % aliveness_response
                if aliveness_response.get('status') == 'ok':
                    status = AgentCheck.OK
                else:
                    status = AgentCheck.CRITICAL
            except Exception as e:
                # Either we got a bad status code or unparseable JSON.
                status = AgentCheck.CRITICAL
                self.warning('Error when checking aliveness for vhost %s: %s'
                             % (vhost, str(e)))

            self.service_check(
                'rabbitmq.aliveness', status, tags, message=message)
| bsd-3-clause |
eadgarchen/tensorflow | tensorflow/contrib/distributions/python/ops/negative_binomial.py | 23 | 7419 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Negative Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class NegativeBinomial(distribution.Distribution):
"""NegativeBinomial distribution.
The NegativeBinomial distribution is related to the experiment of performing
Bernoulli trials in sequence. Given a Bernoulli trial with probability `p` of
success, the NegativeBinomial distribution represents the distribution over
the number of successes `s` that occur until we observe `f` failures.
The probability mass function (pmf) is,
```none
pmf(s; f, p) = p**s (1 - p)**f / Z
Z = s! (f - 1)! / (s + f - 1)!
```
where:
* `total_count = f`,
* `probs = p`,
* `Z` is the normalizaing constant, and,
* `n!` is the factorial of `n`.
"""
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="NegativeBinomial"):
"""Construct NegativeBinomial distributions.
Args:
total_count: Non-negative floating-point `Tensor` with shape
broadcastable to `[B1,..., Bb]` with `b >= 0` and the same dtype as
`probs` or `logits`. Defines this as a batch of `N1 x ... x Nm`
different Negative Binomial distributions. In practice, this represents
the number of negative Bernoulli trials to stop at (the `total_count`
of failures), but this is still a valid distribution when
`total_count` is a non-integer.
logits: Floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents logits for the probability of success for
independent Negative Binomial distributions and must be in the open
interval `(-inf, inf)`. Only one of `logits` or `probs` should be
specified.
probs: Positive floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents the probability of success for independent
Negative Binomial distributions and must be in the open interval
`(0, 1)`. Only one of `logits` or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[total_count, logits, probs]):
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(total_count)] if validate_args else []):
self._total_count = array_ops.identity(total_count)
super(NegativeBinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count, self._probs, self._logits],
name=name)
  @property
  def total_count(self):
    """Number of negative Bernoulli trials to stop at (may be non-integer)."""
    return self._total_count
  @property
  def logits(self):
    """Log-odds of a `1` outcome (vs `0`), i.e. logits of the success probability."""
    return self._logits
  @property
  def probs(self):
    """Probability of a `1` outcome (vs `0`); lies in the open interval (0, 1)."""
    return self._probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.total_count),
array_ops.shape(self.probs))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.total_count.get_shape(),
self.probs.get_shape())
  def _event_shape_tensor(self):
    # Scalar event: each draw is a single count, so the event shape is [].
    return array_ops.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    # Static counterpart of `_event_shape_tensor`: scalar (rank-0) events.
    return tensor_shape.scalar()
  def _sample_n(self, n, seed=None):
    """Draw `n` samples via Gamma-Poisson mixture representation."""
    # Here we use the fact that if:
    # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
    # then X ~ Poisson(lam) is Negative Binomially distributed.
    # Note: exp(-logits) == (1 - probs) / probs, which is the Gamma rate above.
    rate = random_ops.random_gamma(
        shape=[n],
        alpha=self.total_count,
        beta=math_ops.exp(-self.logits),
        dtype=self.dtype,
        seed=seed)
    # A fresh, deterministic seed is derived for the Poisson stage so the two
    # sampling ops do not reuse the same random stream.
    return random_ops.random_poisson(
        rate,
        shape=[],
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, "negative_binom"))
  def _cdf(self, x):
    """CDF via the regularized incomplete beta function."""
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    # P[X <= x] = I_{1-p}(total_count, x + 1), where sigmoid(-logits) == 1 - p.
    return math_ops.betainc(self.total_count, 1. + x,
                            math_ops.sigmoid(-self.logits))
def _log_prob(self, x):
return (self._log_unnormalized_prob(x)
- self._log_normalization(x))
  def _log_unnormalized_prob(self, x):
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    # total_count * log(1 - p) + x * log(p), written with log_sigmoid for
    # numerical stability: log_sigmoid(-logits) == log(1 - p),
    # log_sigmoid(logits) == log(p).
    return (self.total_count * math_ops.log_sigmoid(-self.logits)
            + x * math_ops.log_sigmoid(self.logits))
  def _log_normalization(self, x):
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    # Negative log of the generalized binomial coefficient
    # Gamma(total_count + x) / (Gamma(1 + x) * Gamma(total_count)).
    return (-math_ops.lgamma(self.total_count + x)
            + math_ops.lgamma(1. + x)
            + math_ops.lgamma(self.total_count))
def _mean(self):
return self.total_count * math_ops.exp(self.logits)
  def _mode(self):
    # mode = floor((total_count - 1) * p / (1 - p)) when total_count > 1,
    # otherwise 0 (the adjusted count is clamped at zero below).
    adjusted_count = array_ops.where(
        1. < self.total_count,
        self.total_count - 1.,
        array_ops.zeros_like(self.total_count))
    return math_ops.floor(adjusted_count * math_ops.exp(self.logits))
def _variance(self):
return self._mean() / math_ops.sigmoid(-self.logits)
| apache-2.0 |
xuxiao19910803/edx-platform | common/test/acceptance/pages/studio/auto_auth.py | 155 | 2446 | """
Auto-auth page (used to automatically log in during testing).
"""
import re
import urllib
from bok_choy.page_object import PageObject
from . import BASE_URL
class AutoAuthPage(PageObject):
    """
    The automatic authorization page.
    When allowed via the django settings file, visiting
    this url will create a user and log them in.
    """
    def __init__(self, browser, username=None, email=None, password=None,
                 staff=None, course_id=None, roles=None, no_login=None):
        """
        Auto-auth is an end-point for HTTP GET requests.
        By default, it will create accounts with random user credentials,
        but you can also specify credentials using querystring parameters.
        `username`, `email`, and `password` are the user's credentials (strings)
        `staff` is a boolean indicating whether the user is global staff.
        `course_id` is the ID of the course to enroll the student in.
        Currently, this has the form "org/number/run"
        `roles` is a comma-separated string of course-level roles to grant.
        `no_login`, when truthy, creates the user without logging them in.
        Note that "global staff" is NOT the same as course staff.
        """
        super(AutoAuthPage, self).__init__(browser)
        # Create query string parameters if provided
        self._params = {}
        if username is not None:
            self._params['username'] = username
        if email is not None:
            self._params['email'] = email
        if password is not None:
            self._params['password'] = password
        if staff is not None:
            self._params['staff'] = "true" if staff else "false"
        if course_id is not None:
            self._params['course_id'] = course_id
        if roles is not None:
            self._params['roles'] = roles
        if no_login:
            self._params['no_login'] = True
    @property
    def url(self):
        """
        Construct the auto-auth URL, including any querystring parameters.
        """
        url = BASE_URL + "/auto_auth"
        query_str = urllib.urlencode(self._params)
        if query_str:
            url += "?" + query_str
        return url
    def is_browser_on_page(self):
        """
        Return True iff the auto-auth confirmation message is displayed.
        """
        # Strip trailing whitespace so the `$`-anchored pattern matches
        # reliably; keeps parsing consistent with `get_user_id` below.
        message = self.q(css='BODY').text[0].strip()
        match = re.search(r'(Logged in|Created) user ([^$]+) with password ([^$]+) and user_id ([^$]+)$', message)
        # bool(match) replaces the redundant `True if match else False`.
        return bool(match)
    def get_user_id(self):
        """
        Parse the created user's id out of the page message; None if absent.
        """
        message = self.q(css='BODY').text[0].strip()
        match = re.search(r' user_id ([^$]+)$', message)
        return match.groups()[0] if match else None
| agpl-3.0 |
ZHYfeng/malicious-code-conceal | 3-2-multi-programmes-big/Multiverso-master/binding/python/examples/theano/cnn.py | 6 | 5128 | #!/usr/bin/env python
# coding:utf8
"""
This code is adapted from
https://github.com/benanne/theano-tutorial/blob/master/6_convnet.py
The MIT License (MIT)
Copyright (c) 2015 Sander Dieleman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import load_data
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
# MULTIVERSO: import multiverso
import multiverso as mv
# MULTIVERSO: the sharedvar in theano_ext acts same like Theano's
# sharedVariables. But it use multiverso as the backend
from multiverso.theano_ext import sharedvar
# Load CIFAR-10: x_* are flat pixel rows, t_* are one-hot label matrices.
x_train, t_train, x_test, t_test = load_data.load_cifar10()
labels_test = np.argmax(t_test, axis=1)
# reshape data to (num_examples, channels=3, height=32, width=32) for conv2d
x_train = x_train.reshape((x_train.shape[0], 3, 32, 32))
x_test = x_test.reshape((x_test.shape[0], 3, 32, 32))
# define symbolic Theano variables: x = image batch, t = one-hot targets
x = T.tensor4()
t = T.matrix()
def floatX(x):
    """Cast `x` to a numpy array with Theano's configured float dtype."""
    return np.asarray(x, dtype=theano.config.floatX)
def init_weights(shape, name):
    """Create a multiverso-backed shared weight tensor, Gaussian init (std 0.1)."""
    # MULTIVERSO: relace the shared variable with mv_shared
    return sharedvar.mv_shared(floatX(np.random.randn(*shape) * 0.1), name=name)
def momentum(cost, params, learning_rate, momentum):
    """Build classical-momentum SGD update pairs for each parameter.

    Returns a list of (shared_variable, new_expression) tuples suitable for
    the `updates` argument of `theano.function`.
    """
    gradients = theano.grad(cost, params)
    updates = []
    for param, grad in zip(params, gradients):
        # MULTIVERSO: the velocity buffer is a multiverso-backed shared var
        # (zero-initialized, same shape as the parameter).
        velocity = sharedvar.mv_shared(
            np.zeros(param.get_value().shape, dtype=theano.config.floatX))
        new_velocity = momentum * velocity - learning_rate * grad
        updates.extend([(velocity, new_velocity), (param, param + new_velocity)])
    return updates
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o):
    """Forward pass: two ReLU conv+max-pool stages, a ReLU hidden layer,
    and a softmax output; returns class probabilities per example."""
    # conv stage 1: ReLU(conv(x) + bias broadcast over batch/height/width)
    c1 = T.maximum(0, conv.conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
    p1 = downsample.max_pool_2d(c1, (3, 3))
    # conv stage 2
    c2 = T.maximum(0, conv.conv2d(p1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
    p2 = downsample.max_pool_2d(c2, (2, 2))
    # flatten feature maps to (batch, features) for the dense layers
    p2_flat = p2.flatten(2)
    h3 = T.maximum(0, T.dot(p2_flat, w_h3) + b_h3)
    p_y_given_x = T.nnet.softmax(T.dot(h3, w_o) + b_o)
    return p_y_given_x
# MULTIVERSO: you should call mv.init before call multiverso apis
mv.init()
# MULTIVERSO: every process has a distinct worker id
worker_id = mv.worker_id()
# total number of cooperating worker processes
workers_num = mv.workers_num()
# model parameters: two conv stages, one hidden layer, softmax output
w_c1 = init_weights((4, 3, 3, 3), name="w_c1")
b_c1 = init_weights((4,), name="b_c1")
w_c2 = init_weights((8, 4, 3, 3), name="w_c2")
b_c2 = init_weights((8,), name="b_c2")
w_h3 = init_weights((8 * 4 * 4, 100), name="w_h3")
b_h3 = init_weights((100,), name="b_h3")
w_o = init_weights((100, 10), name="w_o")
b_o = init_weights((10,), name="b_o")
params = [w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o]
p_y_given_x = model(x, *params)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
updates = momentum(cost, params, learning_rate=0.01, momentum=0.9)
# compile theano functions
train = theano.function([x, t], cost, updates=updates, allow_input_downcast=True)
predict = theano.function([x], y, allow_input_downcast=True)
# MULTIVERSO: all the workers will synchronize at the place you call barrier
mv.barrier()
# train model for 50 epochs
batch_size = 50
for i in range(50):
    for start in range(0, len(x_train), batch_size):
        # every process only trains the batches assigned to itself
        # (round-robin by batch index; integer division under Python 2)
        if start / batch_size % workers_num != worker_id:
            continue
        x_batch = x_train[start:start + batch_size]
        t_batch = t_train[start:start + batch_size]
        cost = train(x_batch, t_batch)
        # MULTIVERSO: sync value with multiverso after every batch
        sharedvar.sync_all_mv_shared_vars()
    # MULTIVERSO: all the workers will synchronize at the place you call barrier
    mv.barrier() # barrier every epoch
    # only the master worker computes and reports test accuracy
    if mv.is_master_worker():
        predictions_test = predict(x_test)
        accuracy = np.mean(predictions_test == labels_test)
        print "epoch %d - accuracy: %.4f" % (i + 1, accuracy)
# MULTIVERSO: You must call shutdown at the end of the file
mv.shutdown()
| apache-2.0 |
Drooids/odoo | addons/purchase_requisition/purchase_requisition.py | 200 | 23548 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
class purchase_requisition(osv.osv):
    """Call for bids: gathers requested products, the RFQs sent to suppliers,
    and the bid-selection workflow that turns selected lines into POs."""
    _name = "purchase.requisition"
    _description = "Purchase Requisition"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    def _get_po_line(self, cr, uid, ids, field_names, arg=None, context=None):
        # Function-field getter: map each requisition id to the flat list of
        # purchase order line ids across all of its RFQs.
        result = dict((res_id, []) for res_id in ids)
        for element in self.browse(cr, uid, ids, context=context):
            for po in element.purchase_ids:
                result[element.id] += [po_line.id for po_line in po.order_line]
        return result
    _columns = {
        'name': fields.char('Call for Bids Reference', required=True, copy=False),
        'origin': fields.char('Source Document'),
        'ordering_date': fields.date('Scheduled Ordering Date'),
        'date_end': fields.datetime('Bid Submission Deadline'),
        'schedule_date': fields.date('Scheduled Date', select=True, help="The expected and scheduled date where all the products are received"),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'exclusive': fields.selection([('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')], 'Bid Selection Type', required=True, help="Select only one RFQ (exclusive): On the confirmation of a purchase order, it cancels the remaining purchase order.\nSelect multiple RFQ: It allows to have multiple purchase orders.On confirmation of a purchase order it does not cancel the remaining orders"""),
        'description': fields.text('Description'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'purchase_ids': fields.one2many('purchase.order', 'requisition_id', 'Purchase Orders', states={'done': [('readonly', True)]}),
        'po_line_ids': fields.function(_get_po_line, method=True, type='one2many', relation='purchase.order.line', string='Products by supplier'),
        'line_ids': fields.one2many('purchase.requisition.line', 'requisition_id', 'Products to Purchase', states={'done': [('readonly', True)]}, copy=True),
        'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null', copy=False),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
        'state': fields.selection([('draft', 'Draft'), ('in_progress', 'Confirmed'),
                                   ('open', 'Bid Selection'), ('done', 'PO Created'),
                                   ('cancel', 'Cancelled')],
                                  'Status', track_visibility='onchange', required=True,
                                  copy=False),
        'multiple_rfq_per_supplier': fields.boolean('Multiple RFQ per supplier'),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True),
    }
    def _get_picking_in(self, cr, uid, context=None):
        # Default picking type: the stock module's incoming-shipments type.
        obj_data = self.pool.get('ir.model.data')
        return obj_data.get_object_reference(cr, uid, 'stock', 'picking_type_in')[1]
    _defaults = {
        'state': 'draft',
        'exclusive': 'multiple',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=c),
        'user_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).id,
        'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
        'picking_type_id': _get_picking_in,
    }
    def tender_cancel(self, cr, uid, ids, context=None):
        """Cancel the tender and every quotation attached to it."""
        purchase_order_obj = self.pool.get('purchase.order')
        # try to set all associated quotations to cancel state
        for tender in self.browse(cr, uid, ids, context=context):
            for purchase_order in tender.purchase_ids:
                purchase_order_obj.action_cancel(cr, uid, [purchase_order.id], context=context)
                purchase_order_obj.message_post(cr, uid, [purchase_order.id], body=_('Cancelled by the tender associated to this quotation.'), context=context)
        return self.write(cr, uid, ids, {'state': 'cancel'})
    def tender_in_progress(self, cr, uid, ids, context=None):
        # Workflow transition: draft -> confirmed.
        return self.write(cr, uid, ids, {'state': 'in_progress'}, context=context)
    def tender_open(self, cr, uid, ids, context=None):
        # Workflow transition: confirmed -> bid selection.
        return self.write(cr, uid, ids, {'state': 'open'}, context=context)
    def tender_reset(self, cr, uid, ids, context=None):
        # Back to draft and restart the workflow instance from scratch.
        self.write(cr, uid, ids, {'state': 'draft'})
        for p_id in ids:
            # Deleting the existing instance of workflow for PO
            self.delete_workflow(cr, uid, [p_id])
            self.create_workflow(cr, uid, [p_id])
        return True
    def tender_done(self, cr, uid, ids, context=None):
        # Workflow transition: bid selection -> PO created.
        return self.write(cr, uid, ids, {'state': 'done'}, context=context)
    def open_product_line(self, cr, uid, ids, context=None):
        """ This opens product line view to view all lines from the different quotations, groupby default by product and partner to show comparaison
            between supplier price
            @return: the product line tree view
        """
        if context is None:
            context = {}
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase_requisition', 'purchase_line_tree', context=context)
        res['context'] = context
        # NOTE(review): only the first requisition in `ids` is used here.
        po_lines = self.browse(cr, uid, ids, context=context)[0].po_line_ids
        res['context'] = {
            'search_default_groupby_product': True,
            'search_default_hide_cancelled': True,
            'tender_id': ids[0],
        }
        res['domain'] = [('id', 'in', [line.id for line in po_lines])]
        return res
    def open_rfq(self, cr, uid, ids, context=None):
        """ This opens rfq view to view all quotations associated to the call for bids
            @return: the RFQ tree view
        """
        if context is None:
            context = {}
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase', 'purchase_rfq', context=context)
        res['context'] = context
        po_ids = [po.id for po in self.browse(cr, uid, ids, context=context)[0].purchase_ids]
        res['domain'] = [('id', 'in', po_ids)]
        return res
    def _prepare_purchase_order(self, cr, uid, requisition, supplier, context=None):
        """Build the values dict for a new RFQ (purchase.order) for `supplier`."""
        supplier_pricelist = supplier.property_product_pricelist_purchase
        return {
            'origin': requisition.name,
            'date_order': requisition.date_end or fields.datetime.now(),
            'partner_id': supplier.id,
            'pricelist_id': supplier_pricelist.id,
            'currency_id': supplier_pricelist and supplier_pricelist.currency_id.id or requisition.company_id.currency_id.id,
            'location_id': requisition.procurement_id and requisition.procurement_id.location_id.id or requisition.picking_type_id.default_location_dest_id.id,
            'company_id': requisition.company_id.id,
            'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
            'requisition_id': requisition.id,
            'notes': requisition.description,
            'picking_type_id': requisition.picking_type_id.id
        }
    def _prepare_purchase_order_line(self, cr, uid, requisition, requisition_line, purchase_id, supplier, context=None):
        """Build the values dict for one RFQ line from a requisition line,
        converting quantity to the product's purchase UoM and pre-filling
        price/taxes via the standard onchange."""
        if context is None:
            context = {}
        po_line_obj = self.pool.get('purchase.order.line')
        product_uom = self.pool.get('product.uom')
        product = requisition_line.product_id
        default_uom_po_id = product.uom_po_id.id
        # Use the requisition responsible's timezone to interpret the date.
        ctx = context.copy()
        ctx['tz'] = requisition.user_id.tz
        date_order = requisition.ordering_date and fields.date.date_to_datetime(self, cr, uid, requisition.ordering_date, context=ctx) or fields.datetime.now()
        qty = product_uom._compute_qty(cr, uid, requisition_line.product_uom_id.id, requisition_line.product_qty, default_uom_po_id)
        supplier_pricelist = supplier.property_product_pricelist_purchase and supplier.property_product_pricelist_purchase.id or False
        vals = po_line_obj.onchange_product_id(
            cr, uid, [], supplier_pricelist, product.id, qty, default_uom_po_id,
            supplier.id, date_order=date_order,
            fiscal_position_id=supplier.property_account_position.id,
            date_planned=requisition_line.schedule_date,
            name=False, price_unit=False, state='draft', context=context)['value']
        vals.update({
            'order_id': purchase_id,
            'product_id': product.id,
            'account_analytic_id': requisition_line.account_analytic_id.id,
            'taxes_id': [(6, 0, vals.get('taxes_id', []))],
        })
        return vals
    def make_purchase_order(self, cr, uid, ids, partner_id, context=None):
        """
        Create New RFQ for Supplier
        """
        context = dict(context or {})
        assert partner_id, 'Supplier should be specified'
        purchase_order = self.pool.get('purchase.order')
        purchase_order_line = self.pool.get('purchase.order.line')
        res_partner = self.pool.get('res.partner')
        supplier = res_partner.browse(cr, uid, partner_id, context=context)
        res = {}
        for requisition in self.browse(cr, uid, ids, context=context):
            if not requisition.multiple_rfq_per_supplier and supplier.id in filter(lambda x: x, [rfq.state != 'cancel' and rfq.partner_id.id or None for rfq in requisition.purchase_ids]):
                # NOTE(review): `rfq` here is the list-comprehension variable
                # leaked from the line above (Python 2 scoping), so it refers
                # to the LAST rfq, not the duplicate one — the state shown in
                # the message may be misleading.
                raise osv.except_osv(_('Warning!'), _('You have already one %s purchase order for this partner, you must cancel this purchase order to create a new quotation.') % rfq.state)
            context.update({'mail_create_nolog': True})
            purchase_id = purchase_order.create(cr, uid, self._prepare_purchase_order(cr, uid, requisition, supplier, context=context), context=context)
            purchase_order.message_post(cr, uid, [purchase_id], body=_("RFQ created"), context=context)
            res[requisition.id] = purchase_id
            for line in requisition.line_ids:
                purchase_order_line.create(cr, uid, self._prepare_purchase_order_line(cr, uid, requisition, line, purchase_id, supplier, context=context), context=context)
        return res
    def check_valid_quotation(self, cr, uid, quotation, context=None):
        """
        Check if a quotation has all his order lines bid in order to confirm it if its the case
        return True if all order line have been selected during bidding process, else return False
        args : 'quotation' must be a browse record
        """
        for line in quotation.order_line:
            if line.state != 'confirmed' or line.product_qty != line.quantity_bid:
                return False
        return True
    def _prepare_po_from_tender(self, cr, uid, tender, context=None):
        """ Prepare the values to write in the purchase order
        created from a tender.

        :param tender: the source tender from which we generate a purchase order
        """
        return {'order_line': [],
                'requisition_id': tender.id,
                'origin': tender.name}
    def _prepare_po_line_from_tender(self, cr, uid, tender, line, purchase_id, context=None):
        """ Prepare the values to write in the purchase order line
        created from a line of the tender.

        :param tender: the source tender from which we generate a purchase order
        :param line: the source tender's line from which we generate a line
        :param purchase_id: the id of the new purchase
        """
        return {'product_qty': line.quantity_bid,
                'order_id': purchase_id}
    def generate_po(self, cr, uid, ids, context=None):
        """
        Generate all purchase order based on selected lines, should only be called on one tender at a time
        """
        po = self.pool.get('purchase.order')
        poline = self.pool.get('purchase.order.line')
        id_per_supplier = {}
        for tender in self.browse(cr, uid, ids, context=context):
            if tender.state == 'done':
                raise osv.except_osv(_('Warning!'), _('You have already generate the purchase order(s).'))
            confirm = False
            #check that we have at least confirm one line
            for po_line in tender.po_line_ids:
                if po_line.state == 'confirmed':
                    confirm = True
                    break
            if not confirm:
                raise osv.except_osv(_('Warning!'), _('You have no line selected for buying.'))
            #check for complete RFQ
            for quotation in tender.purchase_ids:
                if (self.check_valid_quotation(cr, uid, quotation, context=context)):
                    #use workflow to set PO state to confirm
                    po.signal_workflow(cr, uid, [quotation.id], 'purchase_confirm')
            #get other confirmed lines per supplier
            for po_line in tender.po_line_ids:
                #only take into account confirmed line that does not belong to already confirmed purchase order
                if po_line.state == 'confirmed' and po_line.order_id.state in ['draft', 'sent', 'bid']:
                    if id_per_supplier.get(po_line.partner_id.id):
                        id_per_supplier[po_line.partner_id.id].append(po_line)
                    else:
                        id_per_supplier[po_line.partner_id.id] = [po_line]
            #generate po based on supplier and cancel all previous RFQ
            ctx = dict(context or {}, force_requisition_id=True)
            for supplier, product_line in id_per_supplier.items():
                #copy a quotation for this supplier and change order_line then validate it
                quotation_id = po.search(cr, uid, [('requisition_id', '=', tender.id), ('partner_id', '=', supplier)], limit=1)[0]
                vals = self._prepare_po_from_tender(cr, uid, tender, context=context)
                new_po = po.copy(cr, uid, quotation_id, default=vals, context=context)
                #duplicate po_line and change product_qty if needed and associate them to newly created PO
                for line in product_line:
                    vals = self._prepare_po_line_from_tender(cr, uid, tender, line, new_po, context=context)
                    poline.copy(cr, uid, line.id, default=vals, context=context)
                #use workflow to set new PO state to confirm
                po.signal_workflow(cr, uid, [new_po], 'purchase_confirm')
            #cancel other orders
            self.cancel_unconfirmed_quotations(cr, uid, tender, context=context)
            #set tender to state done
            self.signal_workflow(cr, uid, [tender.id], 'done')
        return True
    def cancel_unconfirmed_quotations(self, cr, uid, tender, context=None):
        """Cancel every quotation of `tender` still in draft/sent/bid state."""
        #cancel other orders
        po = self.pool.get('purchase.order')
        for quotation in tender.purchase_ids:
            if quotation.state in ['draft', 'sent', 'bid']:
                self.pool.get('purchase.order').signal_workflow(cr, uid, [quotation.id], 'purchase_cancel')
                po.message_post(cr, uid, [quotation.id], body=_('Cancelled by the call for bids associated to this request for quotation.'), context=context)
        return True
class purchase_requisition_line(osv.osv):
    """One requested product on a call for bids."""
    _name = "purchase.requisition.line"
    _description = "Purchase Requisition Line"
    _rec_name = 'product_id'
    _columns = {
        'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok', '=', True)]),
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
        'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', ondelete='cascade'),
        'company_id': fields.related('requisition_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account',),
        'schedule_date': fields.date('Scheduled Date'),
    }
    def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
        """ Changes UoM and name if product_id changes.
        @param name: Name of the field
        @param product_id: Changed product_id
        @return: Dictionary of changed values

        Also falls back to the parent requisition's analytic account and
        scheduled date when the line does not define its own.
        """
        value = {'product_uom_id': ''}
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            value = {'product_uom_id': prod.uom_id.id, 'product_qty': 1.0}
        if not analytic_account:
            value.update({'account_analytic_id': parent_analytic_account})
        if not date:
            value.update({'schedule_date': parent_date})
        return {'value': value}
    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition.line', context=c),
    }
class purchase_order(osv.osv):
    """Extend purchase.order with a link back to its call for bids and the
    exclusive-selection behavior (confirming one PO cancels the siblings)."""
    _inherit = "purchase.order"
    _columns = {
        'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', copy=False),
    }
    def wkf_confirm_order(self, cr, uid, ids, context=None):
        res = super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context=context)
        proc_obj = self.pool.get('procurement.order')
        for po in self.browse(cr, uid, ids, context=context):
            # In exclusive mode, confirming this PO cancels all sibling POs of
            # the same requisition and re-points their procurements here.
            if po.requisition_id and (po.requisition_id.exclusive == 'exclusive'):
                for order in po.requisition_id.purchase_ids:
                    if order.id != po.id:
                        proc_ids = proc_obj.search(cr, uid, [('purchase_id', '=', order.id)])
                        if proc_ids and po.state == 'confirmed':
                            proc_obj.write(cr, uid, proc_ids, {'purchase_id': po.id})
                        order.signal_workflow('purchase_cancel')
                    po.requisition_id.tender_done(context=context)
        return res
    def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
        # Chain the generated stock moves to the procurement's destination
        # move when the requisition originated from a procurement.
        stock_move_lines = super(purchase_order, self)._prepare_order_line_move(cr, uid, order, order_line, picking_id, group_id, context=context)
        if order.requisition_id and order.requisition_id.procurement_id and order.requisition_id.procurement_id.move_dest_id:
            for i in range(0, len(stock_move_lines)):
                stock_move_lines[i]['move_dest_id'] = order.requisition_id.procurement_id.move_dest_id.id
        return stock_move_lines
class purchase_order_line(osv.osv):
    """Extend purchase.order.line with the bid quantity used during the
    tender's bid-selection phase."""
    _inherit = 'purchase.order.line'
    _columns = {
        'quantity_bid': fields.float('Quantity Bid', digits_compute=dp.get_precision('Product Unit of Measure'), help="Technical field for not loosing the initial information about the quantity proposed in the bid"),
    }
    def action_draft(self, cr, uid, ids, context=None):
        """Reset the given lines to draft state."""
        self.write(cr, uid, ids, {'state': 'draft'}, context=context)
    def action_confirm(self, cr, uid, ids, context=None):
        """Confirm the lines, defaulting quantity_bid to the line's own
        product_qty when no bid quantity was entered."""
        super(purchase_order_line, self).action_confirm(cr, uid, ids, context=context)
        for element in self.browse(cr, uid, ids, context=context):
            if not element.quantity_bid:
                # BUGFIX: write only to this record. The previous code wrote
                # `element.product_qty` to ALL `ids`, clobbering the bid
                # quantity of every other line with the current line's qty.
                self.write(cr, uid, [element.id], {'quantity_bid': element.product_qty}, context=context)
        return True
    def generate_po(self, cr, uid, tender_id, context=None):
        #call generate_po from tender with active_id. Called from js widget
        return self.pool.get('purchase.requisition').generate_po(cr, uid, [tender_id], context=context)
class product_template(osv.osv):
    """Extend product.template with a flag that routes procurement of the
    product through a call for bids instead of a direct RFQ."""
    _inherit = 'product.template'
    _columns = {
        'purchase_requisition': fields.boolean('Call for Bids', help="Check this box to generate Call for Bids instead of generating requests for quotation from procurement.")
    }
class procurement_order(osv.osv):
    """Extend procurement to create a purchase requisition (instead of an
    RFQ) for products flagged with `purchase_requisition`."""
    _inherit = 'procurement.order'
    _columns = {
        'requisition_id': fields.many2one('purchase.requisition', 'Latest Requisition')
    }
    def _run(self, cr, uid, procurement, context=None):
        # For 'buy' procurements of requisition-flagged products, create a
        # one-line purchase requisition and link it back to the procurement;
        # all other cases fall through to the standard behavior.
        requisition_obj = self.pool.get('purchase.requisition')
        warehouse_obj = self.pool.get('stock.warehouse')
        if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
            # NOTE(review): picks the first warehouse of the company — assumes
            # one relevant warehouse per company.
            warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id)], context=context)
            requisition_id = requisition_obj.create(cr, uid, {
                'origin': procurement.origin,
                'date_end': procurement.date_planned,
                'warehouse_id': warehouse_id and warehouse_id[0] or False,
                'company_id': procurement.company_id.id,
                'procurement_id': procurement.id,
                'picking_type_id': procurement.rule_id.picking_type_id.id,
                'line_ids': [(0, 0, {
                    'product_id': procurement.product_id.id,
                    'product_uom_id': procurement.product_uom.id,
                    'product_qty': procurement.product_qty
                })],
            })
            self.message_post(cr, uid, [procurement.id], body=_("Purchase Requisition created"), context=context)
            return self.write(cr, uid, [procurement.id], {'requisition_id': requisition_id}, context=context)
        return super(procurement_order, self)._run(cr, uid, procurement, context=context)
    def _check(self, cr, uid, procurement, context=None):
        # A requisition-backed procurement is done once its tender is done
        # and at least one of the resulting POs has been received (shipped).
        if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
            if procurement.requisition_id.state == 'done':
                if any([purchase.shipped for purchase in procurement.requisition_id.purchase_ids]):
                    return True
            return False
        return super(procurement_order, self)._check(cr, uid, procurement, context=context)
| agpl-3.0 |
BT-ojossen/odoo | addons/pad/pad.py | 179 | 4311 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import random
import re
import string
import urllib2
import logging
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from py_etherpad import EtherpadLiteClient
_logger = logging.getLogger(__name__)
class pad_common(osv.osv_memory):
_name = 'pad.common'
def pad_is_configured(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return bool(user.company_id.pad_server)
def pad_generate_url(self, cr, uid, context=None):
company = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).company_id
pad = {
"server" : company.pad_server,
"key" : company.pad_key,
}
# make sure pad server in the form of http://hostname
if not pad["server"]:
return pad
if not pad["server"].startswith('http'):
pad["server"] = 'http://' + pad["server"]
pad["server"] = pad["server"].rstrip('/')
# generate a salt
s = string.ascii_uppercase + string.digits
salt = ''.join([s[random.SystemRandom().randint(0, len(s) - 1)] for i in range(10)])
#path
# etherpad hardcodes pad id length limit to 50
path = '-%s-%s' % (self._name, salt)
path = '%s%s' % (cr.dbname.replace('_','-')[0:50 - len(path)], path)
# contruct the url
url = '%s/p/%s' % (pad["server"], path)
#if create with content
if "field_name" in context and "model" in context and "object_id" in context:
myPad = EtherpadLiteClient( pad["key"], pad["server"]+'/api')
try:
myPad.createPad(path)
except urllib2.URLError:
raise osv.except_osv(_("Error"), _("Pad creation failed, \
either there is a problem with your pad server URL or with your connection."))
#get attr on the field model
model = self.pool[context["model"]]
field = model._fields[context['field_name']]
real_field = field.pad_content_field
#get content of the real field
for record in model.browse(cr, uid, [context["object_id"]]):
if record[real_field]:
myPad.setText(path, (html2plaintext(record[real_field]).encode('utf-8')))
#Etherpad for html not functional
#myPad.setHTML(path, record[real_field])
return {
"server": pad["server"],
"path": path,
"url": url,
}
def pad_get_content(self, cr, uid, url, context=None):
content = ''
if url:
try:
page = urllib2.urlopen('%s/export/html'%url).read()
mo = re.search('<body>(.*)</body>',page)
if mo:
content = mo.group(1)
except:
_logger.warning("No url found '%s'.", url)
return content
# TODO
# reverse engineer protocol to be setHtml without using the api key
def write(self, cr, uid, ids, vals, context=None):
self._set_pad_value(cr, uid, vals, context)
return super(pad_common, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
    """Populate pad-backed content fields in *vals*, then create the record."""
    self._set_pad_value(cr, uid, vals, context)
    new_id = super(pad_common, self).create(cr, uid, vals, context=context)
    return new_id
# Set the pad content in vals
def _set_pad_value(self, cr, uid, vals, context=None):
for k,v in vals.items():
field = self._fields[k]
if hasattr(field,'pad_content_field'):
vals[field.pad_content_field] = self.pad_get_content(cr, uid, v, context=context)
def copy(self, cr, uid, id, default=None, context=None):
    """Override of copy(): give the duplicate a fresh pad for every
    pad-backed field, instead of sharing the original's pad URL."""
    if not default:
        default = {}
    # Python 2 idiom (iteritems); this module targets the old (cr, uid) ORM API.
    for k, field in self._fields.iteritems():
        if hasattr(field,'pad_content_field'):
            # pad_generate_url returns {'server': ..., 'path': ..., 'url': ...}
            pad = self.pad_generate_url(cr, uid, context)
            default[k] = pad.get('url')
    return super(pad_common, self).copy(cr, uid, id, default, context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yashu-seth/networkx | networkx/algorithms/vitality.py | 5 | 2517 | """
Vitality measures.
"""
# Copyright (C) 2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Renato Fabbri'])
__all__ = ['closeness_vitality']
def weiner_index(G, weight=None):
    """Return the Wiener index of G: the sum of shortest-path distances
    over all ordered node pairs, optionally using edge attribute *weight*."""
    # compute sum of distances between all node pairs
    # (with optional weights)
    weiner=0.0
    if weight is None:
        for n in G:
            path_length=nx.single_source_shortest_path_length(G,n)
            # NOTE(review): this branch unpacks (node, dist) pairs, i.e. it
            # assumes single_source_shortest_path_length yields pairs (the
            # networkx 2.0-dev API), while the weighted branch below treats
            # the dijkstra result as a dict -- confirm both match the
            # installed networkx version.
            weiner+=sum(d for n, d in path_length)
    else:
        for n in G:
            path_length=nx.single_source_dijkstra_path_length(G,
                    n,weight=weight)
            weiner+=sum(path_length.values())
    return weiner
def closeness_vitality(G, weight=None):
    """Compute closeness vitality for nodes.

    Closeness vitality of a node is the change in the sum of distances
    between all node pairs when excluding that node.

    Parameters
    ----------
    G : graph

    weight : None or string (optional)
        The name of the edge attribute used as weight. If None the edge
        weights are ignored.

    Returns
    -------
    nodes : dictionary
        Dictionary with nodes as keys and closeness vitality as the value.

    Examples
    --------
    >>> G=nx.cycle_graph(3)
    >>> nx.closeness_vitality(G)
    {0: 4.0, 1: 4.0, 2: 4.0}

    See Also
    --------
    closeness_centrality()

    References
    ----------
    .. [1] Ulrik Brandes, Sec. 3.6.2 in
       Network Analysis: Methodological Foundations, Springer, 2005.
       http://books.google.com/books?id=TTNhSm7HYrIC

    Notes
    -----
    This function temporarily mutates ``G`` (edges incident to each node
    are removed and then restored), so it is not safe to call while the
    graph is being read elsewhere.
    """
    multigraph = G.is_multigraph()
    # Wiener index of the intact graph; each node's vitality is measured
    # against this baseline.
    wig = weiner_index(G,weight)
    closeness_vitality = {}
    for n in G:
        # remove edges connected to node n and keep list of edges with data
        # could remove node n but it doesn't count anyway
        if multigraph:
            # Multigraphs need keys=True so parallel edges restore exactly.
            edges = list(G.edges(n,data=True,keys=True))
            if G.is_directed():
                edges += list(G.in_edges(n,data=True,keys=True))
        else:
            edges = list(G.edges(n,data=True))
            if G.is_directed():
                edges += list(G.in_edges(n,data=True))
        G.remove_edges_from(edges)
        # Vitality = baseline distance sum minus the sum with n isolated.
        closeness_vitality[n] = wig - weiner_index(G,weight)
        # add edges and data back to graph
        G.add_edges_from(edges)
    return closeness_vitality
| bsd-3-clause |
SurrealTiggi/mess | python/exercises/character.py | 1 | 1472 | import random
class Combat:
    """Mixin providing simple dice-roll dodge and attack checks.

    A check succeeds when a uniform roll in [1, limit] exceeds 4; the
    limits are class attributes so subclasses can tune them.
    """

    dodge_limit = 6
    attack_limit = 6

    def dodge(self):
        """Return True when a 1..dodge_limit roll beats 4."""
        return random.randint(1, self.dodge_limit) > 4

    def attack(self):
        """Return True when a 1..attack_limit roll beats 4."""
        return random.randint(1, self.attack_limit) > 4
class Character(Combat):
    """A player character: combat rolls plus a name, weapon and hit points."""

    attack_limit = 10
    experience = 0
    base_hit_points = 10

    def __init__(self, **kwargs):
        self.name = raw_input("Name: ")
        self.weapon = self.get_weapon()
        self.hit_points = self.base_hit_points
        # Any extra keyword arguments become instance attributes.
        for attr, val in kwargs.items():
            setattr(self, attr, val)

    def __str__(self):
        return '{}, HP: {}, XP: {}.'.format(
            self.name, self.hit_points, self.experience)

    def get_weapon(self):
        """Prompt until the player picks a weapon; return its full name."""
        choice = raw_input("Weapon ([S]word, [A]xe, [B]ow): ").lower()
        # Substring test: '' and multi-letter substrings such as 'ab' are
        # accepted too and fall through to the 'bow' default.
        if choice in 'sab':
            return {'s': 'sword', 'a': 'axe'}.get(choice, 'bow')
        return self.get_weapon()

    def attack(self):
        """Roll 1..attack_limit plus a weapon bonus; succeed on a total > 4."""
        bonus = {'sword': 1, 'axe': 2}.get(self.weapon, 0)
        return random.randint(1, self.attack_limit) + bonus > 4

    def rest(self):
        """Recover one hit point, never exceeding the base total."""
        if self.hit_points < self.base_hit_points:
            self.hit_points += 1

    def leveled_up(self):
        """Return True once five or more experience points are earned."""
        return self.experience >= 5
| mit |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/Tool/dvipdf.py | 21 | 4142 | """SCons.Tool.dvipdf
Tool-specific initialization for dvipdf.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/dvipdf.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Action
import SCons.Defaults
import SCons.Tool.pdf
import SCons.Tool.tex
import SCons.Util
_null = SCons.Scanner.LaTeX._null
def DviPdfPsFunction(XXXDviAction, target = None, source= None, env=None):
    """A builder for DVI files that sets the TEXPICTS environment
    variable before running dvi2ps or dvipdf.

    TEXPICTS is pointed at the first source node's directory so that
    pictures referenced by relative path are found; the previous value
    (or its absence) is restored after the wrapped action runs.
    """
    try:
        abspath = source[0].attributes.path
    except AttributeError :
        # Source node carries no path attribute; use no picture prefix.
        abspath = ''
    # modify_env_var returns the prior value, or the _null sentinel when
    # TEXPICTS was not previously set at all.
    saved_env = SCons.Scanner.LaTeX.modify_env_var(env, 'TEXPICTS', abspath)
    result = XXXDviAction(target, source, env)
    if saved_env is _null:
        try:
            del env['ENV']['TEXPICTS']
        except KeyError:
            pass # was never set
    else:
        env['ENV']['TEXPICTS'] = saved_env
    return result
def DviPdfFunction(target = None, source= None, env=None):
    """Action function: run the dvipdf action with TEXPICTS handling."""
    return DviPdfPsFunction(PDFAction, target, source, env)
def DviPdfStrFunction(target = None, source= None, env=None):
    """A strfunction for dvipdf that returns the appropriate
    command string for the no_exec options."""
    if not env.GetOption("no_exec"):
        return ''
    return env.subst('$DVIPDFCOM', 0, target, source)
PDFAction = None
DVIPDFAction = None
def PDFEmitter(target, source, env):
    """Strips any .aux or .log files from the input source list.

    These are created by the TeX Builder that in all likelihood was
    used to generate the .dvi file we're using as input, and we only
    care about the .dvi file.
    """
    kept = [node for node in source
            if SCons.Util.splitext(str(node))[1] not in ('.aux', '.log')]
    return (target, kept)
def generate(env):
    """Add Builders and construction variables for dvipdf to an Environment."""
    global PDFAction
    # Actions are created lazily so repeated generate() calls share them.
    if PDFAction is None:
        PDFAction = SCons.Action.Action('$DVIPDFCOM', '$DVIPDFCOMSTR')

    global DVIPDFAction
    if DVIPDFAction is None:
        DVIPDFAction = SCons.Action.Action(DviPdfFunction, strfunction = DviPdfStrFunction)

    import pdf
    pdf.generate(env)

    # Register the .dvi -> PDF conversion on the shared PDF builder.
    bld = env['BUILDERS']['PDF']
    bld.add_action('.dvi', DVIPDFAction)
    bld.add_emitter('.dvi', PDFEmitter)

    # dvipdf is run from the target directory so auxiliary files land there.
    env['DVIPDF'] = 'dvipdf'
    env['DVIPDFFLAGS'] = SCons.Util.CLVar('')
    env['DVIPDFCOM'] = 'cd ${TARGET.dir} && $DVIPDF $DVIPDFFLAGS ${SOURCE.file} ${TARGET.file}'

    # Deprecated synonym.
    env['PDFCOM'] = ['$DVIPDFCOM']
def exists(env):
    """Return the path of the dvipdf executable if available, else None."""
    # Darwin needs extra setup before Detect can find the TeX binaries.
    SCons.Tool.tex.generate_darwin(env)
    return env.Detect('dvipdf')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
matrix-org/sydent | tests/test_register.py | 1 | 1427 | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import unittest
from tests.utils import make_request, make_sydent
class RegisterTestCase(unittest.TestCase):
    """Tests Sydent's register servlet"""

    def setUp(self):
        # Create a new sydent
        self.sydent = make_sydent()

    def test_sydent_rejects_invalid_hostname(self):
        """Tests that the /register endpoint rejects an invalid hostname passed as matrix_server_name"""
        self.sydent.run()

        # '#' is not a valid character in a hostname.
        bad_hostname = "example.com#"

        request, channel = make_request(
            self.sydent.reactor,
            "POST",
            "/_matrix/identity/v2/account/register",
            content={"matrix_server_name": bad_hostname, "access_token": "foo"},
        )
        request.render(self.sydent.servlets.registerServlet)

        # Invalid server names must be rejected with HTTP 400.
        self.assertEqual(channel.code, 400)
| apache-2.0 |
fyfcauc/android_external_chromium-org | third_party/pexpect/ANSI.py | 171 | 12646 | """This implements an ANSI (VT100) terminal emulator as a subclass of screen.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# references:
# http://en.wikipedia.org/wiki/ANSI_escape_code
# http://www.retards.org/terminals/vt102.html
# http://vt100.net/docs/vt102-ug/contents.html
# http://vt100.net/docs/vt220-rm/
# http://www.termsys.demon.co.uk/vtansi.htm
import screen
import FSM
import copy
import string
#
# The 'Do.*' functions are helper functions for the ANSI class.
#
def DoEmit (fsm):
screen = fsm.memory[0]
screen.write_ch(fsm.input_symbol)
def DoStartNumber (fsm):
fsm.memory.append (fsm.input_symbol)
def DoBuildNumber (fsm):
ns = fsm.memory.pop()
ns = ns + fsm.input_symbol
fsm.memory.append (ns)
def DoBackOne (fsm):
screen = fsm.memory[0]
screen.cursor_back ()
def DoBack (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_back (count)
def DoDownOne (fsm):
screen = fsm.memory[0]
screen.cursor_down ()
def DoDown (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_down (count)
def DoForwardOne (fsm):
screen = fsm.memory[0]
screen.cursor_forward ()
def DoForward (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_forward (count)
def DoUpReverse (fsm):
screen = fsm.memory[0]
screen.cursor_up_reverse()
def DoUpOne (fsm):
screen = fsm.memory[0]
screen.cursor_up ()
def DoUp (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_up (count)
def DoHome (fsm):
    """Pop column then row from FSM memory and home the cursor to (row, col)."""
    col = int(fsm.memory.pop())  # pushed last, so popped first
    row = int(fsm.memory.pop())
    fsm.memory[0].cursor_home(row, col)
def DoHomeOrigin (fsm):
c = 1
r = 1
screen = fsm.memory[0]
screen.cursor_home (r,c)
def DoEraseDown (fsm):
screen = fsm.memory[0]
screen.erase_down()
def DoErase (fsm):
    """Dispatch CSI 'J' (erase in display) by its numeric argument:
    0 = below cursor, 1 = above cursor, 2 = whole screen; others no-op."""
    scr = fsm.memory[0]
    mode = int(fsm.memory.pop())
    handlers = {0: scr.erase_down, 1: scr.erase_up, 2: scr.erase_screen}
    handler = handlers.get(mode)
    if handler is not None:
        handler()
def DoEraseEndOfLine (fsm):
screen = fsm.memory[0]
screen.erase_end_of_line()
def DoEraseLine (fsm):
    """Dispatch CSI 'K' (erase in line) by its numeric argument:
    0 = to end of line, 1 = to start of line, 2 = whole line; others no-op."""
    scr = fsm.memory[0]
    mode = int(fsm.memory.pop())
    handlers = {
        0: scr.erase_end_of_line,
        1: scr.erase_start_of_line,
        2: scr.erase_line,
    }
    handler = handlers.get(mode)
    if handler is not None:
        handler()
def DoEnableScroll (fsm):
screen = fsm.memory[0]
screen.scroll_screen()
def DoCursorSave (fsm):
screen = fsm.memory[0]
screen.cursor_save_attrs()
def DoCursorRestore (fsm):
screen = fsm.memory[0]
screen.cursor_restore_attrs()
def DoScrollRegion (fsm):
    """Set the scrolling region from two stacked row numbers (CSI r)."""
    scr = fsm.memory[0]
    bottom = int(fsm.memory.pop())  # pushed last, so popped first
    top = int(fsm.memory.pop())
    scr.scroll_screen_rows(top, bottom)
def DoMode (fsm):
    """Handle CSI 'l' reset-mode; currently a stub that discards the argument."""
    screen = fsm.memory[0]
    mode = fsm.memory.pop() # Should be 4
    # screen.setReplaceMode ()
def DoLog (fsm):
    """Fallback handler: reset FSM memory and append the unhandled
    symbol/state pair to a file named 'log' in the current directory."""
    screen = fsm.memory[0]
    # Discard any partially-parsed arguments; keep only the screen.
    fsm.memory = [screen]
    # NOTE(review): relative path and open-per-call are debugging aids;
    # costly if many unrecognized sequences arrive.
    fout = open ('log', 'a')
    fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
    fout.close()
class term (screen.screen):

    """This class is an abstract, generic terminal.
    This does nothing. This is a placeholder that
    provides a common base class for other terminals
    such as an ANSI terminal. """

    def __init__ (self, r=24, c=80):
        # Old-style class in this codebase, so the base initializer is
        # invoked explicitly rather than via super().
        screen.screen.__init__(self, r,c)
class ANSI (term):
"""This class implements an ANSI (VT100) terminal.
It is a stream filter that recognizes ANSI terminal
escape sequences and maintains the state of a screen object. """
def __init__ (self, r=24,c=80):
term.__init__(self,r,c)
#self.screen = screen (24,80)
self.state = FSM.FSM ('INIT',[self])
self.state.set_default_transition (DoLog, 'INIT')
self.state.add_transition_any ('INIT', DoEmit, 'INIT')
self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
self.state.add_transition_any ('ESC', DoLog, 'INIT')
self.state.add_transition ('(', 'ESC', None, 'G0SCS')
self.state.add_transition (')', 'ESC', None, 'G1SCS')
self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
self.state.add_transition ('[', 'ESC', None, 'ELB')
# ELB means Escape Left Bracket. That is ^[[
self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
self.state.add_transition ('m', 'ELB', None, 'INIT')
self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
### It gets worse... the 'm' code can have infinite number of
### number;number;number before it. I've never seen more than two,
### but the specs say it's allowed. crap!
self.state.add_transition ('m', 'NUMBER_1', None, 'INIT')
### LED control. Same implementation problem as 'm' code.
self.state.add_transition ('q', 'NUMBER_1', None, 'INIT')
# \E[?47h switch to alternate screen
# \E[?47l restores to normal screen from alternate screen.
self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
self.state.add_transition ('l', 'MODECRAP_NUM', None, 'INIT')
self.state.add_transition ('h', 'MODECRAP_NUM', None, 'INIT')
#RM Reset Mode Esc [ Ps l none
self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
### It gets worse... the 'm' code can have infinite number of
### number;number;number before it. I've never seen more than two,
### but the specs say it's allowed. crap!
self.state.add_transition ('m', 'NUMBER_2', None, 'INIT')
### LED control. Same problem as 'm' code.
self.state.add_transition ('q', 'NUMBER_2', None, 'INIT')
self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
# Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
self.state.add_transition_list (string.digits, 'SEMICOLON_X', None, 'NUMBER_X')
self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
self.state.add_transition ('m', 'NUMBER_X', None, 'INIT')
self.state.add_transition ('q', 'NUMBER_X', None, 'INIT')
self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
def process (self, c):
self.state.process(c)
def process_list (self, l):
self.write(l)
def write (self, s):
for c in s:
self.process(c)
def flush (self):
pass
def write_ch (self, ch):
"""This puts a character at the current cursor position. The cursor
position is moved forward with wrap-around, but no scrolling is done if
the cursor hits the lower-right corner of the screen. """
#\r and \n both produce a call to cr() and lf(), respectively.
ch = ch[0]
if ch == '\r':
self.cr()
return
if ch == '\n':
self.crlf()
return
if ch == chr(screen.BS):
self.cursor_back()
return
if ch not in string.printable:
fout = open ('log', 'a')
fout.write ('Nonprint: ' + str(ord(ch)) + '\n')
fout.close()
return
self.put_abs(self.cur_r, self.cur_c, ch)
old_r = self.cur_r
old_c = self.cur_c
self.cursor_forward()
if old_c == self.cur_c:
self.cursor_down()
if old_r != self.cur_r:
self.cursor_home (self.cur_r, 1)
else:
self.scroll_up ()
self.cursor_home (self.cur_r, 1)
self.erase_line()
# def test (self):
#
# import sys
# write_text = 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)\n' + \
# 'I can see a bare-bottomed mandril.\n' + \
# '(Slyly eyeing his other nostril.)\n' + \
# 'If it jumps inside there too I really don\'t know what to do\n' + \
# 'I\'ll be the proud posessor of a kind of nasal zoo.\n' + \
# '(A nasal zoo.)\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(And what is worst of all it constantly explodes.)\n' + \
# '"Ferrets don\'t explode," you say\n' + \
# 'But it happened nine times yesterday\n' + \
# 'And I should know for each time I was standing in the way.\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)'
# self.fill('.')
# self.cursor_home()
# for c in write_text:
# self.write_ch (c)
# print str(self)
#
#if __name__ == '__main__':
# t = ANSI(6,65)
# t.test()
| bsd-3-clause |
Etwigg/Examples | Group Project Website/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Module-wide debug flag (0 = off).
_debug = 0

# Detector results returned while feeding data to a charset prober.
eDetecting = 0  # still examining data, no verdict yet
eFoundIt = 1    # positive match for this charset
eNotMe = 2      # definitely not this charset

# States of the per-charset coding state machines.
eStart = 0
eError = 1
eItsMe = 2

# Confidence above which a prober's answer is accepted immediately.
SHORTCUT_THRESHOLD = 0.95
| mit |
googleapis/artman | artman/tasks/protoc_tasks.py | 1 | 15154 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks related to protoc"""
import io
import json
import os
import re
from ruamel import yaml
import six
from artman.tasks import task_base
from artman.utils import task_utils
from artman.utils.logger import logger
from artman.utils import protoc_utils
class ProtoDescGenTask(task_base.TaskBase):
"""Generates proto descriptor set"""
default_provides = 'descriptor_set'
def execute(self, src_proto_path, import_proto_path, output_dir,
api_name, api_version, organization_name, toolkit_path,
root_dir, excluded_proto_path=[], proto_deps=[], language='python'):
desc_proto_paths = []
for dep in proto_deps:
if 'proto_path' in dep and dep['proto_path']:
desc_proto_paths.append(os.path.join(root_dir, dep['proto_path']))
desc_protos = list(
protoc_utils.find_protos(src_proto_path + desc_proto_paths,
excluded_proto_path))
header_proto_path = import_proto_path + desc_proto_paths
header_proto_path.extend(src_proto_path)
desc_out_file = task_utils.api_full_name(
api_name, api_version, organization_name) + '.desc'
logger.debug('Compiling descriptors for {0}'.format(desc_protos))
self.exec_command(['mkdir', '-p', output_dir])
proto_params = protoc_utils.PROTO_PARAMS_MAP[language]
proto_compiler_command = proto_params.proto_compiler_command
logger.debug('Using protoc command: {0}'.format(proto_compiler_command))
# DescGen doesn't use group protos by package right now because
# - it doesn't have to
# - and multiple invocation will overwrite the desc_out_file
(common_resources_includes, common_resources_paths) = \
protoc_utils.protoc_common_resources_params(root_dir)
params = proto_params.proto_compiler_command + \
common_resources_includes + \
protoc_utils.protoc_header_params(header_proto_path, toolkit_path) + \
protoc_utils.protoc_desc_params(output_dir, desc_out_file) + \
common_resources_paths + \
desc_protos
self.exec_command(params)
return os.path.join(output_dir, desc_out_file)
class ProtocCodeGenTaskBase(task_base.TaskBase):
"""Generates protos"""
def _execute_proto_codegen(
self, language, src_proto_path, import_proto_path,
pkg_dir, api_name, api_version, organization_name,
toolkit_path, gapic_yaml, root_dir,
gen_proto=False, gen_grpc=False, gen_common_resources=False,
final_src_proto_path=None, final_import_proto_path=None,
excluded_proto_path=[], language_out_override=None):
# Adding 17th parameter is a sin that I commit here just because
# refactoring of this code will never happen.
src_proto_path = final_src_proto_path or src_proto_path
import_proto_path = final_import_proto_path or import_proto_path
proto_params = protoc_utils.PROTO_PARAMS_MAP[language]
if gen_proto:
protoc_proto_params = protoc_utils.protoc_proto_params(
proto_params, pkg_dir, gapic_yaml, with_grpc=True,
language_out_override=language_out_override)
else:
protoc_proto_params = []
if gen_grpc:
protoc_grpc_params = protoc_utils.protoc_grpc_params(
proto_params, pkg_dir, toolkit_path)
else:
protoc_grpc_params = []
if gen_common_resources:
(common_resources_includes, common_resources_paths) = \
protoc_utils.protoc_common_resources_params(root_dir)
protoc_plugin_params = protoc_utils.protoc_plugin_params(
proto_params, pkg_dir, gapic_yaml)
else:
(common_resources_includes, common_resources_paths) = ([], [])
protoc_plugin_params = []
if not protoc_proto_params \
and not protoc_grpc_params \
and not protoc_plugin_params:
return pkg_dir
# protoc-gen-go has some peculiarities:
# It can only compile one package per invocation. So, we need to split
# proto files by packages.
#
# The order of the input files affects comments and internal variables.
# While this doesn't affect the correctness of the result, we sort
# proto files for reproducibility.
# For other languages, we'll pass all proto files into the same protoc
# invocation (PHP especially needs that).
all_protos = protoc_utils.find_protos(src_proto_path, excluded_proto_path)
if language == "go":
protos_map = protoc_utils.group_by_go_package(all_protos)
else:
protos_map = { "": all_protos }
for (dirname, protos) in protos_map.items():
# It is possible to get duplicate protos. De-dupe them.
protos = sorted(set(protos))
command_params = proto_params.proto_compiler_command + \
common_resources_includes + \
protoc_utils.protoc_header_params(
import_proto_path + src_proto_path, toolkit_path) + \
protoc_proto_params + \
protoc_grpc_params + \
protoc_plugin_params + \
common_resources_paths + \
protos
# Execute protoc.
self.exec_command(command_params)
return pkg_dir
class ProtoCodeGenTask(ProtocCodeGenTaskBase):
default_provides = 'proto_code_dir'
"""Generates protos"""
def execute(self, language, src_proto_path, import_proto_path,
output_dir, api_name, api_version, organization_name,
toolkit_path, gapic_yaml, root_dir, final_src_proto_path=None,
final_import_proto_path=None, excluded_proto_path=[],
language_out_override=None):
pkg_dir = protoc_utils.prepare_proto_pkg_dir(
output_dir, api_name, api_version, organization_name, language)
return self._execute_proto_codegen(
language, src_proto_path, import_proto_path, pkg_dir,
api_name, api_version, organization_name, toolkit_path,
gapic_yaml, root_dir, gen_proto=True,
final_src_proto_path=final_src_proto_path,
final_import_proto_path=final_import_proto_path,
excluded_proto_path=excluded_proto_path,
language_out_override=language_out_override)
class ResourceNameGenTask(ProtocCodeGenTaskBase):
default_provides = 'proto_code_dir'
"""Generates protos"""
def execute(self, language, src_proto_path, import_proto_path,
output_dir, api_name, api_version, organization_name,
toolkit_path, gapic_yaml, root_dir, final_src_proto_path=None,
final_import_proto_path=None, excluded_proto_path=[],
language_out_override=None):
pkg_dir = protoc_utils.prepare_proto_pkg_dir(
output_dir, api_name, api_version, organization_name, language)
return self._execute_proto_codegen(
language, src_proto_path, import_proto_path, pkg_dir,
api_name, api_version, organization_name, toolkit_path,
gapic_yaml, root_dir, gen_common_resources=True,
final_src_proto_path=final_src_proto_path,
final_import_proto_path=final_import_proto_path,
excluded_proto_path=excluded_proto_path,
language_out_override=language_out_override)
class GrpcCodeGenTask(ProtocCodeGenTaskBase):
default_provides = 'grpc_code_dir'
"""Generates the gRPC client library"""
def execute(self, language, src_proto_path, import_proto_path,
toolkit_path, output_dir, api_name, api_version,
organization_name, gapic_yaml, root_dir, final_src_proto_path=None,
final_import_proto_path=None, excluded_proto_path=[],
language_out_override=None):
pkg_dir = protoc_utils.prepare_grpc_pkg_dir(
output_dir, api_name, api_version, organization_name, language)
return self._execute_proto_codegen(
language, src_proto_path, import_proto_path, pkg_dir,
api_name, api_version, organization_name, toolkit_path,
gapic_yaml, root_dir, gen_grpc=True,
final_src_proto_path=final_src_proto_path,
final_import_proto_path=final_import_proto_path,
excluded_proto_path=excluded_proto_path,
language_out_override=language_out_override)
class ProtoAndGrpcCodeGenTask(ProtocCodeGenTaskBase):
default_provides = 'grpc_code_dir'
"""Generates protos and the gRPC client library"""
def execute(self, language, src_proto_path, import_proto_path,
toolkit_path, output_dir, api_name, api_version,
organization_name, gapic_yaml, root_dir, final_src_proto_path=None,
final_import_proto_path=None, excluded_proto_path=[],
language_out_override=None):
pkg_dir = protoc_utils.prepare_grpc_pkg_dir(
output_dir, api_name, api_version, organization_name, language)
return self._execute_proto_codegen(
language, src_proto_path, import_proto_path, pkg_dir,
api_name, api_version, organization_name, toolkit_path,
gapic_yaml, root_dir, gen_proto=True, gen_grpc=True,
final_src_proto_path=final_src_proto_path,
final_import_proto_path=final_import_proto_path,
excluded_proto_path=excluded_proto_path,
language_out_override=language_out_override)
class GoCopyTask(task_base.TaskBase):
    """Copies every entry of the generated gRPC output into the GAPIC
    code directory (Go packages ship both in one tree)."""

    def execute(self, gapic_code_dir, grpc_code_dir):
        for entry in os.listdir(grpc_code_dir):
            src_path = os.path.join(grpc_code_dir, entry)
            # -r: entries may be package directories; -f: overwrite stale copies.
            self.exec_command([
                'cp', '-rf', src_path, gapic_code_dir])
class RubyGrpcCopyTask(task_base.TaskBase):
    """Copies the generated protos and gRPC client library to
    the gapic_code_dir/lib.
    """

    def execute(self, api_name, api_version, language, organization_name,
                output_dir, gapic_code_dir, grpc_code_dir):
        # Ruby packaging expects the generated code under lib/.
        final_output_dir = os.path.join(gapic_code_dir, 'lib')
        logger.info('Copying %s/* to %s.' % (grpc_code_dir, final_output_dir))
        if not os.path.exists(final_output_dir):
            self.exec_command(['mkdir', '-p', final_output_dir])
        # Sorted for a deterministic copy order across runs.
        for entry in sorted(os.listdir(grpc_code_dir)):
            src_path = os.path.join(grpc_code_dir, entry)
            self.exec_command([
                'cp', '-rf', src_path, final_output_dir])
class JavaProtoCopyTask(task_base.TaskBase):
    """Copies the .proto files into the grpc_code_dir directory
    under the Maven layout (src/main/proto), preserving each proto's
    path relative to its 'google' root directory.
    """

    def execute(self, src_proto_path, proto_code_dir, excluded_proto_path=[]):
        # NOTE(review): mutable default [] is shared across calls; it is
        # only read here, so harmless as written.
        grpc_proto_dir = os.path.join(proto_code_dir, 'src', 'main', 'proto')
        for proto_path in src_proto_path:
            # Offset of the 'google' directory within the path; protos keep
            # their path relative to that root when copied.
            index = protoc_utils.find_google_dir_index(proto_path)
            for src_proto_file in protoc_utils.find_protos(
                    [proto_path], excluded_proto_path):
                relative_proto_file = src_proto_file[index:]
                dst_proto_file = os.path.join(
                    grpc_proto_dir, relative_proto_file)
                self.exec_command(
                    ['mkdir', '-p', os.path.dirname(dst_proto_file)])
                self.exec_command(['cp', src_proto_file, dst_proto_file])
class PhpGrpcMoveTask(task_base.TaskBase):
    """Moves the generated protos and gRPC client library to
    the gapic_code_dir/proto directory.
    """

    default_provides = 'grpc_code_dir'

    def execute(self, grpc_code_dir, gapic_code_dir=None):
        # Without a GAPIC directory there is nowhere to move to; leave
        # the gRPC output where it is.
        if not gapic_code_dir:
            return grpc_code_dir
        final_output_dir = os.path.join(gapic_code_dir, 'proto')
        if not os.path.exists(final_output_dir):
            self.exec_command(['mkdir', '-p', final_output_dir])
        logger.info('Moving %s/* to %s.' % (grpc_code_dir, final_output_dir))
        for name in sorted(os.listdir(grpc_code_dir)):
            self.exec_command(
                ['mv', os.path.join(grpc_code_dir, name),
                 os.path.join(final_output_dir, name)])
        # The source directory is now empty; remove it.
        self.exec_command(['rm', '-r', grpc_code_dir])
        return final_output_dir
# TODO (michaelbausor): Once correct naming is supported in
# gRPC, we should remove this.
class PhpGrpcRenameTask(task_base.TaskBase):
    """Rename references to proto files in the gRPC stub."""

    def execute(self, grpc_code_dir):
        for path in protoc_utils.list_files_recursive(grpc_code_dir):
            # Only the generated *GrpcClient.php stubs need rewriting.
            if not path.endswith('GrpcClient.php'):
                continue
            logger.info('Performing replacements in: %s' % (path,))
            with io.open(path, encoding='UTF-8') as src:
                updated = protoc_utils.php_proto_rename(src.read())
            with io.open(path, 'w', encoding='UTF-8') as dst:
                dst.write(updated)
class NodeJsProtoCopyTask(task_base.TaskBase):
    """Copies the .proto files into the gapic_code_dir/protos directory
    and compiles these proto files to protobufjs JSON.
    """

    def execute(self, gapic_code_dir, src_proto_path, excluded_proto_path=None):
        # Fix: avoid a mutable default argument; None means "no exclusions".
        if excluded_proto_path is None:
            excluded_proto_path = []
        final_output_dir = os.path.join(gapic_code_dir, 'protos')
        for proto_path in src_proto_path:
            # The destination keeps only the path from the 'google'
            # directory onward.
            index = protoc_utils.find_google_dir_index(proto_path)
            for src_proto_file in protoc_utils.find_protos(
                    [proto_path], excluded_proto_path):
                relative_proto_file = src_proto_file[index:]
                dst_proto_file = os.path.join(
                    final_output_dir, relative_proto_file)
                dst_proto_dir = os.path.dirname(dst_proto_file)
                if not os.path.exists(dst_proto_dir):
                    self.exec_command(['mkdir', '-p', dst_proto_dir])
                self.exec_command(['cp', src_proto_file, dst_proto_file])
        # Execute compileProtos from Docker image (a part of google-gax).
        # Fix: restore the working directory even if the command fails.
        cwd = os.getcwd()
        os.chdir(gapic_code_dir)
        try:
            self.exec_command(['compileProtos', './src'])
        finally:
            os.chdir(cwd)
| apache-2.0 |
andrew-pa/limbo-android | jni/qemu/QMP/qmp.py | 78 | 4958 | # QEMU Monitor Protocol Python class
#
# Copyright (C) 2009, 2010 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <lcapitulino@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import json
import errno
import socket
class QMPError(Exception):
    """Base class for all QMP-related errors."""
class QMPConnectError(QMPError):
    """Raised when the QMP greeting is missing or malformed."""
class QMPCapabilitiesError(QMPError):
    """Raised when qmp_capabilities negotiation fails."""
class QEMUMonitorProtocol:
    """Speak the QEMU Monitor Protocol (QMP) over a unix or TCP socket.

    NOTE(review): this class uses Python 2-only constructs
    ("except socket.error, err" and dict.has_key) and will not parse
    under Python 3.
    """

    def __init__(self, address, server=False):
        """
        Create a QEMUMonitorProtocol class.

        @param address: QEMU address, can be either a unix socket path (string)
                        or a tuple in the form ( address, port ) for a TCP
                        connection
        @param server: server mode listens on the socket (bool)
        @raise socket.error on socket connection errors
        @note No connection is established, this is done by the connect() or
              accept() methods
        """
        # Asynchronous QMP events received so far (drained by get_events()).
        self.__events = []
        self.__address = address
        self.__sock = self.__get_sock()
        if server:
            # Server mode: bind and listen now; the peer is accepted
            # later by accept().
            self.__sock.bind(self.__address)
            self.__sock.listen(1)

    def __get_sock(self):
        # A tuple address means (host, port) -> TCP; anything else is
        # treated as a unix domain socket path.
        if isinstance(self.__address, tuple):
            family = socket.AF_INET
        else:
            family = socket.AF_UNIX
        return socket.socket(family, socket.SOCK_STREAM)

    def __negotiate_capabilities(self):
        # A file wrapper gives line-oriented reads over the JSON stream.
        self.__sockfile = self.__sock.makefile()
        greeting = self.__json_read()
        if greeting is None or not greeting.has_key('QMP'):
            raise QMPConnectError
        # Greeting seems ok, negotiate capabilities
        resp = self.cmd('qmp_capabilities')
        if "return" in resp:
            return greeting
        raise QMPCapabilitiesError

    def __json_read(self, only_event=False):
        # Read one JSON object per line.  Asynchronous events are queued
        # on self.__events instead of being returned as responses, unless
        # only_event is set, in which case the first event is returned.
        # Returns None on EOF.
        while True:
            data = self.__sockfile.readline()
            if not data:
                return
            resp = json.loads(data)
            if 'event' in resp:
                self.__events.append(resp)
                if not only_event:
                    continue
            return resp

    # Convenience alias so callers can catch socket errors via this class.
    error = socket.error

    def connect(self):
        """
        Connect to the QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.connect(self.__address)
        return self.__negotiate_capabilities()

    def accept(self):
        """
        Await connection from QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        # Replace the listening socket with the accepted connection.
        self.__sock, _ = self.__sock.accept()
        return self.__negotiate_capabilities()

    def cmd_obj(self, qmp_cmd):
        """
        Send a QMP command to the QMP Monitor.

        @param qmp_cmd: QMP command to be sent as a Python dict
        @return QMP response as a Python dict or None if the connection has
                been closed
        """
        try:
            self.__sock.sendall(json.dumps(qmp_cmd))
        except socket.error, err:
            # EPIPE: peer closed the connection; report by returning
            # None, mirroring __json_read() on EOF.
            if err[0] == errno.EPIPE:
                return
            raise socket.error(err)
        return self.__json_read()

    def cmd(self, name, args=None, id=None):
        """
        Build a QMP command and send it to the QMP Monitor.

        @param name: command name (string)
        @param args: command arguments (dict)
        @param id: command id (dict, list, string or int)
        """
        qmp_cmd = { 'execute': name }
        if args:
            qmp_cmd['arguments'] = args
        if id:
            qmp_cmd['id'] = id
        return self.cmd_obj(qmp_cmd)

    def command(self, cmd, **kwds):
        # Like cmd(), but unwraps the 'return' value and converts a QMP
        # error response into a Python exception.
        ret = self.cmd(cmd, kwds)
        if ret.has_key('error'):
            raise Exception(ret['error']['desc'])
        return ret['return']

    def get_events(self, wait=False):
        """
        Get a list of available QMP events.

        @param wait: block until an event is available (bool)
        """
        # Drain anything already buffered on the socket without blocking,
        # then optionally block until one event arrives.
        self.__sock.setblocking(0)
        try:
            self.__json_read()
        except socket.error, err:
            if err[0] == errno.EAGAIN:
                # No data available
                pass
        self.__sock.setblocking(1)
        if not self.__events and wait:
            self.__json_read(only_event=True)
        return self.__events

    def clear_events(self):
        """
        Clear current list of pending events.
        """
        self.__events = []

    def close(self):
        # Close both the raw socket and the file wrapper created by
        # __negotiate_capabilities().
        self.__sock.close()
        self.__sockfile.close()
| gpl-2.0 |
4dsolutions/Python5 | model_property.py | 1 | 1640 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 7, 2016
@author: Kirby Urner
This class comes directly from the Python documentation.
Property emulates, in pure Python, what the __builtin__
property type does.
Note use of Descriptor special names __set__, __get__, plus
__delete__, which will in turn trigger the methods saved as
fget, fset, fdel.
Used with decorator syntax, with helper methods e.g.
setter, is most typical. See prop_circle.py
"""
class Property(object):
    "Emulate PyProperty_Type() in Objects/descrobject.c"

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        # Accessor callables; any may be None, which makes the
        # corresponding operation raise AttributeError.
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Mirror the builtin property: inherit the docstring from fget
        # when no explicit doc is given.
        if doc is None and fget is not None:
            doc = fget.__doc__
        self.__doc__ = doc

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor, exactly
        # like the builtin property does.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        return self.fget(obj)

    def __set__(self, obj, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self.fset(obj, value)

    def __delete__(self, obj):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self.fdel(obj)

    def getter(self, fget):
        """Return a copy of this descriptor with fget replaced."""
        return type(self)(fget, self.fset, self.fdel, self.__doc__)

    def setter(self, fset):
        """Return a copy of this descriptor with fset replaced."""
        return type(self)(self.fget, fset, self.fdel, self.__doc__)

    def deleter(self, fdel):
        """Return a copy of this descriptor with fdel replaced."""
        # Fix: use type(self) for consistency with getter()/setter(), so
        # a subclass of Property gets an instance of the subclass back
        # (the original hard-coded Property here).
        return type(self)(self.fget, self.fset, fdel, self.__doc__)
| mit |
listamilton/supermilton.repository | script.areswizard/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# ~25% of the ideal distribution ratio computed above (2.98 * 0.25 ≈ 0.75).
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table: number of entries in EUCTWCharToFreqOrder below.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| gpl-2.0 |
mattvick/phantomjs | src/qt/qtwebkit/Tools/BuildSlaveSupport/build.webkit.org-config/htdigestparser.py | 123 | 2341 | # Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""htdigestparser - a parser for htdigest files"""
import hashlib
import string
class HTDigestParser(object):
    """Parses an Apache htdigest file and authenticates credentials against it.

    Each line of an htdigest file has the form ``username:realm:md5hash``
    where the hash is ``md5(username:realm:password)`` in hex.
    """

    def __init__(self, digest_file):
        self._entries = self.parse_file(digest_file)

    def authenticate(self, username, realm, password):
        """Return True if (username, realm, password) matches a file entry."""
        # htdigest stores md5(username:realm:password).  Encode to bytes before
        # hashing: hashlib.md5() raises TypeError on a str under Python 3.
        digest_input = ':'.join((username, realm, password)).encode('utf-8')
        hashed_password = hashlib.md5(digest_input).hexdigest()
        return [username, realm, hashed_password] in self.entries()

    def entries(self):
        """Return the parsed entries as [username, realm, password_hash] lists."""
        return self._entries

    def parse_file(self, digest_file):
        """Parse a file object; return [] if any line is malformed."""
        entries = [line.rstrip().split(':') for line in digest_file]

        # Perform some sanity-checking to ensure the file is valid.
        valid_characters = set(string.hexdigits)
        for entry in entries:
            # Every entry must be exactly user:realm:hash.
            if len(entry) != 3:
                return []
            hashed_password = entry[-1]
            # An MD5 hex digest is always 32 hex characters.
            if len(hashed_password) != 32:
                return []
            if not set(hashed_password).issubset(valid_characters):
                return []

        return entries
| bsd-3-clause |
facebookexperimental/eden | eden/fs/cli/daemon.py | 1 | 10314 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# pyre-strict
import os
import stat
import subprocess
import sys
from typing import Dict, List, Optional, Tuple
from . import daemon_util, proc_utils as proc_utils_mod
from .config import EdenInstance
from .util import ShutdownError, poll_until, print_stderr
# The amount of time to wait for the edenfs process to exit after we send SIGKILL.
# We normally expect the process to be killed and reaped fairly quickly in this
# situation. However, in rare cases on very heavily loaded systems it can take a while
# for init/systemd to wait on the process and for everything to be fully cleaned up.
# Therefore we wait up to 30 seconds by default. (I've seen it take up to a couple
# minutes on systems with extremely high disk I/O load.)
#
# If this timeout does expire this can cause `edenfsctl restart` to fail after
# killing the old process but without starting the new process, which is
# generally undesirable if we can avoid it.
DEFAULT_SIGKILL_TIMEOUT = 30.0
def wait_for_process_exit(pid: int, timeout: float) -> bool:
    """Block until the process with ID `pid` exits.

    Returns True when the process goes away within `timeout` seconds, and
    False when the timeout expires with the process still alive.
    """
    checker: proc_utils_mod.ProcUtils = proc_utils_mod.new()

    def exited_yet() -> Optional[bool]:
        # poll_until() keeps polling while this returns None and stops on any
        # other value.
        return True if not checker.is_process_alive(pid) else None

    try:
        poll_until(exited_yet, timeout=timeout)
    except TimeoutError:
        return False
    return True
def wait_for_shutdown(
    pid: int, timeout: float, kill_timeout: float = DEFAULT_SIGKILL_TIMEOUT
) -> bool:
    """Wait up to `timeout` seconds for `pid` to exit on its own, escalating
    to SIGKILL if it does not.

    Returns True when the process exited voluntarily, False when SIGKILL was
    required.  Raises ShutdownError when SIGKILL could not be delivered, or
    when the process survived `kill_timeout` seconds after SIGKILL was sent.
    """
    # Happy path: the process shuts down by itself within the deadline.
    if wait_for_process_exit(pid, timeout):
        return True

    # The graceful shutdown did not complete in time; escalate to SIGKILL.
    print_stderr(
        "error: sent shutdown request, but edenfs did not exit "
        "within {} seconds. Attempting SIGKILL.",
        timeout,
    )
    sigkill_process(pid, timeout=kill_timeout)
    return False
def sigkill_process(pid: int, timeout: float = DEFAULT_SIGKILL_TIMEOUT) -> None:
    """Send SIGKILL to a process, and wait for it to exit.

    Args:
        pid: the process ID to kill.
        timeout: if greater than 0, wait up to this many seconds for the
            process to exit after sending the signal.

    Raises:
        ShutdownError: if we lacked permission to signal the process, or if
            the process did not exit within `timeout` seconds of SIGKILL.

    Returns successfully if the specified process did not exist in the first
    place.  This is done to handle situations where the process exited on its
    own just before we could send SIGKILL.
    """
    proc_utils: proc_utils_mod.ProcUtils = proc_utils_mod.new()
    try:
        proc_utils.kill_process(pid)
    except PermissionError as ex:
        # Fixed wording (was "Received a permissions when attempting...") and
        # chain the original exception so the cause is preserved.
        raise ShutdownError(
            "Received a permission error when attempting to kill edenfs. "
            "Perhaps edenfs failed to drop root privileges properly?"
        ) from ex

    if timeout <= 0:
        # Fire-and-forget: the caller does not want to wait for the reap.
        return

    if not wait_for_process_exit(pid, timeout):
        raise ShutdownError(
            "edenfs process {} did not terminate within {} seconds of "
            "sending SIGKILL.".format(pid, timeout)
        )
async def start_edenfs_service(
    instance: EdenInstance,
    daemon_binary: Optional[str] = None,
    edenfs_args: Optional[List[str]] = None,
) -> int:
    """Start the edenfs daemon and return its exit status."""
    if not instance.should_use_experimental_systemd_mode():
        # Plain (non-systemd) startup path.
        return _start_edenfs_service(
            instance=instance,
            daemon_binary=daemon_binary,
            edenfs_args=edenfs_args,
            takeover=False,
        )

    # Experimental systemd-managed startup.  Imported lazily so the module is
    # only loaded when this mode is enabled.
    from . import systemd_service

    return await systemd_service.start_systemd_service(
        instance=instance, daemon_binary=daemon_binary, edenfs_args=edenfs_args
    )
def gracefully_restart_edenfs_service(
    instance: EdenInstance,
    daemon_binary: Optional[str] = None,
    edenfs_args: Optional[List[str]] = None,
) -> int:
    """Restart EdenFS via a graceful takeover and return the exit status.

    Raises NotImplementedError in the experimental systemd mode, where
    takeover restarts are not yet supported.
    """
    if instance.should_use_experimental_systemd_mode():
        raise NotImplementedError("TODO(T33122320): Implement 'eden start --takeover'")

    # A takeover start hands the existing mounts over to the new daemon.
    return _start_edenfs_service(
        instance=instance,
        daemon_binary=daemon_binary,
        edenfs_args=edenfs_args,
        takeover=True,
    )
def _start_edenfs_service(
    instance: EdenInstance,
    daemon_binary: Optional[str] = None,
    edenfs_args: Optional[List[str]] = None,
    takeover: bool = False,
) -> int:
    """Build the edenfs command line and environment, spawn the daemon, and
    return its exit status."""
    binary = daemon_util.find_daemon_binary(daemon_binary)

    cmd = get_edenfs_cmd(instance, binary)
    if takeover:
        cmd.append("--takeover")
    if edenfs_args:
        cmd.extend(edenfs_args)

    eden_env = get_edenfs_environment()

    # Wrap the command in sudo when we need elevated privileges.
    cmd, eden_env = prepare_edenfs_privileges(binary, cmd, eden_env)

    creation_flags = 0
    if sys.platform == "win32":
        # Suppress the console window the child would otherwise open.
        creation_flags = getattr(subprocess, "CREATE_NO_WINDOW", 0x08000000)

    return subprocess.call(
        cmd, stdin=subprocess.DEVNULL, env=eden_env, creationflags=creation_flags
    )
def get_edenfs_cmd(instance: EdenInstance, daemon_binary: str) -> List[str]:
    """Build the argument vector used to launch the edenfs daemon."""
    # Let an explicit environment override win over the inferred CLI path.
    ctl_path = os.environ.get("EDENFS_CLI_PATH", os.path.abspath(sys.argv[0]))
    return [
        daemon_binary,
        "--edenfs",
        "--edenfsctlPath",
        ctl_path,
        "--edenDir",
        str(instance.state_dir),
        "--etcEdenDir",
        str(instance.etc_eden_dir),
        "--configPath",
        str(instance.user_config_path),
    ]
def prepare_edenfs_privileges(
    daemon_binary: str, cmd: List[str], env: Dict[str, str]
) -> Tuple[List[str], Dict[str, str]]:
    """Arrange for the EdenFS command to run with root privileges.

    Returns the (possibly sudo-wrapped) command list and its environment.
    """
    # Windows has no sudo/setuid concept; nothing to do there.
    if sys.platform == "win32":
        return (cmd, env)

    # Already running as root: no wrapping required.
    if os.geteuid() == 0:
        return (cmd, env)

    # A setuid-root binary can elevate itself without sudo.
    st = os.stat(daemon_binary)
    if st.st_uid == 0 and (st.st_mode & stat.S_ISUID):
        return (cmd, env)

    # Otherwise run edenfs under sudo.  Pass the environment variables on the
    # sudo command line: depending on the sudo configuration they may not be
    # forwarded automatically, even with "sudo -E".
    wrapper = ["/usr/bin/sudo"]
    for key, value in env.items():
        wrapper.append("%s=%s" % (key, value))
    return wrapper + cmd, env
def get_edenfs_environment() -> Dict[str, str]:
    """Build the sanitized environment used to start the edenfs daemon.

    Starts from an empty environment, fixes PATH, and copies through only an
    explicit allow-list of variables from the current process environment.
    """
    eden_env = {}

    if sys.platform == "win32":
        # On Windows, copy the existing PATH as it's not clear what locations
        # are needed.
        eden_env["PATH"] = os.environ["PATH"]
    else:
        # Elsewhere, reset $PATH to a fixed value so every daemon runs with
        # the same consistent settings.
        eden_env["PATH"] = ":".join(
            ["/opt/facebook/hg/bin", "/usr/local/bin", "/bin", "/usr/bin"]
        )

    if sys.platform == "darwin":
        # Prevent warning on mac, which will crash eden:
        # +[__NSPlaceholderDate initialize] may have been in progress in
        # another thread when fork() was called.
        eden_env["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"

    # Variables copied through verbatim when present in our own environment.
    preserve = [
        "USER",
        "LOGNAME",
        "HOME",
        "EMAIL",
        "NAME",
        "ASAN_OPTIONS",
        # When we import data from mercurial, the remotefilelog extension
        # may need to SSH to a remote mercurial server to get the file
        # contents.  Preserve SSH environment variables needed to do this.
        "SSH_AUTH_SOCK",
        "SSH_AGENT_PID",
        "KRB5CCNAME",
        "SANDCASTLE_INSTANCE_ID",
        "SCRATCH_CONFIG_PATH",
        # These environment variables are used by Corp2Prod (C2P) Secure
        # Thrift clients to get the user certificates for authentication.
        # (We use C2P Secure Thrift to fetch metadata from SCS.)
        "THRIFT_TLS_CL_CERT_PATH",
        "THRIFT_TLS_CL_KEY_PATH",
    ]
    if sys.platform == "win32":
        preserve += [
            "APPDATA",
            "SYSTEMROOT",
            "USERPROFILE",
            "USERNAME",
            "PROGRAMDATA",
            "LOCALAPPDATA",
        ]

    for name, value in os.environ.items():
        # TESTPILOT_ variables let the test runner track down and kill
        # runaway processes spawned during test runs; EDENFS_ variables are
        # populated by our own test infra to relay paths to build artifacts.
        # Everything not matching the allow-lists is deliberately dropped.
        if name.startswith("TESTPILOT_") or name.startswith("EDENFS_"):
            eden_env[name] = value
        elif name in preserve:
            eden_env[name] = value

    return eden_env
| gpl-2.0 |
Infixz/BlogCatke | virtualenv.bundle/requests/packages/chardet2/sbcharsetprober.py | 25 | 4689 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Detects a single-byte charset by scoring adjacent-character (bigram)
    frequencies against a per-language model.

    The model dict supplies `charToOrderMap` (byte -> frequency rank),
    `precedenceMatrix` (bigram -> likelihood category) and
    `mTypicalPositiveRatio` (normalization for the confidence score).
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        self._mReversed = reversed  # TRUE if we need to reverse every pair in the model lookup
        self._mNameProber = nameProber  # Optional auxiliary prober for name decision
        self.reset()

    def reset(self):
        # Reset all running statistics so the prober can be reused.
        CharSetProber.reset(self)
        self._mLastOrder = 255  # char order of last character (255 = sentinel: no previous char)
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT  # bigram counts per likelihood category
        self._mTotalSeqs = 0  # total bigrams scored
        self._mTotalChar = 0  # total non-symbol characters seen
        self._mFreqChar = 0  # characters that fall in our sampling range

    def get_charset_name(self):
        # Delegate to the name prober when one was supplied (group detection).
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Accumulate statistics from `aBuf` and return the detection state."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][c]
            if order < SYMBOL_CAT_ORDER:
                # Count every letter-like character toward the total.
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                # Only the most frequent characters participate in bigram scoring.
                self._mFreqChar += 1
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        self._mSeqCounters[self._mModel['precedenceMatrix'][(self._mLastOrder * SAMPLE_SIZE) + order]] += 1
                    else:  # reverse the order of the letters in the lookup
                        self._mSeqCounters[self._mModel['precedenceMatrix'][(order * SAMPLE_SIZE) + self._mLastOrder]] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                # Enough data collected: shortcut to a verdict when confidence
                # is decisively high or decisively low.
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a winner\n' % (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative shortcut threshhold %s\n' % (self._mModel['charsetName'], cf, NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()

    def get_confidence(self):
        """Return a confidence score in [0.01, 0.99] based on the ratio of
        high-likelihood bigrams, normalized by the model's typical ratio."""
        r = 0.01
        if self._mTotalSeqs > 0:
            # print self._mSeqCounters[POSITIVE_CAT], self._mTotalSeqs, self._mModel['mTypicalPositiveRatio']
            r = (1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs / self._mModel['mTypicalPositiveRatio']
            # print r, self._mFreqChar, self._mTotalChar
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            # Clamp below 1.0 so no single-byte prober claims certainty.
            r = 0.99
        return r
| apache-2.0 |
Arctan-Open-Source/arclaunch | test/gtest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection: some flags only appear in --help output on certain OSes.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

# Path to the gtest_help_test_ binary built alongside this script.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')

# All Google Test command-line flags share this prefix.
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Malformed spellings of --gtest_list_tests that should still print the help.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Probe the binary's test list to learn whether it was built with death tests.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # Help mode must succeed (exit 0) and print the full help text.
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    # --gtest_stream_result_to is only advertised on Linux.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)

    # Death-test flags only appear when the binary was built with death
    # tests, and never on Windows.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # The test binary is expected to fail (non-zero exit) without printing
    # the help text.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Entry point: delegate to the shared gtest test-runner harness.
if __name__ == '__main__':
  gtest_test_utils.Main()
| gpl-2.0 |
thnee/ansible | test/units/modules/network/f5/test_bigip_file_copy.py | 22 | 3619 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_file_copy import ApiParameters
from library.modules.bigip_file_copy import IFileManager
from library.modules.bigip_file_copy import ModuleParameters
from library.modules.bigip_file_copy import ModuleManager
from library.modules.bigip_file_copy import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_file_copy import ApiParameters
from ansible.modules.network.f5.bigip_file_copy import IFileManager
from ansible.modules.network.f5.bigip_file_copy import ModuleParameters
from ansible.modules.network.f5.bigip_file_copy import ModuleManager
from ansible.modules.network.f5.bigip_file_copy import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file from the fixtures directory, memoizing the result.

    The file's contents are JSON-decoded when possible; otherwise the raw
    text is returned.  Either way the value is cached per absolute path.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        # Already loaded once; reuse the cached copy.
        return fixture_data[full_path]

    with open(full_path) as handle:
        contents = handle.read()

    try:
        contents = json.loads(contents)
    except Exception:
        # Not valid JSON; fall back to the raw file text.
        pass

    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters(self):
        # Values supplied by the playbook task should be exposed verbatim
        # by ModuleParameters.
        args = dict(
            name='foo',
            source='file.txt',
            force=True
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.source == 'file.txt'
        assert p.force is True

    def test_api_parameters(self):
        # Values read back from the BIG-IP REST API (captured in a fixture)
        # should surface the device-computed checksum.
        args = load_fixture('load_sys_file_external-monitor_1.json')

        p = ApiParameters(params=args)
        assert p.checksum == '0c78e6641632e47d11802b29cfd119d2233cb80a'
class TestManager(unittest.TestCase):
    """Unit tests for the module manager's create path, with all device
    I/O mocked out so no real BIG-IP is required."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            source='file.txt',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )

        # Stub every device-facing method on the type-specific manager.
        tm = IFileManager(module=module)
        tm.exists = Mock(return_value=False)
        tm.create_on_device = Mock(return_value=True)
        tm.upload_to_device = Mock(return_value=True)
        tm.remove_uploaded_file_from_device = Mock(return_value=True)

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.get_manager = Mock(return_value=tm)
        results = mm.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
dac284/ud032 | Lesson_5_Analyzing_Data/14-Using_push/push.py | 9 | 2042 | #!/usr/bin/env python
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
    """Open a connection to the local MongoDB server and return the
    database named *db_name*."""
    from pymongo import MongoClient
    connection = MongoClient('localhost:27017')
    return connection[db_name]
def make_pipeline():
    """Build the aggregation pipeline described in the module docstring.

    Returns a list of pipeline stages that:
      1. group tweets by the author's screen name, counting tweets with
         $sum and collecting every tweet text with $push,
      2. sort the groups by tweet count, descending,
      3. keep only the 5 users with the most tweets.

    Result documents therefore contain only "_id" (screen name),
    "count" and "tweet_texts", as required by the exercise.
    """
    pipeline = [
        {"$group": {
            "_id": "$user.screen_name",
            "count": {"$sum": 1},
            "tweet_texts": {"$push": "$text"},
        }},
        {"$sort": {"count": -1}},
        {"$limit": 5},
    ]
    return pipeline
def aggregate(db, pipeline):
    """Run *pipeline* against the tweets collection of *db* and return
    whatever the driver's aggregate() call produces."""
    return db.tweets.aggregate(pipeline)
if __name__ == '__main__':
    # Run the pipeline against the local 'twitter' database and sanity-check
    # the shape of the answer: exactly 5 users, sorted by descending count.
    db = get_db('twitter')
    pipeline = make_pipeline()
    result = aggregate(db, pipeline)
    assert len(result["result"]) == 5
    assert result["result"][0]["count"] > result["result"][4]["count"]
    import pprint
    pprint.pprint(result)
| agpl-3.0 |
DarthStrom/python_koans | python2/runner/sensei.py | 43 | 9863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
import helper
from mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
    """Test-result reporter that narrates koan progress to the student.

    Collects pass/fail information from the koan test run and prints
    colourized progress, failure details and zen-flavoured encouragement
    to *stream*.
    """

    def __init__(self, stream):
        unittest.TestResult.__init__(self)
        self.stream = stream
        self.prevTestClassName = None
        self.tests = path_to_enlightenment.koans()
        self.pass_count = 0
        self.lesson_pass_count = 0
        self.all_lessons = None

    def startTest(self, test):
        # Announce each koan class ("lesson") the first time we reach it.
        MockableTestResult.startTest(self, test)

        if helper.cls_name(test) != self.prevTestClassName:
            self.prevTestClassName = helper.cls_name(test)
            if not self.failures:
                self.stream.writeln()
                self.stream.writeln("{0}{1}Thinking {2}".format(
                    Fore.RESET, Style.NORMAL, helper.cls_name(test)))
                # The introductory AboutAsserts class is not counted as a lesson.
                if helper.cls_name(test) != 'AboutAsserts':
                    self.lesson_pass_count += 1

    def addSuccess(self, test):
        # Only celebrate successes up to (and within) the first failing class.
        if self.passesCount():
            MockableTestResult.addSuccess(self, test)
            self.stream.writeln( \
                " {0}{1}{2} has expanded your awareness.{3}{4}" \
                .format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
                Fore.RESET, Style.NORMAL))
            self.pass_count += 1

    def addError(self, test, err):
        # Having 1 list for errors and 1 list for failures would mess with
        # the error sequence
        self.addFailure(test, err)

    def passesCount(self):
        # True while no failure has been recorded in a class other than the
        # one currently running.
        return not (self.failures and helper.cls_name(self.failures[0][0]) !=
                    self.prevTestClassName)

    def addFailure(self, test, err):
        MockableTestResult.addFailure(self, test, err)

    def sortFailures(self, testClassName):
        """Return failures of *testClassName* ordered by source line number.

        Each entry is a (lineno, test, err) tuple; returns None when the
        class has no recorded failures with a parsable line number.
        """
        table = list()
        for test, err in self.failures:
            if helper.cls_name(test) == testClassName:
                m = re.search("(?<= line )\d+" ,err)
                if m:
                    tup = (int(m.group(0)), test, err)
                    table.append(tup)

        if table:
            return sorted(table)
        else:
            return None

    def firstFailure(self):
        """Return (test, err) for the earliest failure in the first failing
        class, or None when everything passed."""
        if not self.failures: return None

        table = self.sortFailures(helper.cls_name(self.failures[0][0]))

        if table:
            return (table[0][1], table[0][2])
        else:
            return None

    def learn(self):
        """Print the end-of-run report and exit non-zero while koans remain."""
        self.errorReport()

        self.stream.writeln("")
        self.stream.writeln("")
        self.stream.writeln(self.report_progress())
        if self.failures:
            self.stream.writeln(self.report_remaining())
        self.stream.writeln("")
        self.stream.writeln(self.say_something_zenlike())

        # Non-zero exit tells the runner that unsolved koans remain.
        if self.failures: sys.exit(-1)
        self.stream.writeln(
            "\n{0}**************************************************" \
            .format(Fore.RESET))
        self.stream.writeln("\n{0}That was the last one, well done!" \
            .format(Fore.MAGENTA))

        self.stream.writeln(
            "\nIf you want more, take a look at about_extra_credit_task.py")

    def errorReport(self):
        """Show the first failing koan: its assertion text and stack excerpt."""
        problem = self.firstFailure()
        if not problem: return
        test, err = problem
        self.stream.writeln(" {0}{1}{2} has damaged your "
            "karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))

        self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
            .format(Fore.RESET, Style.NORMAL))
        self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
            Style.BRIGHT, self.scrapeAssertionError(err)))
        self.stream.writeln("")
        self.stream.writeln("{0}{1}Please meditate on the following code:" \
            .format(Fore.RESET, Style.NORMAL))
        self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
            self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))

    def scrapeAssertionError(self, err):
        """Extract just the assertion-error message from a traceback string."""
        if not err: return ""

        error_text = ""
        count = 0
        for line in err.splitlines():
            m = re.search("^[^^ ].*$",line)
            if m and m.group(0):
                count+=1
            # Everything after the first unindented line is the error message.
            if count>1:
                error_text += (" " + line.strip()).rstrip() + '\n'
        return error_text.strip('\n')

    def scrapeInterestingStackDump(self, err):
        """Reduce a traceback to the frames inside the koans files and
        colour-highlight file names and line numbers."""
        if not err:
            return ""

        lines = err.splitlines()

        # Temporary separator so file/source pairs can be re-split later.
        sep = '@@@@@SEP@@@@@'

        stack_text = ""
        for line in lines:
            m = re.search("^ File .*$",line)
            if m and m.group(0):
                stack_text += '\n' + line

            m = re.search("^ \w(\w)+.*$",line)
            if m and m.group(0):
                stack_text += sep + line

        lines = stack_text.splitlines()

        stack_text = ""
        for line in lines:
            # Keep only frames that live under a .../koans/... path.
            m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
            if m and m.group(0):
                stack_text += line + '\n'

        stack_text = stack_text.replace(sep, '\n').strip('\n')
        stack_text = re.sub(r'(about_\w+.py)',
                r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
        stack_text = re.sub(r'(line \d+)',
                r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
        return stack_text

    def report_progress(self):
        """One-line summary of koans and lessons completed so far."""
        return "You have completed {0} koans and " \
            "{1} lessons.".format(
                self.pass_count,
                self.lesson_pass_count)

    def report_remaining(self):
        """One-line summary of koans and lessons still to be solved."""
        koans_remaining = self.total_koans() - self.pass_count
        lessons_remaining = self.total_lessons() - self.lesson_pass_count

        return "You are now {0} koans and {1} lessons away from " \
            "reaching enlightenment.".format(
                koans_remaining,
                lessons_remaining)

    # Hat's tip to Tim Peters for the zen statements from The 'Zen
    # of Python' (http://www.python.org/dev/peps/pep-0020/)
    #
    # Also a hat's tip to Ara T. Howard for the zen statements from his
    # metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
    # Edgecase's later permutation in the Ruby Koans
    def say_something_zenlike(self):
        """Pick a rotating Zen-of-Python quote keyed off the pass count."""
        if self.failures:
            turn = self.pass_count % 37
            zenness = "";
            if turn == 0:
                zenness = "Beautiful is better than ugly."
            elif turn == 1 or turn == 2:
                zenness = "Explicit is better than implicit."
            elif turn == 3 or turn == 4:
                zenness = "Simple is better than complex."
            elif turn == 5 or turn == 6:
                zenness = "Complex is better than complicated."
            elif turn == 7 or turn == 8:
                zenness = "Flat is better than nested."
            elif turn == 9 or turn == 10:
                zenness = "Sparse is better than dense."
            elif turn == 11 or turn == 12:
                zenness = "Readability counts."
            elif turn == 13 or turn == 14:
                zenness = "Special cases aren't special enough to " \
                    "break the rules."
            elif turn == 15 or turn == 16:
                zenness = "Although practicality beats purity."
            elif turn == 17 or turn == 18:
                zenness = "Errors should never pass silently."
            elif turn == 19 or turn == 20:
                zenness = "Unless explicitly silenced."
            elif turn == 21 or turn == 22:
                zenness = "In the face of ambiguity, refuse the " \
                    "temptation to guess."
            elif turn == 23 or turn == 24:
                zenness = "There should be one-- and preferably only " \
                    "one --obvious way to do it."
            elif turn == 25 or turn == 26:
                zenness = "Although that way may not be obvious at " \
                    "first unless you're Dutch."
            elif turn == 27 or turn == 28:
                zenness = "Now is better than never."
            elif turn == 29 or turn == 30:
                zenness = "Although never is often better than right " \
                    "now."
            elif turn == 31 or turn == 32:
                zenness = "If the implementation is hard to explain, " \
                    "it's a bad idea."
            elif turn == 33 or turn == 34:
                zenness = "If the implementation is easy to explain, " \
                    "it may be a good idea."
            else:
                zenness = "Namespaces are one honking great idea -- " \
                    "let's do more of those!"
            return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
        else:
            return "{0}Nobody ever expects the Spanish Inquisition." \
                .format(Fore.CYAN)

        # Hopefully this will never ever happen!
        return "The temple is collapsing! Run!!!"

    def total_lessons(self):
        """Number of lesson files shipped with the koans (0 if none found)."""
        all_lessons = self.filter_all_lessons()

        if all_lessons:
            return len(all_lessons)
        else:
            return 0

    def total_koans(self):
        """Number of individual koan test cases."""
        return self.tests.countTestCases()

    def filter_all_lessons(self):
        """Lazily collect the about_*.py lesson files, excluding extra credit."""
        cur_dir = os.path.split(os.path.realpath(__file__))[0]
        if not self.all_lessons:
            self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
            self.all_lessons = filter(lambda filename:
                                      "about_extra_credit" not in filename,
                                      self.all_lessons)
        return self.all_lessons
| mit |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/Sphinx-1.2.2-py2.7.egg/sphinx/transforms.py | 3 | 19071 | # -*- coding: utf-8 -*-
"""
sphinx.transforms
~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx when reading documents.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from docutils import nodes
from docutils.utils import new_document, relative_path
from docutils.parsers.rst import Parser as RSTParser
from docutils.transforms import Transform
from docutils.transforms.parts import ContentsFilter
from sphinx import addnodes
from sphinx.locale import _, init as init_locale
from sphinx.util import split_index_msg
from sphinx.util.nodes import traverse_translatable_index, extract_messages
from sphinx.util.osutil import ustrftime, find_catalog
from sphinx.util.compat import docutils_version
from sphinx.util.pycompat import all
from sphinx.domains.std import (
make_term_from_paragraph_node,
make_termnodes_from_paragraph_node,
)
default_substitutions = set([
'version',
'release',
'today',
])
class DefaultSubstitutions(Transform):
    """
    Replace some substitutions if they aren't defined in the document.

    Resolves the built-in |version|, |release| and |today| substitution
    references from the build configuration.
    """
    # run before the default Substitutions
    default_priority = 210

    def apply(self):
        config = self.document.settings.env.config
        # only handle those not otherwise defined in the document
        to_handle = default_substitutions - set(self.document.substitution_defs)
        for ref in self.document.traverse(nodes.substitution_reference):
            refname = ref['refname']
            if refname in to_handle:
                text = config[refname]
                if refname == 'today' and not text:
                    # special handling: can also specify a strftime format
                    text = ustrftime(config.today_fmt or _('%B %d, %Y'))
                ref.replace_self(nodes.Text(text, text))
class MoveModuleTargets(Transform):
    """
    Move module targets that are the first thing in a section to the section
    title.

    XXX Python specific
    """
    default_priority = 210

    def apply(self):
        for node in self.document.traverse(nodes.target):
            if not node['ids']:
                continue
            if (node.has_key('ismod') and
                node.parent.__class__ is nodes.section and
                # index 0 is the section title node
                node.parent.index(node) == 1):
                # Promote the module target's ids onto the enclosing section
                # and drop the now-redundant target node.
                node.parent['ids'][0:0] = node['ids']
                node.parent.remove(node)
class HandleCodeBlocks(Transform):
    """
    Several code block related transformations.
    """
    default_priority = 210

    def apply(self):
        # move doctest blocks out of blockquotes
        # (docutils wraps an indented doctest in a block_quote; unwrap it
        # when the quote contains nothing but doctest blocks)
        for node in self.document.traverse(nodes.block_quote):
            if all(isinstance(child, nodes.doctest_block) for child
                   in node.children):
                node.replace_self(node.children)
        # combine successive doctest blocks
        #for node in self.document.traverse(nodes.doctest_block):
        #    if node not in node.parent.children:
        #        continue
        #    parindex = node.parent.index(node)
        #    while len(node.parent) > parindex+1 and \
        #          isinstance(node.parent[parindex+1], nodes.doctest_block):
        #        node[0] = nodes.Text(node[0] + '\n\n' +
        #                             node.parent[parindex+1][0])
        #        del node.parent[parindex+1]
class SortIds(Transform):
    """
    Sort section IDs so that the "id[0-9]+" one comes last.
    """
    default_priority = 261

    def apply(self):
        for node in self.document.traverse(nodes.section):
            if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
                # Prefer the descriptive id first; rotate the auto-generated
                # "idNN" id to the end of the list.
                node['ids'] = node['ids'][1:] + [node['ids'][0]]
class CitationReferences(Transform):
    """
    Replace citation references by pending_xref nodes before the default
    docutils transform tries to resolve them.
    """
    default_priority = 619

    def apply(self):
        for citnode in self.document.traverse(nodes.citation_reference):
            cittext = citnode.astext()
            refnode = addnodes.pending_xref(cittext, reftype='citation',
                                            reftarget=cittext, refwarn=True,
                                            ids=citnode["ids"])
            # Keep the original source line for warnings; fall back to the
            # parent's line when the reference node has none.
            refnode.line = citnode.line or citnode.parent.line
            refnode += nodes.Text('[' + cittext + ']')
            citnode.parent.replace(citnode, refnode)
class CustomLocaleReporter(object):
    """
    Replacer for document.reporter.get_source_and_line method.

    reST text lines for translation do not have the original source line number.
    This class provides the correct line numbers when reporting.
    """
    def __init__(self, source, line):
        self.source, self.line = source, line

    def set_reporter(self, document):
        # docutils < 0.9 exposed a 'locator' attribute instead of the
        # get_source_and_line method.
        if docutils_version < (0, 9):
            document.reporter.locator = self.get_source_and_line
        else:
            document.reporter.get_source_and_line = self.get_source_and_line

    def get_source_and_line(self, lineno=None):
        # Always report the position captured at construction time,
        # regardless of the lineno docutils asks about.
        return self.source, self.line
class Locale(Transform):
    """
    Replace translatable nodes with their translated doctree.

    Works in two phases over the extracted messages: phase 1 renames
    section/term targets so references keep working, phase 2 splices the
    translated paragraphs into the tree while preserving footnote,
    reference and pending_xref identities from the original nodes.
    """
    default_priority = 0

    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        docname = path.splitext(relative_path(path.join(env.srcdir, 'dummy'),
                                              source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)

        # fetch translations
        dirs = [path.join(env.srcdir, directory)
                for directory in env.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain)
        if not has_catalog:
            return

        parser = RSTParser()

        #phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n dummy literal'
                # dummy literal node will discard by 'patch = patch[0]'

            # Parse the translated message into a throwaway document,
            # reporting errors against the original node's position.
            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError: # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue # skip for now

            processed = False # skip flag

            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())

                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    if old_name in names:
                        names.remove(old_name)

                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)
                    # * if explicit: _id is label. title node need another id.
                    # * if not explicit:
                    #
                    # * _id is None:
                    #
                    # _id is None means _id was duplicated.
                    # old_name entry still exists in nameids and
                    # nametypes for another duplicated entry.
                    #
                    # * _id is provided: below process
                    if not explicit and _id:
                        # _id was not duplicated.
                        # remove old_name entry from document ids database
                        # to reuse original _id.
                        self.document.nameids.pop(old_name, None)
                        self.document.nametypes.pop(old_name, None)
                        self.document.ids.pop(_id, None)

                    # re-entry with new named section node.
                    self.document.note_implicit_target(section_node)

                    # replace target's refname to new target name
                    def is_named_target(node):
                        return isinstance(node, nodes.target) and \
                            node.get('refname') == old_name
                    for old_target in self.document.traverse(is_named_target):
                        old_target['refname'] = new_name

                    processed = True

            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = env.temp_data.setdefault('gloss_entries', set())
                ids = []
                termnodes = []
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)
                    _id, _, new_termnodes = \
                        make_termnodes_from_paragraph_node(env, patch, _id)
                    ids.append(_id)
                    termnodes.extend(new_termnodes)

                if termnodes and ids:
                    patch = make_term_from_paragraph_node(termnodes, ids)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True

            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True

        #phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):
                continue
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg: # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n dummy literal'
                # dummy literal node will discard by 'patch = patch[0]'

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError: # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue # skip for now

            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1
            def list_replace_or_append(lst, old, new):
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)
            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node('inconsistent footnote references in '
                              'translated message', node)
            old_foot_namerefs = {}
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for new in new_foot_refs:
                refname = new.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue

                # Reuse the original footnote reference's ids so the
                # document's bookkeeping structures stay consistent.
                old = refs.pop(0)
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                list_replace_or_append(
                    self.document.autofootnote_refs, old, new)
                if refname:
                    list_replace_or_append(
                        self.document.footnote_refs.setdefault(refname, []),
                        old, new)
                    list_replace_or_append(
                        self.document.refnames.setdefault(refname, []),
                        old, new)

            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for new in new_refs:
                if not self.document.has_name(new['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        new['refname'] = orphans.pop(0)
                    else:
                        # orphan refnames is already empty!
                        # reference number is same in new_refs and old_refs.
                        pass

                self.document.note_refname(new)

            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]

            # Original pending_xref['reftarget'] contain not-translated
            # target name, new pending_xref must use original one.
            # This code restricts to change ref-targets in the translation.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent term references in '
                              'translated message', node)
            def get_ref_key(node):
                # glossary terms ('std', 'term') are translated, so their
                # reftarget must NOT be carried over from the original.
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],)

            for old in old_refs:
                key = get_ref_key(old)
                if key:
                    xref_reftarget_map[key] = old["reftarget"]
            for new in new_refs:
                key = get_ref_key(new)
                if key in xref_reftarget_map:
                    new['reftarget'] = xref_reftarget_map[key]

            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children
            node['translated'] = True

        # Extract and translate messages for index entries.
        for node, entries in traverse_translatable_index(self.document):
            new_entries = []
            for type, msg, tid, main in entries:
                msg_parts = split_index_msg(type, msg)
                msgstr_parts = []
                for part in msg_parts:
                    msgstr = catalog.gettext(part)
                    if not msgstr:
                        msgstr = part
                    msgstr_parts.append(msgstr)

                new_entries.append((type, ';'.join(msgstr_parts), tid, main))

            node['raw_entries'] = entries
            node['entries'] = new_entries
class RemoveTranslatableInline(Transform):
    """
    Remove inline nodes used for translation as placeholders.
    """
    default_priority = 999

    def apply(self):
        from sphinx.builders.gettext import MessageCatalogBuilder
        env = self.document.settings.env
        builder = env.app.builder
        # The gettext builder needs the placeholder nodes, so keep them.
        if isinstance(builder, MessageCatalogBuilder):
            return
        for inline in self.document.traverse(nodes.inline):
            if 'translatable' in inline:
                # Splice the children up into the parent and drop the marker.
                inline.parent.remove(inline)
                inline.parent += inline.children
class SphinxContentsFilter(ContentsFilter):
    """
    Used with BuildEnvironment.add_toc_from() to discard cross-file links
    within table-of-contents link nodes.
    """
    def visit_pending_xref(self, node):
        # Replace the unresolved cross-reference with its literal text.
        text = node.astext()
        self.parent.append(nodes.literal(text, text))
        raise nodes.SkipNode

    def visit_image(self, node):
        # Images have no sensible rendering inside a TOC entry.
        raise nodes.SkipNode
| gpl-2.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/lib2to3/tests/test_pytree.py | 131 | 17346 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for pytree.py.
NOTE: Please *don't* add doc strings to individual test methods!
In verbose mode, printing of the module, class and method name is much
more helpful than printing of (the first line of) the docstring,
especially when debugging a test.
"""
from __future__ import with_statement
import sys
import warnings
# Testing imports
from . import support
from lib2to3 import pytree
try:
    sorted
except NameError:
    # Pre-2.4 compatibility: provide a minimal sorted() substitute
    # (no key/reverse support — the tests only need plain sorting).
    def sorted(lst):
        l = list(lst)
        l.sort()
        return l
class TestNodes(support.TestCase):
"""Unit tests for nodes (Base, Leaf, Node)."""
    if sys.version_info >= (2,6):
        # warnings.catch_warnings is new in 2.6.
        def test_deprecated_prefix_methods(self):
            # get_prefix()/set_prefix() must still work but each call emits
            # a DeprecationWarning pointing at the 'prefix' property.
            l = pytree.Leaf(100, "foo")
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always", DeprecationWarning)
                self.assertEqual(l.get_prefix(), "")
                l.set_prefix("hi")
            self.assertEqual(l.prefix, "hi")
            self.assertEqual(len(w), 2)
            for warning in w:
                self.assertTrue(warning.category is DeprecationWarning)
            self.assertEqual(str(w[0].message), "get_prefix() is deprecated; " \
                                 "use the prefix property")
            self.assertEqual(str(w[1].message), "set_prefix() is deprecated; " \
                                 "use the prefix property")
    def test_instantiate_base(self):
        # Base is abstract; the check is skipped under -O, where
        # assertions are compiled away.
        if __debug__:
            # Test that instantiating Base() raises an AssertionError
            self.assertRaises(AssertionError, pytree.Base)
def test_leaf(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(l1.type, 100)
self.assertEqual(l1.value, "foo")
def test_leaf_repr(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(repr(l1), "Leaf(100, 'foo')")
def test_leaf_str(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(str(l1), "foo")
l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
self.assertEqual(str(l2), " foo")
def test_leaf_str_numeric_value(self):
# Make sure that the Leaf's value is stringified. Failing to
# do this can cause a TypeError in certain situations.
l1 = pytree.Leaf(2, 5)
l1.prefix = "foo_"
self.assertEqual(str(l1), "foo_5")
def test_leaf_equality(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
self.assertEqual(l1, l2)
l3 = pytree.Leaf(101, "foo")
l4 = pytree.Leaf(100, "bar")
self.assertNotEqual(l1, l3)
self.assertNotEqual(l1, l4)
def test_leaf_prefix(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(l1.prefix, "")
self.assertFalse(l1.was_changed)
l1.prefix = " ##\n\n"
self.assertEqual(l1.prefix, " ##\n\n")
self.assertTrue(l1.was_changed)
def test_node(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(200, "bar")
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(n1.type, 1000)
self.assertEqual(n1.children, [l1, l2])
def test_node_repr(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(repr(n1),
"Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
def test_node_str(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(str(n1), "foo bar")
def test_node_prefix(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(l1.prefix, "")
n1 = pytree.Node(1000, [l1])
self.assertEqual(n1.prefix, "")
n1.prefix = " "
self.assertEqual(n1.prefix, " ")
self.assertEqual(l1.prefix, " ")
def test_get_suffix(self):
l1 = pytree.Leaf(100, "foo", prefix="a")
l2 = pytree.Leaf(100, "bar", prefix="b")
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(l1.get_suffix(), l2.prefix)
self.assertEqual(l2.get_suffix(), "")
self.assertEqual(n1.get_suffix(), "")
l3 = pytree.Leaf(100, "bar", prefix="c")
n2 = pytree.Node(1000, [n1, l3])
self.assertEqual(n1.get_suffix(), l3.prefix)
self.assertEqual(l3.get_suffix(), "")
self.assertEqual(n2.get_suffix(), "")
def test_node_equality(self):
n1 = pytree.Node(1000, ())
n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
self.assertEqual(n1, n2)
n3 = pytree.Node(1001, ())
self.assertNotEqual(n1, n3)
def test_node_recursive_equality(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "foo")
n1 = pytree.Node(1000, [l1])
n2 = pytree.Node(1000, [l2])
self.assertEqual(n1, n2)
l3 = pytree.Leaf(100, "bar")
n3 = pytree.Node(1000, [l3])
self.assertNotEqual(n1, n3)
def test_replace(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "+")
l3 = pytree.Leaf(100, "bar")
n1 = pytree.Node(1000, [l1, l2, l3])
self.assertEqual(n1.children, [l1, l2, l3])
self.assertTrue(isinstance(n1.children, list))
self.assertFalse(n1.was_changed)
l2new = pytree.Leaf(100, "-")
l2.replace(l2new)
self.assertEqual(n1.children, [l1, l2new, l3])
self.assertTrue(isinstance(n1.children, list))
self.assertTrue(n1.was_changed)
def test_replace_with_list(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "+")
l3 = pytree.Leaf(100, "bar")
n1 = pytree.Node(1000, [l1, l2, l3])
l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
self.assertEqual(str(n1), "foo**bar")
self.assertTrue(isinstance(n1.children, list))
def test_leaves(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar")
l3 = pytree.Leaf(100, "fooey")
n2 = pytree.Node(1000, [l1, l2])
n3 = pytree.Node(1000, [l3])
n1 = pytree.Node(1000, [n2, n3])
self.assertEqual(list(n1.leaves()), [l1, l2, l3])
def test_depth(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar")
n2 = pytree.Node(1000, [l1, l2])
n3 = pytree.Node(1000, [])
n1 = pytree.Node(1000, [n2, n3])
self.assertEqual(l1.depth(), 2)
self.assertEqual(n3.depth(), 1)
self.assertEqual(n1.depth(), 0)
def test_post_order(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar")
l3 = pytree.Leaf(100, "fooey")
c1 = pytree.Node(1000, [l1, l2])
n1 = pytree.Node(1000, [c1, l3])
self.assertEqual(list(n1.post_order()), [l1, l2, c1, l3, n1])
def test_pre_order(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar")
l3 = pytree.Leaf(100, "fooey")
c1 = pytree.Node(1000, [l1, l2])
n1 = pytree.Node(1000, [c1, l3])
self.assertEqual(list(n1.pre_order()), [n1, c1, l1, l2, l3])
def test_changed(self):
l1 = pytree.Leaf(100, "f")
self.assertFalse(l1.was_changed)
l1.changed()
self.assertTrue(l1.was_changed)
l1 = pytree.Leaf(100, "f")
n1 = pytree.Node(1000, [l1])
self.assertFalse(n1.was_changed)
n1.changed()
self.assertTrue(n1.was_changed)
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "+")
l3 = pytree.Leaf(100, "bar")
n1 = pytree.Node(1000, [l1, l2, l3])
n2 = pytree.Node(1000, [n1])
self.assertFalse(l1.was_changed)
self.assertFalse(n1.was_changed)
self.assertFalse(n2.was_changed)
n1.changed()
self.assertTrue(n1.was_changed)
self.assertTrue(n2.was_changed)
self.assertFalse(l1.was_changed)
def test_leaf_constructor_prefix(self):
for prefix in ("xyz_", ""):
l1 = pytree.Leaf(100, "self", prefix=prefix)
self.assertTrue(str(l1), prefix + "self")
self.assertEqual(l1.prefix, prefix)
def test_node_constructor_prefix(self):
for prefix in ("xyz_", ""):
l1 = pytree.Leaf(100, "self")
l2 = pytree.Leaf(100, "foo", prefix="_")
n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
self.assertTrue(str(n1), prefix + "self_foo")
self.assertEqual(n1.prefix, prefix)
self.assertEqual(l1.prefix, prefix)
self.assertEqual(l2.prefix, "_")
    def test_remove(self):
        """remove() detaches a node from its parent and returns its old index.

        The removal flags the tree the node was removed *from* as changed,
        but not the removed subtree itself.
        """
        l1 = pytree.Leaf(100, "foo")
        l2 = pytree.Leaf(100, "foo")
        n1 = pytree.Node(1000, [l1, l2])
        n2 = pytree.Node(1000, [n1])
        # Removing n1 (child 0 of n2) returns its index and empties n2.
        self.assertEqual(n1.remove(), 0)
        self.assertEqual(n2.children, [])
        # n1's own children keep their parent pointers; only n1 is detached.
        self.assertEqual(l1.parent, n1)
        self.assertEqual(n1.parent, None)
        self.assertEqual(n2.parent, None)
        # Only the former parent tree (n2) is marked changed.
        self.assertFalse(n1.was_changed)
        self.assertTrue(n2.was_changed)
        # Removing the leaves from n1 returns their positional indices.
        self.assertEqual(l2.remove(), 1)
        self.assertEqual(l1.remove(), 0)
        self.assertEqual(n1.children, [])
        self.assertEqual(l1.parent, None)
        self.assertEqual(n1.parent, None)
        self.assertEqual(n2.parent, None)
        # Now n1 has had children removed from it, so it is changed too.
        self.assertTrue(n1.was_changed)
        self.assertTrue(n2.was_changed)
def test_remove_parentless(self):
n1 = pytree.Node(1000, [])
n1.remove()
self.assertEqual(n1.parent, None)
l1 = pytree.Leaf(100, "foo")
l1.remove()
self.assertEqual(l1.parent, None)
def test_node_set_child(self):
l1 = pytree.Leaf(100, "foo")
n1 = pytree.Node(1000, [l1])
l2 = pytree.Leaf(100, "bar")
n1.set_child(0, l2)
self.assertEqual(l1.parent, None)
self.assertEqual(l2.parent, n1)
self.assertEqual(n1.children, [l2])
n2 = pytree.Node(1000, [l1])
n2.set_child(0, n1)
self.assertEqual(l1.parent, None)
self.assertEqual(n1.parent, n2)
self.assertEqual(n2.parent, None)
self.assertEqual(n2.children, [n1])
self.assertRaises(IndexError, n1.set_child, 4, l2)
# I don't care what it raises, so long as it's an exception
self.assertRaises(Exception, n1.set_child, 0, list)
def test_node_insert_child(self):
l1 = pytree.Leaf(100, "foo")
n1 = pytree.Node(1000, [l1])
l2 = pytree.Leaf(100, "bar")
n1.insert_child(0, l2)
self.assertEqual(l2.parent, n1)
self.assertEqual(n1.children, [l2, l1])
l3 = pytree.Leaf(100, "abc")
n1.insert_child(2, l3)
self.assertEqual(n1.children, [l2, l1, l3])
# I don't care what it raises, so long as it's an exception
self.assertRaises(Exception, n1.insert_child, 0, list)
def test_node_append_child(self):
n1 = pytree.Node(1000, [])
l1 = pytree.Leaf(100, "foo")
n1.append_child(l1)
self.assertEqual(l1.parent, n1)
self.assertEqual(n1.children, [l1])
l2 = pytree.Leaf(100, "bar")
n1.append_child(l2)
self.assertEqual(l2.parent, n1)
self.assertEqual(n1.children, [l1, l2])
# I don't care what it raises, so long as it's an exception
self.assertRaises(Exception, n1.append_child, list)
def test_node_next_sibling(self):
n1 = pytree.Node(1000, [])
n2 = pytree.Node(1000, [])
p1 = pytree.Node(1000, [n1, n2])
self.assertTrue(n1.next_sibling is n2)
self.assertEqual(n2.next_sibling, None)
self.assertEqual(p1.next_sibling, None)
def test_leaf_next_sibling(self):
l1 = pytree.Leaf(100, "a")
l2 = pytree.Leaf(100, "b")
p1 = pytree.Node(1000, [l1, l2])
self.assertTrue(l1.next_sibling is l2)
self.assertEqual(l2.next_sibling, None)
self.assertEqual(p1.next_sibling, None)
def test_node_prev_sibling(self):
n1 = pytree.Node(1000, [])
n2 = pytree.Node(1000, [])
p1 = pytree.Node(1000, [n1, n2])
self.assertTrue(n2.prev_sibling is n1)
self.assertEqual(n1.prev_sibling, None)
self.assertEqual(p1.prev_sibling, None)
def test_leaf_prev_sibling(self):
l1 = pytree.Leaf(100, "a")
l2 = pytree.Leaf(100, "b")
p1 = pytree.Node(1000, [l1, l2])
self.assertTrue(l2.prev_sibling is l1)
self.assertEqual(l1.prev_sibling, None)
self.assertEqual(p1.prev_sibling, None)
class TestPatterns(support.TestCase):
    """Unit tests for tree matching patterns."""
    def test_basic_patterns(self):
        """LeafPattern/NodePattern match only the matching shape/value and
        record named captures in the results dict; failed matches leave the
        dict untouched."""
        # Build a tree
        l1 = pytree.Leaf(100, "foo")
        l2 = pytree.Leaf(100, "bar")
        l3 = pytree.Leaf(100, "foo")
        n1 = pytree.Node(1000, [l1, l2])
        n2 = pytree.Node(1000, [l3])
        root = pytree.Node(1000, [n1, n2])
        # Build a pattern matching a leaf
        pl = pytree.LeafPattern(100, "foo", name="pl")
        r = {}
        # The leaf pattern rejects nodes outright.
        self.assertFalse(pl.match(root, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pl.match(n1, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pl.match(n2, results=r))
        self.assertEqual(r, {})
        # It matches a leaf with the right value and captures it under "pl".
        self.assertTrue(pl.match(l1, results=r))
        self.assertEqual(r, {"pl": l1})
        r = {}
        # Wrong leaf value ("bar") fails and records nothing.
        self.assertFalse(pl.match(l2, results=r))
        self.assertEqual(r, {})
        # Build a pattern matching a node
        pn = pytree.NodePattern(1000, [pl], name="pn")
        self.assertFalse(pn.match(root, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pn.match(n1, results=r))
        self.assertEqual(r, {})
        # A successful node match also records its sub-pattern's capture.
        self.assertTrue(pn.match(n2, results=r))
        self.assertEqual(r, {"pn": n2, "pl": l3})
        r = {}
        # Node patterns reject bare leaves.
        self.assertFalse(pn.match(l1, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pn.match(l2, results=r))
        self.assertEqual(r, {})
    def test_wildcard(self):
        """WildcardPattern tries its alternatives against a node sequence."""
        # Build a tree for testing
        l1 = pytree.Leaf(100, "foo")
        l2 = pytree.Leaf(100, "bar")
        l3 = pytree.Leaf(100, "foo")
        n1 = pytree.Node(1000, [l1, l2])
        n2 = pytree.Node(1000, [l3])
        root = pytree.Node(1000, [n1, n2])
        # Build a pattern: either a single matching node, or two "foo" leaves.
        pl = pytree.LeafPattern(100, "foo", name="pl")
        pn = pytree.NodePattern(1000, [pl], name="pn")
        pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
        r = {}
        self.assertFalse(pw.match_seq([root], r))
        self.assertEqual(r, {})
        self.assertFalse(pw.match_seq([n1], r))
        self.assertEqual(r, {})
        self.assertTrue(pw.match_seq([n2], r))
        # These are easier to debug
        self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
        # NOTE: the match actually bound l3 (the only child of n2); comparing
        # against l1 passes because equal-valued leaves compare equal.
        self.assertEqual(r["pl"], l1)
        self.assertEqual(r["pn"], n2)
        self.assertEqual(r["pw"], [n2])
        # But this is equivalent
        self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
        r = {}
        # The second alternative matches two "foo" leaves in sequence;
        # "pl" ends up bound to the last one matched (l3, by identity).
        self.assertTrue(pw.match_seq([l1, l3], r))
        self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
        self.assertTrue(r["pl"] is l3)
        r = {}
    def test_generate_matches(self):
        """generate_matches() enumerates every way the alternatives can
        consume a prefix of the leaf sequence, yielding (count, results)."""
        la = pytree.Leaf(1, "a")
        lb = pytree.Leaf(1, "b")
        lc = pytree.Leaf(1, "c")
        ld = pytree.Leaf(1, "d")
        le = pytree.Leaf(1, "e")
        lf = pytree.Leaf(1, "f")
        leaves = [la, lb, lc, ld, le, lf]
        root = pytree.Node(1000, leaves)
        pa = pytree.LeafPattern(1, "a", "pa")
        pb = pytree.LeafPattern(1, "b", "pb")
        pc = pytree.LeafPattern(1, "c", "pc")
        pd = pytree.LeafPattern(1, "d", "pd")
        pe = pytree.LeafPattern(1, "e", "pe")
        pf = pytree.LeafPattern(1, "f", "pf")
        pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
                                     [pa, pb], [pc, pd], [pe, pf]],
                                    min=1, max=4, name="pw")
        # Each yielded count is the number of leaves consumed by one way of
        # chaining alternatives from the start of the sequence.
        self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
                         [3, 5, 2, 4, 6])
        # Wrapped in a node pattern, only the full-consumption match survives.
        pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
        matches = list(pytree.generate_matches([pr], [root]))
        self.assertEqual(len(matches), 1)
        c, r = matches[0]
        self.assertEqual(c, 1)
        self.assertEqual(str(r["pr"]), "abcdef")
        self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
        for c in "abcdef":
            self.assertEqual(r["p" + c], pytree.Leaf(1, c))
    def test_has_key_example(self):
        """A realistic pattern: "(" <anything, captured as args> ")"."""
        pattern = pytree.NodePattern(331,
                                     (pytree.LeafPattern(7),
                                      pytree.WildcardPattern(name="args"),
                                      pytree.LeafPattern(8)))
        l1 = pytree.Leaf(7, "(")
        l2 = pytree.Leaf(3, "x")
        l3 = pytree.Leaf(8, ")")
        node = pytree.Node(331, [l1, l2, l3])
        r = {}
        self.assertTrue(pattern.match(node, r))
        # The wildcard captured the single leaf between the parens.
        self.assertEqual(r["args"], [l2])
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.