repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
pwoodworth/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/models/sql/aggregates.py | 309 | 1804 | from django.db.models.sql.aggregates import *
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql.conversion import GeomField
class GeoAggregate(Aggregate):
    """Base class for spatial aggregate functions (Collect, Extent, etc.)."""

    # Default SQL template for spatial aggregates.
    sql_template = '%(function)s(%(field)s)'

    # Optional conversion class applied to the aggregate's result.
    conversion_class = None

    # Either False or a string ('2D'/'3D') flagging extent aggregates.
    is_extent = False

    def __init__(self, col, source=None, is_summary=False, tolerance=0.05, **extra):
        super(GeoAggregate, self).__init__(col, source, is_summary, **extra)
        # Tolerance is required by some Oracle spatial aggregates.
        self.tolerance = tolerance
        # Spatial aggregation is only defined for geometry columns.
        if not isinstance(self.source, GeometryField):
            raise ValueError('Geospatial aggregates only allowed on geometry fields.')

    def as_sql(self, qn, connection):
        "Return the aggregate, rendered as SQL."
        if connection.ops.oracle:
            self.extra['tolerance'] = self.tolerance

        # Resolve the column reference into a SQL snippet.
        col = self.col
        if hasattr(col, 'as_sql'):
            field_sql = col.as_sql(qn, connection)
        elif isinstance(col, (list, tuple)):
            field_sql = '.'.join(qn(part) for part in col)
        else:
            field_sql = col

        template, function = connection.ops.spatial_aggregate_sql(self)
        context = {'function': function, 'field': field_sql}
        context.update(self.extra)
        return template % context
class Collect(GeoAggregate):
    # Uses the default sql_template and no special flags.
    pass

class Extent(GeoAggregate):
    # Flagged as a 2D extent aggregate via is_extent.
    is_extent = '2D'

class Extent3D(GeoAggregate):
    # Flagged as a 3D extent aggregate via is_extent.
    is_extent = '3D'

class MakeLine(GeoAggregate):
    # Uses the default sql_template and no special flags.
    pass

class Union(GeoAggregate):
    # Uses the default sql_template and no special flags.
    pass
| apache-2.0 |
ds0nt/or-tools | examples/python/fill_a_pix.py | 34 | 4969 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fill-a-Pix problem in Google CP Solver.
From
http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/basiclogic
'''
Each puzzle consists of a grid containing clues in various places. The
object is to reveal a hidden picture by painting the squares around each
clue so that the number of painted squares, including the square with
the clue, matches the value of the clue.
'''
http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/rules
'''
Fill-a-Pix is a Minesweeper-like puzzle based on a grid with a pixilated
picture hidden inside. Using logic alone, the solver determines which
squares are painted and which should remain empty until the hidden picture
is completely exposed.
'''
Fill-a-pix History:
http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/history
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/fill_a_pix.mzn
* SICStus Prolog: http://www.hakank.org/sicstus/fill_a_pix.pl
* ECLiPSe: http://hakank.org/eclipse/fill_a_pix.ecl
* Gecode: http://hakank.org/gecode/fill_a_pix.cpp
And see the Minesweeper model:
* http://www.hakank.org/google_or_tools/minesweeper.py
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
# Puzzle 1 from
# http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/rules
# Grid size of the built-in default instance.
default_n = 10

# Marker for a cell without a clue.
X = -1

# 10x10 grid of clues; X means "no clue here".
default_puzzle = [
    [X, X, X, X, X, X, X, X, 0, X],
    [X, 8, 8, X, 2, X, 0, X, X, X],
    [5, X, 8, X, X, X, X, X, X, X],
    [X, X, X, X, X, 2, X, X, X, 2],
    [1, X, X, X, 4, 5, 6, X, X, X],
    [X, 0, X, X, X, 7, 9, X, X, 6],
    [X, X, X, 6, X, X, 9, X, X, 6],
    [X, X, 6, 6, 8, 7, 8, 7, X, 5],
    [X, 4, X, 6, 6, 6, X, 6, X, 4],
    [X, X, X, X, X, X, 3, X, X, X]
]
def main(puzzle='', n=''):
  """Builds and solves a Fill-a-Pix instance with the CP solver.

  Args:
    puzzle: n x n list of clue values, with -1 (X) for unclued cells.
        The empty string selects the built-in default instance.
    n: size of the grid (only used when a puzzle is supplied).
  """
  # Create the solver.
  solver = pywrapcp.Solver('Fill-a-Pix')

  #
  # data
  #

  # Set default problem
  if puzzle == '':
    puzzle = default_puzzle
    n = default_n
  else:
    print 'n:', n

  # for the neighbors of 'this' cell
  S = [-1, 0, 1]

  # print problem instance
  print 'Problem:'
  for i in range(n):
    for j in range(n):
      if puzzle[i][j] == X:
        sys.stdout.write('.')
      else:
        sys.stdout.write(str(puzzle[i][j]))
    print
  print

  #
  # declare variables
  #
  # pict[(i, j)] is 1 if cell (i, j) is painted, else 0.
  pict = {}
  for i in range(n):
    for j in range(n):
      pict[(i, j)] = solver.IntVar(0, 1, 'pict %i %i' % (i, j))

  pict_flat = [pict[i, j] for i in range(n) for j in range(n)]

  #
  # constraints
  #
  for i in range(n):
    for j in range(n):
      if puzzle[i][j] > X:
        # this cell is the sum of all the surrounding cells
        # (including itself), clipped at the grid borders
        solver.Add(
            puzzle[i][j] == solver.Sum([pict[i + a, j + b]
                                        for a in S for b in S
                                        if i + a >= 0 and
                                        j + b >= 0 and
                                        i + a < n and
                                        j + b < n])
        )

  #
  # solution and search
  #
  db = solver.Phase(pict_flat,
                    solver.INT_VAR_DEFAULT,
                    solver.INT_VALUE_DEFAULT)

  solver.NewSearch(db)

  num_solutions = 0
  print 'Solution:'
  # Enumerate every solution, printing painted cells as '#'.
  while solver.NextSolution():
    num_solutions += 1
    for i in range(n):
      row = [str(pict[i, j].Value()) for j in range(n)]
      for j in range(n):
        if row[j] == '0':
          row[j] = ' '
        else:
          row[j] = '#'
      print ''.join(row)
    print

  # NOTE(review): solver.EndSearch() is never called here -- confirm intended.
  print 'num_solutions:', num_solutions
  print 'failures:', solver.Failures()
  print 'branches:', solver.Branches()
  print 'WallTime:', solver.WallTime(), 'ms'
#
# Read a problem instance from a file
#
def read_problem(file):
  """Reads a Fill-a-Pix instance from a text file.

  The first line holds the grid size n; each of the next n lines holds n
  characters, '.' for an unclued cell and a digit for a clue.

  Args:
    file: path of the instance file.

  Returns:
    [puzzle, n] where puzzle is an n x n list of ints, with -1 marking
    unclued cells (the same convention as the module-level X).
  """
  # Use a context manager so the file is always closed -- the original
  # implementation leaked the handle (never called close()).
  with open(file, 'r') as f:
    n = int(f.readline())
    puzzle = []
    for i in range(n):
      x = f.readline()
      # '.' -> no clue (-1); any other character is parsed as a clue digit.
      row = [-1 if x[j] == '.' else int(x[j]) for j in range(n)]
      puzzle.append(row)
  return [puzzle, n]
if __name__ == '__main__':
  # Optional command-line argument: a file containing a problem instance;
  # with no argument the built-in default puzzle is solved.
  if len(sys.argv) > 1:
    file = sys.argv[1]
    print 'Problem instance from', file
    [puzzle, n] = read_problem(file)
    main(puzzle, n)
  else:
    main()
| apache-2.0 |
alxgu/ansible | lib/ansible/modules/cloud/amazon/ecs_taskdefinition_facts.py | 29 | 12096 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_taskdefinition_facts
short_description: describe a task definition in ecs
notes:
- for details of the parameters and returns see
U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
description:
- Describes a task definition in ecs.
version_added: "2.5"
author:
- Gustavo Maia (@gurumaia)
- Mark Chance (@Java1Guy)
- Darek Kaczynski (@kaczynskid)
requirements: [ json, botocore, boto3 ]
options:
task_definition:
description:
- The name of the task definition to get details for
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- ecs_taskdefinition_facts:
task_definition: test-td
'''
RETURN = '''
container_definitions:
description: Returns a list of complex objects representing the containers
returned: success
type: complex
contains:
name:
description: The name of a container.
returned: always
type: str
image:
description: The image used to start a container.
returned: always
type: str
cpu:
description: The number of cpu units reserved for the container.
returned: always
type: int
memoryReservation:
description: The soft limit (in MiB) of memory to reserve for the container.
returned: when present
type: int
links:
description: Links to other containers.
returned: when present
type: str
portMappings:
description: The list of port mappings for the container.
returned: always
type: complex
contains:
containerPort:
description: The port number on the container.
returned: when present
type: int
hostPort:
description: The port number on the container instance to reserve for your container.
returned: when present
type: int
protocol:
description: The protocol used for the port mapping.
returned: when present
type: str
essential:
description: Whether this is an essential container or not.
returned: always
type: bool
entryPoint:
description: The entry point that is passed to the container.
returned: when present
type: str
command:
description: The command that is passed to the container.
returned: when present
type: str
environment:
description: The environment variables to pass to a container.
returned: always
type: complex
contains:
name:
description: The name of the environment variable.
returned: when present
type: str
value:
description: The value of the environment variable.
returned: when present
type: str
mountPoints:
description: The mount points for data volumes in your container.
returned: always
type: complex
contains:
sourceVolume:
description: The name of the volume to mount.
returned: when present
type: str
containerPath:
description: The path on the container to mount the host volume at.
returned: when present
type: str
readOnly:
description: If this value is true , the container has read-only access to the volume.
If this value is false , then the container can write to the volume.
returned: when present
type: bool
volumesFrom:
description: Data volumes to mount from another container.
returned: always
type: complex
contains:
sourceContainer:
description: The name of another container within the same task definition to mount volumes from.
returned: when present
type: str
readOnly:
description: If this value is true , the container has read-only access to the volume.
If this value is false , then the container can write to the volume.
returned: when present
type: bool
hostname:
description: The hostname to use for your container.
returned: when present
type: str
user:
description: The user name to use inside the container.
returned: when present
type: str
workingDirectory:
description: The working directory in which to run commands inside the container.
returned: when present
type: str
disableNetworking:
description: When this parameter is true, networking is disabled within the container.
returned: when present
type: bool
privileged:
description: When this parameter is true, the container is given elevated
privileges on the host container instance (similar to the root user).
returned: when present
type: bool
readonlyRootFilesystem:
description: When this parameter is true, the container is given read-only access to its root file system.
returned: when present
type: bool
dnsServers:
description: A list of DNS servers that are presented to the container.
returned: when present
type: str
dnsSearchDomains:
description: A list of DNS search domains that are presented to the container.
returned: when present
type: str
extraHosts:
description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
returned: when present
type: complex
contains:
hostname:
description: The hostname to use in the /etc/hosts entry.
returned: when present
type: str
ipAddress:
description: The IP address to use in the /etc/hosts entry.
returned: when present
type: str
dockerSecurityOptions:
description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
returned: when present
type: str
dockerLabels:
description: A key/value map of labels to add to the container.
returned: when present
type: str
ulimits:
description: A list of ulimits to set in the container.
returned: when present
type: complex
contains:
name:
description: The type of the ulimit .
returned: when present
type: str
softLimit:
description: The soft limit for the ulimit type.
returned: when present
type: int
hardLimit:
description: The hard limit for the ulimit type.
returned: when present
type: int
logConfiguration:
description: The log configuration specification for the container.
returned: when present
type: str
options:
description: The configuration options to send to the log driver.
returned: when present
type: str
family:
description: The family of your task definition, used as the definition name
returned: always
type: str
task_definition_arn:
description: ARN of the task definition
returned: always
type: str
task_role_arn:
description: The ARN of the IAM role that containers in this task can assume
returned: when role is set
type: str
network_mode:
description: Network mode for the containers
returned: always
type: str
revision:
description: Revision number that was queried
returned: always
type: int
volumes:
description: The list of volumes in a task
returned: always
type: complex
contains:
name:
description: The name of the volume.
returned: when present
type: str
host:
description: The contents of the host parameter determine whether your data volume
persists on the host container instance and where it is stored.
returned: when present
type: bool
source_path:
description: The path on the host container instance that is presented to the container.
returned: when present
type: str
status:
description: The status of the task definition
returned: always
type: str
requires_attributes:
description: The container instance attributes required by your task
returned: when present
type: complex
contains:
name:
description: The name of the attribute.
returned: when present
type: str
value:
description: The value of the attribute.
returned: when present
type: str
targetType:
description: The type of the target with which to attach the attribute.
returned: when present
type: str
targetId:
description: The ID of the target.
returned: when present
type: str
placement_constraints:
description: A list of placement constraint objects to use for tasks
returned: always
type: complex
contains:
type:
description: The type of constraint.
returned: when present
type: str
expression:
description: A cluster query language expression to apply to the constraint.
returned: when present
type: str
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported AnsibleAWSModule
def main():
    # Single module-specific option: the task definition name/ARN to describe.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        task_definition=dict(required=True, type='str')
    ))

    # Read-only facts module, so check mode is trivially supported.
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    ecs = boto3_conn(module, conn_type='client', resource='ecs',
                     region=region, endpoint=ec2_url, **aws_connect_kwargs)

    try:
        ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
    except botocore.exceptions.ClientError:
        # NOTE(review): any ClientError (including auth/permission failures)
        # is treated as "not found" and yields empty facts -- confirm intended.
        ecs_td = {}

    # Facts modules never change state; camelCase AWS keys become snake_case.
    module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))


if __name__ == '__main__':
    main()
| gpl-3.0 |
benoitsteiner/tensorflow-opencl | tensorflow/python/training/tensorboard_logging.py | 165 | 5328 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorboard_logging provides logging that is also written to the events file.
Any messages logged via this module will be logged both via the platform logging
mechanism and to the SummaryWriter set via `set_summary_writer`. This is useful
for logging messages that you might want to be visible from inside TensorBoard
or that should be permanently associated with the training session.
You can use this just like the logging module:
>>> tensorboard_logging.set_summary_writer(summary_writer)
>>> tensorboard_logging.info("my %s", "message")
>>> tensorboard_logging.log(tensorboard_logging.WARN, "something")
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import tf_logging as logging
# Verbosity level names; also the keys of the two maps below.
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARN'
ERROR = 'ERROR'
FATAL = 'FATAL'

# Messages with levels below this verbosity will not be logged.
_verbosity = WARN

# A value meaning 'not set yet' so we can use None to mean 'user actively told
# us they don't want a SummaryWriter'.
_sentinel_summary_writer = object()

# The SummaryWriter instance to use when logging, or None to not log, or
# _sentinel_summary_writer to indicate that the user hasn't called
# set_summary_writer yet.
_summary_writer = _sentinel_summary_writer

# Map from the tensorboard_logging logging enum values to the proto's enum
# values.
_LEVEL_PROTO_MAP = {
    DEBUG: event_pb2.LogMessage.DEBUGGING,
    INFO: event_pb2.LogMessage.INFO,
    WARN: event_pb2.LogMessage.WARN,
    ERROR: event_pb2.LogMessage.ERROR,
    FATAL: event_pb2.LogMessage.FATAL,
}

# Map from the tensorboard_logging module levels to the logging module levels.
_PLATFORM_LOGGING_LEVEL_MAP = {
    DEBUG: logging.DEBUG,
    INFO: logging.INFO,
    WARN: logging.WARN,
    ERROR: logging.ERROR,
    FATAL: logging.FATAL
}
def get_verbosity():
  """Returns the current tensorboard_logging verbosity level."""
  return _verbosity
def set_verbosity(verbosity):
  """Sets the verbosity threshold; raises ValueError on an unknown level."""
  _check_verbosity(verbosity)
  global _verbosity
  _verbosity = verbosity
def _check_verbosity(verbosity):
  """Raises ValueError if `verbosity` is not a known level name."""
  if verbosity not in _LEVEL_PROTO_MAP:
    raise ValueError('Level %s is not a valid tensorboard_logging level' %
                     verbosity)
def set_summary_writer(summary_writer):
  """Sets the summary writer that events will be logged to.

  Calling any logging methods inside this module without calling this method
  will fail. If you don't want to log, call `set_summary_writer(None)`.

  Args:
    summary_writer: Either a SummaryWriter or None. None will cause messages not
      to be logged to any SummaryWriter, but they will still be passed to the
      platform logging module.
  """
  # Replaces the module-level sentinel; log() checks this before writing.
  global _summary_writer
  _summary_writer = summary_writer
def _clear_summary_writer():
  """Makes all subsequent log invocations error.

  This is only used for testing. If you want to disable TensorBoard logging,
  call `set_summary_writer(None)` instead.
  """
  # Restores the 'not set yet' sentinel so log() raises RuntimeError again.
  global _summary_writer
  _summary_writer = _sentinel_summary_writer
def log(level, message, *args):
  """Conditionally logs `message % args` at the level `level`.

  Note that tensorboard_logging verbosity and logging verbosity are separate;
  the message will always be passed through to the logging module regardless of
  whether it passes the tensorboard_logging verbosity check.

  Args:
    level: The verbosity level to use. Must be one of
      tensorboard_logging.{DEBUG, INFO, WARN, ERROR, FATAL}.
    message: The message template to use.
    *args: Arguments to interpolate to the message template, if any.

  Raises:
    ValueError: If `level` is not a valid logging level.
    RuntimeError: If the `SummaryWriter` to use has not been set.
  """
  if _summary_writer is _sentinel_summary_writer:
    raise RuntimeError('Must call set_summary_writer before doing any '
                       'logging from tensorboard_logging')
  _check_verbosity(level)
  proto_level = _LEVEL_PROTO_MAP[level]
  if proto_level >= _LEVEL_PROTO_MAP[_verbosity]:
    # Only emit an event when the level passes the verbosity threshold and a
    # SummaryWriter is configured (None disables event logging).
    log_message = event_pb2.LogMessage(level=proto_level,
                                       message=message % args)
    event = event_pb2.Event(wall_time=time.time(), log_message=log_message)

    if _summary_writer:
      _summary_writer.add_event(event)

  # Always forward to the platform logger, regardless of the check above.
  logging.log(_PLATFORM_LOGGING_LEVEL_MAP[level], message, *args)
def debug(message, *args):
  """Logs `message % args` at DEBUG verbosity."""
  log(DEBUG, message, *args)


def info(message, *args):
  """Logs `message % args` at INFO verbosity."""
  log(INFO, message, *args)


def warn(message, *args):
  """Logs `message % args` at WARN verbosity."""
  log(WARN, message, *args)


def error(message, *args):
  """Logs `message % args` at ERROR verbosity."""
  log(ERROR, message, *args)


def fatal(message, *args):
  """Logs `message % args` at FATAL verbosity."""
  log(FATAL, message, *args)
| apache-2.0 |
cntnboys/410Lab6 | v1/lib/python2.7/site-packages/django/core/mail/message.py | 43 | 15657 | from __future__ import unicode_literals
import mimetypes
import os
import random
import sys
import time
from email import (charset as Charset, encoders as Encoders,
message_from_string, generator)
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.header import Header
from email.utils import formatdate, getaddresses, formataddr, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
from django.utils import six
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None  # Python defaults to BASE64

# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed from the filename).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
    """Raised when a header value contains a newline (header injection)."""
    pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.
    """
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    try:
        pid = os.getpid()
    except AttributeError:
        # No getpid() in Jython, for example.
        pid = 1
    randint = random.randrange(100000)
    # Fold the optional strengthener into a dotted suffix.
    idstring = '' if idstring is None else '.' + idstring
    return '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, DNS_NAME)
# Header names that contain structured address data (RFC #5322). Values of
# these headers are sanitized address-by-address by forbid_multi_line_headers().
# Set literal instead of set([...]) -- avoids building a throwaway list.
ADDRESS_HEADERS = {
    'from',
    'sender',
    'reply-to',
    'to',
    'cc',
    'bcc',
    'resent-from',
    'resent-sender',
    'resent-to',
    'resent-cc',
    'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
    """Forbids multi-line headers, to prevent header injection.

    Returns a (name, value) pair where the value is safe to set as an email
    header: newlines are rejected, and non-ASCII values are encoded.
    """
    encoding = encoding or settings.DEFAULT_CHARSET
    val = force_text(val)
    if '\n' in val or '\r' in val:
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    try:
        val.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII: address headers are sanitized address-by-address;
        # everything else is MIME-encoded as a whole via email.header.Header.
        if name.lower() in ADDRESS_HEADERS:
            val = ', '.join(sanitize_address(addr, encoding)
                for addr in getaddresses((val,)))
        else:
            val = Header(val, encoding).encode()
    else:
        if name.lower() == 'subject':
            val = Header(val).encode()
    return str(name), val
def sanitize_address(addr, encoding):
    """Formats an address (string or (name, address) pair) for a header,
    MIME-encoding the display name and IDNA-encoding a non-ASCII domain."""
    if isinstance(addr, six.string_types):
        addr = parseaddr(force_text(addr))
    nm, addr = addr
    # This try-except clause is needed on Python 3 < 3.2.4
    # http://bugs.python.org/issue14291
    try:
        nm = Header(nm, encoding).encode()
    except UnicodeEncodeError:
        nm = Header(nm, 'utf-8').encode()
    try:
        addr.encode('ascii')
    except UnicodeEncodeError:  # IDN
        # Encode only the domain with IDNA; the localpart is header-encoded.
        if '@' in addr:
            localpart, domain = addr.split('@', 1)
            localpart = str(Header(localpart, encoding))
            domain = domain.encode('idna').decode('ascii')
            addr = '@'.join([localpart, domain])
        else:
            addr = Header(addr, encoding).encode()
    return formataddr((nm, addr))
class MIMEMixin():
    # Mixin providing as_string()/as_bytes() implementations that don't
    # mangle body lines beginning with 'From ' (see Django bug #13433).

    def as_string(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as a string.

        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.

        This overrides the default as_string() implementation to not mangle
        lines that begin with 'From '. See bug #13433 for details.
        """
        fp = six.StringIO()
        g = generator.Generator(fp, mangle_from_=False)
        if six.PY2:
            # Python 2's Generator.flatten() has no linesep parameter.
            g.flatten(self, unixfrom=unixfrom)
        else:
            g.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return fp.getvalue()

    if six.PY2:
        # On Python 2 strings are bytes, so as_bytes is just an alias.
        as_bytes = as_string
    else:
        def as_bytes(self, unixfrom=False, linesep='\n'):
            """Return the entire formatted message as bytes.

            Optional `unixfrom' when True, means include the Unix From_ envelope
            header.

            This overrides the default as_bytes() implementation to not mangle
            lines that begin with 'From '. See bug #13433 for details.
            """
            fp = six.BytesIO()
            g = generator.BytesGenerator(fp, mangle_from_=False)
            g.flatten(self, unixfrom=unixfrom, linesep=linesep)
            return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
    """MIMEMessage that rejects newline-containing (injected) headers."""

    def __setitem__(self, name, val):
        # message/rfc822 attachments must be ASCII
        name, val = forbid_multi_line_headers(name, val, 'ascii')
        MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
    """MIMEText that rejects injected headers and special-cases UTF-8
    payloads so they are not BASE64-encoded (see utf8_charset above)."""

    def __init__(self, text, subtype, charset):
        self.encoding = charset
        if charset == 'utf-8':
            # Unfortunately, Python doesn't support setting a Charset instance
            # as MIMEText init parameter (http://bugs.python.org/issue16324).
            # We do it manually and trigger re-encoding of the payload.
            MIMEText.__init__(self, text, subtype, None)
            del self['Content-Transfer-Encoding']
            # Workaround for versions without http://bugs.python.org/issue19063
            if (3, 2) < sys.version_info < (3, 3, 4):
                payload = text.encode(utf8_charset.output_charset)
                self._payload = payload.decode('ascii', 'surrogateescape')
                self.set_charset(utf8_charset)
            else:
                self.set_payload(text, utf8_charset)
            self.replace_header('Content-Type', 'text/%s; charset="%s"' % (subtype, charset))
        else:
            MIMEText.__init__(self, text, subtype, charset)

    def __setitem__(self, name, val):
        # Reject multi-line values before delegating to MIMEText.
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
    """MIMEMultipart that rejects newline-containing (injected) headers."""

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
        # Remember the charset so __setitem__ can encode header values.
        self.encoding = encoding
        MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)

    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
    """
    A container for email information.
    """
    content_subtype = 'plain'
    mixed_subtype = 'mixed'
    encoding = None  # None => use settings default

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, cc=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        All strings used to create the message can be unicode strings
        (or UTF-8 bytestrings). The SafeMIMEText class will handle any
        necessary encoding conversions.
        """
        # Each recipient argument must be a sequence, never a bare string.
        if to:
            assert not isinstance(to, six.string_types), '"to" argument must be a list or tuple'
            self.to = list(to)
        else:
            self.to = []
        if cc:
            assert not isinstance(cc, six.string_types), '"cc" argument must be a list or tuple'
            self.cc = list(cc)
        else:
            self.cc = []
        if bcc:
            assert not isinstance(bcc, six.string_types), '"bcc" argument must be a list or tuple'
            self.bcc = list(bcc)
        else:
            self.bcc = []
        self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
        self.subject = subject
        self.body = body
        self.attachments = attachments or []
        self.extra_headers = headers or {}
        self.connection = connection

    def get_connection(self, fail_silently=False):
        # Lazily create (and cache) the mail backend connection.
        from django.core.mail import get_connection
        if not self.connection:
            self.connection = get_connection(fail_silently=fail_silently)
        return self.connection

    def message(self):
        """Builds and returns the underlying MIME message object."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        msg = SafeMIMEText(self.body, self.content_subtype, encoding)
        msg = self._create_message(msg)
        msg['Subject'] = self.subject
        # extra_headers may override the From/To headers.
        msg['From'] = self.extra_headers.get('From', self.from_email)
        msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
        if self.cc:
            msg['Cc'] = ', '.join(self.cc)

        # Email header names are case-insensitive (RFC 2045), so we have to
        # accommodate that when doing comparisons.
        header_names = [key.lower() for key in self.extra_headers]
        if 'date' not in header_names:
            msg['Date'] = formatdate()
        if 'message-id' not in header_names:
            msg['Message-ID'] = make_msgid()
        for name, value in self.extra_headers.items():
            if name.lower() in ('from', 'to'):  # From and To are already handled
                continue
            msg[name] = value
        return msg

    def recipients(self):
        """
        Returns a list of all recipients of the email (includes direct
        addressees as well as Cc and Bcc entries).
        """
        return self.to + self.cc + self.bcc

    def send(self, fail_silently=False):
        """Sends the email message. Returns the number of messages sent."""
        if not self.recipients():
            # Don't bother creating the network connection if there's nobody to
            # send to.
            return 0
        return self.get_connection(fail_silently).send_messages([self])

    def attach(self, filename=None, content=None, mimetype=None):
        """
        Attaches a file with the given filename and content. The filename can
        be omitted and the mimetype is guessed, if not provided.

        If the first parameter is a MIMEBase subclass it is inserted directly
        into the resulting message attachments.
        """
        if isinstance(filename, MIMEBase):
            # Pre-built MIME object: content/mimetype must not also be given.
            assert content is None
            assert mimetype is None
            self.attachments.append(filename)
        else:
            assert content is not None
            self.attachments.append((filename, content, mimetype))

    def attach_file(self, path, mimetype=None):
        """Attaches a file from the filesystem (read in binary mode)."""
        filename = os.path.basename(path)
        with open(path, 'rb') as f:
            content = f.read()
        self.attach(filename, content, mimetype)

    def _create_message(self, msg):
        # Hook overridden by subclasses (e.g. EmailMultiAlternatives).
        return self._create_attachments(msg)

    def _create_attachments(self, msg):
        # Wrap the body in a multipart/mixed container when attachments exist.
        if self.attachments:
            encoding = self.encoding or settings.DEFAULT_CHARSET
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for attachment in self.attachments:
                if isinstance(attachment, MIMEBase):
                    msg.attach(attachment)
                else:
                    msg.attach(self._create_attachment(*attachment))
        return msg

    def _create_mime_attachment(self, content, mimetype):
        """
        Converts the content, mimetype pair into a MIME attachment object.

        If the mimetype is message/rfc822, content may be an
        email.Message or EmailMessage object, as well as a str.
        """
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            encoding = self.encoding or settings.DEFAULT_CHARSET
            attachment = SafeMIMEText(content, subtype, encoding)
        elif basetype == 'message' and subtype == 'rfc822':
            # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
            # must not be base64 encoded.
            if isinstance(content, EmailMessage):
                # convert content into an email.Message first
                content = content.message()
            elif not isinstance(content, Message):
                # For compatibility with existing code, parse the message
                # into an email.Message object if it is not one already.
                content = message_from_string(content)
            attachment = SafeMIMEMessage(content, subtype)
        else:
            # Encode non-text attachments with base64.
            attachment = MIMEBase(basetype, subtype)
            attachment.set_payload(content)
            Encoders.encode_base64(attachment)
        return attachment

    def _create_attachment(self, filename, content, mimetype=None):
        """
        Converts the filename, content, mimetype triple into a MIME attachment
        object.
        """
        if mimetype is None:
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        attachment = self._create_mime_attachment(content, mimetype)
        if filename:
            try:
                filename.encode('ascii')
            except UnicodeEncodeError:
                # Non-ASCII filename: use RFC 2231 (charset, language, value)
                # form for the Content-Disposition parameter.
                if six.PY2:
                    filename = filename.encode('utf-8')
                filename = ('utf-8', '', filename)
            attachment.add_header('Content-Disposition', 'attachment',
                                  filename=filename)
        return attachment
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    alternative_subtype = 'alternative'

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, alternatives=None,
                 cc=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        All strings used to create the message can be unicode strings (or UTF-8
        bytestrings). The SafeMIMEText class will handle any necessary encoding
        conversions.
        """
        super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc)
        # List of (content, mimetype) pairs attached as alternatives.
        self.alternatives = alternatives or []

    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))

    def _create_message(self, msg):
        # Alternatives are nested inside the attachments container.
        return self._create_attachments(self._create_alternatives(msg))

    def _create_alternatives(self, msg):
        # Wrap the body plus any alternatives (e.g. an HTML version) in a
        # multipart/alternative container when alternatives exist.
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if self.alternatives:
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for alternative in self.alternatives:
                msg.attach(self._create_mime_attachment(*alternative))
        return msg
| apache-2.0 |
bop/foundation | lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/namedwizardtests/tests.py | 88 | 14807 | from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView,
NamedUrlCookieWizardView)
from django.contrib.formtools.tests.wizard.forms import get_request, Step1, Step2
class NamedWizardTests(object):
    """
    Mixin exercising the named-URL wizard views (one URL per step).

    Concrete subclasses provide ``wizard_urlname``, ``wizard_step_1_data``
    and ``wizard_step_data`` for a specific storage backend (session or
    cookie).
    """
    urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'
    def setUp(self):
        # Step 1's form has a required user field; point the canned POST
        # data at a real user so it validates.
        self.testuser, created = User.objects.get_or_create(username='testuser1')
        self.wizard_step_data[0]['form1-user'] = self.testuser.pk
    def test_initial_call(self):
        """The start URL redirects to step 1 with fresh step metadata."""
        response = self.client.get(reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form1')
        self.assertEqual(wizard['steps'].step0, 0)
        self.assertEqual(wizard['steps'].step1, 1)
        self.assertEqual(wizard['steps'].last, 'form4')
        self.assertEqual(wizard['steps'].prev, None)
        self.assertEqual(wizard['steps'].next, 'form2')
        self.assertEqual(wizard['steps'].count, 4)
        self.assertEqual(wizard['url_name'], self.wizard_urlname)
    def test_initial_call_with_params(self):
        """Query-string parameters survive the redirect to step 1."""
        get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
        response = self.client.get(reverse('%s_start' % self.wizard_urlname),
                                   get_params)
        self.assertEqual(response.status_code, 302)
        # Test for proper redirect GET parameters
        location = response['Location']
        self.assertNotEqual(location.find('?'), -1)
        querydict = QueryDict(location[location.find('?') + 1:])
        self.assertEqual(dict(querydict.items()), get_params)
    def test_form_post_error(self):
        """Posting incomplete data re-renders step 1 with field errors."""
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_1_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        self.assertEqual(response.context['wizard']['form'].errors,
                         {'name': [u'This field is required.'],
                          'user': [u'This field is required.']})
    def test_form_post_success(self):
        """A valid step-1 POST redirects to step 2 with updated metadata."""
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form2')
        self.assertEqual(wizard['steps'].step0, 1)
        self.assertEqual(wizard['steps'].prev, 'form1')
        self.assertEqual(wizard['steps'].next, 'form3')
    def test_form_stepback(self):
        """The wizard_goto_step management field navigates back a step."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={
                'step': response.context['wizard']['steps'].current
            }), {'wizard_goto_step': response.context['wizard']['steps'].prev})
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
    def test_form_jump(self):
        """Named URLs allow jumping directly to any step."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form3'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')
    def test_form_finish(self):
        """Walking all four steps yields the combined cleaned form_list."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        post_data = self.wizard_step_data[1]
        # NOTE(review): this test file is uploaded as the step-2 file field;
        # the handle from open(__file__) is never explicitly closed.
        post_data['form2-file1'] = open(__file__)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form4')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        all_data = response.context['form_list']
        self.assertEqual(all_data[1]['file1'].read(), open(__file__).read())
        del all_data[1]['file1']
        self.assertEqual(all_data, [
            {'name': u'Pony', 'thirsty': True, 'user': self.testuser},
            {'address1': u'123 Main St', 'address2': u'Djangoland'},
            {'random_crap': u'blah blah'},
            [{'random_crap': u'blah blah'}, {'random_crap': u'blah blah'}]])
    def test_cleaned_data(self):
        """Completing the wizard exposes a merged all_cleaned_data dict."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        post_data['form2-file1'] = open(__file__)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        # Revisiting step 2 must re-serve the previously uploaded file.
        step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'})
        response = self.client.get(step2_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        self.assertEqual(response.context['wizard']['form'].files['form2-file1'].read(), open(__file__).read())
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        all_data = response.context['all_cleaned_data']
        self.assertEqual(all_data['file1'].read(), open(__file__).read())
        del all_data['file1']
        self.assertEqual(
            all_data,
            {'name': u'Pony', 'thirsty': True, 'user': self.testuser,
             'address1': u'123 Main St', 'address2': u'Djangoland',
             'random_crap': u'blah blah', 'formset-form4': [
                 {'random_crap': u'blah blah'},
                 {'random_crap': u'blah blah'}
             ]})
    def test_manipulated_data(self):
        """Dropping the storage mid-run resets the wizard to step 1."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        post_data['form2-file1'] = open(__file__)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        loc = response['Location']
        response = self.client.get(loc)
        self.assertEqual(response.status_code, 200, loc)
        # Simulate tampering: discard both backends' storage out from
        # under the wizard before posting the final step.
        self.client.cookies.pop('sessionid', None)
        self.client.cookies.pop('wizard_cookie_contact_wizard', None)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
    def test_form_reset(self):
        """The ``?reset=1`` query argument restarts the wizard."""
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        response = self.client.get(
            '%s?reset=1' % reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
class NamedSessionWizardTests(NamedWizardTests, TestCase):
    """Run the named-URL wizard tests against the session storage backend."""
    wizard_urlname = 'nwiz_session'
    # Management data only -- no field values, so step 1 must fail validation.
    wizard_step_1_data = {
        'session_contact_wizard-current_step': 'form1',
    }
    # One complete, valid POST payload per wizard step (form1..form4);
    # form4 is a formset, hence the INITIAL/TOTAL/MAX_NUM management keys.
    wizard_step_data = (
        {
            'form1-name': 'Pony',
            'form1-thirsty': '2',
            'session_contact_wizard-current_step': 'form1',
        },
        {
            'form2-address1': '123 Main St',
            'form2-address2': 'Djangoland',
            'session_contact_wizard-current_step': 'form2',
        },
        {
            'form3-random_crap': 'blah blah',
            'session_contact_wizard-current_step': 'form3',
        },
        {
            'form4-INITIAL_FORMS': '0',
            'form4-TOTAL_FORMS': '2',
            'form4-MAX_NUM_FORMS': '0',
            'form4-0-random_crap': 'blah blah',
            'form4-1-random_crap': 'blah blah',
            'session_contact_wizard-current_step': 'form4',
        }
    )
class NamedCookieWizardTests(NamedWizardTests, TestCase):
    """Run the named-URL wizard tests against the cookie storage backend."""
    wizard_urlname = 'nwiz_cookie'
    # Management data only -- no field values, so step 1 must fail validation.
    wizard_step_1_data = {
        'cookie_contact_wizard-current_step': 'form1',
    }
    # One complete, valid POST payload per wizard step (form1..form4).
    wizard_step_data = (
        {
            'form1-name': 'Pony',
            'form1-thirsty': '2',
            'cookie_contact_wizard-current_step': 'form1',
        },
        {
            'form2-address1': '123 Main St',
            'form2-address2': 'Djangoland',
            'cookie_contact_wizard-current_step': 'form2',
        },
        {
            'form3-random_crap': 'blah blah',
            'cookie_contact_wizard-current_step': 'form3',
        },
        {
            'form4-INITIAL_FORMS': '0',
            'form4-TOTAL_FORMS': '2',
            'form4-MAX_NUM_FORMS': '0',
            'form4-0-random_crap': 'blah blah',
            'form4-1-random_crap': 'blah blah',
            'cookie_contact_wizard-current_step': 'form4',
        }
    )
class NamedFormTests(object):
    """Mixin checking that rendering 'done' revalidates from the first step."""
    urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'

    def test_revalidation(self):
        """render_done() on an empty wizard must rewind to the first step."""
        fake_request = get_request()
        view = self.formwizard_class.as_view(
            [('start', Step1), ('step2', Step2)],
            url_name=self.wizard_urlname)
        unused_response, wizard = view(fake_request, step='done')
        wizard.render_done(None)
        self.assertEqual(wizard.storage.current_step, 'start')
class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView):
    """Session wizard whose dispatch() also hands back the view instance."""

    def dispatch(self, request, *args, **kwargs):
        parent = super(TestNamedUrlSessionWizardView, self)
        return parent.dispatch(request, *args, **kwargs), self
class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView):
    """Cookie wizard whose dispatch() also hands back the view instance."""

    def dispatch(self, request, *args, **kwargs):
        parent = super(TestNamedUrlCookieWizardView, self)
        return parent.dispatch(request, *args, **kwargs), self
class NamedSessionFormTests(NamedFormTests, TestCase):
    """Revalidation tests bound to the session-backed named-URL wizard."""
    formwizard_class = TestNamedUrlSessionWizardView
    wizard_urlname = 'nwiz_session'
class NamedCookieFormTests(NamedFormTests, TestCase):
    """Revalidation tests bound to the cookie-backed named-URL wizard."""
    formwizard_class = TestNamedUrlCookieWizardView
    wizard_urlname = 'nwiz_cookie'
| gpl-2.0 |
jmmease/pandas | pandas/tests/io/parser/dtypes.py | 2 | 15223 | # -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
    """
    Shared dtype-specification tests; mixed into concrete parser test
    classes that supply ``read_csv``/``read_table`` (C and Python engines).
    """
    def test_passing_dtype(self):
        # see gh-6607
        df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
            'AB'), index=['1A', '1B', '1C', '1D', '1E'])
        with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
            df.to_csv(path)
            # see gh-3795: passing 'str' as the dtype
            result = self.read_csv(path, dtype=str, index_col=0)
            expected = df.astype(str)
            tm.assert_frame_equal(result, expected)
            # for parsing, interpret object as str
            result = self.read_csv(path, dtype=object, index_col=0)
            tm.assert_frame_equal(result, expected)
            # we expect all object columns, so need to
            # convert to test for equivalence
            result = result.astype(float)
            tm.assert_frame_equal(result, df)
            # invalid dtype
            pytest.raises(TypeError, self.read_csv, path,
                          dtype={'A': 'foo', 'B': 'float64'},
                          index_col=0)
        # see gh-12048: empty frame
        actual = self.read_csv(StringIO('A,B'), dtype=str)
        expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
        tm.assert_frame_equal(actual, expected)
    def test_pass_dtype(self):
        # Per-column dtypes may be keyed by name or by position.
        data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
        result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
        assert result['one'].dtype == 'u1'
        assert result['two'].dtype == 'object'
    def test_categorical_dtype(self):
        # GH 10153
        data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
        expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
                                 'b': Categorical(['a', 'a', 'b']),
                                 'c': Categorical(['3.4', '3.4', '4.5'])})
        actual = self.read_csv(StringIO(data), dtype='category')
        tm.assert_frame_equal(actual, expected)
        actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
        tm.assert_frame_equal(actual, expected)
        actual = self.read_csv(StringIO(data), dtype={'a': 'category',
                                                      'b': 'category',
                                                      'c': CategoricalDtype()})
        tm.assert_frame_equal(actual, expected)
        actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
        expected = pd.DataFrame({'a': [1, 1, 2],
                                 'b': Categorical(['a', 'a', 'b']),
                                 'c': [3.4, 3.4, 4.5]})
        tm.assert_frame_equal(actual, expected)
        actual = self.read_csv(StringIO(data), dtype={1: 'category'})
        tm.assert_frame_equal(actual, expected)
        # unsorted
        data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
        expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
                                 'b': Categorical(['b', 'b', 'a']),
                                 'c': Categorical(['3.4', '3.4', '4.5'])})
        actual = self.read_csv(StringIO(data), dtype='category')
        tm.assert_frame_equal(actual, expected)
        # missing
        data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
        expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
                                 'b': Categorical(['b', np.nan, 'a']),
                                 'c': Categorical(['3.4', '3.4', '4.5'])})
        actual = self.read_csv(StringIO(data), dtype='category')
        tm.assert_frame_equal(actual, expected)
    def test_categorical_dtype_encoding(self):
        # GH 10153
        pth = tm.get_data_path('unicode_series.csv')
        encoding = 'latin-1'
        expected = self.read_csv(pth, header=None, encoding=encoding)
        expected[1] = Categorical(expected[1])
        actual = self.read_csv(pth, header=None, encoding=encoding,
                               dtype={1: 'category'})
        tm.assert_frame_equal(actual, expected)
        pth = tm.get_data_path('utf16_ex.txt')
        encoding = 'utf-16'
        expected = self.read_table(pth, encoding=encoding)
        expected = expected.apply(Categorical)
        actual = self.read_table(pth, encoding=encoding, dtype='category')
        tm.assert_frame_equal(actual, expected)
    def test_categorical_dtype_chunksize(self):
        # GH 10153
        data = """a,b
1,a
1,b
1,b
2,c"""
        expecteds = [pd.DataFrame({'a': [1, 1],
                                   'b': Categorical(['a', 'b'])}),
                     pd.DataFrame({'a': [1, 2],
                                   'b': Categorical(['b', 'c'])},
                                  index=[2, 3])]
        actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
                                chunksize=2)
        for actual, expected in zip(actuals, expecteds):
            tm.assert_frame_equal(actual, expected)
    @pytest.mark.parametrize('ordered', [False, True])
    @pytest.mark.parametrize('categories', [
        ['a', 'b', 'c'],
        ['a', 'c', 'b'],
        ['a', 'b', 'c', 'd'],
        ['c', 'b', 'a'],
    ])
    def test_categorical_categoricaldtype(self, categories, ordered):
        data = """a,b
1,a
1,b
1,b
2,c"""
        expected = pd.DataFrame({
            "a": [1, 1, 1, 2],
            "b": Categorical(['a', 'b', 'b', 'c'],
                             categories=categories,
                             ordered=ordered)
        })
        dtype = {"b": CategoricalDtype(categories=categories,
                                       ordered=ordered)}
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)
    def test_categorical_categoricaldtype_unsorted(self):
        data = """a,b
1,a
1,b
1,b
2,c"""
        dtype = CategoricalDtype(['c', 'b', 'a'])
        expected = pd.DataFrame({
            'a': [1, 1, 1, 2],
            'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
        })
        result = self.read_csv(StringIO(data), dtype={'b': dtype})
        tm.assert_frame_equal(result, expected)
    def test_categoricaldtype_coerces_numeric(self):
        dtype = {'b': CategoricalDtype([1, 2, 3])}
        data = "b\n1\n1\n2\n3"
        expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)
    def test_categoricaldtype_coerces_datetime(self):
        dtype = {
            'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
        }
        data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
        expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)
        dtype = {
            'b': CategoricalDtype([pd.Timestamp("2014")])
        }
        data = "b\n2014-01-01\n2014-01-01T00:00:00"
        expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)
    def test_categoricaldtype_coerces_timedelta(self):
        dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
        data = "b\n1H\n2H\n3H"
        expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)
    def test_categoricaldtype_unexpected_categories(self):
        dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
        data = "b\nd\na\nc\nd"  # Unexpected c
        expected = pd.DataFrame({"b": Categorical(list('dacd'),
                                                  dtype=dtype['b'])})
        result = self.read_csv(StringIO(data), dtype=dtype)
        tm.assert_frame_equal(result, expected)
    def test_categorical_categoricaldtype_chunksize(self):
        # GH 10153
        data = """a,b
1,a
1,b
1,b
2,c"""
        cats = ['a', 'b', 'c']
        expecteds = [pd.DataFrame({'a': [1, 1],
                                   'b': Categorical(['a', 'b'],
                                                    categories=cats)}),
                     pd.DataFrame({'a': [1, 2],
                                   'b': Categorical(['b', 'c'],
                                                    categories=cats)},
                                  index=[2, 3])]
        dtype = CategoricalDtype(cats)
        actuals = self.read_csv(StringIO(data), dtype={'b': dtype},
                                chunksize=2)
        for actual, expected in zip(actuals, expecteds):
            tm.assert_frame_equal(actual, expected)
    def test_empty_pass_dtype(self):
        # Header-only input must still honour the requested dtype.
        data = 'one,two'
        result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
        expected = DataFrame({'one': np.empty(0, dtype='u1'),
                              'two': np.empty(0, dtype=np.object)})
        tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_empty_with_index_pass_dtype(self):
        data = 'one,two'
        result = self.read_csv(StringIO(data), index_col=['one'],
                               dtype={'one': 'u1', 1: 'f'})
        expected = DataFrame({'two': np.empty(0, dtype='f')},
                             index=Index([], dtype='u1', name='one'))
        tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_empty_with_multiindex_pass_dtype(self):
        data = 'one,two,three'
        result = self.read_csv(StringIO(data), index_col=['one', 'two'],
                               dtype={'one': 'u1', 1: 'f8'})
        exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
                                          np.empty(0, dtype='O')],
                                         names=['one', 'two'])
        expected = DataFrame(
            {'three': np.empty(0, dtype=np.object)}, index=exp_idx)
        tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_empty_with_mangled_column_pass_dtype_by_names(self):
        # Duplicate headers are mangled to 'one', 'one.1'.
        data = 'one,one'
        result = self.read_csv(StringIO(data), dtype={
            'one': 'u1', 'one.1': 'f'})
        expected = DataFrame(
            {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
        tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
        data = 'one,one'
        result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
        expected = DataFrame(
            {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
        tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_empty_with_dup_column_pass_dtype_by_indexes(self):
        # see gh-9424
        expected = pd.concat([Series([], name='one', dtype='u1'),
                              Series([], name='one.1', dtype='f')], axis=1)
        data = 'one,one'
        result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
        tm.assert_frame_equal(result, expected, check_index_type=False)
        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
            data = ''
            result = self.read_csv(StringIO(data), names=['one', 'one'],
                                   dtype={0: 'u1', 1: 'f'})
            tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_raise_on_passed_int_dtype_with_nas(self):
        # see gh-2631
        data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
        pytest.raises(ValueError, self.read_csv, StringIO(data),
                      sep=",", skipinitialspace=True,
                      dtype={'DOY': np.int64})
    def test_dtype_with_converter(self):
        data = """a,b
1.1,2.2
1.2,2.3"""
        # dtype spec ignored if converted specified
        with tm.assert_produces_warning(ParserWarning):
            result = self.read_csv(StringIO(data), dtype={'a': 'i8'},
                                   converters={'a': lambda x: str(x)})
        expected = DataFrame({'a': ['1.1', '1.2'], 'b': [2.2, 2.3]})
        tm.assert_frame_equal(result, expected)
    def test_empty_dtype(self):
        # see gh-14712
        data = 'a,b'
        expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
        result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
        tm.assert_frame_equal(result, expected)
        expected = pd.DataFrame({'a': pd.Categorical([]),
                                 'b': pd.Categorical([])},
                                index=[])
        result = self.read_csv(StringIO(data), header=0,
                               dtype='category')
        tm.assert_frame_equal(result, expected)
        result = self.read_csv(StringIO(data), header=0,
                               dtype={'a': 'category', 'b': 'category'})
        tm.assert_frame_equal(result, expected)
        expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
        result = self.read_csv(StringIO(data), header=0,
                               dtype='datetime64[ns]')
        tm.assert_frame_equal(result, expected)
        expected = pd.DataFrame({'a': pd.Series([], dtype='timedelta64[ns]'),
                                 'b': pd.Series([], dtype='timedelta64[ns]')},
                                index=[])
        result = self.read_csv(StringIO(data), header=0,
                               dtype='timedelta64[ns]')
        tm.assert_frame_equal(result, expected)
        expected = pd.DataFrame(columns=['a', 'b'])
        expected['a'] = expected['a'].astype(np.float64)
        result = self.read_csv(StringIO(data), header=0,
                               dtype={'a': np.float64})
        tm.assert_frame_equal(result, expected)
        expected = pd.DataFrame(columns=['a', 'b'])
        expected['a'] = expected['a'].astype(np.float64)
        result = self.read_csv(StringIO(data), header=0,
                               dtype={0: np.float64})
        tm.assert_frame_equal(result, expected)
        expected = pd.DataFrame(columns=['a', 'b'])
        expected['a'] = expected['a'].astype(np.int32)
        expected['b'] = expected['b'].astype(np.float64)
        result = self.read_csv(StringIO(data), header=0,
                               dtype={'a': np.int32, 1: np.float64})
        tm.assert_frame_equal(result, expected)
    def test_numeric_dtype(self):
        # Every integer and float typecode should round-trip on parse.
        data = '0\n1'
        for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
            expected = pd.DataFrame([0, 1], dtype=dt)
            result = self.read_csv(StringIO(data), header=None, dtype=dt)
            tm.assert_frame_equal(expected, result)
| bsd-3-clause |
3dfxmadscientist/cbss-server | addons/account_bank_statement_extensions/wizard/confirm_statement_line.py | 52 | 1515 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class confirm_statement_line(osv.osv_memory):
    """Transient wizard that flags the selected bank statement lines
    as confirmed."""
    _name = 'confirm.statement.line'
    _description = 'Confirm selected statement lines'

    def confirm_lines(self, cr, uid, ids, context=None):
        """Set state='confirm' on the statement lines selected in the client.

        :param cr: database cursor
        :param uid: id of the acting user
        :param ids: ids of this transient wizard record (unused)
        :param context: client context; the lines to confirm are read from
            ``context['active_ids']``. Defaults to None (OpenERP convention;
            the original signature made context mandatory and crashed when
            the key was absent).
        :return: empty dict (closes the wizard window)
        """
        if context is None:
            context = {}
        line_ids = context.get('active_ids') or []
        if line_ids:
            line_obj = self.pool.get('account.bank.statement.line')
            line_obj.write(cr, uid, line_ids, {'state': 'confirm'}, context=context)
        return {}

confirm_statement_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NoMasp/wwmmo | website/qrcode/util.py | 4 | 9831 | import math
from qrcode import base, exceptions
# QR encoding modes.
MODE_NUMBER = 1 << 0
MODE_ALPHA_NUM = 1 << 1
MODE_8BIT_BYTE = 1 << 2
MODE_KANJI = 1 << 3
# Encoding mode sizes: bit width of the character-count field for each
# encoding mode, bucketed by symbol version (small: 1-9, medium: 10-26,
# large: 27-40) -- see length_in_bits().
MODE_SIZE_SMALL = {
    MODE_NUMBER: 10,
    MODE_ALPHA_NUM: 9,
    MODE_8BIT_BYTE: 8,
    MODE_KANJI: 8,
}
MODE_SIZE_MEDIUM = {
    MODE_NUMBER: 12,
    MODE_ALPHA_NUM: 11,
    MODE_8BIT_BYTE: 16,
    MODE_KANJI: 10,
}
MODE_SIZE_LARGE = {
    MODE_NUMBER: 14,
    MODE_ALPHA_NUM: 13,
    MODE_8BIT_BYTE: 16,
    MODE_KANJI: 12,
}
# Alignment pattern center coordinates, one row per symbol version (1-40);
# version 1 has no alignment patterns, hence the empty first row.
PATTERN_POSITION_TABLE = [
    [],
    [6, 18],
    [6, 22],
    [6, 26],
    [6, 30],
    [6, 34],
    [6, 22, 38],
    [6, 24, 42],
    [6, 26, 46],
    [6, 28, 50],
    [6, 30, 54],
    [6, 32, 58],
    [6, 34, 62],
    [6, 26, 46, 66],
    [6, 26, 48, 70],
    [6, 26, 50, 74],
    [6, 30, 54, 78],
    [6, 30, 56, 82],
    [6, 30, 58, 86],
    [6, 34, 62, 90],
    [6, 28, 50, 72, 94],
    [6, 26, 50, 74, 98],
    [6, 30, 54, 78, 102],
    [6, 28, 54, 80, 106],
    [6, 32, 58, 84, 110],
    [6, 30, 58, 86, 114],
    [6, 34, 62, 90, 118],
    [6, 26, 50, 74, 98, 122],
    [6, 30, 54, 78, 102, 126],
    [6, 26, 52, 78, 104, 130],
    [6, 30, 56, 82, 108, 134],
    [6, 34, 60, 86, 112, 138],
    [6, 30, 58, 86, 114, 142],
    [6, 34, 62, 90, 118, 146],
    [6, 30, 54, 78, 102, 126, 150],
    [6, 24, 50, 76, 102, 128, 154],
    [6, 28, 54, 80, 106, 132, 158],
    [6, 32, 58, 84, 110, 136, 162],
    [6, 26, 54, 82, 110, 138, 166],
    [6, 30, 58, 86, 114, 142, 170]
]
# BCH generator polynomials used by BCH_type_info (format bits, G15) and
# BCH_type_number (version bits, G18), and the XOR mask applied to the
# final format information (G15_MASK).
G15 = ((1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) |
       (1 << 0))
G18 = ((1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) |
       (1 << 2) | (1 << 0))
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
# Alternating pad codewords appended to fill unused data capacity.
PAD0 = 0xEC
PAD1 = 0x11
def BCH_type_info(data):
    """Append the 10 BCH error-correction bits for the 5-bit format value
    *data*, then apply the standard format-information XOR mask."""
    remainder = data << 10
    # Polynomial long division modulo G15 over GF(2).
    while BCH_digit(remainder) >= BCH_digit(G15):
        remainder ^= (G15 << (BCH_digit(remainder) - BCH_digit(G15)))
    return ((data << 10) | remainder) ^ G15_MASK
def BCH_type_number(data):
    """Append the 12 BCH error-correction bits for the 6-bit version value
    *data* (used for the version-information field)."""
    remainder = data << 12
    # Polynomial long division modulo G18 over GF(2).
    while BCH_digit(remainder) >= BCH_digit(G18):
        remainder ^= (G18 << (BCH_digit(remainder) - BCH_digit(G18)))
    return (data << 12) | remainder
def BCH_digit(data):
    """
    Return the number of significant bits in *data* -- the position of the
    highest set bit plus one, or 0 when data == 0.

    Replaces the original shift-and-count loop with the equivalent,
    idiomatic int.bit_length() (callers only ever pass the non-negative
    remainders produced by the BCH division loops, which is also the only
    range where the old loop terminated).
    """
    return data.bit_length()
def pattern_position(version):
    # Alignment-pattern center coordinates for symbol *version* (1-40);
    # table row 0 corresponds to version 1 (which has none).
    return PATTERN_POSITION_TABLE[version - 1]
def mask_func(pattern):
    """
    Return the mask function for the given mask pattern.

    The returned callable takes module coordinates (i, j) and returns True
    when mask *pattern* (0-7) inverts that module.

    Raises TypeError for patterns outside 0-7.
    """
    if pattern == 0:   # 000
        return lambda i, j: (i + j) % 2 == 0
    if pattern == 1:   # 001
        return lambda i, j: i % 2 == 0
    if pattern == 2:   # 010
        return lambda i, j: j % 3 == 0
    if pattern == 3:   # 011
        return lambda i, j: (i + j) % 3 == 0
    if pattern == 4:   # 100
        return lambda i, j: (math.floor(i / 2) + math.floor(j / 3)) % 2 == 0
    if pattern == 5:   # 101
        return lambda i, j: (i * j) % 2 + (i * j) % 3 == 0
    if pattern == 6:   # 110
        return lambda i, j: ((i * j) % 2 + (i * j) % 3) % 2 == 0
    if pattern == 7:   # 111
        return lambda i, j: ((i * j) % 3 + (i + j) % 2) % 2 == 0
    # Bug fix: the original did ``"Bad mask pattern: " + pattern`` which
    # raises an unrelated TypeError (str + int) before the intended message
    # could ever be built; format the value instead.
    raise TypeError("Bad mask pattern: %s" % pattern)
def length_in_bits(mode, version):
    """Return the bit width of the character-count field for the given
    encoding *mode* in symbol *version* (1-40)."""
    if mode not in (MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE,
                    MODE_KANJI):
        raise TypeError("Invalid mode (%s)" % mode)
    if version < 1 or version > 40:
        raise ValueError("Invalid version (was %s, expected 1 to 40)" %
                         version)
    # Field widths change at versions 10 and 27.
    for upper_bound, size_table in ((10, MODE_SIZE_SMALL),
                                    (27, MODE_SIZE_MEDIUM),
                                    (41, MODE_SIZE_LARGE)):
        if version < upper_bound:
            return size_table[mode]
def lost_point(modules):
    """
    Score the mask penalty of a fully-mapped module matrix.

    Applies the four standard penalty rules: runs of same-coloured
    neighbours, solid 2x2 blocks, finder-like 1011101 patterns in rows and
    columns, and overall dark/light imbalance.  Lower is better.
    """
    size = len(modules)
    penalty = 0

    # Rule 1: each module with more than 5 equal-coloured in-bounds
    # neighbours (3x3 window, centre excluded) costs 3 + (count - 5).
    for row in range(size):
        for col in range(size):
            colour = modules[row][col]
            same = 0
            for dr in (-1, 0, 1):
                r = row + dr
                if r < 0 or r >= size:
                    continue
                for dc in (-1, 0, 1):
                    c = col + dc
                    if c < 0 or c >= size:
                        continue
                    if dr == 0 and dc == 0:
                        continue
                    if modules[r][c] == colour:
                        same += 1
            if same > 5:
                penalty += (3 + same - 5)

    # Rule 2: every uniformly dark or uniformly light 2x2 block costs 3.
    for row in range(size - 1):
        for col in range(size - 1):
            dark = sum(1 for cell in (modules[row][col],
                                      modules[row + 1][col],
                                      modules[row][col + 1],
                                      modules[row + 1][col + 1]) if cell)
            if dark == 0 or dark == 4:
                penalty += 3

    # Rule 3: a dark-light-dark-dark-dark-light-dark run (finder-like
    # pattern) in any row or column costs 40.
    finder = [True, False, True, True, True, False, True]
    for row in range(size):
        line = modules[row]
        for col in range(size - 6):
            if [bool(line[col + k]) for k in range(7)] == finder:
                penalty += 40
    for col in range(size):
        for row in range(size - 6):
            if [bool(modules[row + k][col]) for k in range(7)] == finder:
                penalty += 40

    # Rule 4: deviation of the dark-module ratio from 50%, in 5% steps,
    # costs 10 per step.
    dark_total = 0
    for line in modules:
        for cell in line:
            if cell:
                dark_total += 1
    ratio = abs(100 * dark_total / size / size - 50) / 5
    penalty += ratio * 10
    return penalty
class QR8bitByte:
    """A data segment encoded in plain 8-bit-byte mode."""

    def __init__(self, data):
        self.mode = MODE_8BIT_BYTE
        self.data = data

    def __len__(self):
        return len(self.data)

    def write(self, buffer):
        # Emit each character as its raw 8-bit code point (no JIS handling).
        for ch in self.data:
            buffer.put(ord(ch), 8)

    def __repr__(self):
        return self.data
class BitBuffer:
    """A growable big-endian bit string backed by a list of byte values."""

    def __init__(self):
        self.buffer = []    # byte values; bit 0 of the stream is the MSB of buffer[0]
        self.length = 0     # number of valid bits currently stored

    def __repr__(self):
        return ".".join([str(n) for n in self.buffer])

    def get(self, index):
        """Return bit *index* (0-based from the stream start) as a bool."""
        # Consistency/bug fix: use integer floor division like put_bit()
        # does; the original math.floor(index / 8) produced a float under
        # Python 2 semantics and mixed two styles for the same computation.
        buf_index = index // 8
        return ((self.buffer[buf_index] >> (7 - index % 8)) & 1) == 1

    def put(self, num, length):
        """Append the *length* low-order bits of *num*, most significant
        bit first."""
        for i in range(length):
            self.put_bit(((num >> (length - i - 1)) & 1) == 1)

    def __len__(self):
        return self.length

    def put_bit(self, bit):
        """Append a single bit, growing the backing byte list as needed."""
        buf_index = self.length // 8
        if len(self.buffer) <= buf_index:
            self.buffer.append(0)
        if bit:
            self.buffer[buf_index] |= (0x80 >> (self.length % 8))
        self.length += 1
def create_bytes(buffer, rs_blocks):
    """Split the data bits into RS blocks, compute the error-correction
    codewords for each block, and return the interleaved codeword list.
    """
    offset = 0

    maxDcCount = 0
    maxEcCount = 0

    dcdata = [0] * len(rs_blocks)  # per-block data codewords
    ecdata = [0] * len(rs_blocks)  # per-block error-correction codewords

    for r in range(len(rs_blocks)):

        dcCount = rs_blocks[r].data_count
        ecCount = rs_blocks[r].total_count - dcCount

        maxDcCount = max(maxDcCount, dcCount)
        maxEcCount = max(maxEcCount, ecCount)

        dcdata[r] = [0] * dcCount

        for i in range(len(dcdata[r])):
            dcdata[r][i] = 0xff & buffer.buffer[i + offset]
        offset += dcCount

        # Get error correction polynomial.
        rsPoly = base.Polynomial([1], 0)
        for i in range(ecCount):
            rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0)

        rawPoly = base.Polynomial(dcdata[r], len(rsPoly) - 1)

        # The remainder of the division gives the EC codewords.
        modPoly = rawPoly % rsPoly
        ecdata[r] = [0] * (len(rsPoly) - 1)
        for i in range(len(ecdata[r])):
            modIndex = i + len(modPoly) - len(ecdata[r])
            if (modIndex >= 0):
                ecdata[r][i] = modPoly[modIndex]
            else:
                ecdata[r][i] = 0

    totalCodeCount = 0
    for rs_block in rs_blocks:
        totalCodeCount += rs_block.total_count

    # Interleave: data codewords column-by-column, then the EC codewords.
    data = [None] * totalCodeCount
    index = 0

    for i in range(maxDcCount):
        for r in range(len(rs_blocks)):
            if i < len(dcdata[r]):
                data[index] = dcdata[r][i]
                index += 1

    for i in range(maxEcCount):
        for r in range(len(rs_blocks)):
            if i < len(ecdata[r]):
                data[index] = ecdata[r][i]
                index += 1

    return data
def create_data(version, error_correction, data_list):
    """Encode the data segments into the final interleaved codeword stream
    for the given QR ``version`` / ``error_correction`` level.

    Raises exceptions.DataOverflowError when the segments do not fit.
    """
    rs_blocks = base.rs_blocks(version, error_correction)

    buffer = BitBuffer()
    for data in data_list:
        buffer.put(data.mode, 4)  # 4-bit mode indicator
        buffer.put(len(data),
                   length_in_bits(data.mode, version))  # character-count field
        data.write(buffer)

    # calc num max data.
    total_data_count = 0
    for block in rs_blocks:
        total_data_count += block.data_count

    if len(buffer) > total_data_count * 8:
        raise exceptions.DataOverflowError("Code length overflow. Data size "
            "(%s) > size available (%s)" % (len(buffer), total_data_count * 8))

    # end code (terminator, only if there is room for it)
    if len(buffer) + 4 <= total_data_count * 8:
        buffer.put(0, 4)

    # padding to a byte boundary
    while len(buffer) % 8:
        buffer.put_bit(False)

    # padding with the alternating pad codewords until capacity is reached
    while True:
        if len(buffer) >= total_data_count * 8:
            break
        buffer.put(PAD0, 8)

        if len(buffer) >= total_data_count * 8:
            break
        buffer.put(PAD1, 8)

    return create_bytes(buffer, rs_blocks)
| mit |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/test/test_pickle.py | 41 | 2028 | import pickle
from cStringIO import StringIO
from test import test_support
from test.pickletester import AbstractPickleTests
from test.pickletester import AbstractPickleModuleTests
from test.pickletester import AbstractPersistentPicklerTests
from test.pickletester import AbstractPicklerUnpicklerObjectTests
class PickleTests(AbstractPickleTests, AbstractPickleModuleTests):
    """Run the shared pickle test suite through the module-level API."""

    def dumps(self, arg, proto=0, fast=0):
        # Ignore fast
        return pickle.dumps(arg, proto)

    def loads(self, buf):
        # Ignore fast
        return pickle.loads(buf)

    module = pickle
    error = KeyError
class PicklerTests(AbstractPickleTests):
    """Run the shared pickle tests through explicit Pickler/Unpickler objects."""

    error = KeyError

    def dumps(self, arg, proto=0, fast=0):
        f = StringIO()
        p = pickle.Pickler(f, proto)
        if fast:
            p.fast = fast
        p.dump(arg)
        f.seek(0)
        return f.read()

    def loads(self, buf):
        f = StringIO(buf)
        u = pickle.Unpickler(f)
        return u.load()
class PersPicklerTests(AbstractPersistentPicklerTests):
    """Run the persistent-id tests, delegating persistent_id/persistent_load
    from throwaway subclasses back to the test-case instance."""

    def dumps(self, arg, proto=0, fast=0):
        class PersPickler(pickle.Pickler):
            def persistent_id(subself, obj):
                # Delegate to the enclosing test case.
                return self.persistent_id(obj)
        f = StringIO()
        p = PersPickler(f, proto)
        if fast:
            p.fast = fast
        p.dump(arg)
        f.seek(0)
        return f.read()

    def loads(self, buf):
        class PersUnpickler(pickle.Unpickler):
            def persistent_load(subself, obj):
                # Delegate to the enclosing test case.
                return self.persistent_load(obj)
        f = StringIO(buf)
        u = PersUnpickler(f)
        return u.load()
class PicklerUnpicklerObjectTests(AbstractPicklerUnpicklerObjectTests):
    """Exercise Pickler/Unpickler object behaviour with the stock classes."""

    pickler_class = pickle.Pickler
    unpickler_class = pickle.Unpickler
def test_main():
    """Entry point: run all pickle test classes plus the module doctests."""
    test_support.run_unittest(
        PickleTests,
        PicklerTests,
        PersPicklerTests,
        PicklerUnpicklerObjectTests,
    )
    test_support.run_doctest(pickle)
if __name__ == "__main__":
test_main()
| mit |
palmerjh/iEBE | EBE-Node/iS/listR.py | 26 | 12905 | # version 19 --- Zhi Qiu
# level 1
"""
Provide useful functions dealing with lists and strings.
If a function ends with "I", it returns an iterator, "D" for dictionary,
and "L" for list.
"""
import re
def toList(qvar):
    """Wrap qvar in a list unless it already is one (returned unchanged)."""
    if type(qvar) == type([]):
        return qvar
    return [qvar]
def flatten(nested):
    """Yield the leaves of an arbitrarily nested structure.

    Strings are treated as atoms (yielded whole, never iterated into),
    and a non-iterable argument is simply yielded as-is.
    """
    try:
        for part in nested:
            if type(part) != type(""):
                for leaf in flatten(part):
                    yield leaf
            else:
                yield part
    except TypeError:
        yield nested
def FL(nested):
    """Yield the elements of a nested list, depth-first (lists only)."""
    for item in nested:
        if type(item) != type([]):
            yield item
        else:
            for leaf in FL(item):
                yield leaf
FLI = FL # use FLI in future functions
def FLL(nested):
    """ Return a flattend list. """
    # Materializes the FL generator into a concrete list.
    return list(FL(nested))
def totalLen(nested):
    """ Return the total number of elements in a list. """
    # Counts leaves at any nesting depth via FLL.
    return len(FLL(nested))
def intersect(lists):
    """Return the intersection of all the lists in ``lists`` (order arbitrary)."""
    if not lists:
        return lists
    if len(lists) == 1:
        return lists[0]
    common = set(lists[0])
    for other in lists[1:]:
        common &= set(other)
    return list(common)
def union(lists):
    """Return the union of all the lists in ``lists`` (order arbitrary)."""
    if not lists:
        return lists
    if len(lists) == 1:
        return lists[0]
    combined = set(lists[0])
    for other in lists[1:]:
        combined |= set(other)
    return list(combined)
def difference(lists):
    """Return the first list minus the elements of all the others."""
    if not lists:
        return lists
    if len(lists) == 1:
        return lists[0]
    remaining = set(lists[0])
    for other in lists[1:]:
        remaining -= set(other)
    return list(remaining)
def outer(lists, cuList=[]):
    """Yield every combination taking one element from each sublist of lists,
    each prefixed by cuList (used internally for the recursion)."""
    if not lists:
        yield cuList
        return
    first, rest = lists[0], lists[1:]
    for item in first:
        for combo in outer(rest, cuList + [item]):
            yield combo
def strZip(list1, list2, string):
    """Return ["x1" + string + "x2" for paired elements of the two lists]."""
    return [str(a) + string + str(b) for a, b in zip(list1, list2)]
def listToStr(list, seperator=" "):
    """Join the string forms of the elements with ``seperator``."""
    return seperator.join([str(elem) for elem in list])
def applyOrderList(order, aList):
    """Reorder aList according to order: order[i] is the source index of
    the i-th element of the result.  An empty order returns aList as-is."""
    if order == []:
        return aList
    return [aList[pos] for pos in order]
def applyOrderDic(order, aDic):
    """ Apply order to aDic.
    An order of the form ["a","b","c"] means that the first element
    should be aDic["a"], and so on.  An empty order returns all values.
    """
    if order==[]:
        # BUGFIX: dict has no .value() method -- the original raised
        # AttributeError here; .values() is what was intended.
        return aDic.values()
    else:
        return map(lambda v:aDic[v], order)
def createOrderList(wantedOrder, currentOrder):
    """Create an order list that transforms currentOrder into wantedOrder
    when passed to applyOrderList, e.g.
    createOrderList([1,2,3], [3,1,2,4,6]) -> [1,2,0]."""
    return [currentOrder.index(item) for item in wantedOrder]
def firstOccurenceInStr(aList, aString):
    """Return the first element of aList that occurs as a substring of
    aString, or None when no element does."""
    for candidate in aList:
        if candidate in aString:
            return candidate
    return None
def getTailNumber(string):
    """Return the last number-like token in ``string`` ("" when none exists)."""
    matches = re.findall(r"[0-9][0-9\.]*", string)
    return matches[-1] if matches else ""
def areDefined(names, dic):
    """Return True iff every name (a single key or a list of keys) is a key
    of dic and its value is not None."""
    if type(names) != type([]):
        names = [names]
    key_list = dic.keys()
    for aName in names:
        if aName not in key_list:
            return False
        if dic[aName] == None:
            return False
    return True
def itemsList(dic):
    """Return dic.items() with each pair as a list instead of a tuple."""
    return [list(pair) for pair in dic.items()]
def split(a_string, seperator):
    """ Split a string using seperator. """
    # Thin wrapper over str.split, used so it can be passed around as a
    # free function (e.g. in map() calls elsewhere in this module).
    return a_string.split(seperator)
def _mimicCore(patternList, flatList):
    """ Core program for function mimic.

    Walks patternList: each scalar consumes one element of flatList, each
    sublist recursively consumes as many elements as its flattened length.
    """
    for elem in patternList:
        if type(elem) != type([]):
            yield flatList[0]
            flatList = flatList[1:]
            continue
        else:
            # Recurse on exactly the slice of flatList this sublist covers.
            yield list(_mimicCore(elem, flatList[:len(list(FL(elem)))]))
            flatList = flatList[len(list(FL(elem))):]
def mimic(patternList, flatList):
    """ Make the flattened list (flatList) to have the same
    structure as patternList. (List only, no tuples)

    Returns None (after printing a message) when the element counts differ.
    """
    if len(list(FL(patternList))) != len(flatList):
        print("patternList must have the same number of total elements as flatList!")
        return None
    return list(_mimicCore(patternList, flatList))
def containedIn(smaller, larger):
    """Return True when every element of smaller (a value or list) is in larger."""
    if type(smaller) != type([]):
        smaller = [smaller]
    for elem in smaller:
        if elem not in larger:
            return False
    return True
def biDifference(larger, smaller):
    """Return a copy of larger with one occurrence of each element of
    smaller removed (elements absent from larger are skipped)."""
    remaining = list(larger)
    if type(smaller) != type([]):
        smaller = [smaller]
    for elem in smaller:
        if elem in larger:
            remaining.remove(elem)
    return remaining
def biSetDifference(larger, smaller):
    """ Remove smaller from larger (not necessary to contain smaller) using set operation. """
    # NOTE: larger is flattened first; order is not preserved and
    # duplicates collapse (set semantics), unlike biDifference above.
    return list(set(flatten(larger))-set(smaller))
def removeListFromDict(aDict, aList):
    """Delete from aDict every key listed in aList (a key or list of keys);
    returns aDict, which is mutated in place.  Missing keys raise KeyError."""
    if type(aList) != type([]):
        aList = [aList]
    if not aList:
        return aDict
    for key in aList:
        aDict.pop(key)
    return aDict
def biIntersectI(list1, list2):
    """Yield the elements of list1 (a value or list) that also occur in
    list2, keeping order and duplicates."""
    if type(list1) != type([]):
        list1 = [list1]
    for elem in list1:
        if elem in list2:
            yield elem
def subDict(keys, aDict):
    """Return the sub-dictionary of aDict restricted to the given keys
    (a single key or a list); missing keys are silently skipped."""
    if type(keys) != type([]):
        keys = [keys]
    pairs = []
    all_keys = aDict.keys()
    for key in keys:
        if key in all_keys:
            pairs.append([key, aDict[key]])
    return dict(pairs)
def removeDuplicatesSimple(aList):
    """Remove duplicates from a flat list (element order is not preserved)."""
    unique = set(aList)
    return list(unique)
def _removeDuplicatesOneLevel(aList):
""" Remove first level duplicates. """
result = []
if aList == []: return
if type(aList) != type([]): return aList
for elem in aList:
if elem not in result: result.append(elem)
return result
def removeDuplicates(aList):
    """ Remove duplicates in a (possibly nested) list, preserving order. """
    if type(aList) != type([]): return aList
    result = []
    for elem in aList:
        # Deduplicate each element recursively before this level.
        result.append(removeDuplicates(elem))
    return _removeDuplicatesOneLevel(result)
def strEqual(str1, str2, ignoreCase=False):
    """Return True when the two strings are equal after stripping
    surrounding whitespace (optionally case-insensitively)."""
    a, b = str1.strip(), str2.strip()
    if ignoreCase:
        a, b = a.upper(), b.upper()
    return a == b
def getValueListFromDict(keys, aDict):
    """Return the values of aDict at the given keys; non-list input
    yields an empty list."""
    if type(keys) != type([]):
        return []
    return [aDict[key] for key in keys]
def addItemsToDict(aList, aDict):
    """ Add a list of [key, value] pairs to a dictionary. """
    # Pairs from aDict come first; exact duplicate pairs are removed before
    # the dict() call, so a new value in aList overrides an old one.
    return dict(removeDuplicates(itemsList(aDict)+aList))
def floatizeL(aList):
    """Convert every element of aList to a float."""
    return [float(elem) for elem in aList]
def floatizeItemInDict(aDict, keyList):
    """Convert the values of aDict at the given keys (a key or list of keys)
    to float.  NOTE: mutates aDict in place and returns it."""
    if type(keyList) != type([]):
        keyList = [keyList]
    for key in keyList:
        aDict[key] = float(aDict[key])
    return aDict
def stringizeL(aList):
    """ Convert elements in aList to strings. """
    # Lists are flattened first; non-list input is stringified directly.
    if type(aList)==type([]):
        return map(str, flatten(aList))
    else:
        return str(aList)
def transpose(lists):
    """ Transpose a list of lists. """
    if not lists: return []
    # NOTE(review): relies on Python 2 map(), which pads shorter rows with
    # None when given several iterables (transpose2 depends on this);
    # Python 3 map() truncates instead -- confirm before porting.
    return map(lambda *row: list(row), *lists)
def transpose2(lists, defval=0):
    """ Transpose a list of list. defval is used instead of None for uneven lengthed lists. """
    if not lists: return []
    # NOTE: `elem or defval` substitutes defval for ANY falsy element
    # (0, "", False), not only the None padding from uneven rows.
    return map(lambda *row: [elem or defval for elem in row], *lists)
def getColumn(data, colN):
    """ Return the column colN (counted starting from 0) in the data. """
    # Transposes the whole table, then indexes the requested row of it.
    return transpose(data)[colN]
def getColumns(data, col_list):
    """
    For every row of ``data``, pick out the columns listed in ``col_list``.
    The index convention is the same as in slicing.
    """
    picked = []
    for row in data:
        picked.append([row[col] for col in col_list])
    return picked
def seperateStr(strV, seperationSymbols=[" ", "-", "\n"]):
    """ Split string according to seperationSymbols. """
    # NOTE(review): mutable default argument -- safe only while callers
    # never mutate the default list.
    if not strV:
        return []
    else:
        strings = [strV]
        for aSeperator in seperationSymbols:
            # Split by each separator in turn, flattening after every pass.
            strings = FLL(map(lambda x:split(x, aSeperator), strings))
        return strings
def readCSESD(strV, connectionSymbol="=", seperationSymbols=[",", " ", "-", "\n", "/", "\\"]): #CSE: Comma Seperated Equations
    """ Return a dic of the form {arg1:value1, ...} if with
    connectionSymbol="=" and seperationSymbol=",", strV is
    like arg1=value1,arg2=value2,...
    Values are in string form.
    """
    result = []
    strings = seperateStr(strV, seperationSymbols) # total seperation
    for aStr in strings:
        # print(aStr)
        # Fragments without the connection symbol are silently dropped.
        if connectionSymbol not in aStr: continue
        result.append(split(aStr, connectionSymbol))
    result = removeDuplicates(result) # remove duplicates
    # print(result)
    if not result: result=[] # make it "empty" instead of "None"
    if ([""] in result):
        result.remove([""]) # remove empty lists
    return dict(result)
def connectCSES(aList, connectionSymbol="=", seperationSymbol=","):
    """Render [[name, value], ...] as "name=value,name=value"; "" for empty."""
    if not aList:
        return ""
    pieces = [connectionSymbol.join(pair) for pair in aList]
    return seperationSymbol.join(pieces)
def removeTailReturn(aStr):
    """Return aStr with a single trailing newline stripped, if present."""
    if aStr.endswith("\n"):
        return aStr[:-1]
    return aStr
def takeBlock(aList, row_l,row_r,col_l,col_r):
    """Slice rows [row_l:row_r] and, within each, columns [col_l:col_r]
    out of a nested list; index conventions match slicing."""
    return [row[col_l:col_r] for row in aList[row_l:row_r]]
def takeBlock2(aList, row_list, col_list):
    """
    Pick the rows listed in ``row_list`` and, within each, the columns
    listed in ``col_list``.  Index conventions match slicing.
    """
    result = []
    for row_index in row_list:
        row = aList[row_index]
        result.append([row[col] for col in col_list])
    return result
def intStr(i, total=3):
    """Return str(i) left-padded with zeros to ``total`` characters."""
    return str(i).rjust(total, '0')
def isNested(data):
    """Return True when data is a non-empty list whose first element is a list."""
    if not data:
        return False
    return type(data[0]) == type([])
def sliceMatrixData(data, columnStep=0, centralLargeness=0):
    """ Slice data into smaller nested lists of specified vertical size (columnStep).
    If centralLargeness is not 0, only a smaller central block of specified size is used.
    Returns [] (after printing a message) when len(data) is not divisible
    by columnStep.
    """
    Ny, Nx = len(data), len(data[0])
    if columnStep==0: columnStep = Nx
    if Ny % columnStep != 0: # check if Ny is dividable by Nx
        print("Total length of data is not dividable by columnStep (or did you use float number for colStep?)!")
        return []
    if centralLargeness == 0: centralLargeness = min(columnStep, Nx) # set visible area
    y_left = int((columnStep-centralLargeness)/2) # set block size in y direction (row direction)
    y_right = int((columnStep+centralLargeness)/2)
    x_left = int((Nx-centralLargeness)/2) # set block in size x direction (column direction)
    x_right = int((Nx+centralLargeness)/2)
    result = []
    # // keeps the slice count an integer (Ny/columnStep is a float on Py3).
    for i in range(Ny//columnStep):
        # BUGFIX: the original called the undefined name `take`; the argument
        # list matches takeBlock(aList, row_l, row_r, col_l, col_r) above.
        result.append(takeBlock(data[i*columnStep:(i+1)*columnStep][:], y_left, y_right, x_left, x_right))
    return result
def next(aList, index):
    """Return the index of the element after ``index`` in aList, wrapping
    around to 0 past the last element (useful for looping over a list).
    NOTE: shadows the builtin next() within this module."""
    advanced = index + 1
    return advanced % len(aList)
def isFloat(string):
    """Return True when ``string`` can be converted to a float."""
    try:
        float(string)
    except ValueError:
        return False
    return True
def zeros(m,n):
    """Return an m-by-n "matrix" (nested list) filled with zeros."""
    return [[0 for _col in range(n)] for _row in range(m)]
| gpl-3.0 |
ench0/android_kernel_samsung_hlte | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    # A dict that transparently creates nested autodicts for missing keys,
    # allowing e.g. flag_fields[event][field]['values'][v] = s without setup.
    return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Register field_name of event_name as a flag field joined by delim.
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    # Map one bit value of a flag field to its printable name.
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really -- symbolic fields need no per-field setup,
    # only their value mappings (see define_symbolic_value).
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one exact value of a symbolic field to its printable name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    # Render a flag-type field value as a delimiter-joined string of names.
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # NOTE(review): keys() must return a list for .sort() -- Python 2 only.
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string
def symbol_str(event_name, field_name, value):
    # Map a symbolic field value to its registered name ("" when unknown).
    string = ""

    if symbolic_fields[event_name][field_name]:
        # NOTE(review): keys() must return a list for .sort() -- Python 2 only.
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
    # Render the common_flags bitmask as a " | "-separated list of names.
    string = ""
    print_delim = 0

    keys = trace_flags.keys()

    for idx in keys:
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | ";
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx

    return string
def taskState(state):
    # Map a numeric scheduler task state to its single-letter name.
    names = {
        0 : "R",
        1 : "S",
        2 : "D",
        64: "DEAD"
    }
    return names.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields shared by perf trace events."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full timestamp in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Timestamp formatted as "seconds.microseconds"."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
wwj718/edx-video | lms/djangoapps/certificates/migrations/0012_auto__add_field_generatedcertificate_name__add_field_generatedcertific.py | 188 | 6384 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the name/created_date/modified_date columns to GeneratedCertificate."""
        # Adding field 'GeneratedCertificate.name'
        db.add_column('certificates_generatedcertificate', 'name',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
                      keep_default=False)

        # Adding field 'GeneratedCertificate.created_date'
        db.add_column('certificates_generatedcertificate', 'created_date',
                      self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now_add=True, blank=True),
                      keep_default=False)

        # Adding field 'GeneratedCertificate.modified_date'
        db.add_column('certificates_generatedcertificate', 'modified_date',
                      self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse of forwards(): drop the three added columns."""
        # Deleting field 'GeneratedCertificate.name'
        db.delete_column('certificates_generatedcertificate', 'name')

        # Deleting field 'GeneratedCertificate.created_date'
        db.delete_column('certificates_generatedcertificate', 'created_date')

        # Deleting field 'GeneratedCertificate.modified_date'
        db.delete_column('certificates_generatedcertificate', 'modified_date')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 |
paninetworks/neutron | neutron/api/versions.py | 23 | 1940 | # Copyright 2011 Citrix Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
from oslo_log import log as logging
import webob.dec
from neutron.api.views import versions as versions_view
from neutron import wsgi
LOG = logging.getLogger(__name__)
class Versions(object):
    """WSGI application that lists the available Neutron API versions."""

    @classmethod
    def factory(cls, global_config, **local_config):
        # Paste deploy factory entry point (config arguments are unused).
        return cls()

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Respond to a request for all Neutron API versions."""
        version_objs = [
            {
                "id": "v2.0",
                "status": "CURRENT",
            },
        ]

        if req.path != '/':
            # Only the root path lists versions; everything else is a 404,
            # with the message translated to the client's preferred language.
            language = req.best_match_language()
            msg = _('Unknown API version specified')
            msg = oslo_i18n.translate(msg, language)
            return webob.exc.HTTPNotFound(explanation=msg)

        builder = versions_view.get_view_builder(req)
        versions = [builder.build(version) for version in version_objs]
        response = dict(versions=versions)
        metadata = {}

        # Serialize in whichever content type the client negotiated.
        content_type = req.best_match_content_type()
        body = (wsgi.Serializer(metadata=metadata).
                serialize(response, content_type))

        response = webob.Response()
        response.content_type = content_type
        response.body = body

        return response
| apache-2.0 |
keithhamilton/blackmaas | lib/python2.7/site-packages/pip/_vendor/colorama/win32.py | 451 | 4833 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
from ctypes import wintypes
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", wintypes._COORD),
            ("dwCursorPosition", wintypes._COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", wintypes._COORD),
        ]
        def __str__(self):
            # Render all fields in declaration order, for debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
wintypes._COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given stream handle."""
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        # NOTE(review): `success` is ignored; on failure csbi stays zero-filled.
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the current text attributes (colors) on the console handle."""
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position):
        """Move the console cursor to ``position``, given in 1-based ANSI (row, column) order."""
        position = wintypes._COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
        # Adjust for viewport's scroll position
        sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
        adjusted_position.Y += sr.Top
        adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write ``char`` ``length`` times starting at ``start``; return the count written."""
        handle = handles[stream_id]
        char = c_char(char)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        # Sets `length` cells' attributes starting at `start`; returns a BOOL.
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
| bsd-3-clause |
openpgh/askpgh | askbot/migrations/0056_transplant_answer_count_field.py | 17 | 26939 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the answer_count column (default 0) to askbot_thread."""
        # Adding field 'Thread.answer_count'
        db.add_column('askbot_thread', 'answer_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
    def backwards(self, orm):
        """Reverse step: drop the ``answer_count`` column again."""
        # Deleting field 'Thread.answer_count'
        db.delete_column('askbot_thread', 'answer_count')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
SteveNguyen/poppy-software | poppytools/sensor/inertial_unit.py | 1 | 2759 | import serial
import threading
import time
# Per-axis gyroscope zero-rate offsets (raw sensor units), subtracted from
# every calibrated reading in Imu.run().  The original `=- 26.691` spacing
# read like a (nonexistent) `-=` operator; values are unchanged — X and Y
# offsets are negative, Z is positive.
GYRO_X_CAL = -26.691
GYRO_Y_CAL = -49.95
GYRO_Z_CAL = 25.779
class Imu(threading.Thread):
    """Background reader for a serial inertial measurement unit.

    Continuously parses the board's text protocol (one '#XXX=v1,v2,v3'
    frame per line) and keeps the latest accelerometer, gyroscope,
    magnetometer and tilt (yaw/pitch/roll) readings available as
    :class:`Vector` attributes, updated in place.
    """

    def __init__(self, port, baudrate = 57600):
        # Daemon thread: must not keep the interpreter alive on exit.
        threading.Thread.__init__(self)
        self.daemon = True
        # Opens the serial link immediately (may raise serial.SerialException).
        self.s = serial.Serial(port, baudrate)

        # Latest readings; mutated in place by run(), read by clients.
        self.acc = Vector()
        self.tilt = Vector()
        self.gyro = Vector()
        self.magneto = Vector()

    def run(self):
        """Read lines forever, dispatching on the 5-character frame prefix."""
        while True:
            l = self.s.readline()
            # We force this to catch up with any potential lag.
            # while self.s.inWaiting():
            #     l = self.s.readline()

            l = l.replace('\r\n', '')

            try:
                # Only the calibrated values and the YPR angles are parsed;
                # the raw-value frames below are intentionally disabled.
                if l[:5] == '#A-C=':
                    l = l.replace('#A-C=', '')
                    self.acc.x, self.acc.y, self.acc.z = map(float, l.split(','))
                elif l[:5] == '#G-C=':
                    l = l.replace('#G-C=', '')
                    self.gyro.x, self.gyro.y, self.gyro.z = map(float, l.split(','))
                    # Experimental zero-rate calibration: subtract the
                    # per-axis offsets measured for this board.
                    self.gyro.x -= GYRO_X_CAL
                    self.gyro.y -= GYRO_Y_CAL
                    self.gyro.z -= GYRO_Z_CAL
                elif l[:5] == '#M-C=':
                    l = l.replace('#M-C=', '')
                    self.magneto.x, self.magneto.y, self.magneto.z = map(float, l.split(','))

                # if l[:5] == '#A-R=':
                #     l = l.replace('#A-R=', '')
                #     self.acc.x, self.acc.y, self.acc.z = map(float, l.split(','))

                # elif l[:5] == '#G-R=':
                #     l = l.replace('#G-R=', '')
                #     self.gyro.x, self.gyro.y, self.gyro.z = map(float, l.split(','))

                # elif l[:5] == '#M-R=':
                #     l = l.replace('#M-R=', '')
                #     self.magneto.x, self.magneto.y, self.magneto.z = map(float, l.split(','))

                elif l[:5] == '#YPR=':
                    l = l.replace('#YPR=', '')
                    self.tilt.x, self.tilt.y, self.tilt.z = map(float, l.split(','))
            except ValueError:
                # Some lines sent by the Arduino just seem incoherent
                # (truncated or garbled frames); skip them silently.
                pass
class Vector(object):
    """Mutable 3-component vector whose coordinates are always floats."""

    def __init__(self, x=0.0, y=0.0, z=0.0):
        # Coerce every component so downstream arithmetic is uniform.
        self.x, self.y, self.z = float(x), float(y), float(z)

    @property
    def json(self):
        """JSON-serialisable form: a plain ``[x, y, z]`` list."""
        return [self.x, self.y, self.z]

    def __repr__(self):
        return '[{}, {}, {}]'.format(*self.json)
if __name__ == '__main__':
    # Manual smoke test: stream readings from the board behind the fixed
    # udev alias at roughly 10 Hz until interrupted.
    imu = Imu('/dev/poppy_imu')
    imu.start()

    while True:
        # Python 2 print statement — this module predates Python 3.
        print imu.acc, imu.gyro, imu.tilt
        time.sleep(0.1)
yiwen-luo/LeetCode | Python/assign-cookies.py | 3 | 1590 | # Time: O(nlogn)
# Space: O(1)
# Assume you are an awesome parent and want to give your children some cookies.
# But, you should give each child at most one cookie. Each child i has a greed factor gi,
# which is the minimum size of a cookie that the child will be content with;
# and each cookie j has a size sj. If sj >= gi, we can assign the cookie j to the child i,
# and the child i will be content.
# Your goal is to maximize the number of your content children and output the maximum number.
#
# Note:
# You may assume the greed factor is always positive.
# You cannot assign more than one cookie to one child.
#
# Example 1:
# Input: [1,2,3], [1,1]
#
# Output: 1
#
# Explanation: You have 3 children and 2 cookies. The greed factors of 3 children are 1, 2, 3.
# And even though you have 2 cookies, since their size is both 1,
# you could only make the child whose greed factor is 1 content.
# You need to output 1.
# Example 2:
# Input: [1,2], [1,2,3]
#
# Output: 2
#
# Explanation: You have 2 children and 3 cookies. The greed factors of 2 children are 1, 2.
# You have 3 cookies and their sizes are big enough to gratify all of the children,
# You need to output 2.
class Solution(object):
    def findContentChildren(self, g, s):
        """
        Greedily satisfy the least greedy remaining child with the
        smallest adequate cookie.

        :type g: List[int]   greed factors, one per child
        :type s: List[int]   cookie sizes
        :rtype: int          maximum number of content children

        Time O(n log n) for the sorts, O(1) extra space.  Both input
        lists are sorted in place (same side effect as the original).
        """
        g.sort()
        s.sort()
        result, i = 0, 0
        # Iterate the cookies directly instead of the Python-2-only
        # `for j in xrange(len(s))` index loop — same order, portable.
        for size in s:
            if i == len(g):
                break          # every child is already satisfied
            if size >= g[i]:
                result += 1
                i += 1
        return result
| mit |
shenyy/lily2-gem5 | src/mem/slicc/symbols/StateMachine.py | 7 | 53345 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util import orderdict
from slicc.symbols.Symbol import Symbol
from slicc.symbols.Var import Var
import slicc.generate.html as html
import re
# Maps a SLICC/C++ parameter type name to the m5 ``Param.*`` class name
# used when emitting the controller's Python SimObject wrapper (see
# StateMachine.printControllerPython).  Types missing here trigger a
# SLICC error at generation time.
python_class_map = {"int": "Int",
                    "std::string": "String",
                    "bool": "Bool",
                    "CacheMemory": "RubyCache",
                    "WireBuffer": "RubyWireBuffer",
                    "Sequencer": "RubySequencer",
                    "DirectoryMemory": "RubyDirectoryMemory",
                    "MemoryControl": "RubyMemoryControl",
                    "DMASequencer": "DMASequencer"
                    }
class StateMachine(Symbol):
    def __init__(self, symtab, ident, location, pairs, config_parameters):
        """Create the state-machine symbol and register one Var per
        config parameter in the symbol table.

        Pointer parameters are stored as ``m_<name>_ptr`` members and
        dereferenced at every use site; value parameters as ``m_<name>``.
        """
        super(StateMachine, self).__init__(symtab, ident, location, pairs)
        self.table = None
        self.config_parameters = config_parameters
        for param in config_parameters:
            if param.pointer:
                var = Var(symtab, param.name, location, param.type_ast.type,
                          "(*m_%s_ptr)" % param.name, {}, self)
            else:
                var = Var(symtab, param.name, location, param.type_ast.type,
                          "m_%s" % param.name, {}, self)
            self.symtab.registerSym(param.name, var)

        # Machine contents, filled in by the add* methods during parsing.
        self.states = orderdict()
        self.events = orderdict()
        self.actions = orderdict()
        self.transitions = []
        self.in_ports = []
        self.functions = []
        self.objects = []

        # Optional special types detected by addType().
        self.TBEType   = None
        self.EntryType = None

        self.message_buffer_names = []
def __repr__(self):
return "[StateMachine: %s]" % self.ident
    def addState(self, state):
        """Register *state*; only legal before buildTable() runs."""
        assert self.table is None
        self.states[state.ident] = state
    def addEvent(self, event):
        """Register *event*; only legal before buildTable() runs."""
        assert self.table is None
        self.events[event.ident] = event
    def addAction(self, action):
        """Register *action*, erroring on a duplicate identifier or
        shorthand; only legal before buildTable() runs."""
        assert self.table is None

        # Check for duplicate action: both the full identifier and the
        # one-letter shorthand must be unique within the machine.
        for other in self.actions.itervalues():
            if action.ident == other.ident:
                action.warning("Duplicate action definition: %s" % action.ident)
                action.error("Duplicate action definition: %s" % action.ident)
            if action.short == other.short:
                other.warning("Duplicate action shorthand: %s" % other.ident)
                other.warning(" shorthand = %s" % other.short)
                action.warning("Duplicate action shorthand: %s" % action.ident)
                action.error(" shorthand = %s" % action.short)

        self.actions[action.ident] = action
    def addTransition(self, trans):
        """Record a transition; duplicates are detected later in buildTable()."""
        assert self.table is None
        self.transitions.append(trans)
    def addInPort(self, var):
        """Record an in_port variable for later code generation."""
        self.in_ports.append(var)
    def addFunc(self, func):
        """Record a machine-level function and expose it in the symbol table."""
        # register func in the symbol table
        self.symtab.registerSym(str(func), func)
        self.functions.append(func)
    def addObject(self, obj):
        """Record a machine-level object declaration."""
        self.objects.append(obj)
    def addType(self, type):
        """Note special per-machine types: the single ``<ident>_TBE``
        transaction-buffer type and the single AbstractCacheEntry type."""
        type_ident = '%s' % type.c_ident

        if type_ident == "%s_TBE" %self.ident:
            if self.TBEType != None:
                self.error("Multiple Transaction Buffer types in a " \
                           "single machine.");
            self.TBEType = type

        elif "interface" in type and "AbstractCacheEntry" == type["interface"]:
            if self.EntryType != None:
                self.error("Multiple AbstractCacheEntry types in a " \
                           "single machine.");
            self.EntryType = type
# Needs to be called before accessing the table
def buildTable(self):
assert self.table is None
table = {}
for trans in self.transitions:
# Track which actions we touch so we know if we use them
# all -- really this should be done for all symbols as
# part of the symbol table, then only trigger it for
# Actions, States, Events, etc.
for action in trans.actions:
action.used = True
index = (trans.state, trans.event)
if index in table:
table[index].warning("Duplicate transition: %s" % table[index])
trans.error("Duplicate transition: %s" % trans)
table[index] = trans
# Look at all actions to make sure we used them all
for action in self.actions.itervalues():
if not action.used:
error_msg = "Unused action: %s" % action.ident
if "desc" in action:
error_msg += ", " + action.desc
action.warning(error_msg)
self.table = table
def writeCodeFiles(self, path):
    """Emit every generated source file for this machine under *path*."""
    # The order matters only for readability of build logs; each emitter
    # writes an independent file.
    emitters = (
        self.printControllerPython,
        self.printControllerHH,
        self.printControllerCC,
        self.printCSwitch,
        self.printCWakeup,
        self.printProfilerCC,
        self.printProfilerHH,
        self.printProfileDumperCC,
        self.printProfileDumperHH,
    )
    for emit in emitters:
        emit(path)
def printControllerPython(self, path):
    """Write the m5 SimObject Python wrapper ($ident_Controller.py) that
    exposes this machine's SLICC config parameters to the param system."""
    # codeFormatter substitutes $name / ${{expr}} from the calling
    # frame's locals, so the locals below are referenced by templates.
    code = self.symtab.codeFormatter()
    ident = self.ident
    py_ident = "%s_Controller" % ident
    c_ident = "%s_Controller" % self.ident
    code('''
from m5.params import *
from m5.SimObject import SimObject
from Controller import RubyController
class $py_ident(RubyController):
type = '$py_ident'
''')
    code.indent()
    # One Param declaration per SLICC config parameter, mapping the C++
    # parameter type to a Python Param class via the module-level
    # python_class_map table (defined elsewhere in this file).
    for param in self.config_parameters:
        dflt_str = ''
        if param.default is not None:
            dflt_str = str(param.default) + ', '
        # NOTE(review): dict.has_key() is Python-2-only.
        if python_class_map.has_key(param.type_ast.type.c_ident):
            python_type = python_class_map[param.type_ast.type.c_ident]
            code('${{param.name}} = Param.${{python_type}}(${dflt_str}"")')
        else:
            self.error("Unknown c++ to python class conversion for c++ " \
                       "type: '%s'. Please update the python_class_map " \
                       "in StateMachine.py", param.type_ast.type.c_ident)
    code.dedent()
    code.write(path, '%s.py' % py_ident)
def printControllerHH(self, path):
    '''Output the C++ header ($c_ident.hh) declaring the generated
    controller class: config members, transition workers, per-action
    methods, and pointers for every declared object.'''
    code = self.symtab.codeFormatter()
    ident = self.ident
    c_ident = "%s_Controller" % self.ident
    # Collected here, consumed by other printers (names of all
    # MessageBuffer members).
    self.message_buffer_names = []
    code('''
/** \\file $c_ident.hh
*
* Auto generated C++ code started by $__file__:$__line__
* Created by slicc definition of Module "${{self.short}}"
*/
#ifndef __${ident}_CONTROLLER_HH__
#define __${ident}_CONTROLLER_HH__
#include <iostream>
#include <sstream>
#include <string>
#include "mem/protocol/${ident}_ProfileDumper.hh"
#include "mem/protocol/${ident}_Profiler.hh"
#include "mem/protocol/TransitionResult.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "params/$c_ident.hh"
''')
    # Include each non-primitive object type's header exactly once.
    seen_types = set()
    for var in self.objects:
        if var.type.ident not in seen_types and not var.type.isPrimitive:
            code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
        seen_types.add(var.type.ident)
    # for adding information to the protocol debug trace
    code('''
extern std::stringstream ${ident}_transitionComment;
class $c_ident : public AbstractController
{
// the coherence checker needs to call isBlockExclusive() and isBlockShared()
// making the Chip a friend class is an easy way to do this for now
public:
typedef ${c_ident}Params Params;
$c_ident(const Params *p);
static int getNumControllers();
void init();
MessageBuffer* getMandatoryQueue() const;
const int & getVersion() const;
const std::string toString() const;
const std::string getName() const;
void stallBuffer(MessageBuffer* buf, Address addr);
void wakeUpBuffers(Address addr);
void wakeUpAllBuffers();
void initNetworkPtr(Network* net_ptr) { m_net_ptr = net_ptr; }
void print(std::ostream& out) const;
void printConfig(std::ostream& out) const;
void wakeup();
void printStats(std::ostream& out) const;
void clearStats();
void blockOnQueue(Address addr, MessageBuffer* port);
void unblock(Address addr);
void recordCacheTrace(int cntrl, CacheRecorder* tr);
Sequencer* getSequencer() const;
private:
''')
    code.indent()
    # added by SS: one member per SLICC config parameter
    for param in self.config_parameters:
        if param.pointer:
            code('${{param.type_ast.type}}* m_${{param.ident}}_ptr;')
        else:
            code('${{param.type_ast.type}} m_${{param.ident}};')
    # doTransition's signature depends on whether this machine declares
    # a cache-entry type and/or a TBE type.
    code('''
int m_number_of_TBEs;
TransitionResult doTransition(${ident}_Event event,
''')
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}* m_cache_entry_ptr,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}* m_tbe_ptr,
''')
    code('''
const Address& addr);
TransitionResult doTransitionWorker(${ident}_Event event,
${ident}_State state,
${ident}_State& next_state,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}*& m_tbe_ptr,
''')
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}*& m_cache_entry_ptr,
''')
    code('''
const Address& addr);
std::string m_name;
int m_transitions_per_cycle;
int m_buffer_size;
int m_recycle_latency;
std::map<std::string, std::string> m_cfg;
NodeID m_version;
Network* m_net_ptr;
MachineID m_machineID;
bool m_is_blocking;
std::map<Address, MessageBuffer*> m_block_map;
typedef std::vector<MessageBuffer*> MsgVecType;
typedef std::map< Address, MsgVecType* > WaitingBufType;
WaitingBufType m_waiting_buffers;
int m_max_in_port_rank;
int m_cur_in_port_rank;
static ${ident}_ProfileDumper s_profileDumper;
${ident}_Profiler m_profiler;
static int m_num_controllers;
// Internal functions
''')
    # Declare every SLICC-defined helper function that has a prototype.
    for func in self.functions:
        proto = func.prototype
        if proto:
            code('$proto')
    if self.EntryType != None:
        code('''
// Set and Reset for cache_entry variable
void set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry);
void unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr);
''')
    if self.TBEType != None:
        code('''
// Set and Reset for tbe variable
void set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${ident}_TBE* m_new_tbe);
void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr);
''')
    code('''
// Actions
''')
    # Action method signatures mirror doTransitionWorker: they take the
    # TBE and/or cache-entry pointer by reference only when the machine
    # declares those types.
    if self.TBEType != None and self.EntryType != None:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr);')
    elif self.TBEType != None:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, const Address& addr);')
    elif self.EntryType != None:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr);')
    else:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(const Address& addr);')
    # the controller internal variables
    code('''
// Objects
''')
    for var in self.objects:
        th = var.get("template_hack", "")
        code('${{var.type.c_ident}}$th* m_${{var.c_ident}}_ptr;')
        if var.type.ident == "MessageBuffer":
            self.message_buffer_names.append("m_%s_ptr" % var.c_ident)
    code.dedent()
    code('};')
    code('#endif // __${ident}_CONTROLLER_H__')
    code.write(path, '%s.hh' % c_ident)
def printControllerCC(self, path):
    '''Output the C++ implementation ($c_ident.cc): constructor, init()
    object construction, utility methods, stat printers, and one method
    body per SLICC action.'''
    code = self.symtab.codeFormatter()
    ident = self.ident
    c_ident = "%s_Controller" % self.ident
    code('''
/** \\file $c_ident.cc
*
* Auto generated C++ code started by $__file__:$__line__
* Created by slicc definition of Module "${{self.short}}"
*/
#include <sys/types.h>
#include <unistd.h>
#include <cassert>
#include <sstream>
#include <string>
#include "base/compiler.hh"
#include "base/cprintf.hh"
#include "debug/RubyGenerated.hh"
#include "debug/RubySlicc.hh"
#include "mem/protocol/${ident}_Controller.hh"
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/RubySlicc_includes.hh"
#include "mem/ruby/system/System.hh"
using namespace std;
''')
    # include object classes (each non-primitive type header once)
    seen_types = set()
    for var in self.objects:
        if var.type.ident not in seen_types and not var.type.isPrimitive:
            code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
        seen_types.add(var.type.ident)
    code('''
$c_ident *
${c_ident}Params::create()
{
return new $c_ident(this);
}
int $c_ident::m_num_controllers = 0;
${ident}_ProfileDumper $c_ident::s_profileDumper;
// for adding information to the protocol debug trace
stringstream ${ident}_transitionComment;
#define APPEND_TRANSITION_COMMENT(str) (${ident}_transitionComment << str)
/** \\brief constructor */
$c_ident::$c_ident(const Params *p)
: AbstractController(p)
{
m_version = p->version;
m_transitions_per_cycle = p->transitions_per_cycle;
m_buffer_size = p->buffer_size;
m_recycle_latency = p->recycle_latency;
m_number_of_TBEs = p->number_of_TBEs;
m_is_blocking = false;
m_name = "${ident}";
''')
    #
    # max_port_rank is used to size vectors and thus should be one plus the
    # largest port rank
    #
    max_port_rank = self.in_ports[0].pairs["max_port_rank"] + 1
    code(' m_max_in_port_rank = $max_port_rank;')
    code.indent()
    #
    # After initializing the universal machine parameters, initialize
    # this machine's config parameters.  Also determine if these
    # configuration params include a sequencer.  This information will be
    # used later for connecting the sequencer back to the L1 cache
    # controller.
    #
    contains_dma_sequencer = False
    sequencers = []
    for param in self.config_parameters:
        if param.name == "dma_sequencer":
            contains_dma_sequencer = True
        elif re.compile("sequencer").search(param.name):
            sequencers.append(param.name)
        if param.pointer:
            code('m_${{param.name}}_ptr = p->${{param.name}};')
        else:
            code('m_${{param.name}} = p->${{param.name}};')
    #
    # For the l1 cache controller, add the special atomic support which
    # includes passing the sequencer a pointer to the controller.
    #
    # NOTE(review): both branches emit identical code; only the error
    # check distinguishes the L1Cache case.
    if self.ident == "L1Cache":
        if not sequencers:
            self.error("The L1Cache controller must include the sequencer " \
                       "configuration parameter")
        for seq in sequencers:
            code('''
m_${{seq}}_ptr->setController(this);
''')
    else:
        for seq in sequencers:
            code('''
m_${{seq}}_ptr->setController(this);
''')
    #
    # For the DMA controller, pass the sequencer a pointer to the
    # controller.
    #
    if self.ident == "DMA":
        if not contains_dma_sequencer:
            self.error("The DMA controller must include the sequencer " \
                       "configuration parameter")
        code('''
m_dma_sequencer_ptr->setController(this);
''')
    code('m_num_controllers++;')
    # The mandatory queue is built in the constructor (not init) so the
    # sequencer can reference it immediately.
    for var in self.objects:
        if var.ident.find("mandatoryQueue") >= 0:
            code('m_${{var.c_ident}}_ptr = new ${{var.type.c_ident}}();')
    code.dedent()
    code('''
}
void
$c_ident::init()
{
MachineType machine_type;
int base;
m_machineID.type = MachineType_${ident};
m_machineID.num = m_version;
// initialize objects
m_profiler.setVersion(m_version);
s_profileDumper.registerProfiler(&m_profiler);
''')
    code.indent()
    # Construct every declared object.  Network ports are fetched from
    # the network; everything else is new'd (or produced by a factory).
    for var in self.objects:
        vtype = var.type
        vid = "m_%s_ptr" % var.c_ident
        if "network" not in var:
            # Not a network port object
            if "primitive" in vtype:
                code('$vid = new ${{vtype.c_ident}};')
                if "default" in var:
                    code('(*$vid) = ${{var["default"]}};')
            else:
                # Normal Object
                # added by SS
                if "factory" in var:
                    code('$vid = ${{var["factory"]}};')
                elif var.ident.find("mandatoryQueue") < 0:
                    th = var.get("template_hack", "")
                    expr = "%s = new %s%s" % (vid, vtype.c_ident, th)
                    args = ""
                    if "non_obj" not in vtype and not vtype.isEnumeration:
                        # TBETables are sized by the config parameter.
                        if expr.find("TBETable") >= 0:
                            args = "m_number_of_TBEs"
                        else:
                            args = var.get("constructor_hack", "")
                    code('$expr($args);')
                code('assert($vid != NULL);')
                if "default" in var:
                    code('*$vid = ${{var["default"]}}; // Object default')
                elif "default" in vtype:
                    comment = "Type %s default" % vtype.ident
                    code('*$vid = ${{vtype["default"]}}; // $comment')
                # Set ordering
                if "ordered" in var and "trigger_queue" not in var:
                    # A buffer
                    code('$vid->setOrdering(${{var["ordered"]}});')
                # Set randomization
                if "random" in var:
                    # A buffer
                    code('$vid->setRandomization(${{var["random"]}});')
                # Set Priority
                if vtype.isBuffer and \
                   "rank" in var and "trigger_queue" not in var:
                    code('$vid->setPriority(${{var["rank"]}});')
        else:
            # Network port object
            network = var["network"]
            ordered = var["ordered"]
            vnet = var["virtual_network"]
            vnet_type = var["vnet_type"]
            assert var.machine is not None
            code('''
machine_type = string_to_MachineType("${{var.machine.ident}}");
base = MachineType_base_number(machine_type);
$vid = m_net_ptr->get${network}NetQueue(m_version + base, $ordered, $vnet, "$vnet_type");
''')
            code('assert($vid != NULL);')
            # Set ordering
            if "ordered" in var:
                # A buffer
                code('$vid->setOrdering(${{var["ordered"]}});')
            # Set randomization
            if "random" in var:
                # A buffer
                code('$vid->setRandomization(${{var["random"]}});')
            # Set Priority
            # NOTE(review): the emitted C++ statement below lacks a
            # trailing ';' (compare the non-network setPriority above);
            # it only compiles because network ports never carry "rank"
            # in practice -- confirm before relying on it.
            if "rank" in var:
                code('$vid->setPriority(${{var["rank"]}})')
            # Set buffer size
            if vtype.isBuffer:
                code('''
if (m_buffer_size > 0) {
$vid->resize(m_buffer_size);
}
''')
            # set description (may be overriden later by port def)
            code('''
$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.c_ident}}]");
''')
        if vtype.isBuffer:
            if "recycle_latency" in var:
                code('$vid->setRecycleLatency(${{var["recycle_latency"]}});')
            else:
                code('$vid->setRecycleLatency(m_recycle_latency);')
    # Set the queue consumers
    code()
    for port in self.in_ports:
        code('${{port.code}}.setConsumer(this);')
    # Set the queue descriptions
    code()
    for port in self.in_ports:
        code('${{port.code}}.setDescription("[Version " + to_string(m_version) + ", $ident, $port]");')
    # Initialize the transition profiling
    code()
    for trans in self.transitions:
        # Figure out if we stall
        stall = False
        for action in trans.actions:
            if action.ident == "z_stall":
                stall = True
        # Only possible if it is not a 'z' case
        if not stall:
            state = "%s_State_%s" % (self.ident, trans.state.ident)
            event = "%s_Event_%s" % (self.ident, trans.event.ident)
            code('m_profiler.possibleTransition($state, $event);')
    code.dedent()
    code('}')
    # Determine the mandatory queue accessor target: any in_port whose
    # code references the mandatory queue pointer.
    has_mandatory_q = False
    for port in self.in_ports:
        if port.code.find("mandatoryQueue_ptr") >= 0:
            has_mandatory_q = True
    if has_mandatory_q:
        mq_ident = "m_%s_mandatoryQueue_ptr" % self.ident
    else:
        mq_ident = "NULL"
    # getSequencer() returns the "sequencer" config parameter, if any.
    seq_ident = "NULL"
    for param in self.config_parameters:
        if param.name == "sequencer":
            assert(param.pointer)
            seq_ident = "m_%s_ptr" % param.name
    code('''
int
$c_ident::getNumControllers()
{
return m_num_controllers;
}
MessageBuffer*
$c_ident::getMandatoryQueue() const
{
return $mq_ident;
}
Sequencer*
$c_ident::getSequencer() const
{
return $seq_ident;
}
const int &
$c_ident::getVersion() const
{
return m_version;
}
const string
$c_ident::toString() const
{
return "$c_ident";
}
const string
$c_ident::getName() const
{
return m_name;
}
void
$c_ident::stallBuffer(MessageBuffer* buf, Address addr)
{
if (m_waiting_buffers.count(addr) == 0) {
MsgVecType* msgVec = new MsgVecType;
msgVec->resize(m_max_in_port_rank, NULL);
m_waiting_buffers[addr] = msgVec;
}
(*(m_waiting_buffers[addr]))[m_cur_in_port_rank] = buf;
}
void
$c_ident::wakeUpBuffers(Address addr)
{
if (m_waiting_buffers.count(addr) > 0) {
//
// Wake up all possible lower rank (i.e. lower priority) buffers that could
// be waiting on this message.
//
for (int in_port_rank = m_cur_in_port_rank - 1;
in_port_rank >= 0;
in_port_rank--) {
if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
(*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
}
}
delete m_waiting_buffers[addr];
m_waiting_buffers.erase(addr);
}
}
void
$c_ident::wakeUpAllBuffers()
{
//
// Wake up all possible buffers that could be waiting on any message.
//
std::vector<MsgVecType*> wokeUpMsgVecs;
if(m_waiting_buffers.size() > 0) {
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
buf_iter != m_waiting_buffers.end();
++buf_iter) {
for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
vec_iter != buf_iter->second->end();
++vec_iter) {
if (*vec_iter != NULL) {
(*vec_iter)->reanalyzeAllMessages();
}
}
wokeUpMsgVecs.push_back(buf_iter->second);
}
for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
wb_iter != wokeUpMsgVecs.end();
++wb_iter) {
delete (*wb_iter);
}
m_waiting_buffers.clear();
}
}
void
$c_ident::blockOnQueue(Address addr, MessageBuffer* port)
{
m_is_blocking = true;
m_block_map[addr] = port;
}
void
$c_ident::unblock(Address addr)
{
m_block_map.erase(addr);
if (m_block_map.size() == 0) {
m_is_blocking = false;
}
}
void
$c_ident::print(ostream& out) const
{
out << "[$c_ident " << m_version << "]";
}
void
$c_ident::printConfig(ostream& out) const
{
out << "$c_ident config: " << m_name << endl;
out << " version: " << m_version << endl;
map<string, string>::const_iterator it;
for (it = m_cfg.begin(); it != m_cfg.end(); it++)
out << " " << it->first << ": " << it->second << endl;
}
void
$c_ident::printStats(ostream& out) const
{
''')
    #
    # Cache and Memory Controllers have specific profilers associated with
    # them.  Print out these stats before dumping state transition stats.
    #
    for param in self.config_parameters:
        if param.type_ast.type.ident == "CacheMemory" or \
           param.type_ast.type.ident == "DirectoryMemory" or \
           param.type_ast.type.ident == "MemoryControl":
            assert(param.pointer)
            code(' m_${{param.ident}}_ptr->printStats(out);')
    code('''
if (m_version == 0) {
s_profileDumper.dumpStats(out);
}
}
void $c_ident::clearStats() {
''')
    #
    # Cache and Memory Controllers have specific profilers associated with
    # them.  These stats must be cleared too.
    #
    for param in self.config_parameters:
        if param.type_ast.type.ident == "CacheMemory" or \
           param.type_ast.type.ident == "MemoryControl":
            assert(param.pointer)
            code(' m_${{param.ident}}_ptr->clearStats();')
    code('''
m_profiler.clearStats();
}
''')
    if self.EntryType != None:
        code('''
// Set and Reset for cache_entry variable
void
$c_ident::set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry)
{
m_cache_entry_ptr = (${{self.EntryType.c_ident}}*)m_new_cache_entry;
}
void
$c_ident::unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr)
{
m_cache_entry_ptr = 0;
}
''')
    if self.TBEType != None:
        code('''
// Set and Reset for tbe variable
void
$c_ident::set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.TBEType.c_ident}}* m_new_tbe)
{
m_tbe_ptr = m_new_tbe;
}
void
$c_ident::unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr)
{
m_tbe_ptr = NULL;
}
''')
    code('''
void
$c_ident::recordCacheTrace(int cntrl, CacheRecorder* tr)
{
''')
    #
    # Record cache contents for all associated caches.
    #
    code.indent()
    for param in self.config_parameters:
        if param.type_ast.type.ident == "CacheMemory":
            assert(param.pointer)
            code('m_${{param.ident}}_ptr->recordCacheContents(cntrl, tr);')
    code.dedent()
    code('''
}
// Actions
''')
    # Emit one method body per action that has generated C++ code; the
    # signature shape matches the declarations in printControllerHH.
    if self.TBEType != None and self.EntryType != None:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    elif self.TBEType != None:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, const Address& addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    elif self.EntryType != None:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    else:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(const Address& addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    # SLICC-defined helper function bodies.
    for func in self.functions:
        code(func.generateCode())
    code.write(path, "%s.cc" % c_ident)
def printCWakeup(self, path):
    '''Output the wakeup loop for the events: polls every in_port in
    rank order, bounded by m_transitions_per_cycle per wakeup.'''
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#include <sys/types.h>
#include <unistd.h>
#include <cassert>
#include "base/misc.hh"
#include "debug/RubySlicc.hh"
#include "mem/protocol/${ident}_Controller.hh"
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/RubySlicc_includes.hh"
#include "mem/ruby/system/System.hh"
using namespace std;
void
${ident}_Controller::wakeup()
{
int counter = 0;
while (true) {
// Some cases will put us into an infinite loop without this limit
assert(counter <= m_transitions_per_cycle);
if (counter == m_transitions_per_cycle) {
// Count how often we are fully utilized
g_system_ptr->getProfiler()->controllerBusy(m_machineID);
// Wakeup in another cycle and try again
g_eventQueue_ptr->scheduleEvent(this, 1);
break;
}
''')
    code.indent()
    code.indent()
    # InPorts: emit each port's polling code, recording its rank first
    # so stallBuffer()/wakeUpBuffers() know the current port's priority.
    #
    for port in self.in_ports:
        code.indent()
        code('// ${ident}InPort $port')
        # NOTE(review): dict.has_key() is Python-2-only.
        if port.pairs.has_key("rank"):
            code('m_cur_in_port_rank = ${{port.pairs["rank"]}};')
        else:
            code('m_cur_in_port_rank = 0;')
        code('${{port["c_code_in_port"]}}')
        code.dedent()
        code('')
    code.dedent()
    code.dedent()
    code('''
break; // If we got this far, we have nothing left todo
}
// g_eventQueue_ptr->scheduleEvent(this, 1);
}
''')
    code.write(path, "%s_Wakeup.cc" % self.ident)
def printCSwitch(self, path):
    '''Output the doTransition/doTransitionWorker implementation: a
    switch over HASH_FUN(state, event) with one case per unique
    transition code block.'''
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#include <cassert>
#include "base/misc.hh"
#include "base/trace.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubyGenerated.hh"
#include "mem/protocol/${ident}_Controller.hh"
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
#define HASH_FUN(state, event) ((int(state)*${ident}_Event_NUM)+int(event))
#define GET_TRANSITION_COMMENT() (${ident}_transitionComment.str())
#define CLEAR_TRANSITION_COMMENT() (${ident}_transitionComment.str(""))
TransitionResult
${ident}_Controller::doTransition(${ident}_Event event,
''')
    # Signature varies with the machine's declared entry/TBE types,
    # mirroring the declarations emitted by printControllerHH.
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}* m_cache_entry_ptr,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}* m_tbe_ptr,
''')
    code('''
const Address &addr)
{
''')
    if self.TBEType != None and self.EntryType != None:
        code('${ident}_State state = getState(m_tbe_ptr, m_cache_entry_ptr, addr);')
    elif self.TBEType != None:
        code('${ident}_State state = getState(m_tbe_ptr, addr);')
    elif self.EntryType != None:
        code('${ident}_State state = getState(m_cache_entry_ptr, addr);')
    else:
        code('${ident}_State state = getState(addr);')
    code('''
${ident}_State next_state = state;
DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %s\\n",
*this,
g_eventQueue_ptr->getTime(),
${ident}_State_to_string(state),
${ident}_Event_to_string(event),
addr);
TransitionResult result =
''')
    if self.TBEType != None and self.EntryType != None:
        code('doTransitionWorker(event, state, next_state, m_tbe_ptr, m_cache_entry_ptr, addr);')
    elif self.TBEType != None:
        code('doTransitionWorker(event, state, next_state, m_tbe_ptr, addr);')
    elif self.EntryType != None:
        code('doTransitionWorker(event, state, next_state, m_cache_entry_ptr, addr);')
    else:
        code('doTransitionWorker(event, state, next_state, addr);')
    code('''
if (result == TransitionResult_Valid) {
DPRINTF(RubyGenerated, "next_state: %s\\n",
${ident}_State_to_string(next_state));
m_profiler.countTransition(state, event);
DPRINTFR(ProtocolTrace, "%15d %3s %10s%20s %6s>%-6s %s %s\\n",
curTick(), m_version, "${ident}",
${ident}_Event_to_string(event),
${ident}_State_to_string(state),
${ident}_State_to_string(next_state),
addr, GET_TRANSITION_COMMENT());
CLEAR_TRANSITION_COMMENT();
''')
    # On a valid transition, commit next_state and update permissions.
    if self.TBEType != None and self.EntryType != None:
        code('setState(m_tbe_ptr, m_cache_entry_ptr, addr, next_state);')
        code('setAccessPermission(m_cache_entry_ptr, addr, next_state);')
    elif self.TBEType != None:
        code('setState(m_tbe_ptr, addr, next_state);')
        code('setAccessPermission(addr, next_state);')
    elif self.EntryType != None:
        code('setState(m_cache_entry_ptr, addr, next_state);')
        code('setAccessPermission(m_cache_entry_ptr, addr, next_state);')
    else:
        code('setState(addr, next_state);')
        code('setAccessPermission(addr, next_state);')
    code('''
} else if (result == TransitionResult_ResourceStall) {
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\\n",
curTick(), m_version, "${ident}",
${ident}_Event_to_string(event),
${ident}_State_to_string(state),
${ident}_State_to_string(next_state),
addr, "Resource Stall");
} else if (result == TransitionResult_ProtocolStall) {
DPRINTF(RubyGenerated, "stalling\\n");
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\\n",
curTick(), m_version, "${ident}",
${ident}_Event_to_string(event),
${ident}_State_to_string(state),
${ident}_State_to_string(next_state),
addr, "Protocol Stall");
}
return result;
}
TransitionResult
${ident}_Controller::doTransitionWorker(${ident}_Event event,
${ident}_State state,
${ident}_State& next_state,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}*& m_tbe_ptr,
''')
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}*& m_cache_entry_ptr,
''')
    code('''
const Address& addr)
{
switch(HASH_FUN(state, event)) {
''')
    # This map will allow suppress generating duplicate code
    cases = orderdict()
    for trans in self.transitions:
        case_string = "%s_State_%s, %s_Event_%s" % \
            (self.ident, trans.state.ident, self.ident, trans.event.ident)
        case = self.symtab.codeFormatter()
        # Only set next_state if it changes
        if trans.state != trans.nextState:
            ns_ident = trans.nextState.ident
            case('next_state = ${ident}_State_${ns_ident};')
        actions = trans.actions
        # Check for resources
        case_sorter = []
        res = trans.resources
        for key,val in res.iteritems():
            if key.type.ident != "DNUCAStopTable":
                val = '''
if (!%s.areNSlotsAvailable(%s))
return TransitionResult_ResourceStall;
''' % (key.code, val)
            case_sorter.append(val)
        # Emit the code sequences in a sorted order.  This makes the
        # output deterministic (without this the output order can vary
        # since Map's keys() on a vector of pointers is not deterministic
        for c in sorted(case_sorter):
            case("$c")
        # Figure out if we stall
        stall = False
        for action in actions:
            if action.ident == "z_stall":
                stall = True
                break
        if stall:
            case('return TransitionResult_ProtocolStall;')
        else:
            # Invoke each action; argument list matches the action
            # method signatures for this machine's entry/TBE types.
            if self.TBEType != None and self.EntryType != None:
                for action in actions:
                    case('${{action.ident}}(m_tbe_ptr, m_cache_entry_ptr, addr);')
            elif self.TBEType != None:
                for action in actions:
                    case('${{action.ident}}(m_tbe_ptr, addr);')
            elif self.EntryType != None:
                for action in actions:
                    case('${{action.ident}}(m_cache_entry_ptr, addr);')
            else:
                for action in actions:
                    case('${{action.ident}}(addr);')
            case('return TransitionResult_Valid;')
        case = str(case)
        # Look to see if this transition code is unique.
        if case not in cases:
            cases[case] = []
        cases[case].append(case_string)
    # Walk through all of the unique code blocks and spit out the
    # corresponding case statement elements
    for case,transitions in cases.iteritems():
        # Iterate over all the multiple transitions that share
        # the same code
        for trans in transitions:
            code(' case HASH_FUN($trans):')
        code(' $case')
    code('''
default:
fatal("Invalid transition\\n"
"%s time: %d addr: %s event: %s state: %s\\n",
name(), g_eventQueue_ptr->getTime(), addr, event, state);
}
return TransitionResult_Valid;
}
''')
    code.write(path, "%s_Transitions.cc" % self.ident)
def printProfileDumperHH(self, path):
    """Write the header for the per-machine profile dumper, which
    aggregates the stats of every controller instance of this type."""
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#ifndef __${ident}_PROFILE_DUMPER_HH__
#define __${ident}_PROFILE_DUMPER_HH__
#include <cassert>
#include <iostream>
#include <vector>
#include "${ident}_Event.hh"
#include "${ident}_Profiler.hh"
typedef std::vector<${ident}_Profiler *> ${ident}_profilers;
class ${ident}_ProfileDumper
{
public:
${ident}_ProfileDumper();
void registerProfiler(${ident}_Profiler* profiler);
void dumpStats(std::ostream& out) const;
private:
${ident}_profilers m_profilers;
};
#endif // __${ident}_PROFILE_DUMPER_HH__
''')
    code.write(path, "%s_ProfileDumper.hh" % self.ident)
def printProfileDumperCC(self, path):
    """Write the profile dumper implementation: sums event and
    transition counts across all registered per-controller profilers."""
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#include "mem/protocol/${ident}_ProfileDumper.hh"
${ident}_ProfileDumper::${ident}_ProfileDumper()
{
}
void
${ident}_ProfileDumper::registerProfiler(${ident}_Profiler* profiler)
{
m_profilers.push_back(profiler);
}
void
${ident}_ProfileDumper::dumpStats(std::ostream& out) const
{
out << " --- ${ident} ---\\n";
out << " - Event Counts -\\n";
for (${ident}_Event event = ${ident}_Event_FIRST;
event < ${ident}_Event_NUM;
++event) {
out << (${ident}_Event) event << " [";
uint64 total = 0;
for (int i = 0; i < m_profilers.size(); i++) {
out << m_profilers[i]->getEventCount(event) << " ";
total += m_profilers[i]->getEventCount(event);
}
out << "] " << total << "\\n";
}
out << "\\n";
out << " - Transitions -\\n";
for (${ident}_State state = ${ident}_State_FIRST;
state < ${ident}_State_NUM;
++state) {
for (${ident}_Event event = ${ident}_Event_FIRST;
event < ${ident}_Event_NUM;
++event) {
if (m_profilers[0]->isPossible(state, event)) {
out << (${ident}_State) state << " "
<< (${ident}_Event) event << " [";
uint64 total = 0;
for (int i = 0; i < m_profilers.size(); i++) {
out << m_profilers[i]->getTransitionCount(state, event) << " ";
total += m_profilers[i]->getTransitionCount(state, event);
}
out << "] " << total << "\\n";
}
}
out << "\\n";
}
}
''')
    code.write(path, "%s_ProfileDumper.cc" % self.ident)
def printProfilerHH(self, path):
    """Write the header for the per-controller profiler that counts
    events and (state, event) transitions."""
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#ifndef __${ident}_PROFILER_HH__
#define __${ident}_PROFILER_HH__
#include <cassert>
#include <iostream>
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
#include "mem/ruby/common/TypeDefines.hh"
class ${ident}_Profiler
{
public:
${ident}_Profiler();
void setVersion(int version);
void countTransition(${ident}_State state, ${ident}_Event event);
void possibleTransition(${ident}_State state, ${ident}_Event event);
uint64 getEventCount(${ident}_Event event);
bool isPossible(${ident}_State state, ${ident}_Event event);
uint64 getTransitionCount(${ident}_State state, ${ident}_Event event);
void clearStats();
private:
int m_counters[${ident}_State_NUM][${ident}_Event_NUM];
int m_event_counters[${ident}_Event_NUM];
bool m_possible[${ident}_State_NUM][${ident}_Event_NUM];
int m_version;
};
#endif // __${ident}_PROFILER_HH__
''')
    code.write(path, "%s_Profiler.hh" % self.ident)
def printProfilerCC(self, path):
    """Write the per-controller profiler implementation (counters for
    events and possible/taken transitions)."""
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#include <cassert>
#include "mem/protocol/${ident}_Profiler.hh"
${ident}_Profiler::${ident}_Profiler()
{
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_possible[state][event] = false;
m_counters[state][event] = 0;
}
}
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_event_counters[event] = 0;
}
}
void
${ident}_Profiler::setVersion(int version)
{
m_version = version;
}
void
${ident}_Profiler::clearStats()
{
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_counters[state][event] = 0;
}
}
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_event_counters[event] = 0;
}
}
void
${ident}_Profiler::countTransition(${ident}_State state, ${ident}_Event event)
{
assert(m_possible[state][event]);
m_counters[state][event]++;
m_event_counters[event]++;
}
void
${ident}_Profiler::possibleTransition(${ident}_State state,
${ident}_Event event)
{
m_possible[state][event] = true;
}
uint64
${ident}_Profiler::getEventCount(${ident}_Event event)
{
return m_event_counters[event];
}
bool
${ident}_Profiler::isPossible(${ident}_State state, ${ident}_Event event)
{
return m_possible[state][event];
}
uint64
${ident}_Profiler::getTransitionCount(${ident}_State state,
${ident}_Event event)
{
return m_counters[state][event];
}
''')
    code.write(path, "%s_Profiler.cc" % self.ident)
# **************************
# ******* HTML Files *******
# **************************
    def frameRef(self, click_href, click_target, over_href, over_num, text):
        """Return an HTML <A> snippet for the generated transition tables.

        Clicking loads *click_href* into the frame named *click_target*;
        hovering loads *over_href* into frame number *over_num* (the status
        pane).  *text* is displayed after shorthand expansion.
        """
        # fix_newlines=False keeps the anchor's line layout exactly as
        # written, since it is embedded inside table cells.
        code = self.symtab.codeFormatter(fix_newlines=False)
        code("""<A href=\"$click_href\" target=\"$click_target\" onmouseover=\"
if (parent.frames[$over_num].location != parent.location + '$over_href') {
    parent.frames[$over_num].location='$over_href'
}\">
${{html.formatShorthand(text)}}
</A>""")
        return str(code)
    def writeHTMLFiles(self, path):
        """Generate the HTML documentation set for this machine: one
        transition table per state (plus an unhighlighted default table)
        and one description page per action, state and event."""
        # Create table with no row hilighted
        self.printHTMLTransitions(path, None)

        # Generate transition tables
        for state in self.states.itervalues():
            self.printHTMLTransitions(path, state)

        # Generate action descriptions
        for action in self.actions.itervalues():
            name = "%s_action_%s.html" % (self.ident, action.ident)
            code = html.createSymbol(action, "Action")
            code.write(path, name)

        # Generate state descriptions
        for state in self.states.itervalues():
            name = "%s_State_%s.html" % (self.ident, state.ident)
            code = html.createSymbol(state, "State")
            code.write(path, name)

        # Generate event descriptions
        for event in self.events.itervalues():
            name = "%s_Event_%s.html" % (self.ident, event.ident)
            code = html.createSymbol(event, "Event")
            code.write(path, name)
    def printHTMLTransitions(self, path, active_state):
        """Emit one HTML transition table for this machine.

        active_state -- the state whose row is highlighted, or None for
        the default (unhighlighted) table.  The output file name encodes
        the highlighted state.
        """
        code = self.symtab.codeFormatter()

        code('''
<HTML>
<BODY link="blue" vlink="blue">

<H1 align="center">${{html.formatShorthand(self.short)}}:
''')
        code.indent()
        # Page header: link to the transition table of every machine in
        # the protocol, separated by " - "; the current machine is plain
        # text rather than a link.
        for i,machine in enumerate(self.symtab.getAllType(StateMachine)):
            mid = machine.ident
            if i != 0:
                extra = " - "
            else:
                extra = ""
            if machine == self:
                code('$extra$mid')
            else:
                code('$extra<A target="Table" href="${mid}_table.html">$mid</A>')
        code.dedent()

        code("""
</H1>

<TABLE border=1>

<TR>
<TH> </TH>
""")

        # Column headings: one per event, hovering shows its description.
        for event in self.events.itervalues():
            href = "%s_Event_%s.html" % (self.ident, event.ident)
            ref = self.frameRef(href, "Status", href, "1", event.short)
            code('<TH bgcolor=white>$ref</TH>')

        code('</TR>')
        # -- Body of table
        for state in self.states.itervalues():
            # -- Each row
            if state == active_state:
                color = "yellow"
            else:
                color = "white"

            click = "%s_table_%s.html" % (self.ident, state.ident)
            over = "%s_State_%s.html" % (self.ident, state.ident)
            # NOTE(review): 'text' is computed but never used below.
            text = html.formatShorthand(state.short)
            ref = self.frameRef(click, "Table", over, "1", state.short)
            code('''
<TR>
<TH bgcolor=$color>$ref</TH>
''')

            # -- One column for each event
            for event in self.events.itervalues():
                trans = self.table.get((state,event), None)
                if trans is None:
                    # This is the no transition case
                    if state == active_state:
                        color = "#C0C000"
                    else:
                        color = "lightgrey"

                    code('<TD bgcolor=$color> </TD>')
                    continue

                next = trans.nextState
                stall_action = False

                # -- Get the actions
                for action in trans.actions:
                    if action.ident == "z_stall" or \
                       action.ident == "zz_recycleMandatoryQueue":
                        stall_action = True

                # -- Print out "actions/next-state"
                if stall_action:
                    if state == active_state:
                        color = "#C0C000"
                    else:
                        color = "lightgrey"

                elif active_state and next.ident == active_state.ident:
                    # Transition leading into the highlighted state.
                    color = "aqua"
                elif state == active_state:
                    color = "yellow"
                else:
                    color = "white"

                code('<TD bgcolor=$color>')
                for action in trans.actions:
                    href = "%s_action_%s.html" % (self.ident, action.ident)
                    ref = self.frameRef(href, "Status", href, "1",
                                        action.short)
                    code(' $ref')
                # Only self-loops omit the "/next-state" suffix.
                if next != state:
                    if trans.actions:
                        code('/')

                    click = "%s_table_%s.html" % (self.ident, next.ident)
                    over = "%s_State_%s.html" % (self.ident, next.ident)
                    ref = self.frameRef(click, "Table", over, "1", next.short)
                    code("$ref")
                code("</TD>")

            # -- Each row: repeat the state header on the right-hand side.
            if state == active_state:
                color = "yellow"
            else:
                color = "white"

            click = "%s_table_%s.html" % (self.ident, state.ident)
            over = "%s_State_%s.html" % (self.ident, state.ident)
            ref = self.frameRef(click, "Table", over, "1", state.short)
            code('''
<TH bgcolor=$color>$ref</TH>
</TR>
''')
        code('''
<!- Column footer->
<TR>
<TH> </TH>
''')

        for event in self.events.itervalues():
            href = "%s_Event_%s.html" % (self.ident, event.ident)
            ref = self.frameRef(href, "Status", href, "1", event.short)
            code('<TH bgcolor=white>$ref</TH>')
        code('''
</TR>
</TABLE>
</BODY></HTML>
''')

        if active_state:
            name = "%s_table_%s.html" % (self.ident, active_state.ident)
        else:
            name = "%s_table.html" % self.ident
        code.write(path, name)
__all__ = [ "StateMachine" ]
| bsd-3-clause |
ptphp/PyLib | src/webpy1/src/common/config.py | 1 | 3691 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys,hashlib,os
reload(sys)
sys.setdefaultencoding('utf-8') #@UndefinedVariable
def checkPath(f1, f2, var):
    """Return True when the 4-level hash directory derived from ``var``
    already exists below ``f1 + f2`` (Windows-style '\\' separators)."""
    digest = hashlib.md5(var).hexdigest().upper()  # @UndefinedVariable
    # Split the 32-char hex digest into 2+2+2+26 character components.
    parts = (digest[0:2], digest[2:4], digest[4:6], digest[6:])
    path = f1 + f2 + "".join(part + "\\" for part in parts)
    print(path)
    return os.path.isdir(path)
def makePath(f1, f2, var):
    """Create (if missing) the 4-level hash directory derived from
    ``var`` below ``f1 + f2`` (Windows-style '\\' separators)."""
    digest = hashlib.md5(var).hexdigest().upper()  # @UndefinedVariable
    # Same layout as checkPath: 2+2+2+26 character components.
    parts = (digest[0:2], digest[2:4], digest[4:6], digest[6:])
    path = f1 + f2 + "".join(part + "\\" for part in parts)
    print(path)
    if not os.path.isdir(path):
        os.makedirs(path)
def toward(str):
    """Map a Chinese orientation phrase to its numeric code.

    Falsy input yields the default code 6.  Otherwise every direction
    word found in the text is collected in declaration order and the
    last match is returned (the first when there is exactly one); no
    match at all returns None, like the historical implementation.
    """
    if not str:
        return 6
    codes = {
        5 : '东西',
        6 : '南北',
        7 : '东南',
        8 : '西南',
        9 : '东北',
        10 : '西北',
        1 : '东',
        2 : '南',
        3 : '西',
        4 : '北',
    }
    matches = [code for code, word in codes.items() if str.find(word) != -1]
    if matches:
        return matches[0] if len(matches) == 1 else matches[-1]
def housetype_s(str):
    """Return all house-type codes found in the text as a trailing-comma
    string ('2,3,'); falsy input yields the default code 3 (an int)."""
    if not str:
        return 3
    kinds = {
        2 : '平房',
        3 : '普通住宅',
        7 : '商住两用',
        4 : '公寓',
        5 : '别墅',
        6 : '其他',
    }
    return ''.join('%d,' % code
                   for code, word in kinds.items()
                   if str.find(word) != -1)
def house_room_s(str):
    """Return all room-count codes found in the text as a trailing-comma
    string ('1,3,'); falsy input yields the default code 2 (an int)."""
    if not str:
        return 2
    rooms = {
        1 : '一居',
        2 : '二居',
        3 : '三居',
        4 : '四居',
    }
    return ''.join('%d,' % code
                   for code, word in rooms.items()
                   if str.find(word) != -1)
def house_room_s1(str):
    """Map an exact room label ('1室'..'4室') to 1..4; anything else is 5."""
    for code, label in enumerate(('1室', '2室', '3室', '4室'), 1):
        if str == label:
            return code
    return 5
def housetype(str):
    """Return the first house-type code whose word occurs in the text;
    falsy input or no match yields the default code 6 ('其他')."""
    if not str:
        return 6
    kinds = {
        2 : '平房',
        3 : '普通住宅',
        7 : '商住两用',
        4 : '公寓',
        5 : '别墅',
        6 : '其他',
    }
    for code, word in kinds.items():
        if str.find(word) != -1:
            return code
    return 6
def payType(str):
    """Map a rent-period label to months; unknown labels return None."""
    for label, months in (('季', 3), ('半年', 6), ('年', 12)):
        if str == label:
            return months
def fitment(str):
    """Return the first fitment (decoration) code whose word occurs in
    the text; falsy input or no match yields the default code 2."""
    if not str:
        return 2
    levels = {
        1 : '毛坯',
        2 : '中等装修',
        3 : '精装修',
        4 : '豪华装修',
    }
    for code, word in levels.items():
        if str.find(word) != -1:
            return code
    return 2
def fitment_s(str):
    """Return all fitment codes found in the text as a trailing-comma
    string ('1,3,'); falsy input yields the default code 2 (an int)."""
    if not str:
        return 2
    levels = {
        1 : '毛坯',
        2 : '中等装修',
        3 : '精装修',
        4 : '豪华装修',
    }
    return ''.join('%d,' % code
                   for code, word in levels.items()
                   if str.find(word) != -1)
def belong(str):
    """Return the first ownership-type code whose word occurs in the
    text; falsy input or no match yields 0 (unknown)."""
    if not str:
        return 0
    kinds = {
        1 : '商品房',
        2 : '经济适用房',
        3 : '公房',
        4 : '使用权',
    }
    for code, word in kinds.items():
        if str.find(word) != -1:
            return code
    return 0
def install(str):
    """Return all furnishing codes found in the text as a trailing-comma
    string ('6,13,'); falsy input yields 0 (an int).

    Note: several words deliberately carry a leading space, mirroring the
    source data they are matched against.
    """
    if not str:
        return 0
    items = {
        6 : '床',
        8 : '热水器',
        9 : ' 洗衣机',
        10 : ' 空调',
        11 : ' 冰箱',
        12 : ' 电视机',
        13 : '宽带',
    }
    return ''.join('%d,' % code
                   for code, word in items.items()
                   if str.find(word) != -1)
def deposit(str):
    """Return the first deposit-scheme code whose word occurs in the
    text; falsy input yields 0, no match yields 2 ('面议')."""
    if not str:
        return 0
    terms = {
        2 : '面议',
        1 : '押一付三',
        3 : '押一付一',
        6 : '半年付',
        7 : '年付',
    }
    for code, word in terms.items():
        if str.find(word) != -1:
            return code
    return 2
| apache-2.0 |
ybellavance/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_renames.py | 203 | 2221 | """Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {"sys": {"maxint" : "maxsize"},
}
LOOKUP = {}
def alternates(members):
    """Return a pattern-grammar alternation like ``('a'|'b')``."""
    return "(%s)" % "|".join(repr(member) for member in members)
def build_pattern():
    """Yield one lib2to3 pattern string per renamed attribute in MAPPING.

    Side effect: fills the module-level LOOKUP table mapping
    (module, old_attribute) -> new_attribute, consumed by
    FixRenames.transform().
    """
    #bare = set()
    for module, replace in list(MAPPING.items()):
        for old_attr, new_attr in list(replace.items()):
            LOOKUP[(module, old_attr)] = new_attr

            #bare.add(module)
            #bare.add(old_attr)

            #yield """
            #      import_name< 'import' (module=%r
            #          | dotted_as_names< any* module=%r any* >) >
            #      """ % (module, module)
            # Matches: from <module> import <old_attr> [as name]
            yield """
                  import_from< 'from' module_name=%r 'import'
                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
                  """ % (module, old_attr, old_attr)
            # Matches: <module>.<old_attr>
            yield """
                  power< module_name=%r trailer< '.' attr_name=%r > any* >
                  """ % (module, old_attr)
    #yield """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
    """Fixer rewriting renamed attributes (sys.maxint -> sys.maxsize) in
    both ``from sys import maxint`` and ``sys.maxint`` forms."""
    BM_compatible = True
    PATTERN = "|".join(build_pattern())

    order = "pre" # Pre-order tree traversal

    # Don't match the node if it's within another match
    def match(self, node):
        match = super(FixRenames, self).match
        results = match(node)
        if results:
            # Reject nodes with a matching ancestor so that nested
            # occurrences are only rewritten once, at the outermost match.
            if any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False

    #def start_tree(self, tree, filename):
    #    super(FixRenames, self).start_tree(tree, filename)
    #    self.replace = {}

    def transform(self, node, results):
        mod_name = results.get("module_name")
        attr_name = results.get("attr_name")
        #bare_name = results.get("bare_name")
        #import_mod = results.get("module")

        if mod_name and attr_name:
            # Swap in the new attribute, preserving the original node's
            # whitespace/comment prefix.
            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
            attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
| apache-2.0 |
daishaowei/dpark | dpark/tracker.py | 14 | 3097 | import socket
import zmq
import logging
import time
from dpark.env import env
from dpark.util import spawn
logger = logging.getLogger(__name__)
class TrackerMessage(object):
    """Base class for messages exchanged with TrackerServer.  Instances
    are pickled over ZeroMQ, so the class layout must stay stable
    between client and server."""
    pass
class StopTrackerMessage(TrackerMessage):
    """Ask the server loop to reply 'OK' and shut down."""
    pass
class SetValueMessage(TrackerMessage):
    """Replace the value list stored under ``key`` with ``value``."""
    def __init__(self, key, value):
        self.key = key
        self.value = value
class AddItemMessage(TrackerMessage):
    """Append ``item`` to the list stored under ``key``."""
    def __init__(self, key, item):
        self.key = key
        self.item = item
class RemoveItemMessage(TrackerMessage):
    """Remove ``item`` from the list stored under ``key`` (if present)."""
    def __init__(self, key, item):
        self.key = key
        self.item = item
class GetValueMessage(TrackerMessage):
    """Request the value list stored under ``key``."""
    def __init__(self, key):
        self.key = key
class TrackerServer(object):
    """ZeroMQ REP service maintaining a shared key -> list-of-values map
    (e.g. block locations) for a dpark job.

    NOTE(review): ``locs`` is a class attribute, so every TrackerServer
    in the same process shares one dictionary — presumably intentional
    (one server per process); confirm before instantiating twice.
    """
    locs = {}

    def __init__(self):
        self.addr = None   # "tcp://host:port", set once run() has bound
        self.thread = None

    def start(self):
        """Spawn the server thread and block until it has bound a port."""
        self.thread = spawn(self.run)
        while self.addr is None:
            time.sleep(0.01)

    def stop(self):
        """Send a stop request, wait for the server thread to finish and
        return the server's confirmation message."""
        sock = env.ctx.socket(zmq.REQ)
        sock.connect(self.addr)
        sock.send_pyobj(StopTrackerMessage())
        confirm_msg = sock.recv_pyobj()
        sock.close()
        self.thread.join()
        return confirm_msg

    def get(self, key):
        # Missing keys read as the empty list.
        return self.locs.get(key, [])

    def set(self, key, value):
        # Values are always stored as lists; scalars are wrapped.
        if not isinstance(value, list):
            value = [value]
        self.locs[key] = value

    def add(self, key, item):
        if key not in self.locs:
            self.locs[key] = []
        self.locs[key].append(item)

    def remove(self, key, item):
        # Ignores absent items; raises KeyError for an unknown key.
        if item in self.locs[key]:
            self.locs[key].remove(item)

    def run(self):
        """Server loop: bind a random port, publish the address via
        self.addr, then serve messages until StopTrackerMessage."""
        locs = self.locs   # NOTE(review): local alias is unused below
        sock = env.ctx.socket(zmq.REP)
        port = sock.bind_to_random_port("tcp://0.0.0.0")
        self.addr = "tcp://%s:%d" % (socket.gethostname(), port)
        logger.debug("TrackerServer started at %s", self.addr)

        def reply(msg):
            sock.send_pyobj(msg)

        while True:
            msg = sock.recv_pyobj()
            if isinstance(msg, SetValueMessage):
                self.set(msg.key, msg.value)
                reply('OK')
            elif isinstance(msg, AddItemMessage):
                self.add(msg.key, msg.item)
                reply('OK')
            elif isinstance(msg, RemoveItemMessage):
                self.remove(msg.key, msg.item)
                reply('OK')
            elif isinstance(msg, GetValueMessage):
                reply(self.get(msg.key))
            elif isinstance(msg, StopTrackerMessage):
                reply('OK')
                break
            else:
                logger.error("unexpected msg %s %s", msg, type(msg))
                reply('ERROR')
        sock.close()
        logger.debug("stop TrackerServer %s", self.addr)
class TrackerClient(object):
    """Blocking REQ/REP client for a TrackerServer at ``addr``."""

    def __init__(self, addr):
        self.addr = addr

    def call(self, msg):
        """Send *msg* to the tracker and return its (unpickled) reply.

        Bug fix: the socket is created *before* the try block.  The
        original created it inside ``try`` and closed it in ``finally``,
        so if socket creation itself raised, the finally clause hit an
        unbound ``sock`` and masked the real error with an
        UnboundLocalError.
        """
        sock = env.ctx.socket(zmq.REQ)
        try:
            sock.connect(self.addr)
            sock.send_pyobj(msg)
            return sock.recv_pyobj()
        finally:
            sock.close()
| bsd-3-clause |
yhj630520/dpark | dpark/dependency.py | 14 | 4235 | import bisect
from dpark.util import portable_hash
from dpark.serialize import load_func, dump_func
class Dependency:
    """Base class: records that a child RDD depends on parent ``rdd``.

    Dependencies are never pickled directly (the RDD graph is rebuilt on
    workers), hence the explicit __getstate__ guard.
    """
    def __init__(self, rdd):
        self.rdd = rdd

    def __getstate__(self):
        raise ValueError("Should not pickle dependency: %r" % self)
class NarrowDependency(Dependency):
    """Dependency where each output partition needs a small, statically
    known set of parent partitions (no shuffle required)."""
    isShuffle = False

    def getParents(self, outputPartition):
        """Return the parent partition ids feeding ``outputPartition``."""
        raise NotImplementedError
class OneToOneDependency(NarrowDependency):
    """Child partition i depends exactly on parent partition i."""
    def getParents(self, pid):
        return [pid]
class OneToRangeDependency(NarrowDependency):
    """Child partition depends on a contiguous run of ``splitSize``
    parent partitions, clipped to ``length`` total parents."""

    def __init__(self, rdd, splitSize, length):
        Dependency.__init__(self, rdd)
        self.splitSize = splitSize
        self.length = length

    def getParents(self, pid):
        start = pid * self.splitSize
        stop = min(start + self.splitSize, self.length)
        return range(start, stop)
class CartesianDependency(NarrowDependency):
    """Dependency of a cartesian-product RDD on one of its two parents.

    ``first`` selects which parent this dependency describes; child
    partition ids enumerate the product row-major with
    ``numSplitsInRdd2`` columns.
    """

    def __init__(self, rdd, first, numSplitsInRdd2):
        NarrowDependency.__init__(self, rdd)
        self.first = first
        self.numSplitsInRdd2 = numSplitsInRdd2

    def getParents(self, pid):
        if self.first:
            # Bug fix: use floor division.  Plain '/' yields a float (and
            # thus an invalid partition index) under Python 3; '//' is
            # identical for ints on Python 2.
            return [pid // self.numSplitsInRdd2]
        else:
            return [pid % self.numSplitsInRdd2]
class RangeDependency(NarrowDependency):
    """Maps a window of child partitions [outStart, outStart+length) onto
    the parent partitions starting at ``inStart`` (used by union)."""

    def __init__(self, rdd, inStart, outStart, length):
        Dependency.__init__(self, rdd)
        self.inStart = inStart
        self.outStart = outStart
        self.length = length

    def getParents(self, pid):
        if self.outStart <= pid < self.outStart + self.length:
            return [pid - self.outStart + self.inStart]
        return []
class ShuffleDependency(Dependency):
    """A wide dependency: child partitions are produced by shuffling the
    parent through ``partitioner``, combining values with ``aggregator``."""
    isShuffle = True

    def __init__(self, shuffleId, rdd, aggregator, partitioner):
        Dependency.__init__(self, rdd)
        self.shuffleId = shuffleId
        self.aggregator = aggregator
        self.partitioner = partitioner
class Aggregator:
    """Bundle of the three combiner functions used by shuffle operations
    (combineByKey semantics)."""

    def __init__(self, createCombiner, mergeValue,
                 mergeCombiners):
        self.createCombiner = createCombiner   # value -> combiner
        self.mergeValue = mergeValue           # (combiner, value) -> combiner
        self.mergeCombiners = mergeCombiners   # (combiner, combiner) -> combiner

    def __getstate__(self):
        # Serialize the callables explicitly: plain pickle cannot handle
        # arbitrary functions/closures, dump_func can.
        return (dump_func(self.createCombiner),
                dump_func(self.mergeValue),
                dump_func(self.mergeCombiners))

    def __setstate__(self, state):
        c1, c2, c3 = state
        self.createCombiner = load_func(c1)
        self.mergeValue = load_func(c2)
        self.mergeCombiners = load_func(c3)
class AddAggregator:
    """Aggregator combining with '+' (numbers, strings, lists)."""
    def createCombiner(self, x):
        return x

    def mergeValue(self, s, x):
        return s + x

    def mergeCombiners(self, x, y):
        return x + y
class MergeAggregator:
    """Aggregator collecting every value for a key into a list."""
    def createCombiner(self, x):
        return [x]

    def mergeValue(self, s, x):
        # Mutates and returns the existing combiner list.
        s.append(x)
        return s

    def mergeCombiners(self, x, y):
        x.extend(y)
        return x
class UniqAggregator:
    """Aggregator collecting the distinct values for a key into a set."""
    def createCombiner(self, x):
        return set([x])

    def mergeValue(self, s, x):
        s.add(x)
        return s

    def mergeCombiners(self, x, y):
        x |= y
        return x
class Partitioner:
    """Abstract mapping from keys to partition ids in [0, numPartitions)."""
    @property
    def numPartitions(self):
        raise NotImplementedError

    def getPartition(self, key):
        raise NotImplementedError
class HashPartitioner(Partitioner):
    """Assigns keys by portable hash modulo the partition count."""

    def __init__(self, partitions):
        # Never allow fewer than one partition.
        self.partitions = max(1, int(partitions))

    @property
    def numPartitions(self):
        return self.partitions

    def getPartition(self, key):
        return portable_hash(key) % self.partitions

    def __eq__(self, other):
        return (isinstance(other, Partitioner)
                and other.numPartitions == self.numPartitions)
class RangePartitioner(Partitioner):
    """Assigns keys to len(keys)+1 buckets delimited by sorted boundary
    keys; ``reverse`` flips the bucket numbering."""

    def __init__(self, keys, reverse=False):
        self.keys = sorted(keys)
        self.reverse = reverse

    @property
    def numPartitions(self):
        return len(self.keys) + 1

    def getPartition(self, key):
        idx = bisect.bisect(self.keys, key)
        if self.reverse:
            return len(self.keys) - idx
        return idx

    def __eq__(self, other):
        return (isinstance(other, RangePartitioner)
                and other.keys == self.keys
                and other.reverse == self.reverse)
| bsd-3-clause |
jhcepas/ete | ete3/evol/parser/slrparser.py | 3 | 3177 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
#!/usr/bin/python
"""
06 Feb 2011
parser for slr outfile
"""
from __future__ import absolute_import
__author__ = "Francois-Jose Serra"
__email__ = "francois@barrabin.org"
__licence__ = "GPLv3"
__version__ = "0.0"
from re import match
def parse_slr(slrout):
    """Parse an SLR (Sitewise Likelihood Ratio) output file.

    :param slrout: path to the SLR result file.  The header line (starting
        with '#') names the columns; 'upper'/'lower' columns are optional
        and, when present, yield per-site standard errors.
    :returns: dict of the form
        ``{'sites': {'SLR': {'pv', 'w', 'se', 'class', 'note'}},
           'n_classes': {'SLR': 8}}``

    Improvements over the original: the file handle is closed via a
    context manager, the bare ``except:`` around the optional
    'upper'/'lower' lookup is narrowed to ValueError (what list.index
    raises), and each data line is split once instead of repeatedly.
    """
    SLR = {'pv': [], 'w': [], 'se': [], 'class': [], 'note': []}
    w = ''
    apv = ''
    seP = ''
    seN = ''
    res = ''
    note = ''
    with open(slrout) as handle:
        for line in handle:
            if line.startswith('#'):
                # Header: locate columns by name; the -1 compensates for
                # the leading '#' token.
                fields = line.strip().split()
                w = fields.index('Omega') - 1
                apv = fields.index('Adj.Pval') - 1
                res = fields.index('Result') - 1
                note = fields.index('Note') - 1
                try:
                    seP = fields.index('upper') - 1
                    seN = fields.index('lower') - 1
                except ValueError:
                    # No standard-error columns in this file.
                    pass
                continue
            cols = line.split()
            SLR['pv'].append(1 - float(cols[apv]))
            SLR['w'].append(cols[w])
            corr = 0
            try:
                if match('[-+]', cols[res]) is not None:
                    # Each '-' lowers, each '+' raises, the neutral class 5.
                    SLR['class'].append(5 - cols[res].count('-')
                                        + cols[res].count('+'))
                else:
                    # Non-signed Result token: neutral class, and the Note
                    # column shifts one position to the left.
                    SLR['class'].append(5)
                    corr = 1
            except IndexError:
                SLR['class'].append(5)
            try:
                SLR['note'].append(cols[note - corr])
            except IndexError:
                SLR['note'].append('')
            if seN != '':
                # Asymmetric errors: [omega - lower, upper - omega].
                SLR['se'].append([float(SLR['w'][-1]) - float(cols[seN]),
                                  float(cols[seP]) - float(SLR['w'][-1])])
    return {'sites': {'SLR': SLR},
            'n_classes': {'SLR': 8}}
| gpl-3.0 |
hyperized/ansible | lib/ansible/modules/network/fortios/fortios_wireless_controller_hotspot20_anqp_venue_name.py | 13 | 10578 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_hotspot20_anqp_venue_name
short_description: Configure venue name duple in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller_hotspot20 feature and anqp_venue_name category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_hotspot20_anqp_venue_name:
description:
- Configure venue name duple.
default: null
type: dict
suboptions:
name:
description:
- Name of venue name duple.
required: true
type: str
value_list:
description:
- Name list.
type: list
suboptions:
index:
description:
- Value index.
required: true
type: int
lang:
description:
- Language code.
type: str
value:
description:
- Venue name value.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure venue name duple.
fortios_wireless_controller_hotspot20_anqp_venue_name:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_hotspot20_anqp_venue_name:
name: "default_name_3"
value_list:
-
index: "5"
lang: "<your_own_value>"
value: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate the *fos* handle against the FortiGate described by
    the module parameters in *data*."""
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS stays on unless the caller explicitly disabled it.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')

    fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_hotspot20_anqp_venue_name_data(json):
    """Keep only the options this module knows how to send, dropping
    absent keys and explicit None values."""
    option_list = ['name', 'value_list']
    return dict((key, json[key])
                for key in option_list
                if key in json and json[key] is not None)
def underscore_to_hyphen(data):
    """Recursively rename dict keys from foo_bar to foo-bar (the FortiOS
    API uses hyphens, Ansible arguments use underscores).

    Bug fix: the original recursed into list elements but discarded the
    converted result (``elem = underscore_to_hyphen(elem)`` inside the
    loop), leaving dicts nested in lists untouched.  Lists are now
    rebuilt from the converted elements.
    """
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return dict((k.replace('_', '-'), underscore_to_hyphen(v))
                    for k, v in data.items())
    return data
def wireless_controller_hotspot20_anqp_venue_name(data, fos):
    """Create/update (state=present) or delete (state=absent) the
    anqp-venue-name object described by the module parameters."""
    vdom = data['vdom']
    state = data['state']
    payload = underscore_to_hyphen(
        filter_wireless_controller_hotspot20_anqp_venue_name_data(
            data['wireless_controller_hotspot20_anqp_venue_name']))

    if state == "present":
        return fos.set('wireless-controller.hotspot20',
                       'anqp-venue-name',
                       data=payload,
                       vdom=vdom)

    if state == "absent":
        return fos.delete('wireless-controller.hotspot20',
                          'anqp-venue-name',
                          mkey=payload['name'],
                          vdom=vdom)
def is_successful_status(status):
    """A call succeeded if FortiOS reported success, or if a DELETE hit a
    404 (the object was already gone)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller_hotspot20(data, fos):
    """Dispatch to the resource handler and normalise its response into
    the (is_error, has_changed, result) triple main() expects."""
    if data['wireless_controller_hotspot20_anqp_venue_name']:
        response = wireless_controller_hotspot20_anqp_venue_name(data, fos)
        failed = not is_successful_status(response)
        changed = response['status'] == "success"
        return failed, changed, response
def main():
    """Module entry point: build the argument spec, obtain a FortiOS
    handle (httpapi connection or legacy fortiosapi login), apply the
    configuration and report the outcome to Ansible."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "wireless_controller_hotspot20_anqp_venue_name": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "name": {"required": True, "type": "str"},
                "value_list": {"required": False, "type": "list",
                               "options": {
                                   "index": {"required": True, "type": "int"},
                                   "lang": {"required": False, "type": "str"},
                                   "value": {"required": False, "type": "str"}
                               }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # httpapi transport: credentials come from the persistent connection.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: log in directly with the fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
| gpl-3.0 |
yanheven/neutron | neutron/db/portbindings_db.py | 51 | 4645 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import portbindings_base
from neutron.extensions import portbindings
class PortBindingPort(model_base.BASEV2):
    """DB model mapping a Neutron port to the host it is bound on
    (backs the portbindings extension's binding:host_id attribute)."""
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    host = sa.Column(sa.String(255), nullable=False)
    # Joined-loaded backref so port.portbinding needs no extra query;
    # binding rows are removed together with their port.
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("portbinding",
                            lazy='joined', uselist=False,
                            cascade='delete'))
class PortBindingMixin(portbindings_base.PortBindingBaseMixin):
    """DB mixin implementing the portbindings extension (host binding per
    port) on top of the PortBindingPort table."""
    extra_binding_dict = None

    def _port_model_hook(self, context, original_model, query):
        """Outer-join ports to their optional binding row so host filters
        can be applied to port queries."""
        query = query.outerjoin(PortBindingPort,
                                (original_model.id ==
                                 PortBindingPort.port_id))
        return query

    def _port_result_filter_hook(self, query, filters):
        """Apply the binding:host_id filter, if one was supplied."""
        values = filters and filters.get(portbindings.HOST_ID, [])
        if not values:
            return query
        query = query.filter(PortBindingPort.host.in_(values))
        return query

    # Registered at class-definition time so every port query issued
    # through NeutronDbPluginV2 picks up the join/filter hooks above.
    db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
        models_v2.Port,
        "portbindings_port",
        '_port_model_hook',
        None,
        '_port_result_filter_hook')

    def _process_portbindings_create_and_update(self, context, port_data,
                                                port):
        """Persist binding attributes on port create/update.

        Unset binding:profile / binding:vnic_type attributes are stripped
        from the response dict; a set host_id is inserted or updated.
        """
        binding_profile = port.get(portbindings.PROFILE)
        binding_profile_set = attributes.is_attr_set(binding_profile)
        if not binding_profile_set and binding_profile is not None:
            del port[portbindings.PROFILE]

        binding_vnic = port.get(portbindings.VNIC_TYPE)
        binding_vnic_set = attributes.is_attr_set(binding_vnic)
        if not binding_vnic_set and binding_vnic is not None:
            del port[portbindings.VNIC_TYPE]
        # REVISIT(irenab) Add support for vnic_type for plugins that
        # can handle more than one type.
        # Currently implemented for ML2 plugin that does not use
        # PortBindingMixin.

        host = port_data.get(portbindings.HOST_ID)
        host_set = attributes.is_attr_set(host)
        with context.session.begin(subtransactions=True):
            bind_port = context.session.query(
                PortBindingPort).filter_by(port_id=port['id']).first()
            if host_set:
                if not bind_port:
                    context.session.add(PortBindingPort(port_id=port['id'],
                                                        host=host))
                else:
                    bind_port.host = host
            else:
                # No host supplied: echo back whatever is already stored.
                host = bind_port.host if bind_port else None
        self._extend_port_dict_binding_host(port, host)

    def get_port_host(self, context, port_id):
        """Return the bound host for *port_id*, or None if unbound."""
        with context.session.begin(subtransactions=True):
            bind_port = context.session.query(
                PortBindingPort).filter_by(port_id=port_id).first()
            return bind_port.host if bind_port else None

    def _extend_port_dict_binding_host(self, port_res, host):
        # Let the base mixin populate the other binding attributes, then
        # fill in the host we resolved.
        super(PortBindingMixin, self).extend_port_dict_binding(
            port_res, None)
        port_res[portbindings.HOST_ID] = host

    def extend_port_dict_binding(self, port_res, port_db):
        host = port_db.portbinding.host if port_db.portbinding else None
        self._extend_port_dict_binding_host(port_res, host)
def _extend_port_dict_binding(plugin, port_res, port_db):
    """Dict-extend hook: add binding attributes to a port dict, but only
    for plugins that actually mix in PortBindingMixin."""
    if isinstance(plugin, PortBindingMixin):
        plugin.extend_port_dict_binding(port_res, port_db)
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, [_extend_port_dict_binding])
| apache-2.0 |
herobd/fast-rcnn | lib/fast_rcnn/config.py | 20 | 6130 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
# The configuration tree is an EasyDict so options can be accessed with
# attribute syntax (cfg.TRAIN.SCALES) as well as item syntax.
__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
def get_output_dir(imdb, net):
    """Return the directory where experimental artifacts are placed.

    A canonical path is built using the name from an imdb and a network
    (if not None).
    """
    base = osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name)
    path = osp.abspath(base)
    # Append the network name as a final component only when one is given.
    return path if net is None else osp.join(path, net.name)
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Raises:
        KeyError: if ``a`` contains a key that is not present in ``b``.
        ValueError: if the value types for a shared key do not match.
    """
    if type(a) is not edict:
        return
    # dict.iteritems()/has_key() are Python-2-only (has_key was removed in
    # Python 3); items() and the ``in`` operator behave identically on both.
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too
        if type(b[k]) is not type(v):
            raise ValueError(('Type mismatch ({} vs. {}) '
                              'for config key: {}').format(type(b[k]),
                                                           type(v), k))
        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except Exception:
                # Report which key the failure happened under, then re-raise.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; for config files that may come from
        # untrusted sources, yaml.safe_load should be preferred -- confirm
        # whether any existing configs rely on non-safe tags before changing.
        yaml_cfg = edict(yaml.load(f))
    # Overrides clobber matching keys in the global default config __C.
    _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    ``cfg_list`` alternates keys and values, e.g.
    ['TRAIN.SCALES', '(600,)', 'TEST.NMS', '0.25']. Dotted keys are
    resolved against the global config tree ``__C``.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # Walk down to the sub-dict that owns the final key.
        # dict.has_key() is deprecated (removed in Python 3); use ``in``.
        for subkey in key_list[:-1]:
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # handle the case when v is a string literal
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
| mit |
lepistone/odoo | openerp/tools/win32.py | 457 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
import time
import datetime
# Windows' locale module lacks D_FMT/T_FMT and nl_langinfo; provide
# stand-ins so callers can query the locale's date/time format strings.
if not hasattr(locale, 'D_FMT'):
    locale.D_FMT = 1
if not hasattr(locale, 'T_FMT'):
    locale.T_FMT = 2
if not hasattr(locale, 'nl_langinfo'):
    def nl_langinfo(param):
        # Render a known date/time with the current locale, then substitute
        # the known components back into strftime directives to recover the
        # locale's format string. Returns None for unsupported params.
        if param == locale.D_FMT:
            val = time.strptime('30/12/2004', '%d/%m/%Y')
            dt = datetime.datetime(*val[:-2])
            format_date = dt.strftime('%x')
            # NOTE(review): '04' presumably targets locales that print a
            # 2-digit year, but it maps to '%Y' rather than '%y' -- confirm.
            for x, y in [('30', '%d'),('12', '%m'),('2004','%Y'),('04', '%Y')]:
                format_date = format_date.replace(x, y)
            return format_date
        if param == locale.T_FMT:
            val = time.strptime('13:24:56', '%H:%M:%S')
            dt = datetime.datetime(*val[:-2])
            format_time = dt.strftime('%X')
            for x, y in [('13', '%H'),('24', '%M'),('56','%S')]:
                format_time = format_time.replace(x, y)
            return format_time
    locale.nl_langinfo = nl_langinfo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Clyde-fare/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target
# Pad the axis limits by half a unit so points are not on the border.
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
undoware/neutron-drive | google_appengine/google/appengine/api/conversion/__init__.py | 6 | 1055 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Conversion API module."""
from conversion import *
# Names re-exported from the conversion submodule; this is the public API
# of the package (exceptions, limits, data classes and call helpers).
__all__ = [
    "BackendDeadlineExceeded",
    "BackendError",
    "ConversionTooLarge",
    "ConversionUnsupported",
    "Error",
    "InvalidRequest",
    "TooManyConversions",
    "TransientError",
    "CONVERSION_MAX_SIZE_BYTES",
    "CONVERSION_MAX_NUM_PER_REQUEST",
    "Asset",
    "Conversion",
    "ConversionOutput",
    "convert",
    "create_rpc",
    "make_convert_call",
]
| bsd-3-clause |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/phonenumbers/data/region_ES.py | 5 | 2090 | """Auto-generated file, do not edit by hand. ES metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_ES = PhoneMetadata(id='ES', country_code=34, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[5-9]\\d{8}', possible_number_pattern='\\d{9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='8(?:[13]0|[28][0-8]|[47][1-9]|5[01346-9]|6[0457-9])\\d{6}|9(?:[1238][0-8]\\d{6}|4[1-9]\\d{6}|5\\d{7}|6(?:[0-8]\\d{6}|9(?:0(?:[0-57-9]\\d{4}|6(?:0[0-8]|1[1-9]|[2-9]\\d)\\d{2})|[1-9]\\d{5}))|7(?:[124-9]\\d{2}|3(?:[0-8]\\d|9[1-9]))\\d{4})', possible_number_pattern='\\d{9}', example_number='810123456'),
mobile=PhoneNumberDesc(national_number_pattern='(?:6\\d{6}|7[1-4]\\d{5}|9(?:6906(?:09|10)|7390\\d{2}))\\d{2}', possible_number_pattern='\\d{9}', example_number='612345678'),
toll_free=PhoneNumberDesc(national_number_pattern='[89]00\\d{6}', possible_number_pattern='\\d{9}', example_number='800123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='80[367]\\d{6}', possible_number_pattern='\\d{9}', example_number='803123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='90[12]\\d{6}', possible_number_pattern='\\d{9}', example_number='901123456'),
personal_number=PhoneNumberDesc(national_number_pattern='70\\d{7}', possible_number_pattern='\\d{9}', example_number='701234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='51\\d{7}', possible_number_pattern='\\d{9}', example_number='511234567'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='([5-9]\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[568]|[79][0-8]'])],
mobile_number_portable_region=True)
| mit |
hesseltuinhof/mxnet | python/mxnet/gluon/model_zoo/model_store.py | 1 | 4119 | # coding: utf-8
"""Model zoo for pre-trained models."""
from __future__ import print_function
__all__ = ['get_model_file', 'purge']
import hashlib
import os
import zipfile
from ...test_utils import download
_model_sha1 = {name: checksum for checksum, name in [
('44335d1f0046b328243b32a26a4fbd62d9057b45', 'alexnet'),
('f27dbf2dbd5ce9a80b102d89c7483342cd33cb31', 'densenet121'),
('b6c8a95717e3e761bd88d145f4d0a214aaa515dc', 'densenet161'),
('2603f878403c6aa5a71a124c4a3307143d6820e9', 'densenet169'),
('1cdbc116bc3a1b65832b18cf53e1cb8e7da017eb', 'densenet201'),
('ed47ec45a937b656fcc94dabde85495bbef5ba1f', 'inceptionv3'),
('d2b128fa89477c2e20061607a53a8d9f66ce239d', 'resnet101_v1'),
('6562166cd597a6328a32a0ce47bb651df80b3bbb', 'resnet152_v1'),
('38d6d423c22828718ec3397924b8e116a03e6ac0', 'resnet18_v1'),
('4dc2c2390a7c7990e0ca1e53aeebb1d1a08592d1', 'resnet34_v1'),
('2a903ab21260c85673a78fe65037819a843a1f43', 'resnet50_v1'),
('264ba4970a0cc87a4f15c96e25246a1307caf523', 'squeezenet1.0'),
('33ba0f93753c83d86e1eb397f38a667eaf2e9376', 'squeezenet1.1'),
('dd221b160977f36a53f464cb54648d227c707a05', 'vgg11'),
('ee79a8098a91fbe05b7a973fed2017a6117723a8', 'vgg11_bn'),
('6bc5de58a05a5e2e7f493e2d75a580d83efde38c', 'vgg13'),
('7d97a06c3c7a1aecc88b6e7385c2b373a249e95e', 'vgg13_bn'),
('649467530119c0f78c4859999e264e7bf14471a9', 'vgg16'),
('6b9dbe6194e5bfed30fd7a7c9a71f7e5a276cb14', 'vgg16_bn'),
('f713436691eee9a20d70a145ce0d53ed24bf7399', 'vgg19'),
('9730961c9cea43fd7eeefb00d792e386c45847d6', 'vgg19_bn')]}
_url_format = 'https://{bucket}.s3.amazonaws.com/gluon/models/{file_name}.zip'
bucket = 'apache-mxnet'
def short_hash(name):
    """Return the first eight hex digits of the model's SHA1 checksum."""
    try:
        return _model_sha1[name][:8]
    except KeyError:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
def verified(file_path, name):
    """Check that file_path's SHA1 digest matches the recorded checksum."""
    digest = hashlib.sha1()
    with open(file_path, 'rb') as fin:
        # Hash in 1 MiB chunks so large parameter files are not slurped
        # into memory all at once.
        for chunk in iter(lambda: fin.read(1048576), b''):
            digest.update(chunk)
    return digest.hexdigest() == _model_sha1[name]
def get_model_file(name, local_dir=os.path.expanduser('~/.mxnet/models/')):
    r"""Return location for the pretrained on local file system.
    This function will download from online model zoo when model cannot be found or has mismatch.
    Parameters
    ----------
    name : str
        Name of the model.
    local_dir : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    # File name embeds the first 8 checksum digits, e.g. 'vgg16-649467
    file_name = '{name}-{short_hash}'.format(name=name,
                                             short_hash=short_hash(name))
    file_path = os.path.join(local_dir, file_name+'.params')
    if os.path.exists(file_path):
        if verified(file_path, name):
            # Cached copy matches the recorded checksum -- use it.
            return file_path
        else:
            print('Mismatch in the content of model file detected. Downloading again.')
    else:
        print('Model file is not found. Downloading.')
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    # ``download`` comes from mxnet.test_utils (imported at module top).
    download(_url_format.format(bucket=bucket,
                                file_name=file_name),
             fname=file_name+'.zip',
             dirname=local_dir,
             overwrite=True)
    # The zoo serves params zipped; unpack then drop the archive.
    zip_file_path = os.path.join(local_dir, file_name+'.zip')
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(local_dir)
    os.remove(zip_file_path)
    # Verify the freshly downloaded file; a single retry is left to callers.
    if verified(file_path, name):
        return file_path
    else:
        raise ValueError('Downloaded file has different hash. Please try again.')
def purge(local_dir=os.path.expanduser('~/.mxnet/models/')):
    r"""Purge all pretrained model files in local file store.

    Parameters
    ----------
    local_dir : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    for entry in os.listdir(local_dir):
        # Only parameter files are managed by the model store; anything
        # else found in the directory is left untouched.
        if entry.endswith(".params"):
            os.remove(os.path.join(local_dir, entry))
| apache-2.0 |
AnhellO/DAS_Sistemas | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py | 488 | 1320 | import logging
from pip._vendor import requests
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.cache import DictCache
from pip._vendor.cachecontrol.controller import logger
from argparse import ArgumentParser
def setup_logging():
    """Enable DEBUG-level cache-controller logging on stderr."""
    stream_handler = logging.StreamHandler()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
def get_session():
    """Build a requests Session whose HTTP(S) traffic is cached in memory."""
    adapter = CacheControlAdapter(
        DictCache(), cache_etags=True, serializer=None, heuristic=None)
    session = requests.Session()
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    # Expose the controller so callers can drive the cache directly.
    session.cache_controller = adapter.controller
    return session
def get_args():
    """Parse the command line: a single positional URL argument."""
    parser = ArgumentParser()
    parser.add_argument('url', help='The URL to try and cache')
    parsed = parser.parse_args()
    return parsed
def main(args=None):
    """Fetch a URL, store the response in the cache, then verify that a
    subsequent lookup would be served from the cache.

    Parameters
    ----------
    args : argparse.Namespace, optional
        Pre-parsed arguments with a ``url`` attribute; when None the
        command line is parsed.
    """
    # Bug fix: the original ignored a caller-supplied ``args`` and always
    # re-parsed sys.argv; only parse when nothing was passed in.
    if args is None:
        args = get_args()
    sess = get_session()
    # Make a request to get a response
    resp = sess.get(args.url)
    # Turn on logging
    setup_logging()
    # try setting the cache
    sess.cache_controller.cache_response(resp.request, resp.raw)
    # Now try to get it
    if sess.cache_controller.cached_request(resp.request):
        print('Cached!')
    else:
        print('Not cached :(')
| mit |
agiliq/django | django/core/management/commands/startproject.py | 503 | 1359 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.utils.crypto import get_random_string
class Command(TemplateCommand):
    """Management command that scaffolds a new Django project."""

    help = ("Creates a Django project directory structure for the given "
            "project name in the current directory or optionally in the "
            "given directory.")
    missing_args_message = "You must provide a project name."

    def handle(self, **options):
        project_name = options.pop('name')
        target = options.pop('directory')
        self.validate_name(project_name, "project")
        # The project name must not shadow an importable module, otherwise
        # the generated project could never import itself correctly.
        try:
            import_module(project_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as a "
                               "project name. Please try another name." %
                               project_name)
        # Seed the settings template with a random SECRET_KEY.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        options['secret_key'] = get_random_string(50, chars)
        super(Command, self).handle('project', project_name, target, **options)
| bsd-3-clause |
srimanthd/tweepy | tweepy/utils.py | 64 | 1272 | # Tweepy
# Copyright 2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
from datetime import datetime
import six
from six.moves.urllib.parse import quote
from email.utils import parsedate
def parse_datetime(string):
    """Convert an RFC 2822 date string into a naive datetime."""
    time_tuple = parsedate(string)
    return datetime(*time_tuple[:6])
def parse_html_value(html):
    """Return the text between the first '>' and the last '<' of html."""
    start = html.find('>') + 1
    end = html.rfind('<')
    return html[start:end]
def parse_a_href(atag):
    """Extract the href value (first double-quoted string) from an <a> tag."""
    first_quote = atag.find('"') + 1
    closing_quote = atag.find('"', first_quote)
    return atag[first_quote:closing_quote]
def convert_to_utf8_str(arg):
    """Return ``arg`` as UTF-8 encoded bytes, stringifying non-text input."""
    # written by Michael Norton (http://docondev.blogspot.com/)
    if isinstance(arg, bytes):
        # Already encoded -- pass straight through.
        return arg
    if isinstance(arg, six.text_type):
        return arg.encode('utf-8')
    return six.text_type(arg).encode('utf-8')
def import_simplejson():
    """Return the best available JSON module.

    Preference order: simplejson, the stdlib json (Python 2.6+), and
    finally Django's bundled simplejson (Google App Engine).
    """
    try:
        import simplejson as json
    except ImportError:
        try:
            import json
        except ImportError:
            try:
                from django.utils import simplejson as json
            except ImportError:
                raise ImportError("Can't load a json library")
    return json
def list_to_csv(item_list):
    """Join items with commas; empty or falsy input yields None."""
    if not item_list:
        return None
    return ','.join(str(item) for item in item_list)
| mit |
jpurma/Kataja | kataja/ui_support/ErrorDialog.py | 1 | 2175 | # coding=utf-8
from PyQt5 import QtWidgets
from kataja.ui_support.panel_utils import box_row
class ErrorDialog(QtWidgets.QDialog):
    """ Dialog to show an error in operation """
    def __init__(self, parent, retry=True):
        """Build a modal dialog with the error message on the left, the
        traceback on the right, and retry/disable buttons at the bottom.
        Accepting the dialog means 'retry'; rejecting means 'disable the
        plugin and continue'."""
        # noinspection PyArgumentList
        QtWidgets.QDialog.__init__(self, parent)
        self.setModal(True)
        self.message = 'Apologies, but there was a plugin that failed to register:'
        self.error_text = ''
        self.error_traceback_text = ''
        layout = QtWidgets.QVBoxLayout()
        # box_row appends a horizontal row layout to the vertical layout.
        hlayout = box_row(layout)
        # noinspection PyArgumentList
        self.message_widget = QtWidgets.QWidget(self)
        self.traceback_widget = QtWidgets.QTextBrowser(self)
        self.traceback_widget.setMinimumWidth(300)
        self.setWindowTitle(self.message)
        hlayout.addWidget(self.message_widget)
        hlayout.addWidget(self.traceback_widget)
        mlayout = QtWidgets.QVBoxLayout()
        # self.message_header_label = QtWidgets.QLabel('', self.message_widget)
        self.message_error_label = QtWidgets.QLabel(self.error_text, self.message_widget)
        # mlayout.addWidget(self.message_header_label)
        # noinspection PyArgumentList
        mlayout.addWidget(self.message_error_label)
        self.message_widget.setLayout(mlayout)
        # Second row: the action buttons.
        hlayout = box_row(layout)
        self.retry_button = None
        if retry:
            # Retry maps onto the dialog's accept() slot.
            self.retry_button = QtWidgets.QPushButton(self)
            self.retry_button.setText('Try again, I fixed it')
            self.retry_button.clicked.connect(self.accept)
            hlayout.addWidget(self.retry_button)
        # Default action: disable the plugin, mapped onto reject().
        self.pass_button = QtWidgets.QPushButton(self)
        self.pass_button.setText('Disable plugin and continue')
        self.pass_button.setDefault(True)
        self.pass_button.clicked.connect(self.reject)
        hlayout.addWidget(self.pass_button)
        self.setLayout(layout)
    def set_traceback(self, text):
        """Show the formatted traceback in the right-hand pane."""
        self.traceback_widget.setText(text)
    # def set_message(self, text):
    #     self.message_header_label.setText(text)
    def set_error(self, text):
        """Show the short error description in the left-hand pane."""
        self.message_error_label.setText(text)
| gpl-3.0 |
rnoldo/django-avatar | storages/backends/ftp.py | 13 | 8364 | # FTP storage class for Django pluggable storage system.
# Author: Rafal Jonca <jonca.rafal@gmail.com>
# License: MIT
# Comes from http://www.djangosnippets.org/snippets/1269/
#
# Usage:
#
# Add below to settings.py:
# FTP_STORAGE_LOCATION = '[a]ftp://<user>:<pass>@<host>:<port>/[path]'
#
# In models.py you can write:
# from FTPStorage import FTPStorage
# fs = FTPStorage()
# class FTPTest(models.Model):
# file = models.FileField(upload_to='a/b/c/', storage=fs)
import os
import ftplib
import urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
class FTPStorageException(Exception): pass
class FTPStorage(Storage):
    """FTP Storage class for Django pluggable storage system."""
    def __init__(self, location=settings.FTP_STORAGE_LOCATION, base_url=settings.MEDIA_URL):
        # Parsed connection settings; the FTP connection itself is opened
        # lazily on first use (see _start_connection).
        self._config = self._decode_location(location)
        self._base_url = base_url
        self._connection = None
    def _decode_location(self, location):
        """Return splitted configuration data from location."""
        splitted_url = urlparse.urlparse(location)
        config = {}
        if splitted_url.scheme not in ('ftp', 'aftp'):
            raise ImproperlyConfigured('FTPStorage works only with FTP protocol!')
        # NOTE(review): urlparse yields hostname None (not '') when the host
        # is absent, so this guard may never trigger -- confirm.
        if splitted_url.hostname == '':
            raise ImproperlyConfigured('You must at least provide hostname!')
        # The 'aftp' scheme selects active-mode FTP; plain 'ftp' is passive.
        if splitted_url.scheme == 'aftp':
            config['active'] = True
        else:
            config['active'] = False
        config['path'] = splitted_url.path
        config['host'] = splitted_url.hostname
        config['user'] = splitted_url.username
        config['passwd'] = splitted_url.password
        # NOTE(review): raises TypeError when the URL omits the port
        # (port is None) -- presumably a default of 21 was intended; verify.
        config['port'] = int(splitted_url.port)
        return config
    def _start_connection(self):
        # Check if connection is still alive and if not, drop it.
        if self._connection is not None:
            try:
                self._connection.pwd()
            except ftplib.all_errors, e:
                self._connection = None
        # Real reconnect
        if self._connection is None:
            ftp = ftplib.FTP()
            try:
                ftp.connect(self._config['host'], self._config['port'])
                ftp.login(self._config['user'], self._config['passwd'])
                if self._config['active']:
                    ftp.set_pasv(False)
                if self._config['path'] != '':
                    ftp.cwd(self._config['path'])
                self._connection = ftp
                return
            except ftplib.all_errors, e:
                raise FTPStorageException('Connection or login error using data %s' % repr(self._config))
    def disconnect(self):
        """Close the cached FTP connection; it is reopened on next use."""
        self._connection.quit()
        self._connection = None
    def _mkremdirs(self, path):
        """Create the remote directory chain for path (like mkdir -p)."""
        pwd = self._connection.pwd()
        path_splitted = path.split('/')
        for path_part in path_splitted:
            try:
                self._connection.cwd(path_part)
            # NOTE(review): bare except -- presumably meant to catch
            # ftplib.all_errors like the inner handler; confirm.
            except:
                try:
                    self._connection.mkd(path_part)
                    self._connection.cwd(path_part)
                except ftplib.all_errors, e:
                    raise FTPStorageException('Cannot create directory chain %s' % path)
        self._connection.cwd(pwd)
        return
    def _put_file(self, name, content):
        # Connection must be open!
        # ``content`` must be a File-like object exposing .file and
        # .DEFAULT_CHUNK_SIZE (django.core.files.base.File).
        try:
            self._mkremdirs(os.path.dirname(name))
            pwd = self._connection.pwd()
            self._connection.cwd(os.path.dirname(name))
            self._connection.storbinary('STOR ' + os.path.basename(name), content.file, content.DEFAULT_CHUNK_SIZE)
            self._connection.cwd(pwd)
        except ftplib.all_errors, e:
            raise FTPStorageException('Error writing file %s' % name)
    def _open(self, name, mode='rb'):
        # Reads are deferred: FTPStorageFile fetches content on first read.
        remote_file = FTPStorageFile(name, self, mode=mode)
        return remote_file
    def _read(self, name):
        """Fetch the remote file into an in-memory buffer and return it."""
        memory_file = StringIO()
        try:
            pwd = self._connection.pwd()
            self._connection.cwd(os.path.dirname(name))
            self._connection.retrbinary('RETR ' + os.path.basename(name), memory_file.write)
            self._connection.cwd(pwd)
            return memory_file
        except ftplib.all_errors, e:
            raise FTPStorageException('Error reading file %s' % name)
    def _save(self, name, content):
        content.open()
        self._start_connection()
        self._put_file(name, content)
        content.close()
        return name
    def _get_dir_details(self, path):
        # Connection must be open!
        # Parses Unix-style LIST output; returns ({dir: 0}, {file: size}).
        try:
            lines = []
            self._connection.retrlines('LIST '+path, lines.append)
            dirs = {}
            files = {}
            for line in lines:
                words = line.split()
                if len(words) < 6:
                    continue
                # Skip symlink entries ("name -> target").
                if words[-2] == '->':
                    continue
                if words[0][0] == 'd':
                    dirs[words[-1]] = 0;
                elif words[0][0] == '-':
                    files[words[-1]] = int(words[-5]);
            return dirs, files
        except ftplib.all_errors, msg:
            raise FTPStorageException('Error getting listing for %s' % path)
    def listdir(self, path):
        self._start_connection()
        try:
            dirs, files = self._get_dir_details(path)
            return dirs.keys(), files.keys()
        except FTPStorageException, e:
            raise
    def delete(self, name):
        # Deleting a nonexistent file is a silent no-op.
        if not self.exists(name):
            return
        self._start_connection()
        try:
            self._connection.delete(name)
        except ftplib.all_errors, e:
            raise FTPStorageException('Error when removing %s' % name)
    def exists(self, name):
        self._start_connection()
        try:
            if os.path.basename(name) in self._connection.nlst(os.path.dirname(name) + '/'):
                return True
            else:
                return False
        # Temporary and permission errors are treated as "does not exist".
        except ftplib.error_temp, e:
            return False
        except ftplib.error_perm, e:
            # error_perm: 550 Can't find file
            return False
        except ftplib.all_errors, e:
            raise FTPStorageException('Error when testing existence of %s' % name)
    def size(self, name):
        # Returns 0 when the file is missing or the listing fails.
        self._start_connection()
        try:
            dirs, files = self._get_dir_details(os.path.dirname(name))
            if os.path.basename(name) in files:
                return files[os.path.basename(name)]
            else:
                return 0
        except FTPStorageException, e:
            return 0
    def url(self, name):
        if self._base_url is None:
            raise ValueError("This file is not accessible via a URL.")
        return urlparse.urljoin(self._base_url, name).replace('\\', '/')
class FTPStorageFile(File):
    """Lazy file wrapper for FTPStorage.

    Content is fetched from the server on first read and buffered in
    memory; writes replace the buffer and are flushed back on close().
    """
    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        self._is_read = False

    @property
    def size(self):
        # Cache the remote size after the first lookup.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self._name)
        return self._size

    def read(self, num_bytes=None):
        if not self._is_read:
            self._storage._start_connection()
            self.file = self._storage._read(self._name)
            # Bug fix: FTPStorage has no _end_connection() method; the
            # original raised AttributeError here. disconnect() is the
            # storage's actual teardown method.
            self._storage.disconnect()
            self._is_read = True
        return self.file.read(num_bytes)

    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True
        self._is_read = True

    def close(self):
        if self._is_dirty:
            self._storage._start_connection()
            # Bug fix: _put_file() expects a File-like object exposing
            # .file and .DEFAULT_CHUNK_SIZE, but the original passed a raw
            # string (self.file.getvalue()). Rewind the buffer and hand it
            # over wrapped in a django File instead.
            self.file.seek(0)
            self._storage._put_file(self._name, File(self.file))
            self._storage.disconnect()
        self.file.close()
eldilibra/mudsling | include/dlib-18.9/python_examples/sequence_segmenter.py | 3 | 8227 | #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This example shows how to use dlib to learn to do sequence segmentation. In a sequence
# segmentation task we are given a sequence of objects (e.g. words in a sentence) and we
# are supposed to detect certain subsequences (e.g. the names of people). Therefore, in
# the code below we create some very simple training sequences and use them to learn a
# sequence segmentation model. In particular, our sequences will be sentences represented
# as arrays of words and our task will be to learn to identify person names. Once we have
# our segmentation model we can use it to find names in new sentences, as we will show.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
import dlib
import sys
# The sequence segmentation models we work with in this example are chain structured
# conditional random field style models. Therefore, central to a sequence segmentation
# model is some method for converting the elements of a sequence into feature vectors.
# That is, while you might start out representing your sequence as an array of strings, the
# dlib interface works in terms of arrays of feature vectors. Each feature vector should
# capture important information about its corresponding element in the original raw
# sequence. So in this example, since we work with sequences of words and want to identify
# names, we will create feature vectors that tell us if the word is capitalized or not. In
# our simple data, this will be enough to identify names. Therefore, we define
# sentence_to_vectors() which takes a sentence represented as a string and converts it into
# an array of words and then associates a feature vector with each word.
def sentence_to_vectors(sentence):
    """Convert a sentence into an array of 1-D dlib feature vectors.

    Each word maps to a single feature: 1 if the word starts with a
    capital letter, 0 otherwise.
    """
    vects = dlib.vectors()
    for word in sentence.split():
        feature = 1 if word[0].isupper() else 0
        vects.append(dlib.vector([feature]))
    return vects
# Dlib also supports the use of a sparse vector representation. This is more efficient
# than the above form when you have very high dimensional vectors that are mostly full of
# zeros. In dlib, each sparse vector is represented as an array of pair objects. Each
# pair contains an index and value. Any index not listed in the vector is implicitly
# associated with a value of zero. Additionally, when using sparse vectors with
# dlib.train_sequence_segmenter() you can use "unsorted" sparse vectors. This means you
# can add the index/value pairs into your sparse vectors in any order you want and don't
# need to worry about them being in sorted order.
def sentence_to_sparse_vectors(sentence):
    """Sparse-vector version of sentence_to_vectors().

    A sparse vector is an array of (index, value) pairs; any index not
    listed is implicitly zero, so index/value pairs may be unsorted.
    """
    vects = dlib.sparse_vectors()
    # Equivalent of dlib.vector([1]): feature 0 set to 1.
    has_cap = dlib.sparse_vector()
    has_cap.append(dlib.pair(0, 1))
    # An empty sparse vector is equivalent to dlib.vector([0]).
    no_cap = dlib.sparse_vector()
    for word in sentence.split():
        vects.append(has_cap if word[0].isupper() else no_cap)
    return vects
def print_segment(sentence, names):
    """Print, one line per segment, the words of *sentence* selected by *names*.

    *names* is an iterable of index ranges; each range's words are written
    space-separated on a single stdout line.
    """
    tokens = sentence.split()
    for segment in names:
        sys.stdout.write("".join(tokens[idx] + " " for idx in segment))
        sys.stdout.write("\n")
# Now let's make some training data. Each example is a sentence as well as a set of ranges
# which indicate the locations of any names.
names = dlib.ranges()     # make an array of dlib.range objects.
segments = dlib.rangess() # make an array of arrays of dlib.range objects.
sentences = []
sentences.append("The other day I saw a man named Jim Smith")
# We want to detect person names. So we note that the name is located within the
# range [8, 10). Note that we use half open ranges to identify segments. So in
# this case, the segment identifies the string "Jim Smith".
names.append(dlib.range(8, 10))
segments.append(names)
names.clear() # make names empty for use again below
sentences.append("Davis King is the main author of the dlib Library")
names.append(dlib.range(0, 2))
segments.append(names)
names.clear()
# A sentence may contain more than one name segment.
sentences.append("Bob Jones is a name and so is George Clinton")
names.append(dlib.range(0, 2))
names.append(dlib.range(8, 10))
segments.append(names)
names.clear()
sentences.append("My dog is named Bob Barker")
names.append(dlib.range(4, 6))
segments.append(names)
names.clear()
sentences.append("ABC is an acronym but John James Smith is a name")
names.append(dlib.range(5, 8))
segments.append(names)
names.clear()
# Negative example: appending the (empty) names array marks "no segments".
sentences.append("No names in this sentence at all")
segments.append(names)
names.clear()
# Now before we can pass these training sentences to the dlib tools we need to convert them
# into arrays of vectors as discussed above. We can use either a sparse or dense
# representation depending on our needs. In this example, we show how to do it both ways.
use_sparse_vects = False
if use_sparse_vects:
    # Make an array of arrays of dlib.sparse_vector objects.
    training_sequences = dlib.sparse_vectorss()
    for s in sentences:
        training_sequences.append(sentence_to_sparse_vectors(s))
else:
    # Make an array of arrays of dlib.vector objects.
    training_sequences = dlib.vectorss()
    for s in sentences:
        training_sequences.append(sentence_to_vectors(s))
# Now that we have a simple training set we can train a sequence segmenter. However, the
# sequence segmentation trainer has some optional parameters we can set. These parameters
# determine properties of the segmentation model we will learn. See the dlib documentation
# for the sequence_segmenter object for a full discussion of their meanings.
params = dlib.segmenter_params()
params.window_size = 3
params.use_high_order_features = True
params.use_BIO_model = True
# This is the common SVM C parameter. Larger values encourage the trainer to attempt to
# fit the data exactly but might overfit. In general, you determine this parameter by
# cross-validation.
params.C = 10
# Train a model. The model object is responsible for predicting the locations of names in
# new sentences.
model = dlib.train_sequence_segmenter(training_sequences, segments, params)
# Let's print out the things the model thinks are names. The output is a set of ranges
# which are predicted to contain names. If you run this example program you will see that
# it gets them all correct.
for i in range(len(sentences)):
    print_segment(sentences[i], model(training_sequences[i]))
# Let's also try segmenting a new sentence. This will print out "Bob Bucket". Note that we
# need to remember to use the same vector representation as we used during training.
test_sentence = "There once was a man from Nantucket whose name rhymed with Bob Bucket"
if use_sparse_vects:
    print_segment(test_sentence, model(sentence_to_sparse_vectors(test_sentence)))
else:
    print_segment(test_sentence, model(sentence_to_vectors(test_sentence)))
# We can also measure the accuracy of a model relative to some labeled data. This
# statement prints the precision, recall, and F1-score of the model relative to the data in
# training_sequences/segments.
# NOTE: Python 2 print-statement syntax below; this example predates Python 3 support.
print "Test on training data:", dlib.test_sequence_segmenter(model, training_sequences, segments)
# We can also do 5-fold cross-validation and print the resulting precision, recall, and F1-score.
print "cross validation:", dlib.cross_validate_sequence_segmenter(training_sequences, segments, 5, params)
| mit |
Venturi/cms | env/lib/python2.7/site-packages/phonenumbers/data/region_TV.py | 9 | 1417 | """Auto-generated file, do not edit by hand. TV metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone-number metadata for Tuvalu (region "TV", country calling code +688).
# NOTE(review): this module is auto-generated (see module docstring); regenerate
# it with the metadata build tools rather than editing by hand.
PHONE_METADATA_TV = PhoneMetadata(id='TV', country_code=688, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[29]\\d{4,5}', possible_number_pattern='\\d{5,6}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='2[02-9]\\d{3}', possible_number_pattern='\\d{5}', example_number='20123'),
    mobile=PhoneNumberDesc(national_number_pattern='90\\d{4}', possible_number_pattern='\\d{6}', example_number='901234'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'))
timduru/platform-external-chromium_org | tools/telemetry/telemetry/page/actions/scroll.py | 23 | 3186 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.page.actions import page_action
class ScrollAction(page_action.PageAction):
  """Scrolls the page (or a chosen element) via the injected scroll.js helper."""

  def __init__(self, attributes=None):
    super(ScrollAction, self).__init__(attributes)

  def WillRunAction(self, page, tab):
    """Inject scroll.js and create the JS-side __ScrollAction object."""
    scroll_js_path = os.path.join(os.path.dirname(__file__), 'scroll.js')
    with open(scroll_js_path) as f:
      tab.ExecuteJavaScript(f.read())

    # Fail if this action requires touch and we can't send touch events.
    if getattr(self, 'scroll_requires_touch', False):
      if not tab.EvaluateJavaScript(
          'chrome.gpuBenchmarking.smoothScrollBySendsTouch()'):
        raise page_action.PageActionNotSupported(
            'Touch scroll not supported for this browser')

    distance_func = getattr(self, 'remaining_scroll_distance_function', 'null')
    done_callback = 'function() { window.__scrollActionDone = true; }'
    tab.ExecuteJavaScript("""
        window.__scrollActionDone = false;
        window.__scrollAction = new __ScrollAction(%s, %s);"""
        % (done_callback, distance_func))

  def RunAction(self, page, tab, previous_action):
    # scrollable_element_function is a function that passes the scrollable
    # element on the page to a callback. For example:
    #   function (callback) {
    #     callback(document.getElementById('foo'));
    #   }
    left_pct = getattr(self, 'left_start_percentage', 0.5)
    top_pct = getattr(self, 'top_start_percentage', 0.5)
    element_function = getattr(self, 'scrollable_element_function', None)
    if element_function is not None:
      tab.ExecuteJavaScript("""
        (%s)(function(element) { window.__scrollAction.start(
           { element: element,
             left_start_percentage: %s,
             top_start_percentage: %s })
         });""" % (element_function, left_pct, top_pct))
    else:
      tab.ExecuteJavaScript("""
        window.__scrollAction.start(
        { element: document.body,
          left_start_percentage: %s,
          top_start_percentage: %s });"""
        % (left_pct, top_pct))

    # Poll for scroll action completion.
    util.WaitFor(lambda: tab.EvaluateJavaScript(
        'window.__scrollActionDone'), 60)

  def CanBeBound(self):
    return True

  def CustomizeBrowserOptions(self, options):
    options.AppendExtraBrowserArg('--enable-gpu-benchmarking')

  def BindMeasurementJavaScript(self, tab, start_js, stop_js):
    # Make the scroll action start and stop measurement automatically.
    tab.ExecuteJavaScript("""
        window.__scrollAction.beginMeasuringHook = function() { %s };
        window.__scrollAction.endMeasuringHook = function() { %s };
    """ % (start_js, stop_js))
| bsd-3-clause |
kymbert/behave | test/test_configuration.py | 10 | 6000 | from __future__ import absolute_import, with_statement
from unittest import TestCase
import os.path
import tempfile
from nose.tools import *
from behave import configuration
from behave.configuration import Configuration, UserData
# one entry of each kind handled
TEST_CONFIG='''[behave]
outfiles= /absolute/path1
relative/path2
paths = /absolute/path3
relative/path4
tags = @foo,~@bar
@zap
format=pretty
tag-counter
stdout_capture=no
bogus=spam
[behave.userdata]
foo = bar
answer = 42
'''
class TestConfiguration(object):
    """Tests for behave.configuration.read_configuration and Configuration."""

    def test_read_file(self):
        """A config file is parsed; relative paths resolve against its directory."""
        # mkstemp() actually creates the file (unlike the deprecated and
        # race-prone mktemp()); close the OS-level handle before reopening it
        # for text writing, and remove the file afterwards so the test does
        # not leak temp files.
        fd, tn = tempfile.mkstemp()
        os.close(fd)
        try:
            tndir = os.path.dirname(tn)
            with open(tn, 'w') as f:
                f.write(TEST_CONFIG)

            d = configuration.read_configuration(tn)
            eq_(d['outfiles'], [
                os.path.normpath('/absolute/path1'),
                os.path.normpath(os.path.join(tndir, 'relative/path2')),
            ])
            eq_(d['paths'], [
                os.path.normpath('/absolute/path3'),  # -- WINDOWS-REQUIRES: normpath
                os.path.normpath(os.path.join(tndir, 'relative/path4')),
            ])
            eq_(d['format'], ['pretty', 'tag-counter'])
            eq_(d['tags'], ['@foo,~@bar', '@zap'])
            eq_(d['stdout_capture'], False)
            # Unknown keys must be silently dropped.
            ok_('bogus' not in d)
            eq_(d['userdata'], {'foo': 'bar', 'answer': '42'})
        finally:
            os.remove(tn)

    def ensure_stage_environment_is_not_set(self):
        # Helper (not a test): make the stage tests independent of the
        # caller's environment.
        if "BEHAVE_STAGE" in os.environ:
            del os.environ["BEHAVE_STAGE"]

    def test_settings_without_stage(self):
        # -- OR: Setup with default, unnamed stage.
        self.ensure_stage_environment_is_not_set()
        assert "BEHAVE_STAGE" not in os.environ
        config = Configuration()
        eq_("steps", config.steps_dir)
        eq_("environment.py", config.environment_file)

    def test_settings_with_stage(self):
        config = Configuration(["--stage=STAGE1"])
        eq_("STAGE1_steps", config.steps_dir)
        eq_("STAGE1_environment.py", config.environment_file)

    def test_settings_with_stage_and_envvar(self):
        """The explicit --stage option overrides the BEHAVE_STAGE variable."""
        os.environ["BEHAVE_STAGE"] = "STAGE2"
        try:
            config = Configuration(["--stage=STAGE1"])
            eq_("STAGE1_steps", config.steps_dir)
            eq_("STAGE1_environment.py", config.environment_file)
        finally:
            # Clean up even when an assertion fails, so later tests are not
            # polluted by a leftover BEHAVE_STAGE.
            del os.environ["BEHAVE_STAGE"]

    def test_settings_with_stage_from_envvar(self):
        os.environ["BEHAVE_STAGE"] = "STAGE2"
        try:
            config = Configuration()
            eq_("STAGE2_steps", config.steps_dir)
            eq_("STAGE2_environment.py", config.environment_file)
        finally:
            del os.environ["BEHAVE_STAGE"]
class TestConfigurationUserData(TestCase):
    """Userdata (-D/--define) handling in behave.configuration.Configuration."""

    def test_cmdline_defines(self):
        cfg = Configuration([
            "-D", "foo=foo_value",
            "--define=bar=bar_value",
            "--define", "baz=BAZ_VALUE",
        ])
        eq_("foo_value", cfg.userdata["foo"])
        eq_("bar_value", cfg.userdata["bar"])
        eq_("BAZ_VALUE", cfg.userdata["baz"])

    def test_cmdline_defines_override_configfile(self):
        initial_userdata = {"foo": "XXX", "bar": "ZZZ", "baz": 42}
        cfg = Configuration(
            "-D foo=foo_value --define bar=123",
            load_config=False, userdata=initial_userdata)
        eq_("foo_value", cfg.userdata["foo"])
        eq_("123", cfg.userdata["bar"])
        # Entries not redefined on the command line survive untouched.
        eq_(42, cfg.userdata["baz"])

    def test_cmdline_defines_without_value_are_true(self):
        cfg = Configuration("-D foo --define bar -Dbaz")
        for name in ("foo", "bar", "baz"):
            eq_("true", cfg.userdata[name])
        eq_(True, cfg.userdata.getbool("foo"))

    def test_cmdline_defines_with_empty_value(self):
        cfg = Configuration("-D foo=")
        eq_("", cfg.userdata["foo"])

    def test_cmdline_defines_with_assign_character_as_value(self):
        # Only the first "=" splits name from value.
        cfg = Configuration("-D foo=bar=baz")
        eq_("bar=baz", cfg.userdata["foo"])

    def test_cmdline_defines__with_quoted_name_value_pair(self):
        # The whole name=value pair quoted, with both quote styles.
        for argv in ['-D "person=Alice and Bob"', "-D 'person=Alice and Bob'"]:
            cfg = Configuration(argv, load_config=False)
            eq_(cfg.userdata, dict(person="Alice and Bob"))

    def test_cmdline_defines__with_quoted_value(self):
        for argv in ['-D person="Alice and Bob"', "-D person='Alice and Bob'"]:
            cfg = Configuration(argv, load_config=False)
            eq_(cfg.userdata, dict(person="Alice and Bob"))

    def test_setup_userdata(self):
        cfg = Configuration("", load_config=False)
        cfg.userdata = dict(person1="Alice", person2="Bob")
        cfg.userdata_defines = [("person2", "Charly")]
        cfg.setup_userdata()
        eq_(cfg.userdata, dict(person1="Alice", person2="Charly"))

    def test_update_userdata__with_cmdline_defines(self):
        # -- NOTE: cmdline defines are reapplied after the update.
        cfg = Configuration("-D person2=Bea", load_config=False)
        cfg.userdata = UserData(person1="AAA", person3="Charly")
        cfg.update_userdata(dict(person1="Alice", person2="Bob"))
        eq_(cfg.userdata,
            dict(person1="Alice", person2="Bea", person3="Charly"))
        eq_(cfg.userdata_defines, [("person2", "Bea")])

    def test_update_userdata__without_cmdline_defines(self):
        cfg = Configuration("", load_config=False)
        cfg.userdata = UserData(person1="AAA", person3="Charly")
        cfg.update_userdata(dict(person1="Alice", person2="Bob"))
        eq_(cfg.userdata,
            dict(person1="Alice", person2="Bob", person3="Charly"))
        self.assertFalse(cfg.userdata_defines)
| bsd-2-clause |
rhdedgar/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.0/roles/openshift_health_checker/test/openshift_check_test.py | 49 | 4070 | import pytest
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks import load_checks
# Fixtures
@pytest.fixture()
def task_vars():
    """Baseline Ansible task_vars used by the get_var tests below."""
    return {"foo": 42, "bar": {"baz": "openshift"}}
@pytest.fixture(params=[
    ("notfound",),
    ("multiple", "keys", "not", "in", "task_vars"),
])
def missing_keys(request):
    """Key tuples that do not resolve inside the task_vars fixture."""
    return request.param
# Tests
def test_OpenShiftCheck_init():
    """Constructor wiring: execute_module may be passed positionally or by keyword."""
    class TestCheck(OpenShiftCheck):
        name = "test_check"
        run = NotImplemented

    # Using execute_module without having supplied one at init must fail loudly.
    with pytest.raises(RuntimeError) as err:
        TestCheck().execute_module("foo")
    assert 'execute_module' in str(err.value)

    execute_module = object()

    # initialize with positional argument
    assert TestCheck(execute_module)._execute_module is execute_module

    # initialize with keyword argument
    check = TestCheck(execute_module=execute_module)
    assert check._execute_module is execute_module
    assert check.task_vars == {}
    assert check.tmp is None
def test_subclasses():
    """OpenShiftCheck.subclasses should find all subclasses recursively."""
    class TestCheck1(OpenShiftCheck):
        pass

    class TestCheck2(OpenShiftCheck):
        pass

    class TestCheck1A(TestCheck1):
        pass

    local_subclasses = {TestCheck1, TestCheck1A, TestCheck2}
    known_subclasses = set(OpenShiftCheck.subclasses())
    assert local_subclasses <= known_subclasses, "local_subclasses should be a subset of known_subclasses"
def test_load_checks():
    """Loading checks should load and return a non-empty list of Python modules."""
    assert load_checks()
def dummy_check(task_vars):
    """Return a minimal OpenShiftCheck instance bound to *task_vars*."""
    class _DummyCheck(OpenShiftCheck):
        name = "dummy"
        run = NotImplemented

    return _DummyCheck(task_vars=task_vars)
@pytest.mark.parametrize("keys,expected", [
    (("foo",), 42),
    (("bar", "baz"), "openshift"),
    (("bar.baz",), "openshift"),
])
def test_get_var_ok(task_vars, keys, expected):
    """get_var resolves nested keys given either separately or dotted."""
    value = dummy_check(task_vars).get_var(*keys)
    assert value == expected
def test_get_var_error(task_vars, missing_keys):
    """Missing keys without a default raise OpenShiftCheckException."""
    check = dummy_check(task_vars)
    with pytest.raises(OpenShiftCheckException):
        check.get_var(*missing_keys)
def test_get_var_default(task_vars, missing_keys):
    """A caller-supplied default is returned untouched when keys are missing."""
    sentinel = object()
    assert dummy_check(task_vars).get_var(*missing_keys, default=sentinel) is sentinel
@pytest.mark.parametrize("keys, convert, expected", [
    (("foo",), str, "42"),
    (("foo",), float, 42.0),
    (("bar", "baz"), bool, False),
])
def test_get_var_convert(task_vars, keys, convert, expected):
    """The convert callable is applied to the resolved value."""
    value = dummy_check(task_vars).get_var(*keys, convert=convert)
    assert value == expected
def convert_oscexc(_):
    # Converter that fails with a check-specific exception; get_var must
    # surface its message ("known failure") verbatim (see
    # test_get_var_convert_error).
    raise OpenShiftCheckException("known failure")
def convert_exc(_):
    # Converter that fails with a generic Exception; get_var must wrap it in
    # an OpenShiftCheckException carrying this message.
    raise Exception("failure unknown")
@pytest.mark.parametrize("keys, convert, expect_text", [
    (("bar", "baz"), int, "Cannot convert"),
    (("bar.baz",), float, "Cannot convert"),
    (("foo",), "bogus", "TypeError"),
    (("foo",), lambda a, b: 1, "TypeError"),
    (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
    (("foo",), convert_oscexc, "known failure"),
    (("foo",), convert_exc, "failure unknown"),
])
def test_get_var_convert_error(task_vars, keys, convert, expect_text):
    """Conversion failures surface as OpenShiftCheckException with context."""
    check = dummy_check(task_vars)
    with pytest.raises(OpenShiftCheckException) as err:
        check.get_var(*keys, convert=convert)
    assert expect_text in str(err.value)
def test_register(task_vars):
    """Failure/file registration bookkeeping on a check instance."""
    chk = dummy_check(task_vars)

    chk.register_failure(OpenShiftCheckException("spam"))
    assert "spam" in str(chk.failures[0])

    # Registering a file without contents is an error.
    with pytest.raises(OpenShiftCheckException) as err:
        chk.register_file("spam")  # no file contents specified
    assert "not specified" in str(err.value)

    # normally execute_module registers the result file; test disabling that
    chk._execute_module = lambda *args, **_: dict()
    chk.execute_module("eggs", module_args={}, register=False)
    assert not chk.files_to_save
| apache-2.0 |
Chermnyx/dotfiles | static/py/WallSize/wallpaper-clean.py | 1 | 2483 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# requests python-pillow
import sys
import os
import pathlib
import shutil
import mimetypes
from PIL import Image as img
# Module-level defaults for the progress printer.
inf = float('inf')
pref = 'Прогресс:'


def printProgress(iteration, total, prefix=pref, end=False):
    """Render a carriage-return progress line; also emit a final blank line when *end*."""
    line = f'{prefix} {iteration}/{total}'
    print(line, end='\r')
    if end:
        print('\n')
def cleanpath(l1, pwd):
    """Return the entries of *l1* (file names inside directory *pwd*) that
    look like image files, judged by their MIME type.

    The input list is not modified and relative order is preserved.  Entries
    whose MIME type cannot be guessed (no extension, odd names) are dropped,
    matching the original bare-except behaviour -- but without swallowing
    KeyboardInterrupt/SystemExit, and without the quadratic pop-while-scanning
    loop.
    """
    def _is_image(name):
        try:
            mime = mimetypes.guess_type(pathlib.Path(pwd + '/' + name).as_uri())[0]
        except (AttributeError, ValueError, OSError):
            return False
        # guess_type returns None for unknown types; only keep "image/*".
        return mime is not None and mime.split('/')[0] == 'image'

    return [name for name in l1 if _is_image(name)]
def process(inDir, outDir, x, y, remove=False):
    """Copy images from inDir to outDir whose dimensions are at least x by y.

    Returns a list of "name WxH" strings for images that were too small.
    When remove is True, every image already present in outDir is deleted
    first.  NOTE(review): chdir's into inDir as a side effect -- callers
    relying on the working directory should be aware.
    """
    # Only consider files whose MIME type says "image".
    inlist = cleanpath(os.listdir(inDir), inDir)
    outlist = cleanpath(os.listdir(outDir), outDir)
    badimg = []
    prmax = len(inlist)
    if remove:
        if len(outlist) > 0:
            for i in outlist:
                try:
                    os.remove(outDir + '/' + i)
                except:
                    # Best-effort delete; ignore files that vanished or are locked.
                    pass
            print('Мусор удалён')
        else:
            print('Нет мусора')
        # outDir is now (treated as) empty, so nothing is skipped below.
        outlist = []
    os.chdir(inDir)
    for j in range(len(inlist)):
        i = inlist[j]
        if not(i in outlist):
            # Open just long enough to read the dimensions.
            with img.open(i) as im:
                X, Y = im.width, im.height
            if X < x or Y < y:
                badimg.append(i + ' ' + str(X) + 'x' + str(Y))
            else:
                shutil.copy(i, outDir + '/' + i)
        printProgress(j + 1, prmax)
    printProgress(prmax, prmax, end=True)
    return badimg
if __name__ == '__main__':
    # Accepted truthy spellings for the optional "remove old files" flag.
    vals = ['yes', 'true', '!', '+']
    try:
        # argv: <source dir> <target dir> <min width> <min height>
        inDir, outDir, x, y = sys.argv[1], sys.argv[
            2], int(sys.argv[3]), int(sys.argv[4])
    except:
        # Wrong/missing arguments: print (Russian) usage text and exit.
        print(
            'Использование: {0} <Исходный каталог> <Конечный каталог> <Ширина> <Высота> [Удалить старые файлы ({1})]'.format(
                sys.argv[0], '|'.join(vals)
            ),
            file=sys.stderr
        )
        sys.exit(1)
    try:
        # Optional 5th argument toggles removal of existing output files.
        rem = sys.argv[5]
        r = rem.lower() in vals
    except:
        r = False
    badimg = process(inDir, outDir, x, y, remove=r)
    if len(badimg) > 0:
        # Report images that were skipped for being too small.
        print('Следующие изображения удалены:', *badimg, sep='\n')
| gpl-3.0 |
dash-dash/pyzmq | zmq/backend/__init__.py | 27 | 1285 | """Import basic exposure of libzmq C API as a backend"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import platform
import sys
from zmq.utils.sixcerpt import reraise
from .select import public_api, select_backend
# Allow forcing a specific backend through the PYZMQ_BACKEND environment
# variable; short names 'cython'/'cffi' are expanded to full module paths.
if 'PYZMQ_BACKEND' in os.environ:
    backend = os.environ['PYZMQ_BACKEND']
    if backend in ('cython', 'cffi'):
        backend = 'zmq.backend.%s' % backend
    _ns = select_backend(backend)
else:
    # default to cython, fallback to cffi
    # (reverse on PyPy)
    if platform.python_implementation() == 'PyPy':
        first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
    else:
        first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
    try:
        _ns = select_backend(first)
    except Exception:
        # Remember the preferred backend's failure before trying the fallback.
        exc_info = sys.exc_info()
        exc = exc_info[1]
        try:
            _ns = select_backend(second)
        except ImportError:
            # prevent 'During handling of the above exception...' on py3
            # can't use `raise ... from` on Python 2
            if hasattr(exc, '__cause__'):
                exc.__cause__ = None
            # raise the *first* error, not the fallback
            reraise(*exc_info)
# Re-export the selected backend's public names at this module's top level.
globals().update(_ns)
__all__ = public_api
switchkiller/ProjDjanko | lib/python2.7/site-packages/django/core/signing.py | 149 | 6814 | """
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
u'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
from __future__ import unicode_literals
import base64
import datetime
import json
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
class BadSignature(Exception):
    """
    Signature does not match: the signed value is malformed or was
    tampered with and must not be trusted.
    """
    pass


class SignatureExpired(BadSignature):
    """
    Signature timestamp is older than the required max_age.  Subclasses
    BadSignature so callers catching BadSignature also reject expired values.
    """
    pass
def b64_encode(s):
    """URL-safe base64-encode bytes *s*, dropping the trailing '=' padding."""
    encoded = base64.urlsafe_b64encode(s)
    return encoded.strip(b'=')
def b64_decode(s):
    """Decode URL-safe base64 bytes *s*, restoring any stripped '=' padding."""
    padding = b'=' * (-len(s) % 4)
    return base64.urlsafe_b64decode(s + padding)
def base64_hmac(salt, value, key):
    """Return the stripped URL-safe base64 HMAC digest of *value* (salted/keyed)."""
    digest = salted_hmac(salt, value, key).digest()
    return b64_encode(digest)
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
    """Return a signer instance (class per settings.SIGNING_BACKEND) for cookies."""
    signer_class = import_string(settings.SIGNING_BACKEND)
    secret = force_bytes(settings.SECRET_KEY)
    return signer_class(b'django.http.cookies' + secret, salt=salt)
class JSONSerializer(object):
    """
    Thin json adapter used by signing.dumps and signing.loads.

    Serializes to compact latin-1 bytes (no whitespace after separators)
    and deserializes the same encoding.
    """
    def dumps(self, obj):
        compact = json.dumps(obj, separators=(',', ':'))
        return compact.encode('latin-1')

    def loads(self, data):
        return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
    """
    Return a URL-safe, sha1-signed, base64'd (optionally zlib-compressed)
    string for *obj*.  With key=None, settings.SECRET_KEY is used.

    When compress=True the zlib-compressed form is used only if it is
    actually shorter; a leading '.' marks compression and is covered by the
    signature, protecting against zip bombs.

    The salt namespaces the hash: a signed string is only valid for the
    namespace it was created in.  The serializer must return a bytestring.
    """
    payload = serializer().dumps(obj)

    is_compressed = False
    if compress:
        # Only touch zlib when compression was actually requested.
        candidate = zlib.compress(payload)
        if len(candidate) < (len(payload) - 1):
            payload = candidate
            is_compressed = True

    base64d = b64_encode(payload)
    if is_compressed:
        base64d = b'.' + base64d
    return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
    """
    Reverse of dumps(); raises BadSignature when the signature fails.

    The serializer must accept a bytestring.  TimestampSigner.unsign always
    returns unicode, but base64 and zlib operate on bytes, hence force_bytes.
    """
    base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
    is_compressed = base64d.startswith(b'.')
    if is_compressed:
        # Leading '.' flags zlib-compressed payloads.
        base64d = base64d[1:]
    payload = b64_decode(base64d)
    if is_compressed:
        payload = zlib.decompress(payload)
    return serializer().loads(payload)
class Signer(object):
    """Signs and verifies string values with an HMAC-SHA1 signature."""

    def __init__(self, key=None, sep=':', salt=None):
        # Use of native strings in all versions of Python.
        self.sep = force_str(sep)
        self.key = key or settings.SECRET_KEY
        default_salt = '%s.%s' % (self.__class__.__module__,
                                  self.__class__.__name__)
        self.salt = force_str(salt or default_salt)

    def signature(self, value):
        """Return the signature for *value* as a native str."""
        sig = base64_hmac(self.salt + 'signer', value, self.key)
        # The digest is bytes on Python 3; normalize either way.
        return force_str(sig)

    def sign(self, value):
        """Return '<value><sep><signature>'."""
        value = force_str(value)
        return str('%s%s%s') % (value, self.sep, self.signature(value))

    def unsign(self, signed_value):
        """Verify and strip the signature, or raise BadSignature."""
        signed_value = force_str(signed_value)
        if self.sep not in signed_value:
            raise BadSignature('No "%s" found in value' % self.sep)
        value, sig = signed_value.rsplit(self.sep, 1)
        # Constant-time comparison guards against timing attacks.
        if not constant_time_compare(sig, self.signature(value)):
            raise BadSignature('Signature "%s" does not match' % sig)
        return force_text(value)
class TimestampSigner(Signer):
    """Signer that appends a base62 timestamp, enabling max_age expiry checks."""

    def timestamp(self):
        """Current Unix time, base62-encoded."""
        return baseconv.base62.encode(int(time.time()))

    def sign(self, value):
        value = force_str(value)
        stamped = str('%s%s%s') % (value, self.sep, self.timestamp())
        return super(TimestampSigner, self).sign(stamped)

    def unsign(self, value, max_age=None):
        """
        Retrieve the original value, checking it wasn't signed more than
        max_age seconds ago (max_age may also be a timedelta).
        """
        result = super(TimestampSigner, self).unsign(value)
        value, timestamp = result.rsplit(self.sep, 1)
        timestamp = baseconv.base62.decode(timestamp)
        if max_age is None:
            return value
        if isinstance(max_age, datetime.timedelta):
            max_age = max_age.total_seconds()
        # Reject signatures older than max_age.
        age = time.time() - timestamp
        if age > max_age:
            raise SignatureExpired(
                'Signature age %s > %s seconds' % (age, max_age))
        return value
| gpl-2.0 |
tardyp/buildbot | master/buildbot/test/unit/www/test_hooks_bitbucketcloud.py | 5 | 30136 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Copyright Mamba Team
from io import BytesIO
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.test.fake.web import FakeRequest
from buildbot.test.fake.web import fakeMasterForHooks
from buildbot.test.util.misc import TestReactorMixin
from buildbot.util import unicode2bytes
from buildbot.www import change_hook
from buildbot.www.hooks.bitbucketcloud import _HEADER_EVENT
# Content-Type header value used by every fake request in this module.
_CT_JSON = b'application/json'
# Flattened change properties the bitbucketcloud hook is expected to extract
# from a pull-request payload; the pull-request tests below assert this is a
# subset of the produced change's 'properties' dict (see assertDictSubset).
# Values mirror the pullRequest*JsonPayload fixtures further down.
bitbucketPRproperties = {
    'pullrequesturl': 'http://localhost:7990/projects/CI/repos/py-repo/pull-requests/21',
    'bitbucket.id': '21',
    'bitbucket.link': 'http://localhost:7990/projects/CI/repos/py-repo/pull-requests/21',
    'bitbucket.title': 'dot 1496311906',
    'bitbucket.authorLogin': 'Buildbot',
    'bitbucket.fromRef.branch.name': 'branch_1496411680',
    'bitbucket.fromRef.branch.rawNode': 'a87e21f7433d8c16ac7be7413483fbb76c72a8ba',
    'bitbucket.fromRef.commit.authorTimestamp': 0,
    'bitbucket.fromRef.commit.date': None,
    'bitbucket.fromRef.commit.hash': 'a87e21f7433d8c16ac7be7413483fbb76c72a8ba',
    'bitbucket.fromRef.commit.message': None,
    'bitbucket.fromRef.repository.fullName': 'CI/py-repo',
    'bitbucket.fromRef.repository.links.self.href':
        'http://localhost:7990/projects/CI/repos/py-repo',
    'bitbucket.fromRef.repository.owner.display_name': 'CI',
    'bitbucket.fromRef.repository.owner.nickname': 'CI',
    'bitbucket.fromRef.repository.ownerName': 'CI',
    'bitbucket.fromRef.repository.project.key': 'CI',
    'bitbucket.fromRef.repository.project.name': 'Continuous Integration',
    'bitbucket.fromRef.repository.public': False,
    'bitbucket.fromRef.repository.scm': 'git',
    'bitbucket.fromRef.repository.slug': 'py-repo',
    'bitbucket.toRef.branch.name': 'master',
    'bitbucket.toRef.branch.rawNode': '7aebbb0089c40fce138a6d0b36d2281ea34f37f5',
    'bitbucket.toRef.commit.authorTimestamp': 0,
    'bitbucket.toRef.commit.date': None,
    'bitbucket.toRef.commit.hash': '7aebbb0089c40fce138a6d0b36d2281ea34f37f5',
    'bitbucket.toRef.commit.message': None,
    'bitbucket.toRef.repository.fullName': 'CI/py-repo',
    'bitbucket.toRef.repository.links.self.href':
        'http://localhost:7990/projects/CI/repos/py-repo',
    'bitbucket.toRef.repository.owner.display_name': 'CI',
    'bitbucket.toRef.repository.owner.nickname': 'CI',
    'bitbucket.toRef.repository.ownerName': 'CI',
    'bitbucket.toRef.repository.project.key': 'CI',
    'bitbucket.toRef.repository.project.name': 'Continuous Integration',
    'bitbucket.toRef.repository.public': False,
    'bitbucket.toRef.repository.scm': 'git',
    'bitbucket.toRef.repository.slug': 'py-repo'
}
pushJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
},
"html": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"push": {
"changes": [
{
"created": false,
"closed": false,
"new": {
"type": "branch",
"name": "branch_1496411680",
"target": {
"type": "commit",
"hash": "793d4754230023d85532f9a38dba3290f959beb4"
}
},
"old": {
"type": "branch",
"name": "branch_1496411680",
"target": {
"type": "commit",
"hash": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba"
}
}
}
]
}
}
"""
pullRequestCreatedJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"pullrequest": {
"id": "21",
"title": "dot 1496311906",
"link": "http://localhost:7990/projects/CI/repos/py-repo/pull-requests/21",
"authorLogin": "Buildbot",
"fromRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"authorTimestamp": 0
},
"branch": {
"rawNode": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"name": "branch_1496411680"
}
},
"toRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"authorTimestamp": 0
},
"branch": {
"rawNode": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"name": "master"
}
}
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
}
}
"""
pullRequestUpdatedJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"pullrequest": {
"id": "21",
"title": "dot 1496311906",
"link": "http://localhost:7990/projects/CI/repos/py-repo/pull-requests/21",
"authorLogin": "Buildbot",
"fromRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"authorTimestamp": 0
},
"branch": {
"rawNode": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"name": "branch_1496411680"
}
},
"toRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"authorTimestamp": 0
},
"branch": {
"rawNode": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"name": "master"
}
}
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
}
}
"""
pullRequestRejectedJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"pullrequest": {
"id": "21",
"title": "dot 1496311906",
"link": "http://localhost:7990/projects/CI/repos/py-repo/pull-requests/21",
"authorLogin": "Buildbot",
"fromRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"authorTimestamp": 0
},
"branch": {
"rawNode": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"name": "branch_1496411680"
}
},
"toRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"authorTimestamp": 0
},
"branch": {
"rawNode": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"name": "master"
}
}
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
}
}
"""
pullRequestFulfilledJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"pullrequest": {
"id": "21",
"title": "dot 1496311906",
"link": "http://localhost:7990/projects/CI/repos/py-repo/pull-requests/21",
"authorLogin": "Buildbot",
"fromRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"authorTimestamp": 0
},
"branch": {
"rawNode": "a87e21f7433d8c16ac7be7413483fbb76c72a8ba",
"name": "branch_1496411680"
}
},
"toRef": {
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"commit": {
"message": null,
"date": null,
"hash": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"authorTimestamp": 0
},
"branch": {
"rawNode": "7aebbb0089c40fce138a6d0b36d2281ea34f37f5",
"name": "master"
}
}
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
}
}
"""
deleteTagJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
},
"html": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"ownerName": "BUIL",
"public": false,
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"push": {
"changes": [
{
"created": false,
"closed": true,
"old": {
"type": "tag",
"name": "1.0.0",
"target": {
"type": "commit",
"hash": "793d4754230023d85532f9a38dba3290f959beb4"
}
},
"new": null
}
]
}
}
"""
deleteBranchJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
},
"html": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"ownerName": "CI",
"public": false,
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"push": {
"changes": [
{
"created": false,
"closed": true,
"old": {
"type": "branch",
"name": "branch_1496758965",
"target": {
"type": "commit",
"hash": "793d4754230023d85532f9a38dba3290f959beb4"
}
},
"new": null
}
]
}
}
"""
newTagJsonPayload = """
{
"actor": {
"nickname": "John",
"display_name": "John Smith"
},
"repository": {
"scm": "git",
"project": {
"key": "CI",
"name": "Continuous Integration"
},
"slug": "py-repo",
"links": {
"self": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
},
"html": {
"href": "http://localhost:7990/projects/CI/repos/py-repo"
}
},
"public": false,
"ownerName": "CI",
"owner": {
"nickname": "CI",
"display_name": "CI"
},
"fullName": "CI/py-repo"
},
"push": {
"changes": [
{
"created": true,
"closed": false,
"old": null,
"new": {
"type": "tag",
"name": "1.0.0",
"target": {
"type": "commit",
"hash": "793d4754230023d85532f9a38dba3290f959beb4"
}
}
}
]
}
}
"""
def _prepare_request(payload, headers=None, change_dict=None):
    """Build a fake POST request aimed at the bitbucketcloud change hook.

    ``payload`` may be str or bytes; str is converted to bytes. ``headers``
    (optional) are merged on top of the default JSON Content-Type, so a
    caller-supplied Content-Type wins.
    """
    body = unicode2bytes(payload) if isinstance(payload, str) else payload
    fake_request = FakeRequest(change_dict)
    fake_request.method = b"POST"
    fake_request.uri = b"/change_hook/bitbucketcloud"
    fake_request.content = BytesIO(body)
    fake_request.received_headers[b'Content-Type'] = _CT_JSON
    if headers:
        fake_request.received_headers.update(headers)
    return fake_request
class TestChangeHookConfiguredWithGitChange(unittest.TestCase,
                                            TestReactorMixin):
    """Exercise the bitbucketcloud change hook against the JSON payload
    fixtures above: pushes, pull-request lifecycle events, tag/branch
    creation and deletion, codebase resolution, and error handling.
    """
    def setUp(self):
        self.setUpTestReactor()
        # Whitelist 'bitbucket.*' so PR properties survive into the change.
        self.change_hook = change_hook.ChangeHookResource(
            dialects={'bitbucketcloud': {
                'bitbucket_property_whitelist': ["bitbucket.*"],
            }},
            master=fakeMasterForHooks(self)
        )
    def assertDictSubset(self, expected_dict, response_dict):
        # Assert every key of expected_dict exists in response_dict with an
        # equal value (response_dict may contain additional keys).
        expected = {}
        for key in expected_dict.keys():
            self.assertIn(key, set(response_dict.keys()))
            expected[key] = response_dict[key]
        self.assertDictEqual(expected_dict, expected)
    def _checkPush(self, change):
        # Common assertions for every change produced from a repo:push event.
        self.assertEqual(
            change['repository'],
            'http://localhost:7990/projects/CI/repos/py-repo')
        self.assertEqual(change['author'], 'John Smith <John>')
        self.assertEqual(change['project'], 'Continuous Integration')
        self.assertEqual(change['revision'],
                         '793d4754230023d85532f9a38dba3290f959beb4')
        self.assertEqual(
            change['comments'], 'Bitbucket Cloud commit '
            '793d4754230023d85532f9a38dba3290f959beb4')
        self.assertEqual(
            change['revlink'],
            'http://localhost:7990/projects/CI/repos/py-repo/commits/'
            '793d4754230023d85532f9a38dba3290f959beb4')
    @defer.inlineCallbacks
    def testHookWithChangeOnPushEvent(self):
        request = _prepare_request(
            pushJsonPayload, headers={_HEADER_EVENT: 'repo:push'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPush(change)
        self.assertEqual(change['branch'], 'refs/heads/branch_1496411680')
        self.assertEqual(change['category'], 'push')
    @defer.inlineCallbacks
    def testHookWithNonDictOption(self):
        # The dialect config may be a bare truthy value instead of a dict;
        # the hook must still handle a plain push.
        self.change_hook.dialects = {'bitbucketcloud': True}
        yield self.testHookWithChangeOnPushEvent()
    def _checkPullRequest(self, change):
        # Common assertions for every change produced from a pullrequest:*
        # event, including the whitelisted 'bitbucket.*' properties.
        self.assertEqual(
            change['repository'],
            'http://localhost:7990/projects/CI/repos/py-repo')
        self.assertEqual(change['author'], 'John Smith <John>')
        self.assertEqual(change['project'], 'Continuous Integration')
        self.assertEqual(change['comments'],
                         'Bitbucket Cloud Pull Request #21')
        self.assertEqual(change['revlink'],
                         'http://localhost:7990/projects/'
                         'CI/repos/py-repo/pull-requests/21')
        self.assertEqual(change['revision'],
                         'a87e21f7433d8c16ac7be7413483fbb76c72a8ba')
        self.assertDictSubset(bitbucketPRproperties, change["properties"])
    @defer.inlineCallbacks
    def testHookWithChangeOnPullRequestCreated(self):
        request = _prepare_request(
            pullRequestCreatedJsonPayload,
            headers={_HEADER_EVENT: 'pullrequest:created'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPullRequest(change)
        self.assertEqual(change['branch'], 'refs/pull-requests/21/merge')
        self.assertEqual(change['category'], 'pull-created')
    @defer.inlineCallbacks
    def testHookWithChangeOnPullRequestUpdated(self):
        request = _prepare_request(
            pullRequestUpdatedJsonPayload,
            headers={_HEADER_EVENT: 'pullrequest:updated'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPullRequest(change)
        self.assertEqual(change['branch'], 'refs/pull-requests/21/merge')
        self.assertEqual(change['category'], 'pull-updated')
    @defer.inlineCallbacks
    def testHookWithChangeOnPullRequestRejected(self):
        # A rejected PR maps back onto the source branch, not the merge ref.
        request = _prepare_request(
            pullRequestRejectedJsonPayload,
            headers={_HEADER_EVENT: 'pullrequest:rejected'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPullRequest(change)
        self.assertEqual(change['branch'], 'refs/heads/branch_1496411680')
        self.assertEqual(change['category'], 'pull-rejected')
    @defer.inlineCallbacks
    def testHookWithChangeOnPullRequestFulfilled(self):
        # A merged PR maps onto the destination branch.
        request = _prepare_request(
            pullRequestFulfilledJsonPayload,
            headers={_HEADER_EVENT: 'pullrequest:fulfilled'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPullRequest(change)
        self.assertEqual(change['branch'], 'refs/heads/master')
        self.assertEqual(change['category'], 'pull-fulfilled')
    @defer.inlineCallbacks
    def _checkCodebase(self, event_type, expected_codebase):
        # Fire one event of the given type and assert the codebase the hook
        # assigned to the resulting change.
        payloads = {
            'repo:push': pushJsonPayload,
            'pullrequest:updated': pullRequestUpdatedJsonPayload}
        request = _prepare_request(
            payloads[event_type], headers={_HEADER_EVENT: event_type})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self.assertEqual(change['codebase'], expected_codebase)
    @defer.inlineCallbacks
    def testHookWithCodebaseValueOnPushEvent(self):
        self.change_hook.dialects = {
            'bitbucketcloud': {'codebase': 'super-codebase'}}
        yield self._checkCodebase('repo:push', 'super-codebase')
    @defer.inlineCallbacks
    def testHookWithCodebaseFunctionOnPushEvent(self):
        # 'codebase' may also be a callable receiving the decoded payload.
        self.change_hook.dialects = {
            'bitbucketcloud': {
                'codebase':
                    lambda payload: payload['repository']['project']['key']}}
        yield self._checkCodebase('repo:push', 'CI')
    @defer.inlineCallbacks
    def testHookWithCodebaseValueOnPullEvent(self):
        self.change_hook.dialects = {
            'bitbucketcloud': {'codebase': 'super-codebase'}}
        yield self._checkCodebase('pullrequest:updated', 'super-codebase')
    @defer.inlineCallbacks
    def testHookWithCodebaseFunctionOnPullEvent(self):
        self.change_hook.dialects = {
            'bitbucketcloud': {
                'codebase':
                    lambda payload: payload['repository']['project']['key']}}
        yield self._checkCodebase('pullrequest:updated', 'CI')
    @defer.inlineCallbacks
    def testHookWithUnhandledEvent(self):
        # Unknown event types must produce no change and an error body.
        request = _prepare_request(
            pushJsonPayload, headers={_HEADER_EVENT: 'invented:event'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 0)
        self.assertEqual(request.written, b"Unknown event: invented_event")
    @defer.inlineCallbacks
    def testHookWithChangeOnCreateTag(self):
        request = _prepare_request(
            newTagJsonPayload, headers={_HEADER_EVENT: 'repo:push'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPush(change)
        self.assertEqual(change['branch'], 'refs/tags/1.0.0')
        self.assertEqual(change['category'], 'push')
    @defer.inlineCallbacks
    def testHookWithChangeOnDeleteTag(self):
        request = _prepare_request(
            deleteTagJsonPayload, headers={_HEADER_EVENT: 'repo:push'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPush(change)
        self.assertEqual(change['branch'], 'refs/tags/1.0.0')
        self.assertEqual(change['category'], 'ref-deleted')
    @defer.inlineCallbacks
    def testHookWithChangeOnDeleteBranch(self):
        request = _prepare_request(
            deleteBranchJsonPayload, headers={_HEADER_EVENT: 'repo:push'})
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 1)
        change = self.change_hook.master.data.updates.changesAdded[0]
        self._checkPush(change)
        self.assertEqual(change['branch'], 'refs/heads/branch_1496758965')
        self.assertEqual(change['category'], 'ref-deleted')
    @defer.inlineCallbacks
    def testHookWithInvalidContentType(self):
        request = _prepare_request(
            pushJsonPayload, headers={_HEADER_EVENT: b'repo:push'})
        request.received_headers[b'Content-Type'] = b'invalid/content'
        yield request.test_render(self.change_hook)
        self.assertEqual(len(self.change_hook.master.data.updates.changesAdded), 0)
        self.assertEqual(request.written,
                         b"Unknown content type: invalid/content")
| gpl-2.0 |
skosukhin/spack | lib/spack/external/_pytest/cacheprovider.py | 188 | 8939 | """
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
    """Filesystem-backed key/value store rooted at ``<rootdir>/.cache``.

    Values live under ``.cache/v/<key path>`` as JSON; plugin-managed
    directories live under ``.cache/d/<name>``.
    """
    def __init__(self, config):
        self.config = config
        self._cachedir = config.rootdir.join(".cache")
        self.trace = config.trace.root.get("cache")
        # --cache-clear: wipe and recreate the cache directory up front.
        if config.getvalue("cacheclear"):
            self.trace("clearing cachedir")
            if self._cachedir.check():
                self._cachedir.remove()
            self._cachedir.mkdir()
    def makedir(self, name):
        """ return a directory path object with the given name.  If the
        directory does not yet exist, it will be created.  You can use it
        to manage files likes e. g. store/retrieve database
        dumps across test sessions.
        :param name: must be a string not containing a ``/`` separator.
             Make sure the name contains your plugin or application
             identifiers to prevent clashes with other cache users.
        """
        # Reject both the native separator and the platform's alternative
        # one (e.g. '/' on Windows) so 'name' stays a single path component.
        if _sep in name or _altsep is not None and _altsep in name:
            raise ValueError("name is not allowed to contain path separators")
        return self._cachedir.ensure_dir("d", name)
    def _getvaluepath(self, key):
        # Map 'plugin/key' onto the on-disk path '.cache/v/plugin/key'.
        return self._cachedir.join('v', *key.split('/'))
    def get(self, key, default):
        """ return cached value for the given key. If no value
        was yet cached or the value cannot be read, the specified
        default is returned.
        :param key: must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param default: must be provided in case of a cache-miss or
            invalid cache values.
        """
        path = self._getvaluepath(key)
        if path.check():
            try:
                with path.open("r") as f:
                    return json.load(f)
            except ValueError:
                # Corrupt/non-JSON cache file: fall through to the default.
                self.trace("cache-invalid at %s" % (path,))
        return default
    def set(self, key, value):
        """ save value for the given key.
        :param key: must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param value: must be of any combination of basic
               python types, including nested types
               like e. g. lists of dictionaries.
        """
        path = self._getvaluepath(key)
        try:
            path.dirpath().ensure_dir()
        except (py.error.EEXIST, py.error.EACCES):
            # Cache is best-effort: warn and give up rather than fail the run.
            self.config.warn(
                code='I9', message='could not create cache path %s' % (path,)
            )
            return
        try:
            f = path.open('w')
        except py.error.ENOTDIR:
            self.config.warn(
                code='I9', message='cache could not write path %s' % (path,))
        else:
            with f:
                self.trace("cache-write %s: %r" % (key, value,))
                json.dump(value, f, indent=2, sort_keys=True)
class LFPlugin:
    """ Plugin which implements the --lf (run last-failing) option """
    def __init__(self, config):
        self.config = config
        # Active for both --lf (rerun only failures) and --ff (failures
        # first); the recorded failures are loaded from the cache in
        # either case.
        active_keys = 'lf', 'failedfirst'
        self.active = any(config.getvalue(key) for key in active_keys)
        if self.active:
            self.lastfailed = config.cache.get("cache/lastfailed", {})
        else:
            self.lastfailed = {}
    def pytest_report_header(self):
        # One-line summary of the rerun mode in the terminal header.
        if self.active:
            if not self.lastfailed:
                mode = "run all (no recorded failures)"
            else:
                mode = "rerun last %d failures%s" % (
                    len(self.lastfailed),
                    " first" if self.config.getvalue("failedfirst") else "")
            return "run-last-failure: %s" % mode
    def pytest_runtest_logreport(self, report):
        # Record failures (except xfails); clear an entry once the test's
        # call phase passes again.
        if report.failed and "xfail" not in report.keywords:
            self.lastfailed[report.nodeid] = True
        elif not report.failed:
            if report.when == "call":
                self.lastfailed.pop(report.nodeid, None)
    def pytest_collectreport(self, report):
        passed = report.outcome in ('passed', 'skipped')
        if passed:
            if report.nodeid in self.lastfailed:
                # A previously-failed collector collected fine: replace its
                # entry with entries for the items it produced.
                self.lastfailed.pop(report.nodeid)
                self.lastfailed.update(
                    (item.nodeid, True)
                    for item in report.result)
        else:
            self.lastfailed[report.nodeid] = True
    def pytest_collection_modifyitems(self, session, config, items):
        if self.active and self.lastfailed:
            previously_failed = []
            previously_passed = []
            for item in items:
                if item.nodeid in self.lastfailed:
                    previously_failed.append(item)
                else:
                    previously_passed.append(item)
            if not previously_failed and previously_passed:
                # running a subset of all tests with recorded failures outside
                # of the set of tests currently executing
                pass
            elif self.config.getvalue("failedfirst"):
                items[:] = previously_failed + previously_passed
            else:
                # --lf: keep only previous failures, deselect the rest.
                items[:] = previously_failed
                config.hook.pytest_deselected(items=previously_passed)
    def pytest_sessionfinish(self, session):
        config = self.config
        # Don't touch the cache for --cache-show or on xdist slaves (the
        # master aggregates results).
        if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
            return
        prev_failed = config.cache.get("cache/lastfailed", None) is not None
        if (session.testscollected and prev_failed) or self.lastfailed:
            config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
    """Register the cache-related command line options with pytest."""
    group = parser.getgroup("general")
    group.addoption(
        '--lf', '--last-failed', action='store_true', dest="lf",
        help="rerun only the tests that failed "
             "at the last run (or all if none failed)")
    group.addoption(
        '--ff', '--failed-first', action='store_true', dest="failedfirst",
        help="run all tests but run the last failures first.  "
             "This may re-order tests and thus lead to "
             "repeated fixture setup/teardown")
    group.addoption(
        '--cache-show', action='store_true', dest="cacheshow",
        help="show cache contents, don't perform collection or tests")
    group.addoption(
        '--cache-clear', action='store_true', dest="cacheclear",
        help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
    """Handle --cache-show: dump the cache and skip the normal test run."""
    if config.option.cacheshow:
        # Imported lazily to avoid a circular import with _pytest.main.
        from _pytest.main import wrap_session
        return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Attach the Cache to the config and register the last-failed plugin.

    tryfirst so config.cache exists before other plugins' configure hooks.
    """
    config.cache = Cache(config)
    config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
    """
    Return a cache object that can persist state between testing sessions.

    cache.get(key, default)
    cache.set(key, value)

    Keys must be a ``/`` separated value, where the first part is usually the
    name of your plugin or application to avoid clashes with other cache users.
    Values can be any object handled by the json stdlib module.
    """
    return request.config.cache
def pytest_report_header(config):
    """In verbose mode, show the cache directory (relative to the cwd)."""
    if config.option.verbose:
        relpath = py.path.local().bestrelpath(config.cache._cachedir)
        return "cachedir: %s" % relpath
def cacheshow(config, session):
    """Print all cached values and directories to the terminal (--cache-show).

    Returns 0 (process exit code) in all cases.
    """
    from pprint import pprint
    tw = py.io.TerminalWriter()
    tw.line("cachedir: " + str(config.cache._cachedir))
    if not config.cache._cachedir.check():
        tw.line("cache is empty")
        return 0
    # Sentinel object so a stored value of None is distinguishable from a
    # missing/unreadable entry.
    dummy = object()
    basedir = config.cache._cachedir
    vdir = basedir.join("v")
    tw.sep("-", "cache values")
    for valpath in vdir.visit(lambda x: x.isfile()):
        # Reconstruct the '/'-separated cache key from the relative path.
        key = valpath.relto(vdir).replace(valpath.sep, "/")
        val = config.cache.get(key, dummy)
        if val is dummy:
            tw.line("%s contains unreadable content, "
                    "will be ignored" % key)
        else:
            tw.line("%s contains:" % key)
            stream = py.io.TextIO()
            pprint(val, stream=stream)
            for line in stream.getvalue().splitlines():
                tw.line("  " + line)
    ddir = basedir.join("d")
    if ddir.isdir() and ddir.listdir():
        tw.sep("-", "cache directories")
        for p in basedir.join("d").visit():
            #if p.check(dir=1):
            #    print("%s/" % p.relto(basedir))
            if p.isfile():
                key = p.relto(basedir)
                tw.line("%s is a file of length %d" % (
                    key, p.size()))
    return 0
| lgpl-2.1 |
pavelchristof/gomoku-ai | tensorflow/tools/common/public_api.py | 71 | 4753 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
  """Visitor to use with `traverse` to visit exactly the public TF API."""

  # Names always treated as private regardless of the leading-underscore
  # rule below (kept as a class constant so membership is a set lookup).
  _ALWAYS_PRIVATE = frozenset(['__base__', '__class__'])

  def __init__(self, visitor):
    """Constructor.

    `visitor` should be a callable suitable as a visitor for `traverse`. It will
    be called only for members of the public TensorFlow API.

    Args:
      visitor: A visitor to call for the public API.
    """
    self._visitor = visitor
    self._root_name = 'tf'

    # Modules/classes we want to suppress entirely.
    self._private_map = {
        # Some implementations have this internal module that we shouldn't
        # expose.
        'tf.flags': ['cpp_flags'],
    }

    # Modules/classes we do not want to descend into if we hit them. Usually,
    # system modules exposed through platforms for compatibility reasons.
    # Each entry maps a module path to a name to ignore in traversal.
    self._do_not_descend_map = {
        'tf': [
            'core',
            'examples',
            'flags',  # Don't add flags
            # TODO(drpng): This can be removed once sealed off.
            'platform',
            # TODO(drpng): This can be removed once sealed.
            'pywrap_tensorflow',
            # TODO(drpng): This can be removed once sealed.
            'user_ops',
            'python',
            'tools',
            'tensorboard',
        ],

        ## Everything below here is legitimate.
        # It'll stay, but it's not officially part of the API.
        'tf.app': ['flags'],
        # Imported for compatibility between py2/3.
        'tf.test': ['mock'],
    }

  @property
  def private_map(self):
    """A map from parents to symbols that should not be included at all.

    This map can be edited, but it should not be edited once traversal has
    begun.

    Returns:
      The map marking symbols to not include.
    """
    return self._private_map

  @property
  def do_not_descend_map(self):
    """A map from parents to symbols that should not be descended into.

    This map can be edited, but it should not be edited once traversal has
    begun.

    Returns:
      The map marking symbols to not explore.
    """
    return self._do_not_descend_map

  def set_root_name(self, root_name):
    """Override the default root name of 'tf'."""
    self._root_name = root_name

  def _is_private(self, path, name):
    """Return whether a name is private.

    A name is private when it is explicitly listed in `private_map` for the
    given path, or when it has a leading underscore and is not a dunder name
    (except `__base__`/`__class__`, which are always private).
    """
    # TODO(wicke): Find out what names to exclude.
    if path in self._private_map and name in self._private_map[path]:
      return True
    # Explicit parentheses: '_foo' is private, '__dunder__' is public unless
    # it is one of the always-private names.
    is_dunder = re.match('__.*__$', name) is not None
    return ((name.startswith('_') and not is_dunder) or
            name in self._ALWAYS_PRIVATE)

  def _do_not_descend(self, path, name):
    """Safely queries if a specific fully qualified name should be excluded."""
    return (path in self._do_not_descend_map and
            name in self._do_not_descend_map[path])

  def __call__(self, path, parent, children):
    """Visitor interface, see `traverse` for details."""

    # Avoid long waits in cases of pretty unambiguous failure.
    if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
      raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
                         'problem with an accidental public import.' %
                         (self._root_name, path))

    # Includes self._root_name
    full_path = '.'.join([self._root_name, path]) if path else self._root_name

    # Remove things that are not visible.
    for name, child in list(children):
      if self._is_private(full_path, name):
        children.remove((name, child))

    self._visitor(path, parent, children)

    # Remove things that are visible, but which should not be descended into.
    for name, child in list(children):
      if self._do_not_descend(full_path, name):
        children.remove((name, child))
| apache-2.0 |
wuxianghou/phantomjs | src/qt/qtbase/src/3rdparty/freetype/src/tools/docmaker/formatter.py | 515 | 4962 | # Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
from sources import *
from content import *
from utils import *
# This is the base Formatter class. Its purpose is to convert
# a content processor's data into specific documents (i.e., table of
# contents, global index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example,
# the file tohtml.py contains the definition of the HtmlFormatter sub-class
# used to output -- you guessed it -- HTML.
#
class Formatter:
    """Base class converting processed doc content into output documents.

    A Formatter produces three kinds of output from the data collected by
    a content processor: a table of contents (`toc_dump`), a global index
    of identifiers (`index_dump`) and one page per API section
    (`section_dump` / `section_dump_all`).  This class only implements the
    traversal; it must be sub-classed (e.g. the HtmlFormatter in
    tohtml.py) to emit real markup via the *_enter/*_exit hooks, which are
    all no-ops here.
    """

    def __init__( self, processor ):
        """Collect sections and blocks from `processor`, build the index."""
        self.processor   = processor
        self.identifiers = {}
        self.chapters    = processor.chapters
        self.sections    = processor.sections.values()
        self.block_index = []

        # store all blocks in a dictionary
        self.blocks = []
        for section in self.sections:
            for block in section.blocks.values():
                self.add_identifier( block.name, block )

                # add enumeration values to the index, since this is useful
                for markup in block.markups:
                    if markup.tag == 'values':
                        for field in markup.fields:
                            self.add_identifier( field.name, block )

        # sorted list of all indexed names (ordering from utils.index_sort)
        self.block_index = self.identifiers.keys()
        self.block_index.sort( index_sort )

    def add_identifier( self, name, block ):
        """Register `block` under `name`, warning about duplicate names."""
        # `has_key` is deprecated/removed in later Pythons; `in` is
        # equivalent and works everywhere.
        if name in self.identifiers:
            # duplicate name!  Keep the first definition and complain.
            sys.stderr.write( \
                "WARNING: duplicate definition for '" + name + "' in " + \
                block.location() + ", previous definition in " + \
                self.identifiers[name].location() + "\n" )
        else:
            self.identifiers[name] = block

    #
    # Formatting the table of contents
    #
    def toc_enter( self ):
        """Hook: called once before the table of contents is emitted."""
        pass

    def toc_chapter_enter( self, chapter ):
        """Hook: called before each chapter of the table of contents."""
        pass

    def toc_section_enter( self, section ):
        """Hook: called before each section entry in the TOC."""
        pass

    def toc_section_exit( self, section ):
        """Hook: called after each section entry in the TOC."""
        pass

    def toc_chapter_exit( self, chapter ):
        """Hook: called after each chapter of the table of contents."""
        pass

    def toc_index( self, index_filename ):
        """Hook: called to emit a link to the index page, if any."""
        pass

    def toc_exit( self ):
        """Hook: called once after the table of contents is emitted."""
        pass

    def toc_dump( self, toc_filename = None, index_filename = None ):
        """Emit the table of contents, optionally redirected to a file."""
        output = None
        if toc_filename:
            output = open_output( toc_filename )

        self.toc_enter()

        for chap in self.processor.chapters:

            self.toc_chapter_enter( chap )

            for section in chap.sections:
                self.toc_section_enter( section )
                self.toc_section_exit( section )

            self.toc_chapter_exit( chap )

        self.toc_index( index_filename )

        self.toc_exit()

        if output:
            close_output( output )

    #
    # Formatting the index
    #
    def index_enter( self ):
        """Hook: called once before the identifier index is emitted."""
        pass

    def index_name_enter( self, name ):
        """Hook: called before each indexed identifier."""
        pass

    def index_name_exit( self, name ):
        """Hook: called after each indexed identifier."""
        pass

    def index_exit( self ):
        """Hook: called once after the identifier index is emitted."""
        pass

    def index_dump( self, index_filename = None ):
        """Emit the global identifier index, optionally to a file."""
        output = None
        if index_filename:
            output = open_output( index_filename )

        self.index_enter()

        for name in self.block_index:
            self.index_name_enter( name )
            self.index_name_exit( name )

        self.index_exit()

        if output:
            close_output( output )

    #
    # Formatting a section
    #
    def section_enter( self, section ):
        """Hook: called before a section page is emitted."""
        pass

    def block_enter( self, block ):
        """Hook: called before each documentation block."""
        pass

    def markup_enter( self, markup, block = None ):
        """Hook: called before each markup element of a block."""
        pass

    def field_enter( self, field, markup = None, block = None ):
        """Hook: called before each field of a markup element."""
        pass

    def field_exit( self, field, markup = None, block = None ):
        """Hook: called after each field of a markup element."""
        pass

    def markup_exit( self, markup, block = None ):
        """Hook: called after each markup element of a block."""
        pass

    def block_exit( self, block ):
        """Hook: called after each documentation block."""
        pass

    def section_exit( self, section ):
        """Hook: called after a section page is emitted."""
        pass

    def section_dump( self, section, section_filename = None ):
        """Emit one section page, optionally redirected to a file."""
        output = None
        if section_filename:
            output = open_output( section_filename )

        self.section_enter( section )

        for name in section.block_names:
            block = self.identifiers[name]
            self.block_enter( block )

            for markup in block.markups[1:]:   # always ignore first markup!
                self.markup_enter( markup, block )

                for field in markup.fields:
                    self.field_enter( field, markup, block )
                    self.field_exit( field, markup, block )

                self.markup_exit( markup, block )

            self.block_exit( block )

        self.section_exit( section )

        if output:
            close_output( output )

    def section_dump_all( self ):
        """Emit every section page to the default output."""
        for section in self.sections:
            self.section_dump( section )
# eof
| bsd-3-clause |
gibxxi/nzbToMedia | libs/mutagen/easymp4.py | 4 | 8718 | # -*- coding: utf-8 -*-
# Copyright (C) 2009 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
from mutagen import Tags
from mutagen._util import DictMixin, dict_match
from mutagen.mp4 import MP4, MP4Tags, error, delete
from ._compat import PY2, text_type, PY3
__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]
class EasyMP4KeyError(error, KeyError, ValueError):
    """Raised when an unregistered/invalid key is used with EasyMP4Tags.

    Inherits from both KeyError and ValueError (plus mutagen's error base)
    so callers may catch whichever type is most natural.
    """
    pass
class EasyMP4Tags(DictMixin, Tags):
    """A file with MPEG-4 iTunes metadata.

    Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII
    strings, and values are a list of Unicode strings (and these lists
    are always of length 0 or 1).

    If you need access to the full MP4 metadata feature set, you should use
    MP4, not EasyMP4.
    """

    # Class-level registries mapping a lowercase key (or glob pattern, see
    # dict_match) to the handler functions installed by RegisterKey().
    Set = {}
    Get = {}
    Delete = {}
    List = {}

    def __init__(self, *args, **kwargs):
        # All real work is delegated to a wrapped MP4Tags instance; the
        # Easy layer only translates keys.
        self.__mp4 = MP4Tags(*args, **kwargs)
        self.load = self.__mp4.load
        self.save = self.__mp4.save
        self.delete = self.__mp4.delete
        self._padding = self.__mp4._padding

    # Forward `filename` to the wrapped MP4Tags object.
    filename = property(lambda s: s.__mp4.filename,
                        lambda s, fn: setattr(s.__mp4, 'filename', fn))

    @classmethod
    def RegisterKey(cls, key,
                    getter=None, setter=None, deleter=None, lister=None):
        """Register a new key mapping.

        A key mapping is four functions, a getter, setter, deleter,
        and lister. The key may be either a string or a glob pattern.

        The getter, deleted, and lister receive an MP4Tags instance
        and the requested key name. The setter also receives the
        desired value, which will be a list of strings.

        The getter, setter, and deleter are used to implement __getitem__,
        __setitem__, and __delitem__.

        The lister is used to implement keys(). It should return a
        list of keys that are actually in the MP4 instance, provided
        by its associated getter.
        """
        key = key.lower()
        if getter is not None:
            cls.Get[key] = getter
        if setter is not None:
            cls.Set[key] = setter
        if deleter is not None:
            cls.Delete[key] = deleter
        if lister is not None:
            cls.List[key] = lister

    @classmethod
    def RegisterTextKey(cls, key, atomid):
        """Register a text key.

        If the key you need to register is a simple one-to-one mapping
        of MP4 atom name to EasyMP4Tags key, then you can use this
        function::

            EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
        """
        def getter(tags, key):
            return tags[atomid]

        def setter(tags, key, value):
            tags[atomid] = value

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    @classmethod
    def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
        """Register a scalar integer key.

        Values are clamped into [min_value, max_value] on assignment and
        rendered as text strings on read.
        """
        def getter(tags, key):
            return list(map(text_type, tags[atomid]))

        def setter(tags, key, value):
            # clamp each value into the legal atom range before storing
            clamp = lambda x: int(min(max(min_value, x), max_value))
            tags[atomid] = [clamp(v) for v in map(int, value)]

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    @classmethod
    def RegisterIntPairKey(cls, key, atomid, min_value=0,
                           max_value=(2 ** 16) - 1):
        """Register an integer-pair key such as track/total or disc/total.

        Values read back as "track/total" when a total is present, else
        just "track"; both halves are clamped on assignment.
        """
        def getter(tags, key):
            ret = []
            for (track, total) in tags[atomid]:
                if total:
                    ret.append(u"%d/%d" % (track, total))
                else:
                    ret.append(text_type(track))
            return ret

        def setter(tags, key, value):
            clamp = lambda x: int(min(max(min_value, x), max_value))
            data = []
            for v in value:
                try:
                    # accept "n/m" form; fall back to a bare number below
                    tracks, total = v.split("/")
                    tracks = clamp(int(tracks))
                    total = clamp(int(total))
                except (ValueError, TypeError):
                    tracks = clamp(int(v))
                    total = min_value
                data.append((tracks, total))
            tags[atomid] = data

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    @classmethod
    def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
        """Register a text key.

        If the key you need to register is a simple one-to-one mapping
        of MP4 freeform atom (----) and name to EasyMP4Tags key, then
        you can use this function::

            EasyMP4Tags.RegisterFreeformKey(
                "musicbrainz_artistid", "MusicBrainz Artist Id")
        """
        # Freeform atoms are addressed as "----:<mean>:<name>".
        atomid = "----:" + mean + ":" + name

        def getter(tags, key):
            # Freeform values are stored as raw bytes; decode leniently.
            return [s.decode("utf-8", "replace") for s in tags[atomid]]

        def setter(tags, key, value):
            encoded = []
            for v in value:
                if not isinstance(v, text_type):
                    if PY3:
                        # Python 3: bytes are not accepted here.
                        raise TypeError("%r not str" % v)
                    v = v.decode("utf-8")
                encoded.append(v.encode("utf-8"))
            tags[atomid] = encoded

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    def __getitem__(self, key):
        # Dispatch to the registered getter (glob-aware via dict_match).
        key = key.lower()
        func = dict_match(self.Get, key)
        if func is not None:
            return func(self.__mp4, key)
        else:
            raise EasyMP4KeyError("%r is not a valid key" % key)

    def __setitem__(self, key, value):
        key = key.lower()

        # Normalize a single string to a one-element list (Py2 and Py3).
        if PY2:
            if isinstance(value, basestring):
                value = [value]
        else:
            if isinstance(value, text_type):
                value = [value]

        func = dict_match(self.Set, key)
        if func is not None:
            return func(self.__mp4, key, value)
        else:
            raise EasyMP4KeyError("%r is not a valid key" % key)

    def __delitem__(self, key):
        key = key.lower()
        func = dict_match(self.Delete, key)
        if func is not None:
            return func(self.__mp4, key)
        else:
            raise EasyMP4KeyError("%r is not a valid key" % key)

    def keys(self):
        """Return the registered keys actually present in the file."""
        keys = []
        for key in self.Get.keys():
            if key in self.List:
                keys.extend(self.List[key](self.__mp4, key))
            elif key in self:
                keys.append(key)
        return keys

    def pprint(self):
        """Print tag key=value pairs."""
        strings = []
        for key in sorted(self.keys()):
            values = self[key]
            for value in values:
                strings.append("%s=%s" % (key, value))
        return "\n".join(strings)
# Simple one-to-one text mappings: MP4 atom id -> Easy key name.
for atomid, key in {
    '\xa9nam': 'title',
    '\xa9alb': 'album',
    '\xa9ART': 'artist',
    'aART': 'albumartist',
    '\xa9day': 'date',
    '\xa9cmt': 'comment',
    'desc': 'description',
    '\xa9grp': 'grouping',
    '\xa9gen': 'genre',
    'cprt': 'copyright',
    'soal': 'albumsort',
    'soaa': 'albumartistsort',
    'soar': 'artistsort',
    'sonm': 'titlesort',
    'soco': 'composersort',
}.items():
    EasyMP4Tags.RegisterTextKey(key, atomid)

# Freeform ("----") atoms: mapping is freeform name -> Easy key; note the
# call passes (key, name) because RegisterFreeformKey(key, name, ...) takes
# the Easy key first.
for name, key in {
    'MusicBrainz Artist Id': 'musicbrainz_artistid',
    'MusicBrainz Track Id': 'musicbrainz_trackid',
    'MusicBrainz Album Id': 'musicbrainz_albumid',
    'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
    'MusicIP PUID': 'musicip_puid',
    'MusicBrainz Album Status': 'musicbrainz_albumstatus',
    'MusicBrainz Album Type': 'musicbrainz_albumtype',
    'MusicBrainz Release Country': 'releasecountry',
}.items():
    EasyMP4Tags.RegisterFreeformKey(key, name)

# Scalar integer atoms: here `name` is the atom id and `key` the Easy key.
for name, key in {
    "tmpo": "bpm",
}.items():
    EasyMP4Tags.RegisterIntKey(key, name)

# Integer-pair atoms (value/total), e.g. track 3 of 12.
for name, key in {
    "trkn": "tracknumber",
    "disk": "discnumber",
}.items():
    EasyMP4Tags.RegisterIntPairKey(key, name)
class EasyMP4(MP4):
    """Like :class:`MP4 <mutagen.mp4.MP4>`,
    but uses :class:`EasyMP4Tags` for tags.

    :ivar info: :class:`MP4Info <mutagen.mp4.MP4Info>`
    :ivar tags: :class:`EasyMP4Tags`
    """

    # Tell the MP4 base class which tags implementation to instantiate.
    MP4Tags = EasyMP4Tags

    # Re-export the EasyMP4Tags registries and registration helpers so
    # users can register new keys directly on this class.
    Get = EasyMP4Tags.Get
    Set = EasyMP4Tags.Set
    Delete = EasyMP4Tags.Delete
    List = EasyMP4Tags.List
    RegisterTextKey = EasyMP4Tags.RegisterTextKey
    RegisterKey = EasyMP4Tags.RegisterKey
| gpl-3.0 |
Endika/git | contrib/hg-to-git/hg-to-git.py | 342 | 8074 | #!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <stelian@popies.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
if sys.hexversion < 0x02030000:
    # The behavior of the pickle module changed significantly in 2.3
    sys.stderr.write("hg-to-git.py: requires Python 2.3 or later.\n")
    sys.exit(1)

# Maps hg revision number (as a string) -> git commit hash
hgvers = {}
# Tuple of child hg revisions for each hg revision
hgchildren = {}
# (parent, merge-parent) pair for each hg revision
hgparents = {}
# Git branch name assigned to each hg revision
hgbranch = {}
# Number of new changesets converted from hg in this run
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
    """Print command-line usage and option help to stdout."""
    print """\
%s: [OPTIONS] <hgprj>

options:
    -s, --gitstate=FILE: name of the state to be saved/read
                         for incrementals
    -n, --nrepack=INT: number of changesets that will trigger
                       a repack (default=0, -1 to deactivate)
    -v, --verbose: be verbose

required:
    hgprj:  name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
    """Build a shell snippet exporting git author/committer variables.

    `user` is expected to look like 'Full Name <email>'; if it does not
    match that shape, the whole string is used as the name and the e-mail
    variables are exported empty.  `date` is used verbatim for both the
    author and committer dates.
    """
    matched = re.match(r'(.*?)\s+<(.*)>', user)
    if matched:
        author, email = matched.group(1), matched.group(2)
        pieces = [
            'export GIT_AUTHOR_NAME="%s" ;' % author,
            'export GIT_COMMITTER_NAME="%s" ;' % author,
            'export GIT_AUTHOR_EMAIL="%s" ;' % email,
            'export GIT_COMMITTER_EMAIL="%s" ;' % email,
        ]
    else:
        pieces = [
            'export GIT_AUTHOR_NAME="%s" ;' % user,
            'export GIT_COMMITTER_NAME="%s" ;' % user,
            'export GIT_AUTHOR_EMAIL= ;',
            'export GIT_COMMITTER_EMAIL= ;',
        ]
    pieces.append('export GIT_AUTHOR_DATE="%s" ;' % date)
    pieces.append('export GIT_COMMITTER_DATE="%s" ;' % date)
    return ''.join(pieces)
#------------------------------------------------------------------------------
state = ''
opt_nrepack = 0
verbose = False

# Parse command-line options; any error (bad option, wrong arg count)
# falls through to usage() and exit.
try:
    opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
    for o, a in opts:
        if o in ('-s', '--gitstate'):
            state = a
            state = os.path.abspath(state)
        if o in ('-n', '--nrepack'):
            opt_nrepack = int(a)
        if o in ('-v', '--verbose'):
            verbose = True
    if len(args) != 1:
        raise Exception('params')
except:
    usage()
    sys.exit(1)

hgprj = args[0]
os.chdir(hgprj)

# Restore the hg-rev -> git-commit map from a previous incremental run.
if state:
    if os.path.exists(state):
        if verbose:
            print 'State does exist, reading'
        f = open(state, 'r')
        hgvers = pickle.load(f)
    else:
        print 'State does not exist, first run'

# Ask hg for the tip revision number; a non-zero popen exit aborts.
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
    sys.exit(1)
if verbose:
    print 'tip is', tip

# Calculate the branches: build child/parent maps and assign a git branch
# name to every hg revision before any conversion happens.
if verbose:
    print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
    hgchildren[str(cset)] = ()
    prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
    # strip the "rev:node" suffix, keeping just the revision number
    prnts = map(lambda x: x[:x.find(':')], prnts)
    if prnts[0] != '':
        parent = prnts[0].strip()
    else:
        # hg omits the parent when it is simply the previous revision
        parent = str(cset - 1)
    hgchildren[parent] += ( str(cset), )
    if len(prnts) > 1:
        mparent = prnts[1].strip()
        hgchildren[mparent] += ( str(cset), )
    else:
        mparent = None

    hgparents[str(cset)] = (parent, mparent)

    if mparent:
        # For merge changesets, take either one, preferably the 'master' branch
        if hgbranch[mparent] == 'master':
            hgbranch[str(cset)] = 'master'
        else:
            hgbranch[str(cset)] = hgbranch[parent]
    else:
        # Normal changesets
        # For first children, take the parent branch, for the others create a new branch
        if hgchildren[parent][0] == str(cset):
            hgbranch[str(cset)] = hgbranch[parent]
        else:
            hgbranch[str(cset)] = "branch-" + str(cset)

# First run only: initialize the target git repository in place.
if not hgvers.has_key("0"):
    print 'creating repository'
    os.system('git init')

# loop through every hg changeset
for cset in range(int(tip) + 1):

    # incremental, already seen
    if hgvers.has_key(str(cset)):
        continue
    hgnewcsets += 1

    # get info
    log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
    tag = log_data[0].strip()
    date = log_data[1].strip()
    user = log_data[2].strip()
    parent = hgparents[str(cset)][0]
    mparent = hgparents[str(cset)][1]

    # get comment into a temp file so it can be passed to `git commit -F`
    (fdcomment, filecomment) = tempfile.mkstemp()
    csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
    os.write(fdcomment, csetcomment)
    os.close(fdcomment)

    print '-----------------------------------------'
    print 'cset:', cset
    print 'branch:', hgbranch[str(cset)]
    print 'user:', user
    print 'date:', date
    print 'comment:', csetcomment
    if parent:
        print 'parent:', parent
    if mparent:
        print 'mparent:', mparent
    if tag:
        print 'tag:', tag
    print '-----------------------------------------'

    # checkout the parent if necessary
    if cset != 0:
        if hgbranch[str(cset)] == "branch-" + str(cset):
            print 'creating new branch', hgbranch[str(cset)]
            os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
        else:
            print 'checking out branch', hgbranch[str(cset)]
            os.system('git checkout %s' % hgbranch[str(cset)])

    # merge: record the merge ancestry with `-s ours`; the real tree is
    # rebuilt from hg below, so the merge content itself is discarded.
    if mparent:
        if hgbranch[parent] == hgbranch[str(cset)]:
            otherbranch = hgbranch[mparent]
        else:
            otherbranch = hgbranch[parent]
        print 'merging', otherbranch, 'into', hgbranch[str(cset)]
        os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))

    # remove everything except .git and .hg directories
    os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')

    # repopulate with checkouted files
    os.system('hg update -C %d' % cset)

    # add new files
    os.system('git ls-files -x .hg --others | git update-index --add --stdin')
    # delete removed files
    os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')

    # commit
    os.system(getgitenv(user, date) + 'git commit --allow-empty --allow-empty-message -a -F %s' % filecomment)
    os.unlink(filecomment)

    # tag
    if tag and tag != 'tip':
        os.system(getgitenv(user, date) + 'git tag %s' % tag)

    # delete branch if not used anymore...
    if mparent and len(hgchildren[str(cset)]):
        print "Deleting unused branch:", otherbranch
        os.system('git branch -d %s' % otherbranch)

    # retrieve and record the version (git commit hash of HEAD)
    vvv = os.popen('git show --quiet --pretty=format:%H').read()
    print 'record', cset, '->', vvv
    hgvers[str(cset)] = vvv

    # periodically repack to keep the object store compact
    if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
        os.system('git repack -a -d')

# write the state for incrementals
if state:
    if verbose:
        print 'Writing state'
    f = open(state, 'w')
    pickle.dump(hgvers, f)

# vim: et ts=8 sw=4 sts=4
| gpl-2.0 |
ehashman/oh-mainline | vendor/packages/twisted/doc/core/howto/listings/pb/pbAnonServer.py | 18 | 2759 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implement the realm for and run on port 8800 a PB service which allows both
anonymous and username/password based access.
Successful username/password-based login requests given an instance of
MyPerspective with a name which matches the username with which they
authenticated. Success anonymous login requests are given an instance of
MyPerspective with the name "Anonymous".
"""
from sys import stdout
from zope.interface import implements
from twisted.python.log import startLogging
from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm, Portal
from twisted.internet import reactor
from twisted.spread.pb import Avatar, IPerspective, PBServerFactory
class MyPerspective(Avatar):
    """
    Trivial avatar exposing a single remote method for demonstrative
    purposes.  All successful login attempts in this example will result in
    an avatar which is an instance of this class.

    @type name: C{str}
    @ivar name: The username which was used during login or C{"Anonymous"}
        if the login was anonymous (a real service might want to avoid the
        collision this introduces between anonymous users and authenticated
        users named "Anonymous").
    """
    def __init__(self, name):
        self.name = name

    # The perspective_ prefix makes this callable remotely via PB
    # (Twisted convention for Avatar subclasses).
    def perspective_foo(self, arg):
        """
        Print a simple message which gives the argument this method was
        called with and this avatar's name.
        """
        print "I am %s. perspective_foo(%s) called on %s." % (
            self.name, arg, self)
class MyRealm(object):
    """
    Trivial realm which supports anonymous and named users by creating
    avatars which are instances of MyPerspective for either.
    """
    implements(IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        """Create a MyPerspective for the authenticated (or anonymous) user.

        Returns the (interface, avatar, logout-callable) triple required
        by IRealm; only IPerspective requests are supported.
        """
        if IPerspective not in interfaces:
            raise NotImplementedError("MyRealm only handles IPerspective")
        # cred passes the ANONYMOUS sentinel for anonymous logins; map it
        # to a human-readable username.
        if avatarId is ANONYMOUS:
            avatarId = "Anonymous"
        return IPerspective, MyPerspective(avatarId), lambda: None
def main():
    """
    Create a PB server using MyRealm and run it on port 8800.
    """
    startLogging(stdout)

    p = Portal(MyRealm())

    # Here the username/password checker is registered.
    c1 = InMemoryUsernamePasswordDatabaseDontUse(user1="pass1", user2="pass2")
    p.registerChecker(c1)

    # Here the anonymous checker is registered.
    c2 = AllowAnonymousAccess()
    p.registerChecker(c2)

    # Serve the portal over Perspective Broker and run the reactor
    # (blocks until the reactor is stopped).
    reactor.listenTCP(8800, PBServerFactory(p))
    reactor.run()


if __name__ == '__main__':
    main()
| agpl-3.0 |
cisco-openstack/neutron | neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py | 40 | 1206 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial operations in support of address scopes
"""
# revision identifiers, used by Alembic.
revision = '52c5312f6baf'
down_revision = '599c6a226151'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the address_scopes table (expand-only migration step)."""
    columns = [
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.Column('shared', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('address_scopes', *columns)
cmorgan/pybrain | pybrain/rl/environments/mazes/tasks/shuttle.py | 25 | 2692 | __author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import array, zeros
from random import random
from .maze import MazeTask
from pybrain.rl.environments.mazes import PolarMaze
class ShuttleDocking(MazeTask):
    """
    #######
    #. *#
    #######

    The spaceship needs to dock backwards into the goal station.

    A small POMDP on a 1x5 polar corridor: the ship starts at the left
    ('.'), the station is at the right ('*'), and actions are noisy (see
    performAction).  Reaching the goal yields finalReward; bumping while
    docking forwards yields bangPenalty.
    """

    # Number of discrete actions (Backup / Forward / TurnAround).
    actions = 3
    # Observation vector length (see getObservation).
    observations = 5
    discount = 0.95
    mazeclass = PolarMaze
    finalReward = 10
    bangPenalty = -3

    initPos = [(1, 1)]
    topology = array([[1] * 7,
                      [1, 0, 0, 0, 0, 0, 1],
                      [1] * 7, ])
    goal = (1, 5)

    # Action codes.
    Backup = 0
    Forward = 1
    TurnAround = 2

    def reset(self):
        MazeTask.reset(self)
        # Start facing direction 1 (towards the station end of the corridor).
        self.env.perseusDir = 1

    def getObservation(self):
        """ inold, seeold, black, seenew, innew """
        # One-hot-ish observation: index 0 = at the start cell, 4 = at the
        # goal cell; otherwise an index derived from the heading, with
        # 30%/70% observation noise in the middle of the corridor.
        res = zeros(5)
        if self.env.perseus == self.env.goal:
            res[4] = 1
        elif self.env.perseus == self.env.initPos[0]:
            res[0] = 1
        elif self.env.perseus[1] == 3:
            if random() > 0.7:
                res[self.env.perseusDir] = 1
            else:
                res[(self.env.perseusDir + 2) % 4] = 1
        else:
            res[(self.env.perseusDir + 2) % 4] = 1
        return res

    def performAction(self, action):
        # TurnAround and Forward are deterministic; Backup succeeds only
        # with some probability, which depends on where the ship is and
        # which way it is facing (actuator noise).
        self.steps += 1
        if action == self.TurnAround:
            self._turn()
        elif action == self.Forward:
            self._forward()
        else: # noisy backup
            r = random()
            if self.env.perseus[1] == 3:
                # in space: 10% accidental turn, 80% backup, 10% nothing
                if r < 0.1:
                    self._turn()
                elif r < 0.9:
                    self._backup()
            elif ((self.env.perseus[1] == 2 and self.env.perseusDir == 3) or
                  (self.env.perseus[1] == 4 and self.env.perseusDir == 1)):
                # close to station, front to station:
                # 30% turn, 30% backup, 40% nothing
                if r < 0.3:
                    self._turn()
                elif r < 0.6:
                    self._backup()
            else:
                # close to station, back to station: 70% backup succeeds
                if r < 0.7:
                    self._backup()

    def _backup(self):
        # Move one cell backwards by turning, stepping, and turning back.
        self.env.performAction(PolarMaze.TurnAround)
        self.env.performAction(PolarMaze.Forward)
        self.env.performAction(PolarMaze.TurnAround)

    def _turn(self):
        self.env.performAction(PolarMaze.TurnAround)

    def _forward(self):
        # Driving forwards into the goal or the start cell counts as a
        # collision ("bang"): position is reverted and the penalty applies.
        old = self.env.perseus
        self.env.performAction(PolarMaze.TurnAround)
        if self.env.perseus == self.env.goal or self.env.perseus == self.env.initPos[0]:
            self.env.perseus = old
            self.env.bang = True
| bsd-3-clause |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/tools/roslaunch/src/roslaunch/server.py | 1 | 19895 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
from __future__ import print_function
"""
XML-RPC servers for parent and children
Following typical XmlRpcNode code, code is divided into:
a) Handlers: these actually define and execute the XML-RPC API
b) Nodes: these run the XML-RPC server
In this code you'll find 'Parent' and 'Child' code. The parent node
is the original roslaunch process. The child nodes are the child
processes it launches in order to handle remote launching (or
execution as a different user).
"""
import logging
import os
import socket
import sys
import time
import traceback
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
import rosgraph.network as network
import rosgraph.xmlrpc as xmlrpc
import roslaunch.config
from roslaunch.pmon import ProcessListener, Process
import roslaunch.xmlloader
from roslaunch.launch import ROSLaunchRunner
from roslaunch.core import RLException, \
add_printlog_handler, add_printerrlog_handler, printlog, printerrlog, printlog_bold
#For using Log message level constants
from rosgraph_msgs.msg import Log
# interface class so that we don't have circular deps
class ChildROSLaunchProcess(Process):
    """
    API for remote roslaunch processes
    """
    def __init__(self, name, args, env):
        # Final positional False is passed through to roslaunch.pmon.Process;
        # presumably the respawn flag — TODO confirm against Process.__init__.
        super(ChildROSLaunchProcess, self).__init__('roslaunch', name, args, env, False)
        # XML-RPC URI of the child server; None until the child registers
        # back with the parent (see ROSLaunchParentHandler.register).
        self.uri = None

    def set_uri(self, uri):
        # Record the child's XML-RPC URI once it has registered.
        self.uri = uri
class ROSLaunchBaseHandler(xmlrpc.XmlRpcHandler):
    """
    Common XML-RPC API for the roslaunch server and child node.

    All remote methods follow the rosgraph convention of returning a
    (status code, status message, value) triple, with code 1 on success.
    """
    def __init__(self, pm):
        # pm: process monitor tracking launched processes; required by
        # every query method below.
        self.pm = pm
        self.logger = logging.getLogger('roslaunch.server')
        if self.pm is None:
            raise RLException("cannot create xmlrpc handler: pm is not initialized")

    #TODO: kill process, restart (with optional prefix). list active, list dead. CPU usage
    def list_processes(self):
        """
        @return: code, msg, process list.
           Process list is two lists, where first list of
           active process names along with the number of times that
           process has been spawned. Second list contains dead process
           names and their spawn count.
        @rtype: int, str, [[(str, int),], [(str,int),]]
        """
        return 1, "processes on parent machine", self.pm.get_process_names_with_spawn_count()

    def process_info(self, process_name):
        """
        @return: dictionary of metadata about process. Keys vary by implementation
        @rtype: int, str, dict
        """
        p = self.pm.get_process(process_name)
        if p is None:
            return -1, "no process by that name", {}
        else:
            return 1, "process info", p.get_info()

    def get_pid(self):
        """
        @return: code, msg, pid (of this roslaunch process)
        @rtype: int, str, int
        """
        pid = os.getpid()
        return 1, str(pid), pid

    def get_node_names(self):
        """
        @return: code, msg, list of node names
        @rtype: int, str, [str]
        """
        if self.pm is None:
            return 0, "uninitialized", []
        return 1, "node names", self.pm.get_active_names()

    def _shutdown(self, reason):
        """
        xmlrpc.XmlRpcHandler API: inform handler of shutdown
        @param reason: human-readable shutdown reason
        @type reason: str
        """
        # Base implementation does no cleanup; subclasses override.
        return 1, '', 1
# Uses camel-case network-API naming conventions
class ROSLaunchParentHandler(ROSLaunchBaseHandler):
    """
    XML-RPC API for the roslaunch server node.

    Children launched on remote machines call back into these methods to
    register themselves, report deaths, and forward log messages.
    """

    def __init__(self, pm, child_processes, listeners):
        """
        @param child_processes: Map of remote processes so that server can update processes
          with information as children register. Handler will not modify
          keys.
        @type child_processes: {name : ChildROSLaunchProcess}.
        @param listeners [ProcessListener]: list of
          listeners to notify when process_died events occur.
        """
        super(ROSLaunchParentHandler, self).__init__(pm)
        self.child_processes = child_processes
        self.listeners = listeners

    def register(self, client, uri):
        """
        Registration callback from newly launched roslaunch clients
        @param client: name of client
        @type client: str
        @param uri: XML-RPC URI of client
        @type uri: str
        @return: code, msg, ignore
        @rtype: int, str, int
        """
        if client not in self.child_processes:
            # Only children this server spawned may register.
            self.logger.error("Unknown child [%s] registered with server", client)
            return -1, "unknown child [%s]"%client, 0
        else:
            self.logger.info("child [%s] registered with server, uri[%s]", client, uri)
            self.child_processes[client].set_uri(uri)
        return 1, "registered", 1

    def list_children(self):
        """
        List the roslaunch child processes.
        @return int, str, [str]: code, msg, list of the roslaunch children URIS
        """
        # Children that have not registered yet (uri is None) are omitted.
        return 1, 'roslaunch children', [v.uri for v in self.child_processes.values() if v.uri is not None]

    def process_died(self, process_name, exit_code):
        """
        Inform roslaunch server that a remote process has died
        @param process_name: name of process that died
        @type process_name: str
        @param exit_code: exit code of remote process
        @type exit_code: int
        @return: code, msg, ignore
        @rtype: int, str, int
        """
        for l in self.listeners:
            try:
                l.process_died(process_name, exit_code)
            except:
                # a misbehaving listener must not break notification of
                # the remaining listeners
                self.logger.error(traceback.format_exc())
        return 1, '', 0

    def log(self, client, level, message):
        """
        Report a log message to the server
        @param client: name of client
        @type client: str
        @param level: log level (uses rosgraph_msgs.msg.Log levels)
        @type level: int
        @param message: message to log
        @type message: str
        """
        try:
            if level >= Log.ERROR:
                printerrlog("[%s]: %s"%(client, message))
            else:
                #hack due to the fact that we only have one INFO level
                if 'started with pid' in message:
                    printlog_bold("[%s]: %s"%(client, message))
                else:
                    printlog("[%s]: %s"%(client, message))
        except:
            # can't trust the logging system at this point, so just dump to screen
            traceback.print_exc()
        return 1, '', 1
class ROSLaunchChildHandler(ROSLaunchBaseHandler):
    """
    XML-RPC API implementation for child roslaunches
    NOTE: the client handler runs a process monitor so that
    it can track processes across requests
    """

    def __init__(self, run_id, name, server_uri, pm):
        """
        @param run_id: roslaunch session ID shared with the parent server
        @type run_id: str
        @param name: name of this child launcher
        @type name: str
        @param server_uri: XML-RPC URI of server
        @type server_uri: str
        @param pm: process monitor to use
        @type pm: L{ProcessMonitor}
        @raise RLException: If parameters are invalid
        """
        super(ROSLaunchChildHandler, self).__init__(pm)
        if server_uri is None:
            raise RLException("server_uri is not initialized")
        self.run_id = run_id

        # parse the URI to make sure it's valid
        _, urlport = network.parse_http_host_and_port(server_uri)
        if urlport <= 0:
            # BUGFIX: this previously interpolated the undefined name
            # 'm.uri', turning an invalid-URI report into a NameError.
            raise RLException("ERROR: roslaunch server URI is not a valid XML-RPC URI. Value is [%s]" % server_uri)

        self.name = name
        self.pm = pm
        self.server_uri = server_uri
        self.server = ServerProxy(server_uri)

    def _shutdown(self, reason):
        """
        xmlrpc.XmlRpcHandler API: inform handler of shutdown
        @param reason: human-readable shutdown reason
        @type reason: str
        """
        # Stop the process monitor and wait for it to exit; idempotent.
        if self.pm is not None:
            self.pm.shutdown()
            self.pm.join()
            self.pm = None

    def shutdown(self):
        """
        Remote API: shut down this child and its process monitor.
        @return: code, msg, ignore
        @rtype: int, str, int
        """
        self._shutdown("external call")
        return 1, "success", 1

    def _log(self, level, message):
        """
        log message to log file and roslaunch server
        @param level: log level
        @type level: int
        @param message: message to log
        @type message: str
        """
        try:
            if self.logger is not None:
                self.logger.debug(message)
            if self.server is not None:
                self.server.log(str(self.name), level, str(message))
        except:
            # logging must never take down the child; record and continue
            self.logger.error(traceback.format_exc())

    def launch(self, launch_xml):
        """
        Launch the roslaunch XML file. Because this is a child
        roslaunch, it will not set parameters nor manipulate the
        master. Call blocks until launch is complete
        @param launch_xml: roslaunch XML document to launch
        @type launch_xml: str
        @return: code, msg, [ [ successful launches], [failed launches] ]
        @rtype: int, str, [ [str], [str] ]
        """
        if self.pm is None:
            return 0, "uninitialized", -1

        rosconfig = roslaunch.config.ROSLaunchConfig()
        try:
            roslaunch.xmlloader.XmlLoader().load_string(launch_xml, rosconfig)
        except roslaunch.xmlloader.XmlParseException as e:
            return -1, "ERROR: %s" % e, [[], []]

        # won't actually do anything other than local, but still required
        rosconfig.assign_machines()

        try:
            # roslaunch clients try to behave like normal roslaunches as much as possible. It's
            # mainly the responsibility of the roslaunch server to not give us any XML that might
            # cause conflict (e.g. master tags, param tags, etc...).
            self._log(Log.INFO, "launching nodes...")
            runner = ROSLaunchRunner(self.run_id, rosconfig, server_uri=self.server_uri, pmon=self.pm)
            succeeded, failed = runner.launch()
            self._log(Log.INFO, "... done launching nodes")
            # enable the process monitor to exit of all processes die
            self.pm.registrations_complete()
            return 1, "launched", [succeeded, failed]
        except Exception:
            return 0, "ERROR: %s" % traceback.format_exc(), [[], []]
_STARTUP_TIMEOUT = 5.0 #seconds

class ROSLaunchNode(xmlrpc.XmlRpcNode):
    """
    Base XML-RPC server for roslaunch parent/child processes
    """

    def __init__(self, handler):
        """
        @param handler: xmlrpc api handler
        @type  handler: L{ROSLaunchBaseHandler}
        """
        # Port 0 asks the OS to pick any free port for the XML-RPC server.
        super(ROSLaunchNode, self).__init__(0, handler)

    def start(self):
        """
        Startup roslaunch server XML-RPC services
        @raise RLException: if server fails to start
        """
        logger = logging.getLogger('roslaunch.server')
        logger.info("starting roslaunch XML-RPC server")
        super(ROSLaunchNode, self).start()

        # wait for node thread to initialize and publish its URI
        timeout_t = time.time() + _STARTUP_TIMEOUT
        logger.info("waiting for roslaunch XML-RPC server to initialize")
        while not self.uri and time.time() < timeout_t:
            time.sleep(0.01)
        if not self.uri:
            raise RLException("XML-RPC initialization failed")

        # Make sure our xmlrpc server is actually up. We've seen very
        # odd cases where remote nodes are unable to contact the
        # server but have been unable to prove this is the cause.
        # NOTE: timeout_t is shared with the loop above, so this check only
        # gets whatever remains of the original _STARTUP_TIMEOUT budget.
        server_up = False
        while not server_up and time.time() < timeout_t:
            try:
                # self-call sanity check: the server must answer with our PID
                code, msg, val = ServerProxy(self.uri).get_pid()
                if val != os.getpid():
                    raise RLException("Server at [%s] did not respond with correct PID. There appears to be something wrong with the networking configuration"%self.uri)
                server_up = True
            except IOError:
                # presumably this can occur if we call in a small time
                # interval between the server socket port being
                # assigned and the XMLRPC server initializing, but it
                # is highly unlikely and unconfirmed
                time.sleep(0.1)
            except socket.error as e:
                # errno 113 is EHOSTUNREACH on Linux; errno.EHOSTUNREACH
                # would be the portable spelling -- TODO confirm intent.
                if e.errno == 113:
                    p = urlparse(self.uri)
                    raise RLException("Unable to contact the address [%s], which should be local.\nThis is generally caused by:\n * bad local network configuration\n * bad ROS_IP environment variable\n * bad ROS_HOSTNAME environment variable\nCan you ping %s?"%(self.uri, p.hostname))
                else:
                    time.sleep(0.1)
        if not server_up:
            p = urlparse(self.uri)
            raise RLException("""Unable to contact my own server at [%s].
This usually means that the network is not configured properly.
A common cause is that the machine cannot ping itself.  Please check
for errors by running:
\tping %s
For more tips, please see
\thttp://www.ros.org/wiki/ROS/NetworkSetup
"""%(self.uri, p.hostname))
        printlog_bold("started roslaunch server %s"%(self.uri))

    def run(self):
        """
        run() should not be called by higher-level code. ROSLaunchNode
        overrides underlying xmlrpc.XmlRpcNode implementation in order
        to log errors.
        """
        try:
            super(ROSLaunchNode, self).run()
        except:
            # log and report: a dead XML-RPC server is fatal for roslaunch
            logging.getLogger("roslaunch.remote").error(traceback.format_exc())
            print("ERROR: failed to launch XML-RPC server for roslaunch", file=sys.stderr)
class ROSLaunchParentNode(ROSLaunchNode):
    """
    XML-RPC server for parent roslaunch.
    """

    def __init__(self, rosconfig, pm):
        """
        @param rosconfig: ROSConfig launch configuration
        @type  rosconfig: L{ROSConfig}
        @param pm: process monitor
        @type  pm: L{ProcessMonitor}
        @raise RLException: if pm is not initialized
        """
        self.rosconfig = rosconfig
        # process listeners registered via add_process_listener()
        self.listeners = []
        self.child_processes = {} #{ child-name : ChildROSLaunchProcess}.

        if pm is None:
            raise RLException("cannot create parent node: pm is not initialized")
        handler = ROSLaunchParentHandler(pm, self.child_processes, self.listeners)
        super(ROSLaunchParentNode, self).__init__(handler)

    def add_child(self, name, p):
        """
        @param name: child roslaunch's name. NOTE: *name* is not
        the same as the machine config key.
        @type  name: str
        @param p: process handle of child
        @type  p: L{Process}
        """
        self.child_processes[name] = p

    def add_process_listener(self, l):
        """
        Listen to events about remote processes dying. Not
        threadsafe. Must be called before processes started.
        @param l: Process listener
        @type  l: L{ProcessListener}
        """
        self.listeners.append(l)
class _ProcessListenerForwarder(ProcessListener):
    """
    Simple listener that forwards ProcessListener events to a roslaunch server
    """

    def __init__(self, server):
        # XML-RPC proxy of the remote roslaunch server to notify.
        self.server = server

    def process_died(self, process_name, exit_code):
        """Relay a process-death notification; log (never raise) on failure."""
        try:
            self.server.process_died(process_name, exit_code)
        except Exception:
            remote_log = logging.getLogger("roslaunch.remote")
            remote_log.error(traceback.format_exc())
class ROSLaunchChildNode(ROSLaunchNode):
    """
    XML-RPC server for roslaunch child processes
    """

    def __init__(self, run_id, name, server_uri, pm):
        """
        Startup roslaunch remote client XML-RPC services. Blocks until shutdown.

        @param run_id: run ID of the roslaunch session
        @type  run_id: str
        @param name: name of remote client
        @type  name: str
        @param server_uri: XML-RPC URI of roslaunch server
        @type  server_uri: str
        @param pm: process monitor
        @type  pm: L{ProcessMonitor}
        @raise RLException: if pm is not initialized
        """
        self.logger = logging.getLogger("roslaunch.server")
        self.run_id = run_id
        self.name = name
        self.server_uri = server_uri
        self.pm = pm

        if self.pm is None:
            raise RLException("cannot create child node: pm is not initialized")
        handler = ROSLaunchChildHandler(self.run_id, self.name, self.server_uri, self.pm)
        super(ROSLaunchChildNode, self).__init__(handler)

    def _register_with_server(self):
        """
        Register child node with server
        @raise RLException: if registration fails or raises
        """
        name = self.name
        self.logger.info("attempting to register with roslaunch parent [%s]"%self.server_uri)
        try:
            server = ServerProxy(self.server_uri)
            code, msg, _ = server.register(name, self.uri)
            if code != 1:
                raise RLException("unable to register with roslaunch server: %s"%msg)
        except Exception as e:
            self.logger.error("Exception while registering with roslaunch parent [%s]: %s"%(self.server_uri, traceback.format_exc()))
            # fail
            raise RLException("Exception while registering with roslaunch parent [%s]: %s"%(self.server_uri, traceback.format_exc()))

        self.logger.debug("child registered with server")

        # register printlog handler so messages are funneled to remote
        def serverlog(msg):
            server.log(name, Log.INFO, msg)
        def servererrlog(msg):
            server.log(name, Log.ERROR, msg)
        add_printlog_handler(serverlog)
        add_printerrlog_handler(servererrlog)

        # register process listener to forward process death events to main server
        self.pm.add_process_listener(_ProcessListenerForwarder(server))

    def start(self):
        """
        Initialize child. Must be called before run
        """
        self.logger.info("starting roslaunch child process [%s], server URI is [%s]", self.name, self.server_uri)
        super(ROSLaunchChildNode, self).start()
        self._register_with_server()
| bsd-3-clause |
ndparker/wtf | wtf/app/dispatcher.py | 2 | 4308 | # -*- coding: ascii -*-
#
# Copyright 2007-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Application Wrapper
===================
This modules wraps the WSGI interface, initializes middleware and provides
an application friendly wrapper.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
from wtf import util as _util
from wtf.app import response as _response
class Dispatcher(object):
    """
    Main dispatching loop

    :IVariables:
     - `config`: Configuration
     - `opts`: Command line options
     - `args`: Positioned command line arguments

    :Types:
     - `config`: `wtf.config.Config`
     - `opts`: ``optparse.OptionContainer``
     - `args`: ``list``
    """

    def __init__(self, config, opts, args):
        """
        Initialization

        Loads the resolver, request/response classes and the cookie codec
        from dotted names given in the configuration (with defaults).

        :Parameters:
         - `config`: Configuration
         - `opts`: Command line options
         - `args`: Positioned command line arguments

        :Types:
         - `config`: `wtf.config.Config`
         - `opts`: ``optparse.OptionContainer``
         - `args`: ``list``
        """
        self.config, self.opts, self.args = config, opts, args
        self._resolver = _util.load_dotted(config.app.resolver)(
            config, opts, args
        )
        self._request = _util.load_dotted(
            config.app('request', 'wtf.app.request.Request'))
        self._response = _util.load_dotted(
            config.app('response', 'wtf.app.response.Response'))
        if 'codec' in config.app and 'cookie' in config.app.codec:
            cookie_codec = config.app.codec.cookie.encode('ascii')
        else:
            cookie_codec = 'wtf.app.cookie.DefaultCookie'
        # extra WSGI environ keys merged into every request's environ
        self._addenv = {
            'wtf.codec.cookie':
                _util.load_dotted(cookie_codec)(config, opts, args)(),
        }

    def __call__(self, environ, start_response):
        """
        WSGI entry point

        Resolves a handler for the request and runs it.  HTTP error
        responses raised by the handler trigger a lookup for a registered
        error handler; ``errorfuncs`` remembers handlers already tried so
        an error handler that raises again cannot cause an endless loop.

        :Parameters:
         - `environ`: WSGI environment
         - `start_response`: Response starter callable

        :Types:
         - `environ`: ``dict``
         - `start_response`: ``callable``
        """
        environ.update(self._addenv)
        req = self._request(environ)
        resp = self._response(req, start_response)
        func, errorfuncs = None, set()
        while True:
            try:
                try:
                    try:
                        if func is None:
                            func = self._resolver.resolve(req)
                        ret = func(req, resp)
                    except _response.Done:
                        ret = None
                        resp.write('') # make sure, start_response is called
                    return ret or []
                except _response.http.HTTPRedirectResponse, e:
                    # redirect locations must be absolute URIs (RFC 2616)
                    e.param['location'] = abs_location(
                        req, e.param['location']
                    )
                    raise
            except _response.http.HTTPResponse, e:
                resp.status(e.status, e.reason)
                func = self._resolver.error(e.status)
                if func and func not in errorfuncs: # avoid error loops
                    errorfuncs.add(func)
                    continue
                e.headers(resp.headers)
                resp.write('')
                return [e.body()]
            # never reached:
            break
def abs_location(request, location):
    """
    Make absolute location

    If *location* already carries a scheme and a netloc it is returned
    unchanged (as a byte string); otherwise it is resolved against the
    request's own URI.
    """
    import urlparse as _urlparse

    if isinstance(location, unicode):
        loc = location.encode('utf-8')
    else:
        loc = str(location)
    scheme, netloc = _urlparse.urlparse(loc)[:2]
    if scheme and netloc:
        return loc
    return str(request.abs_uri(loc, decode=False))
| apache-2.0 |
AICP/kernel_motorola_ghost | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils build output into the directories chosen by the
    # perf Makefile (module-level build_lib/build_tmp, read from env vars).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight out of the externally supplied build directory.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags: local defaults plus whatever the environment provides.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build directories are supplied by the perf Makefile via environment vars.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources, skipping blank lines and '#' comments.
# NOTE: file() is the Python 2 builtin; this script targets python2.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
		if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
7kbird/chrome | build/get_landmines.py | 6 | 2421 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
import landmine_utils
# Convenience aliases for the helpers in landmine_utils.
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform


def print_landmines():
  """
  ALL LANDMINES ARE EMITTED FROM HERE.

  Each printed line is one reason the current build configuration needs
  a clobber; conditional lines only apply to matching platform/builder
  combinations.
  """
  if (distributor() == 'goma' and platform() == 'win32' and
      builder() == 'ninja'):
    print 'Need to clobber winja goma due to backend cwd cache fix.'
  if platform() == 'android':
    print 'Clobber: To delete generated class files (we just use jars now).'
  if platform() == 'win' and builder() == 'ninja':
    print 'Compile on cc_unittests fails due to symbols removed in r185063.'
  if platform() == 'linux' and builder() == 'ninja':
    print 'Builders switching from make to ninja will clobber on this.'
  if platform() == 'mac':
    print 'Switching from bundle to unbundled dylib (issue 14743002).'
  if platform() in ('win', 'mac'):
    print ('Improper dependency for create_nmf.py broke in r240802, '
           'fixed in r240860.')
  if (platform() == 'win' and builder() == 'ninja' and
      gyp_msvs_version() == '2012' and
      gyp_defines().get('target_arch') == 'x64' and
      gyp_defines().get('dcheck_always_on') == '1'):
    print "Switched win x64 trybots from VS2010 to VS2012."
  if (platform() == 'win' and builder() == 'ninja' and
      gyp_msvs_version().startswith('2013')):
    print "Switched win from VS2010 to VS2013."
    print "Update to VS2013 Update 2."
  # Unconditional landmines: apply to every configuration.
  print 'Need to clobber everything due to an IDL change in r154579 (blink)'
  print 'Need to clobber everything due to gen file moves in r175513 (Blink)'
  if (platform() != 'ios'):
    print 'Clobber to get rid of obselete test plugin after r248358'
  print 'Clobber to rebuild GN files for V8'
  print 'Need to clobber everything due to build_nexe change in nacl r13424'
  print '[chromium-dev] PSA: clobber build needed for IDR_INSPECTOR_* compil...'
  print 'blink_resources.grd changed: crbug.com/400860'
def main():
  """Emit all landmine reasons to stdout and return a zero exit status."""
  print_landmines()
  return 0


if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
pixelrebel/st2 | st2api/st2api/controllers/root.py | 5 | 1860 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import expose
from st2common import __version__
from st2common import log as logging
from st2common.controllers import BaseRootController
import st2api.controllers.exp.root as exp_root
import st2api.controllers.v1.root as v1_root
__all__ = [
    'RootController'
]

LOG = logging.getLogger(__name__)


class RootController(BaseRootController):
    """Top-level pecan controller exposing the versioned API roots."""

    logger = LOG

    def __init__(self):
        # Wire up the versioned sub-controllers; requests without an
        # explicit version prefix fall through to v1.
        v1_controller = v1_root.RootController()
        self.controllers = {
            'v1': v1_controller,
            'exp': exp_root.RootController(),
        }
        self.default_controller = v1_controller

    @expose(generic=True, template='index.html')
    def index(self):
        """Render the landing page with the version and matching docs URL."""
        if 'dev' in __version__:
            docs_url = 'http://docs.stackstorm.com/latest'
        else:
            # e.g. '1.4.2' -> docs for '1.4'
            docs_version = '.'.join(__version__.split('.')[:2])
            docs_url = 'http://docs.stackstorm.com/%s' % docs_version
        return {'version': __version__, 'docs_url': docs_url}
| apache-2.0 |
todaychi/hue | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/chart/data_source.py | 10 | 4013 | """
Collection of utility primitives for charts.
"""
from openpyxl.compat import unicode
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Bool,
Typed,
Alias,
String,
Integer,
Sequence,
)
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.nested import (
NestedString,
NestedText,
NestedInteger,
)
class NumFmt(Serialisable):
    # Number format for chart values (presumably maps to the c:numFmt
    # element -- confirm against ECMA-376).

    formatCode = String()
    sourceLinked = Bool()

    def __init__(self,
                 formatCode=None,
                 sourceLinked=False
                ):
        self.formatCode = formatCode
        self.sourceLinked = sourceLinked


class NumVal(Serialisable):
    # A single numeric point: index plus optional format code and value.

    idx = Integer()
    formatCode = NestedText(allow_none=True, expected_type=unicode)
    v = NestedText(allow_none=True, expected_type=float)

    def __init__(self,
                 idx=None,
                 formatCode=None,
                 v=None,
                ):
        self.idx = idx
        self.formatCode = formatCode
        self.v = v


class NumData(Serialisable):
    # A sequence of NumVal points with an optional shared format code.

    formatCode = NestedText(expected_type=str, allow_none=True)
    ptCount = NestedInteger(allow_none=True)
    pt = Sequence(expected_type=NumVal)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('formatCode', 'ptCount', 'pt')

    def __init__(self,
                 formatCode=None,
                 ptCount=None,
                 pt=None,
                 extLst=None,
                ):
        # NOTE(review): extLst is accepted but never stored -- looks like
        # extension lists are deliberately dropped; confirm intent.
        self.formatCode = formatCode
        self.ptCount = ptCount
        self.pt = pt
class NumRef(Serialisable):
    # Numeric reference: a cell-range formula plus an optional cached copy
    # of the referenced values.

    f = NestedText(expected_type=unicode)
    ref = Alias('f')
    numCache = Typed(expected_type=NumData, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    # BUG FIX: ('f') is just the string 'f', not a tuple.  Iterating a
    # one-character string happens to yield 'f', so serialisation worked by
    # accident, but the declared type was wrong and inconsistent with
    # StrRef.__elements__ below.  A one-element tuple needs a trailing comma.
    __elements__ = ('f',)

    def __init__(self,
                 f=None,
                 numCache=None,
                 extLst=None,
                ):
        # NOTE(review): numCache/extLst are accepted but never stored --
        # presumably the cache is regenerated on save; confirm intent.
        self.f = f
class StrVal(Serialisable):
    # A single string point: index plus value.

    tagname = "strVal"

    idx = Integer()
    v = NestedString()

    def __init__(self,
                 idx=0,
                 v=None,
                ):
        self.idx = idx
        self.v = v


class StrData(Serialisable):
    # A sequence of string points (cached string data).

    tagname = "strData"

    ptCount = NestedInteger(allow_none=True)
    pt = Typed(expected_type=StrVal, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('ptCount', 'pt')

    def __init__(self,
                 ptCount=None,
                 pt=None,
                 extLst=None,
                ):
        # NOTE(review): extLst is accepted but never stored; confirm intent.
        self.ptCount = ptCount
        self.pt = pt


class StrRef(Serialisable):
    # String reference: a cell-range formula plus an optional string cache.

    tagname = "strRef"

    f = NestedText(expected_type=unicode, allow_none=True)
    strCache = Typed(expected_type=StrData, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('f', 'strCache')

    def __init__(self,
                 f=None,
                 strCache=None,
                 extLst=None,
                ):
        self.f = f
        self.strCache = strCache
class NumDataSource(Serialisable):
    # Numeric data source: either a reference (numRef) or a literal (numLit).

    numRef = Typed(expected_type=NumRef, allow_none=True)
    numLit = Typed(expected_type=NumData, allow_none=True)

    def __init__(self,
                 numRef=None,
                 numLit=None,
                ):
        self.numRef = numRef
        self.numLit = numLit


class AxDataSource(Serialisable):
    # Axis data source: numeric or string, as a reference or a literal.

    numRef = Typed(expected_type=NumRef, allow_none=True)
    numLit = Typed(expected_type=NumData, allow_none=True)
    strRef = Typed(expected_type=StrRef, allow_none=True)
    strLit = Typed(expected_type=StrData, allow_none=True)

    def __init__(self,
                 numRef=None,
                 numLit=None,
                 strRef=None,
                 strLit=None,
                ):
        self.numRef = numRef
        self.numLit = numLit
        self.strRef = strRef
        self.strLit = strLit
| apache-2.0 |
marqh/iris | lib/iris/tests/unit/analysis/geometry/test__extract_relevant_cube_slice.py | 17 | 3899 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for :func:`iris.analysis.geometry._extract_relevant_cube_slice`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris.tests.stock as stock
import shapely.geometry
from iris.analysis.geometry import _extract_relevant_cube_slice
class Test(tests.IrisTest):
    """Tests for geometry-driven extraction of the relevant cube slice."""

    def _bounded_cube(self):
        # Standard lat/lon test cube with contiguous bounds guessed on
        # both horizontal dimension coordinates.
        cube = stock.lat_lon_cube()
        cube.dim_coords[0].guess_bounds()
        cube.dim_coords[1].guess_bounds()
        return cube

    def _check(self, geometry, indices, bbox):
        # Extracting with `geometry` must yield the sub-cube selected by
        # `indices`, its x/y coordinates, and the expected bounding box.
        cube = self._bounded_cube()
        subcube = cube[indices]
        target = (subcube,
                  subcube.coords(axis='x')[0],
                  subcube.coords(axis='y')[0],
                  bbox)
        actual = _extract_relevant_cube_slice(cube, geometry)
        self.assertEqual(target, actual)

    def test_polygon_smaller_than_cube(self):
        self._check(shapely.geometry.box(-0.4, -0.4, 0.4, 0.4),
                    (1, 1), (1, 1, 1, 1))

    def test_polygon_larger_than_cube(self):
        self._check(shapely.geometry.box(-0.6, -0.6, 0.6, 0.6),
                    (slice(None), slice(None, 3)), (0, 0, 2, 2))

    def test_polygon_on_cube_boundary(self):
        self._check(shapely.geometry.box(-0.5, -0.5, 0.5, 0.5),
                    (1, 1), (1, 1, 1, 1))

    def test_rotated_polygon_on_cube_boundary(self):
        diamond = shapely.geometry.Polygon(
            ((0., -.5), (-.5, 0.), (0., .5), (.5, 0.)))
        self._check(diamond, (1, 1), (1, 1, 1, 1))

    def test_rotated_polygon_larger_than_cube_boundary(self):
        diamond = shapely.geometry.Polygon(
            ((0., -.6), (-.6, 0.), (0., .6), (.6, 0.)))
        self._check(diamond, (slice(None), slice(None, 3)), (0, 0, 2, 2))


if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
ReganBell/QReview | networkx/generators/tests/test_degree_seq.py | 61 | 5734 | #!/usr/bin/env python
from nose.tools import *
import networkx
from networkx import *
from networkx.generators.degree_seq import *
from networkx.utils import uniform_sequence,powerlaw_sequence
# An empty degree sequence must produce an empty graph.
def test_configuration_model_empty():
    # empty graph has empty degree sequence
    deg_seq=[]
    G=configuration_model(deg_seq)
    assert_equal(G.degree(), {})

# The generated graph realises the requested degree sequence, and a fixed
# seed is reproducible.
def test_configuration_model():
    deg_seq=[5,3,3,3,3,2,2,2,1,1,1]
    G=configuration_model(deg_seq,seed=12345678)
    assert_equal(sorted(G.degree().values(),reverse=True),
                 [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
    assert_equal(sorted(G.degree(range(len(deg_seq))).values(),
                        reverse=True),
                 [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
    # test that fixed seed delivers the same graph
    deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
    G1=configuration_model(deg_seq,seed=1000)
    G2=configuration_model(deg_seq,seed=1000)
    assert_true(is_isomorphic(G1,G2))
    G1=configuration_model(deg_seq,seed=10)
    G2=configuration_model(deg_seq,seed=10)
    assert_true(is_isomorphic(G1,G2))

# Directed create_using is rejected.  (NOTE: "configuation" is a historic
# typo in the test name; renaming would change the collected test id.)
@raises(NetworkXError)
def test_configuation_raise():
    z=[5,3,3,3,3,2,2,2,1,1,1]
    G = configuration_model(z, create_using=DiGraph())

# An odd degree sum is not graphical and must be rejected.
@raises(NetworkXError)
def test_configuation_raise_odd():
    z=[5,3,3,3,3,2,2,2,1,1]
    G = configuration_model(z, create_using=DiGraph())

# In/out degree sums must match for the directed variant.
@raises(NetworkXError)
def test_directed_configuation_raise_unequal():
    zin = [5,3,3,3,3,2,2,2,1,1]
    zout = [5,3,3,3,3,2,2,2,1,2]
    G = directed_configuration_model(zin, zout)

# Empty in/out sequences yield an empty directed graph.
def test_directed_configuation_mode():
    G = directed_configuration_model([],[],seed=0)
    assert_equal(len(G),0)
# An empty expected-degree sequence must produce an empty graph.
def test_expected_degree_graph_empty():
    # empty graph has empty degree sequence
    deg_seq=[]
    G=expected_degree_graph(deg_seq)
    assert_equal(G.degree(), {})

# Fixed seeds must be reproducible for the expected-degree generator.
def test_expected_degree_graph():
    # test that fixed seed delivers the same graph
    deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
    G1=expected_degree_graph(deg_seq,seed=1000)
    G2=expected_degree_graph(deg_seq,seed=1000)
    assert_true(is_isomorphic(G1,G2))
    G1=expected_degree_graph(deg_seq,seed=10)
    G2=expected_degree_graph(deg_seq,seed=10)
    assert_true(is_isomorphic(G1,G2))

# Reproducibility must also hold with selfloops disabled.
def test_expected_degree_graph_selfloops():
    deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
    G1=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
    G2=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
    assert_true(is_isomorphic(G1,G2))

# ... and for a skewed (one dominant node) degree sequence.
def test_expected_degree_graph_skew():
    deg_seq=[10,2,2,2,2]
    G1=expected_degree_graph(deg_seq,seed=1000)
    G2=expected_degree_graph(deg_seq,seed=1000)
    assert_true(is_isomorphic(G1,G2))
# Havel-Hakimi: valid sequences construct, non-graphical or malformed
# sequences (too-large degree, non-integer entries, directed create_using)
# raise NetworkXError.
def test_havel_hakimi_construction():
    G = havel_hakimi_graph([])
    assert_equal(len(G),0)
    z=[1000,3,3,3,3,2,2,2,1,1,1]
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z)
    z=["A",3,3,3,3,2,2,2,1,1,1]
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z)
    z=[5,4,3,3,3,2,2,2]
    G=havel_hakimi_graph(z)
    G=configuration_model(z)
    z=[6,5,4,4,2,1,1,1]
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z)
    z=[10,3,3,3,3,2,2,2,2,2,2]
    G=havel_hakimi_graph(z)
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z, create_using=DiGraph())

# Directed Havel-Hakimi round-trips the in/out degree sequences of random
# directed graphs, and rejects non-graphical, unequal-sum or negative input.
def test_directed_havel_hakimi():
    # Test range of valid directed degree sequences
    n, r = 100, 10
    p = 1.0 / r
    for i in range(r):
        G1 = nx.erdos_renyi_graph(n,p*(i+1),None,True)
        din = list(G1.in_degree().values())
        dout = list(G1.out_degree().values())
        G2 = nx.directed_havel_hakimi_graph(din, dout)
        assert_true(din == list(G2.in_degree().values()))
        assert_true(dout == list(G2.out_degree().values()))
    # Test non-graphical sequence
    dout = [1000,3,3,3,3,2,2,2,1,1,1]
    din=[103,102,102,102,102,102,102,102,102,102]
    assert_raises(nx.exception.NetworkXError,
                  nx.directed_havel_hakimi_graph, din, dout)
    # Test valid sequences
    dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    din=[2, 2, 2, 2, 2, 2, 2, 2, 0, 2]
    G2 = nx.directed_havel_hakimi_graph(din, dout)
    assert_true(din == list(G2.in_degree().values()))
    assert_true(dout == list(G2.out_degree().values()))
    # Test unequal sums
    din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
    assert_raises(nx.exception.NetworkXError,
                  nx.directed_havel_hakimi_graph, din, dout)
    # Test for negative values
    din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2]
    assert_raises(nx.exception.NetworkXError,
                  nx.directed_havel_hakimi_graph, din, dout)
# A tree realisation has n nodes and sum(z)/2 edges; directed create_using
# and non-tree sequences are rejected.
def test_degree_sequence_tree():
    z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    G=degree_sequence_tree(z)
    assert_true(len(G.nodes())==len(z))
    assert_true(len(G.edges())==sum(z)/2)
    assert_raises(networkx.exception.NetworkXError,
                  degree_sequence_tree, z, create_using=DiGraph())
    z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_raises(networkx.exception.NetworkXError,
                  degree_sequence_tree, z)

# The random generator must realise the requested degree sequence exactly.
def test_random_degree_sequence_graph():
    d=[1,2,2,3]
    G = nx.random_degree_sequence_graph(d)
    assert_equal(d, list(G.degree().values()))

# Unfeasible (non-graphical) sequences raise NetworkXUnfeasible.
def test_random_degree_sequence_graph_raise():
    z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_raises(networkx.exception.NetworkXUnfeasible,
                  random_degree_sequence_graph, z)

# Larger case: the degree multiset of a random graph is preserved.
def test_random_degree_sequence_large():
    G = nx.fast_gnp_random_graph(100,0.1)
    d = G.degree().values()
    G = nx.random_degree_sequence_graph(d, seed=0)
    assert_equal(sorted(d), sorted(list(G.degree().values())))
| bsd-3-clause |
IndraVikas/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data
distribution and can therefore model the complex shape of the data much
better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data: two 2-D projections of the Boston housing features.
X1 = load_boston()['data'][:, [8, 10]]  # two clusters
X2 = load_boston()['data'][:, [5, 12]]  # "banana"-shaped

# Define "classifiers" to be used; contamination/nu fix the expected
# fraction of outliers for all three estimators.
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.261),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.261),
    "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}

# Learn a frontier for outlier detection with several classifiers.
# Each grid covers the plotting range of the corresponding data set.
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(1)
    clf.fit(X1)
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    # Contour at level 0 is the learned decision frontier.
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])

# Snapshot the dict contents as parallel lists (contour sets / names).
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
             xycoords="data", textcoords="data",
             xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
# Legend handles are the first LineCollection of each contour set; the
# labels are the classifier names.
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")

legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )

plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           # BUG FIX: the labels must be the classifier names (keys), not the
           # contour-set objects (values); figure 1 above does this correctly.
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")

# Display both figures.
plt.show()
| bsd-3-clause |
Julian/home-assistant | homeassistant/components/light/vera.py | 1 | 1886 | """
Support for Vera lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.vera/
"""
import logging
from homeassistant.components.light import ATTR_BRIGHTNESS, Light
from homeassistant.const import (
STATE_OFF, STATE_ON)
from homeassistant.components.vera import (
VeraDevice, VERA_DEVICES, VERA_CONTROLLER)
# Module-level logger.  NOTE(review): _LOGGER appears unused in this module.
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Setup Vera lights."""
    discovered = VERA_DEVICES['light']
    add_devices_callback(
        VeraLight(dev, VERA_CONTROLLER) for dev in discovered)
class VeraLight(VeraDevice, Light):
    """Representation of a Vera light, including dimmable models."""

    def __init__(self, vera_device, controller):
        """Initialize the light with its vera device and controller."""
        # Cached on/off state; refreshed via update()/turn_on()/turn_off().
        self._state = False
        VeraDevice.__init__(self, vera_device, controller)

    @property
    def brightness(self):
        """Return the brightness of the light (None when not dimmable)."""
        if not self.vera_device.is_dimmable:
            return None
        return self.vera_device.get_brightness()

    def turn_on(self, **kwargs):
        """Turn the light on, honoring a requested brightness if dimmable."""
        wants_dim = ATTR_BRIGHTNESS in kwargs and self.vera_device.is_dimmable
        if wants_dim:
            self.vera_device.set_brightness(kwargs[ATTR_BRIGHTNESS])
        else:
            self.vera_device.switch_on()
        self._state = STATE_ON
        self.update_ha_state(True)

    def turn_off(self, **kwargs):
        """Turn the light off."""
        self.vera_device.switch_off()
        self._state = STATE_OFF
        self.update_ha_state()

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def update(self):
        """Called by the vera device callback to update state."""
        self._state = self.vera_device.is_switched_on()
| mit |
ottobackwards/metron | metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.14/package/scripts/params.py | 17 | 6818 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script import Script
def yamlify_variables(var):
    """Render a config value for use in a YAML template.

    Booleans become the lowercase strings 'true'/'false' (YAML style);
    every other value is returned unchanged.
    """
    # isinstance(var, bool) is the idiomatic spelling of the original
    # isinstance(var, type(True)).
    if isinstance(var, bool):
        return str(var).lower()
    else:
        return var
# server configurations
config = Script.get_config()

# --- elastic-site / elastic-sysconfig: install layout and process limits ---
masters_also_are_datanodes = config['configurations']['elastic-site']['masters_also_are_datanodes']
elastic_home = config['configurations']['elastic-sysconfig']['elastic_home']
data_dir = config['configurations']['elastic-sysconfig']['data_dir']
work_dir = config['configurations']['elastic-sysconfig']['work_dir']
conf_dir = config['configurations']['elastic-sysconfig']['conf_dir']
heap_size = config['configurations']['elastic-sysconfig']['heap_size']
max_open_files = config['configurations']['elastic-sysconfig']['max_open_files']
max_map_count = config['configurations']['elastic-sysconfig']['max_map_count']

# --- elastic-env: service user/group and runtime directories ---
elastic_user = config['configurations']['elastic-env']['elastic_user']
elastic_group = config['configurations']['elastic-env']['elastic_group']
log_dir = config['configurations']['elastic-env']['elastic_log_dir']
pid_dir = config['configurations']['elastic-env']['elastic_pid_dir']
hostname = config['hostname']
java64_home = config['hostLevelParams']['java_home']
elastic_env_sh_template = config['configurations']['elastic-env']['content']
sysconfig_template = config['configurations']['elastic-sysconfig']['content']

# --- elastic-site: cluster topology, networking and index tuning ---
cluster_name = config['configurations']['elastic-site']['cluster_name']
zen_discovery_ping_unicast_hosts = config['configurations']['elastic-site']['zen_discovery_ping_unicast_hosts']
path_data = config['configurations']['elastic-site']['path_data']
http_cors_enabled = config['configurations']['elastic-site']['http_cors_enabled']
http_port = config['configurations']['elastic-site']['http_port']
transport_tcp_port = config['configurations']['elastic-site']['transport_tcp_port']
recover_after_time = config['configurations']['elastic-site']['recover_after_time']
gateway_recover_after_data_nodes = config['configurations']['elastic-site']['gateway_recover_after_data_nodes']
expected_data_nodes = config['configurations']['elastic-site']['expected_data_nodes']
index_merge_scheduler_max_thread_count = config['configurations']['elastic-site']['index_merge_scheduler_max_thread_count']
index_translog_flush_threshold_size = config['configurations']['elastic-site']['index_translog_flush_threshold_size']
index_refresh_interval = config['configurations']['elastic-site']['index_refresh_interval']
indices_memory_index_store_throttle_type = config['configurations']['elastic-site']['indices_memory_index_store_throttle_type']
index_number_of_shards = config['configurations']['elastic-site']['index_number_of_shards']
index_number_of_replicas = config['configurations']['elastic-site']['index_number_of_replicas']
indices_memory_index_buffer_size = config['configurations']['elastic-site']['indices_memory_index_buffer_size']
# Booleans go through yamlify_variables() so they render as 'true'/'false'.
bootstrap_memory_lock = yamlify_variables(config['configurations']['elastic-site']['bootstrap_memory_lock'])
threadpool_bulk_queue_size = config['configurations']['elastic-site']['threadpool_bulk_queue_size']
cluster_routing_allocation_node_concurrent_recoveries = config['configurations']['elastic-site']['cluster_routing_allocation_node_concurrent_recoveries']
cluster_routing_allocation_disk_watermark_low = config['configurations']['elastic-site']['cluster_routing_allocation_disk_watermark_low']
cluster_routing_allocation_disk_threshold_enabled = yamlify_variables(config['configurations']['elastic-site']['cluster_routing_allocation_disk_threshold_enabled'])
cluster_routing_allocation_disk_watermark_high = config['configurations']['elastic-site']['cluster_routing_allocation_disk_watermark_high']
indices_fielddata_cache_size = config['configurations']['elastic-site']['indices_fielddata_cache_size']
indices_cluster_send_refresh_mapping = yamlify_variables(config['configurations']['elastic-site']['indices_cluster_send_refresh_mapping'])
threadpool_index_queue_size = config['configurations']['elastic-site']['threadpool_index_queue_size']

# --- zen discovery fault-detection timings ---
discovery_zen_ping_timeout = config['configurations']['elastic-site']['discovery_zen_ping_timeout']
discovery_zen_fd_ping_interval = config['configurations']['elastic-site']['discovery_zen_fd_ping_interval']
discovery_zen_fd_ping_timeout = config['configurations']['elastic-site']['discovery_zen_fd_ping_timeout']
discovery_zen_fd_ping_retries = config['configurations']['elastic-site']['discovery_zen_fd_ping_retries']

network_host = config['configurations']['elastic-site']['network_host']
network_publish_host = config['configurations']['elastic-site']['network_publish_host']

# --- OS-level ulimits written to /etc/security/limits.d ---
limits_conf_dir = "/etc/security/limits.d"
limits_conf_file = limits_conf_dir + "/elasticsearch.conf"
elastic_user_nofile_limit = config['configurations']['elastic-env']['elastic_user_nofile_limit']
elastic_user_nproc_limit = config['configurations']['elastic-env']['elastic_user_nproc_limit']
elastic_user_memlock_soft_limit = config['configurations']['elastic-env']['elastic_user_memlock_soft_limit']
elastic_user_memlock_hard_limit = config['configurations']['elastic-env']['elastic_user_memlock_hard_limit']

# the status check (service elasticsearch status) cannot be run by the 'elasticsearch'
# user due to the default permissions that are set when the package is installed. the
# status check must be run as root
elastic_status_check_user = 'root'

# when using the RPM or Debian packages on systems that use systemd, system limits
# must be specified via systemd.
# see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setting-system-settings.html#systemd
systemd_parent_dir = '/etc/systemd/system/'
systemd_elasticsearch_dir = systemd_parent_dir + 'elasticsearch.service.d/'
systemd_override_file = systemd_elasticsearch_dir + 'override.conf'
systemd_override_template = config['configurations']['elastic-systemd']['content']

# NOTE(review): this intentionally overrides the elastic-sysconfig heap_size
# read above with the elastic-jvm-options value -- confirm that is desired.
heap_size = config['configurations']['elastic-jvm-options']['heap_size']
jvm_options_template = config['configurations']['elastic-jvm-options']['content']
| apache-2.0 |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/plat-os2emx/grp.py | 67 | 5291 | # this module is an OS/2 oriented replacement for the grp standard
# extension module.
# written by Andrew MacIntyre, April 2001.
# updated July 2003, adding field accessor support
# note that this implementation checks whether ":" or ";" as used as
# the field separator character.
"""Replacement for grp standard extension module, intended for use on
OS/2 and similar systems which don't normally have an /etc/group file.
The standard Unix group database is an ASCII text file with 4 fields per
record (line), separated by a colon:
- group name (string)
- group password (optional encrypted string)
- group id (integer)
- group members (comma delimited list of userids, with no spaces)
Note that members are only included in the group file for groups that
aren't their primary groups.
(see the section 8.2 of the Python Library Reference)
This implementation differs from the standard Unix implementation by
allowing use of the platform's native path separator character - ';' on OS/2,
DOS and MS-Windows - as the field separator in addition to the Unix
standard ":".
The module looks for the group database at the following locations
(in order first to last):
- ${ETC_GROUP} (or %ETC_GROUP%)
- ${ETC}/group (or %ETC%/group)
- ${PYTHONHOME}/Etc/group (or %PYTHONHOME%/Etc/group)
Classes
-------
None
Functions
---------
getgrgid(gid) - return the record for group-id gid as a 4-tuple
getgrnam(name) - return the record for group 'name' as a 4-tuple
getgrall() - return a list of 4-tuples, each tuple being one record
(NOTE: the order is arbitrary)
Attributes
----------
group_file - the path of the group database file
"""
import os
# try and find the group file: candidate locations in priority order
# (${ETC_GROUP}, ${ETC}/group, ${PYTHONHOME}/Etc/group).
__group_path = []
if os.environ.has_key('ETC_GROUP'):
    __group_path.append(os.environ['ETC_GROUP'])
if os.environ.has_key('ETC'):
    __group_path.append('%s/group' % os.environ['ETC'])
if os.environ.has_key('PYTHONHOME'):
    __group_path.append('%s/Etc/group' % os.environ['PYTHONHOME'])

# First candidate that can actually be opened wins; stays None if none can.
group_file = None
for __i in __group_path:
    try:
        __f = open(__i, 'r')
        __f.close()
        group_file = __i
        break
    except:
        # deliberate best-effort: any failure just moves to the next candidate
        pass

# decide what field separator we can try to use - Unix standard, with
# the platform's path separator as an option.  No special field conversion
# handlers are required for the group file.
__field_sep = [':']
if os.pathsep:
    if os.pathsep != ':':
        __field_sep.append(os.pathsep)
# helper routine to identify which separator character is in use
def __get_field_sep(record):
    """Return the separator character used in *record*, or raise KeyError."""
    fs = None
    for c in __field_sep:
        # there should be 3 delimiter characters (for 4 fields)
        if record.count(c) == 3:
            fs = c
            break
    if fs:
        return fs
    else:
        raise KeyError, '>> group database fields not delimited <<'
# class to match the new record field name accessors.
# the resulting object is intended to behave like a read-only tuple,
# with each member also accessible by a field name.
class Group:
    """Read-only group record: a 4-tuple with gr_* attribute access."""

    def __init__(self, name, passwd, gid, mem):
        # Populate via __dict__ directly so that __setattr__ (which makes
        # the record immutable) is not triggered during construction.
        self.__dict__['gr_name'] = name
        self.__dict__['gr_passwd'] = passwd
        self.__dict__['gr_gid'] = gid
        self.__dict__['gr_mem'] = mem
        self.__dict__['_record'] = (name, passwd, gid, mem)

    def __len__(self):
        # a group record always has exactly four fields
        return 4

    def __getitem__(self, key):
        # tuple-style indexed access
        return self._record[key]

    def __setattr__(self, name, value):
        # the record is read-only once constructed
        raise AttributeError('attribute read-only: %s' % name)

    def __repr__(self):
        return str(self._record)

    def __cmp__(self, other):
        # Py2-style three-way comparison against the string form of the record
        this = str(self._record)
        if this == other:
            return 0
        if this < other:
            return -1
        return 1
# read the whole file, parsing each entry into tuple form
# with dictionaries to speed recall by GID or group name
def __read_group_file():
    """Parse the group database; return (by-gid dict, by-name dict)."""
    if group_file:
        group = open(group_file, 'r')
    else:
        raise KeyError, '>> no group database <<'
    gidx = {}
    namx = {}
    sep = None
    while 1:
        entry = group.readline().strip()
        if len(entry) > 3:
            if sep is None:
                # the separator is detected from the first plausible record
                sep = __get_field_sep(entry)
            fields = entry.split(sep)
            fields[2] = int(fields[2])
            fields[3] = [f.strip() for f in fields[3].split(',')]
            record = Group(*fields)
            # first record wins for duplicate gids/names
            if not gidx.has_key(fields[2]):
                gidx[fields[2]] = record
            if not namx.has_key(fields[0]):
                namx[fields[0]] = record
        elif len(entry) > 0:
            pass # skip empty or malformed records
        else:
            # NOTE(review): a fully blank line (or EOF) terminates parsing
            break
    group.close()
    if len(gidx) == 0:
        raise KeyError
    return (gidx, namx)
# return the group database entry by GID
def getgrgid(gid):
    """Look up a group record by numeric group id."""
    gidx, _namx = __read_group_file()
    return gidx[gid]
# return the group database entry by group name
def getgrnam(name):
    """Look up a group record by group name."""
    _gidx, namx = __read_group_file()
    return namx[name]
# return all the group database entries
def getgrall():
    """Return every group record (arbitrary order)."""
    gidx, _namx = __read_group_file()
    return gidx.values()
# test harness
if __name__ == '__main__':
    # exercising getgrall() forces a full parse of the group database
    getgrall()
| apache-2.0 |
vhavlena/appreal | netbench/pattern_match/b_state.py | 1 | 7324 | ###############################################################################
# b_state.py: Module for PATTERN MATCH - base state class
# Copyright (C) 2010 Brno University of Technology, ANT @ FIT
# Author(s): Vlastimil Kosar <ikosar@fit.vutbr.cz>
###############################################################################
#
# LICENSE TERMS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. All advertising materials mentioning features or use of this software
# or firmware must display the following acknowledgement:
#
# This product includes software developed by the University of
# Technology, Faculty of Information Technology, Brno and its
# contributors.
#
# 4. Neither the name of the Company nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This software or firmware is provided ``as is'', and any express or implied
# warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are disclaimed.
# In no event shall the company or contributors be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
#
# $Id$
import pattern_exceptions
# Maps class name to corresponding class specification char.  For testing if
# an object is of a specified state class this dictionary has to be used.
types = {"b_State": '0', "ColouredState": '1'}
# Inverse mapping: specification char back to class name.
reverse_types = {'0': "b_State", '1': "ColouredState"}

class b_State:
    """
    A base class for state representation.

    :param mid: State unique identification number
    :type mid: int
    :param rnum: Set of regular expression numbers.  If the set is empty the
        state is not final.  The numbers identify which REs are matched in a
        particular final state.
    :type rnum: set(int)
    """
    def __init__(self, mid=0, rnum=None):
        """ Class constructor.

        :param mid: State unique identification number
        :type mid: int
        :param rnum: Set of regular expression numbers (None means empty).
        :type rnum: set(int)
        """
        # BUG FIX: the original signature used the mutable default
        # ``rnum=set()`` -- every instance built without an explicit rnum
        # shared (and could mutate) the same set object.  A None sentinel
        # is used instead; behaviour for explicit callers is unchanged.
        if rnum is None:
            rnum = set()
        self._id = mid    # State identification value
        self._rnum = rnum # Regular expression numbers (non-empty => final)
        self.ctype = types["b_State"]    # this state's type code
        self.stypes = [types["b_State"]] # type codes this state can join with

    def get_text(self):
        """Return the text description used in graph representations."""
        return str(self._id)

    def get_id(self):
        """Return the state identification number."""
        return self._id

    def set_id(self, new_id):
        """Set the id of the state to *new_id*."""
        self._id = new_id

    def is_final(self):
        """Return True if the state is a final state."""
        return self._rnum != set()

    def get_regexp_number(self):
        """Return the set of RE indexes this state corresponds to.

        An empty set means the state is not final.
        """
        return self._rnum

    def set_regexp_number(self, new_rnum):
        """Replace the RE index set; an empty set makes the state non-final."""
        self._rnum = new_rnum

    def get_type(self):
        """Return the type code of this state."""
        return self.ctype

    def get_support_type(self):
        """Return the state type codes supported by this type of state."""
        return self.stypes

    def join(self, other):
        """Join self and *other*; return the new state.

        :raises: pattern_exceptions.state_join_exception when neither state
            supports the other's type.
        """
        if other.get_type() in self.get_support_type():
            return self.compute_join(other)
        elif self.get_type() in other.get_support_type():
            return other.compute_join(self)
        else:
            raise pattern_exceptions.state_join_exception(self.get_type(), other.get_type())

    def compute_join(self, other):
        """Compute the join of two states.

        Note that the new state's ID is the placeholder -2 and must be set
        afterwards.
        """
        return b_State(-2, self.get_regexp_number() | other.get_regexp_number())

    def __str__(self):
        """Return the state identification value as a string."""
        return str(self._id)

    def __repr__(self):
        """Return '<id, rnum>' for debugging."""
        return "<" + str(self._id) + ", " + str(self._rnum) + ">"
###############################################################################
# End of File b_state.py #
###############################################################################
| gpl-2.0 |
craynot/django | tests/aggregation/models.py | 282 | 1444 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    """An author, optionally linked symmetrically to other authors as friends."""
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    friends = models.ManyToManyField('self', blank=True)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Publisher(models.Model):
    """A publisher with an award count and an optional duration field."""
    name = models.CharField(max_length=255)
    num_awards = models.IntegerField()
    duration = models.DurationField(blank=True, null=True)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Book(models.Model):
    """A book with numeric fields (pages, rating, price) and two Author relations."""
    isbn = models.CharField(max_length=9)
    name = models.CharField(max_length=255)
    pages = models.IntegerField()
    rating = models.FloatField()
    price = models.DecimalField(decimal_places=2, max_digits=6)
    authors = models.ManyToManyField(Author)
    # NOTE(review): distinct related_name presumably avoids a reverse-accessor
    # clash with the 'authors' relation above -- confirm.
    contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
    publisher = models.ForeignKey(Publisher, models.CASCADE)
    pubdate = models.DateField()

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Store(models.Model):
    """A store selling many books, with opening/closing date-time fields."""
    name = models.CharField(max_length=255)
    books = models.ManyToManyField(Book)
    original_opening = models.DateTimeField()
    friday_night_closing = models.TimeField()

    def __str__(self):
        return self.name
| bsd-3-clause |
danielbair/aeneas | aeneas/tests/tool_test_validate.py | 5 | 6291 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from aeneas.tools.validate import ValidateCLI
import aeneas.globalfunctions as gf
class TestValidateCLI(unittest.TestCase):
    """Exit-code tests for the ``aeneas.tools.validate`` command-line tool."""

    def execute(self, parameters, expected_exit_code):
        """Run ValidateCLI with *parameters* and check its exit code.

        Each parameter is a ``(type, value)`` pair: ``"in"`` values are
        resolved relative to this test file, ``"out"`` values are placed in
        a temporary directory (deleted afterwards), and any other value is
        passed to the CLI verbatim.
        """
        output_path = gf.tmp_directory()
        # the first argv slot is the (unused) program name
        params = ["placeholder"]
        for p_type, p_value in parameters:
            if p_type == "in":
                params.append(gf.absolute_path(p_value, __file__))
            elif p_type == "out":
                params.append(os.path.join(output_path, p_value))
            else:
                params.append(p_value)
        exit_code = ValidateCLI(use_sys=False).run(arguments=params)
        gf.delete_directory(output_path)
        self.assertEqual(exit_code, expected_exit_code)

    def test_help(self):
        # exit code 2 means "help/usage shown" for this CLI
        self.execute([], 2)
        self.execute([("", "-h")], 2)
        self.execute([("", "--help")], 2)
        self.execute([("", "--help-rconf")], 2)
        self.execute([("", "--version")], 2)

    def test_bad_type(self):
        self.execute([
            ("", "foo"),
            ("in", "../tools/res/config.txt")
        ], 2)

    def test_config_txt(self):
        self.execute([
            ("", "config"),
            ("in", "../tools/res/config.txt")
        ], 0)

    def test_config_txt_bad(self):
        self.execute([
            ("", "config"),
            ("in", "../tools/res/config.bad.txt")
        ], 1)

    def test_config_xml(self):
        self.execute([
            ("", "config"),
            ("in", "../tools/res/config.xml")
        ], 0)

    def test_config_xml_bad(self):
        self.execute([
            ("", "config"),
            ("in", "../tools/res/config.bad.xml")
        ], 1)

    def test_container(self):
        self.execute([
            ("", "container"),
            ("in", "../tools/res/job.zip")
        ], 0)

    def test_container_bad(self):
        self.execute([
            ("", "container"),
            ("in", "../tools/res/job_no_config.zip")
        ], 1)

    def test_container_too_many_tasks(self):
        self.execute([
            ("", "container"),
            ("in", "../tools/res/job.zip"),
            ("", "-r=\"job_max_tasks=1\"")
        ], 1)

    def test_job(self):
        self.execute([
            ("", "job"),
            ("", "job_language=it|os_job_file_name=output.zip|os_job_file_container=zip|is_hierarchy_type=flat")
        ], 0)

    def test_job_bad(self):
        # missing job_language makes the job configuration invalid
        self.execute([
            ("", "job"),
            ("", "os_job_file_name=output.zip|os_job_file_container=zip|is_hierarchy_type=flat")
        ], 1)

    def test_task(self):
        self.execute([
            ("", "task"),
            ("", "task_language=it|is_text_type=plain|os_task_file_name=output.txt|os_task_file_format=txt")
        ], 0)

    def test_task_bad(self):
        # missing os_task_file_format makes the task configuration invalid
        self.execute([
            ("", "task"),
            ("", "task_language=it|is_text_type=plain|os_task_file_name=output.txt")
        ], 1)

    def test_wizard(self):
        self.execute([
            ("", "wizard"),
            ("", "is_hierarchy_type=flat|is_hierarchy_prefix=assets/|is_text_file_relative_path=.|is_text_file_name_regex=.*\.xhtml|is_text_type=unparsed|is_audio_file_relative_path=.|is_audio_file_name_regex=.*\.mp3|is_text_unparsed_id_regex=f[0-9]+|is_text_unparsed_id_sort=numeric|os_job_file_name=demo_sync_job_output|os_job_file_container=zip|os_job_file_hierarchy_type=flat|os_job_file_hierarchy_prefix=assets/|os_task_file_name=\$PREFIX.xhtml.smil|os_task_file_format=smil|os_task_file_smil_page_ref=\$PREFIX.xhtml|os_task_file_smil_audio_ref=../Audio/\$PREFIX.mp3|job_language=en|job_description=Demo Sync Job"),
            ("in", "../tools/res/job_no_config.zip")
        ], 0)

    def test_wizard_bad(self):
        self.execute([
            ("", "wizard"),
            ("", "job_language=it|invalid=string"),
            ("in", "../tools/res/job_no_config.zip")
        ], 1)

    def test_read_missing_1(self):
        self.execute([
            ("", "config")
        ], 2)

    def test_read_missing_2(self):
        self.execute([
            ("in", "../tools/res/config.txt")
        ], 2)

    def test_read_missing_3(self):
        self.execute([
            ("", "job_language=it|invalid=string"),
            ("in", "../tools/res/job_no_config.zip")
        ], 2)

    def test_read_missing_4(self):
        self.execute([
            ("", "wizard"),
            ("in", "../tools/res/job_no_config.zip")
        ], 2)

    def test_read_missing_5(self):
        self.execute([
            ("", "wizard"),
            ("", "job_language=it|invalid=string")
        ], 2)

    def test_read_cannot_read_1(self):
        self.execute([
            ("", "config"),
            ("", "/foo/bar/baz.txt")
        ], 1)

    def test_read_cannot_read_2(self):
        self.execute([
            ("", "container"),
            ("", "/foo/bar/baz.txt")
        ], 1)

    def test_read_cannot_read_3(self):
        self.execute([
            ("", "config"),
            ("", "../tools/res/parsed.txt")
        ], 1)

    def test_read_cannot_read_4(self):
        self.execute([
            ("", "container"),
            ("", "../tools/res/config.txt")
        ], 1)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| agpl-3.0 |
import os
from setuptools import setup

# Set external files: long description and install requirements are read
# from files next to this setup.py.
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
    required = f.read().splitlines()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='soundscrape',
    version='0.10.0',
    packages=['soundscrape'],
    install_requires=required,
    include_package_data=True,
    # NOTE(review): 'license' says MIT but the classifier below says Apache
    # -- confirm which one is intended.
    license='MIT License',
    description='Scrape an artist from SoundCloud',
    long_description=README,
    url='https://github.com/Miserlou/SoundScrape',
    author='Rich Jones',
    author_email='rich@openwatch.net',
    entry_points={
        'console_scripts': [
            'soundscrape = soundscrape.soundscrape:main',
        ]
    },
    classifiers=[
        'Environment :: Console',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| mit |
mcardillo55/django | django/contrib/admin/bin/compress.py | 266 | 2282 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
# Locate the Closure Compiler jar bundled with the optional 'closure'
# package; fall back to the -c command-line option when it is absent.
try:
    import closure
except ImportError:
    closure_compiler = None
else:
    closure_compiler = os.path.join(os.path.dirname(closure.__file__), 'closure.jar')

# Directory holding the admin app's JavaScript sources.
js_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static', 'admin', 'js')
def main():
    """Minify admin JavaScript files with the Google Closure Compiler.

    With no file arguments the default set of jQuery-based admin scripts is
    compressed.  Requires the Closure Compiler jar and Java 6 or later.
    """
    description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('file', nargs='*')
    parser.add_argument("-c", dest="compiler", default="~/bin/compiler.jar",
                        help="path to Closure Compiler jar file")
    parser.add_argument("-v", "--verbose",
                        action="store_true", dest="verbose")
    parser.add_argument("-q", "--quiet",
                        action="store_false", dest="verbose")
    options = parser.parse_args()

    compiler = closure_compiler if closure_compiler else os.path.expanduser(options.compiler)
    if not os.path.exists(compiler):
        sys.exit(
            "Google Closure compiler jar file %s not found. Please use the -c "
            "option to specify the path." % compiler
        )

    if not options.file:
        if options.verbose:
            sys.stdout.write("No filenames given; defaulting to admin scripts\n")
        files = [os.path.join(js_path, f) for f in [
            "actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
    else:
        files = options.file

    for file_name in files:
        if not file_name.endswith(".js"):
            file_name = file_name + ".js"
        to_compress = os.path.expanduser(file_name)
        if os.path.exists(to_compress):
            # BUG FIX: strip only the trailing ".js" -- the original used
            # "".join(file_name.rsplit(".js")), which removed EVERY ".js"
            # substring and mangled names like "foo.js.bar.js".
            to_compress_min = "%s.min.js" % file_name[:-3]
            # BUG FIX: build argv as a list so paths containing spaces
            # survive; the original did cmd.split() on a formatted string.
            cmd = ["java", "-jar", compiler,
                   "--js", to_compress, "--js_output_file", to_compress_min]
            if options.verbose:
                sys.stdout.write("Running: %s\n" % " ".join(cmd))
            subprocess.call(cmd)
        else:
            sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
# Entry point when invoked directly from the command line.
if __name__ == '__main__':
    main()
| bsd-3-clause |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-irix5/flp.py | 64 | 13387 | #
# flp - Module to load fl forms from fd files
#
# Jack Jansen, December 1991
#
# Emit the Python 3 removal warning once at import time, then drop the name.
from warnings import warnpy3k
warnpy3k("the flp module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import string
import os
import sys
import FL
# Marker lines used inside .fd forms-definition files.
SPLITLINE = '--------------------'
FORMLINE = '=============== FORM ==============='
ENDLINE = '=============================='

class error(Exception):
    """Module-specific error raised for missing or malformed forms files."""
    pass
##################################################################
# Part 1 - The parsing routines                                  #
##################################################################

#
# Externally visible function. Load form.
#
def parse_form(filename, formname):
    """Return the named form from *filename*, using the cache when valid."""
    forms = checkcache(filename)
    if forms is None:
        forms = parse_forms(filename)
    if forms.has_key(formname):
        return forms[formname]
    else:
        raise error, 'No such form in fd file'
#
# Externally visible function. Load all forms.
#
def parse_forms(filename):
    """Parse every form in *filename*; return {formname: form} and cache it."""
    forms = checkcache(filename)
    if forms is not None: return forms
    fp = _open_formfile(filename)
    nforms = _parse_fd_header(fp)
    forms = {}
    for i in range(nforms):
        form = _parse_fd_form(fp, None)
        # forms are keyed by the Name attribute of the form object itself
        forms[form[0].Name] = form
    writecache(filename, forms)
    return forms
#
# Internal: see if a cached version of the file exists
#
MAGIC = '.fdc'

_internal_cache = {}                    # Used by frozen scripts only

def checkcache(filename):
    """Return cached forms for *filename*, or None when no valid cache exists."""
    if _internal_cache.has_key(filename):
        altforms = _internal_cache[filename]
        return _unpack_cache(altforms)
    import marshal
    fp, filename = _open_formfile2(filename)
    fp.close()
    cachename = filename + 'c'
    try:
        fp = open(cachename, 'r')
    except IOError:
        #print 'flp: no cache file', cachename
        return None
    try:
        if fp.read(4) != MAGIC:
            print 'flp: bad magic word in cache file', cachename
            return None
        # cache is only valid if its recorded mtime matches the source file
        cache_mtime = rdlong(fp)
        file_mtime = getmtime(filename)
        if cache_mtime != file_mtime:
            #print 'flp: outdated cache file', cachename
            return None
        #print 'flp: valid cache file', cachename
        altforms = marshal.load(fp)
        return _unpack_cache(altforms)
    finally:
        fp.close()
def _unpack_cache(altforms):
    """Rebuild form objects from the marshalled attribute dictionaries."""
    forms = {}
    for name in altforms.keys():
        altobj, altlist = altforms[name]
        obj = _newobj()
        obj.make(altobj)
        # NOTE: 'list' shadows the builtin here (kept for byte-compatibility)
        list = []
        for altobj in altlist:
            nobj = _newobj()
            nobj.make(altobj)
            list.append(nobj)
        forms[name] = obj, list
    return forms
def rdlong(fp):
    """Read a 4-byte big-endian integer from *fp*; return None on short read."""
    raw = fp.read(4)
    if len(raw) != 4:
        return None
    value = 0
    for ch in raw:
        value = (value << 8) | ord(ch)
    return value
def wrlong(fp, x):
    """Write integer *x* to *fp* as four big-endian chr-encoded bytes."""
    chars = [chr((x >> shift) & 0xff) for shift in (24, 16, 8, 0)]
    fp.write(''.join(chars))
def getmtime(filename):
    """Return *filename*'s modification time, or None when it cannot be stat'ed."""
    import os
    import stat
    try:
        status = os.stat(filename)
    except os.error:
        return None
    return status[stat.ST_MTIME]
#
# Internal: write cached version of the form (parsing is too slow!)
#
def writecache(filename, forms):
    """Write the parsed *forms* to a '.fdc' cache file next to *filename*."""
    import marshal
    fp, filename = _open_formfile2(filename)
    fp.close()
    cachename = filename + 'c'
    try:
        fp = open(cachename, 'w')
    except IOError:
        print 'flp: can\'t create cache file', cachename
        return # Never mind
    fp.write('\0\0\0\0') # Seek back and write MAGIC when done
    wrlong(fp, getmtime(filename))
    altforms = _pack_cache(forms)
    marshal.dump(altforms, fp)
    # now the whole cache is written successfully, stamp it as valid
    fp.seek(0)
    fp.write(MAGIC)
    fp.close()
    #print 'flp: wrote cache file', cachename
#
# External: print some statements that set up the internal cache.
# This is for use with the "freeze" script. You should call
# flp.freeze(filename) for all forms used by the script, and collect
# the output on a file in a module file named "frozenforms.py". Then
# in the main program of the script import frozenforms.
# (Don't forget to take this out when using the unfrozen version of
# the script!)
#
def freeze(filename):
    """Print Python statements that preload _internal_cache for *filename*.

    Intended for use with the "freeze" script: collect the output of one
    call per form file into a module and import it from the frozen program.
    """
    # parse_forms is defined earlier in this module (outside this view);
    # presumably it returns the same dict that checkcache() would.
    forms = parse_forms(filename)
    altforms = _pack_cache(forms)
    print 'import flp'
    print 'flp._internal_cache[', repr(filename), '] =', altforms
#
# Internal: create the data structure to be placed in the cache
#
def _pack_cache(forms):
    """Convert *forms* into plain dictionaries that marshal can serialize.

    Each _newobj attribute holder is replaced by its __dict__ so the
    structure contains only builtin types.
    """
    altforms = {}
    for name in forms.keys():
        formobj, objlist = forms[name]
        altforms[name] = formobj.__dict__, [o.__dict__ for o in objlist]
    return altforms
#
# Internal: Locate form file (using PYTHONPATH) and open file
#
def _open_formfile(filename):
    """Locate and open a .fd forms file; return only the open file object."""
    return _open_formfile2(filename)[0]
def _open_formfile2(filename):
    """Locate a forms file and open it.

    Appends a '.fd' suffix when missing.  Absolute paths are opened
    directly; relative names are searched along sys.path.  Returns a
    (file_object, resolved_filename) pair, or raises the module-level
    'error' when the file cannot be found.
    """
    if filename[-3:] != '.fd':
        filename = filename + '.fd'
    if filename[0] == '/':
        try:
            fp = open(filename,'r')
        except IOError:
            fp = None
    else:
        for pc in sys.path:
            pn = os.path.join(pc, filename)
            try:
                fp = open(pn, 'r')
                # Remember the resolved path so callers (and the cache
                # machinery) see the actual file that was opened.
                filename = pn
                break
            except IOError:
                fp = None
    if fp is None:
        raise error, 'Cannot find forms file ' + filename
    return fp, filename
#
# Internal: parse the fd file header, return number of forms
#
def _parse_fd_header(file):
    """Parse the .fd file header and return the number of forms in it.

    Raises the module-level 'error' when the magic line is missing.
    """
    # First read the magic header line
    datum = _parse_1_line(file)
    if datum != ('Magic', 12321):
        raise error, 'Not a forms definition file'
    # Now skip until we know number of forms
    while 1:
        datum = _parse_1_line(file)
        if type(datum) == type(()) and datum[0] == 'Numberofforms':
            break
    return datum[1]
#
# Internal: parse fd form, or skip if name doesn't match.
# the special value None means 'always parse it'.
#
def _parse_fd_form(file, name):
    """Parse one form from *file*.

    Returns a (form, objects) pair when the form's Name matches *name*
    (or when *name* is None, meaning "always parse"); otherwise skips the
    form's objects and returns None.  Raises 'error' when the expected
    '=== FORM ===' separator is missing.
    """
    datum = _parse_1_line(file)
    if datum != FORMLINE:
        raise error, 'Missing === FORM === line'
    form = _parse_object(file)
    if form.Name == name or name is None:
        objs = []
        for j in range(form.Numberofobjects):
            obj = _parse_object(file)
            objs.append(obj)
        return (form, objs)
    else:
        # Not the requested form: consume its objects so the caller is
        # positioned at the next form.
        for j in range(form.Numberofobjects):
            _skip_object(file)
    return None
#
# Internal class: a convenient place to store object info fields
#
class _newobj:
    """Attribute bag holding the fields parsed for one form or object."""

    def add(self, name, value):
        # Store one parsed field as an instance attribute.
        setattr(self, name, value)

    def make(self, mapping):
        # Bulk-load every entry of a dictionary as instance attributes.
        for key in mapping.keys():
            self.add(key, mapping[key])
#
# Internal parsing routines.
#
def _parse_string(s):
    """Parse a string field value, decoding backslash escapes when possible.

    If the value contains a backslash it is wrapped in single quotes and
    eval()'d so escapes such as '\\n' become real characters.  When that
    fails (e.g. the value itself contains an unescaped quote) the raw text
    is returned unchanged.

    Fix: the parameter previously shadowed the builtin `str`.
    """
    # NOTE: eval() on file content is inherited behavior; only trusted
    # .fd files should be parsed with this module.
    if '\\' in s:
        quoted = '\'' + s + '\''
        try:
            return eval(quoted)
        except:
            # Deliberate broad catch: any failure to interpret the escapes
            # falls back to the literal text (original semantics).
            pass
    return s
def _parse_num(s):
    """Parse a numeric field value (int or float) from its textual form.

    Fix: the parameter previously shadowed the builtin `str`.
    """
    # NOTE: eval() is inherited from the original parser; only trusted
    # .fd files should be parsed with this module.
    return eval(s)

def _parse_numlist(s):
    """Parse a whitespace-separated list of numbers into a Python list.

    Fixes: replaced the Python-2-only `string.split(s)` with the
    behaviorally identical `s.split()` method (works on Python 2 as well),
    renamed the parameter that shadowed `str`, and replaced the manual
    append loop with a comprehension.
    """
    return [_parse_num(tok) for tok in s.split()]
# This dictionary maps item names to parsing routines.
# If no routine is given '_parse_num' is default.
# Fix: the original literal listed the 'Name' key twice; the duplicate
# (harmless but misleading) has been removed.
_parse_func = {
    'Name': _parse_string,
    'Box': _parse_numlist,
    'Colors': _parse_numlist,
    'Label': _parse_string,
    'Callback': _parse_string,
    'Argument': _parse_string}
# This function parses a line, and returns either
# a string or a tuple (name,value)
import re
# Matches 'Fieldname: value' lines; group 1 is the name, group 2 the value.
prog = re.compile('^([^:]*): *(.*)')
def _parse_line(line):
    """Parse one 'Name: value' line into a (name, value) tuple.

    Lines that do not match the pattern are returned unchanged as strings.
    Field names starting with 'N' (e.g. 'Number of objects') have their
    internal whitespace removed and are normalized to capitalized form so
    they become valid attribute names.
    """
    match = prog.match(line)
    if not match:
        return line
    name, value = match.group(1, 2)
    if name[0] == 'N':
        # NOTE: string.join/split/lower/capitalize are the Python-2-era
        # string-module functions (removed in Python 3).
        name = string.join(string.split(name),'')
        name = string.lower(name)
        name = string.capitalize(name)
    try:
        pf = _parse_func[name]
    except KeyError:
        # Fields without a registered parser are treated as numeric.
        pf = _parse_num
    value = pf(value)
    return (name, value)
def _readline(file):
    """Read one line from *file*, stripped of its trailing newline.

    Raises EOFError at end of file.
    """
    line = file.readline()
    if line:
        return line[:-1]
    raise EOFError
def _parse_1_line(file):
    """Return the next non-blank line of *file*, parsed by _parse_line."""
    while 1:
        line = _readline(file)
        if line != '':
            break
    return _parse_line(line)
def _skip_object(file):
    """Consume one object's lines without parsing them.

    Stops at the next separator (SPLITLINE / FORMLINE / ENDLINE, defined
    earlier in this module, outside this view).  When the separator is a
    FORMLINE the file position is rewound so the next form parse sees it.
    """
    line = ''
    while not line in (SPLITLINE, FORMLINE, ENDLINE):
        pos = file.tell()
        line = _readline(file)
    if line == FORMLINE:
        file.seek(pos)
def _parse_object(file):
    """Parse one object's 'Name: value' lines into a _newobj.

    Stops at the next separator line; a FORMLINE is pushed back (via
    seek) so the caller can parse the following form.  Raises 'error'
    on a malformed line.
    """
    obj = _newobj()
    while 1:
        pos = file.tell()
        datum = _parse_1_line(file)
        if datum in (SPLITLINE, FORMLINE, ENDLINE):
            if datum == FORMLINE:
                file.seek(pos)
            return obj
        if type(datum) is not type(()) or len(datum) != 2:
            raise error, 'Parse error, illegal line in object: '+datum
        obj.add(datum[0], datum[1])
#################################################################
# Part 2 - High-level object/form creation routines #
#################################################################
#
# External - Create a form an link to an instance variable.
#
def create_full_form(inst, (fdata, odatalist)):
    """Create a form from parsed data and attach it to *inst*.

    The form is stored as an attribute of *inst* named after the form
    (fdata.Name); every object in *odatalist* is instantiated with its
    callbacks bound to methods of *inst*.
    """
    form = create_form(fdata)
    # NOTE(review): exec is used to set a dynamically named attribute;
    # setattr(inst, fdata.Name, form) would be the safer modern spelling.
    exec 'inst.'+fdata.Name+' = form\n'
    for odata in odatalist:
        create_object_instance(inst, form, odata)
#
# External - Merge a form into an existing form in an instance
# variable.
#
def merge_full_form(inst, form, (fdata, odatalist)):
    """Merge parsed form data into an existing *form* attached to *inst*.

    The first parsed object must be the form's background FL.BOX (already
    present in an existing form), so it is skipped; the remaining objects
    are instantiated onto *form*.
    """
    exec 'inst.'+fdata.Name+' = form\n'
    if odatalist[0].Class != FL.BOX:
        raise error, 'merge_full_form() expects FL.BOX as first obj'
    for odata in odatalist[1:]:
        create_object_instance(inst, form, odata)
#################################################################
# Part 3 - Low-level object/form creation routines #
#################################################################
#
# External Create_form - Create form from parameters
#
def create_form(fdata):
    """Create an empty FORMS form of the size recorded in *fdata*."""
    import fl
    return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
#
# External create_object - Create an object. Make sure there are
# no callbacks. Returns the object created.
#
def create_object(form, odata):
    """Create a free-standing object on *form* and return it.

    Raises 'error' if the parsed data carries a callback, since there is
    no instance to bind the callback to.
    """
    obj = _create_object(form, odata)
    if odata.Callback:
        raise error, 'Creating free object with callback'
    return obj
#
# External create_object_instance - Create object in an instance.
#
def create_object_instance(inst, form, odata):
    """Create an object on *form*, binding callback and name to *inst*.

    The callback named in the .fd data is looked up as a method of
    *inst*; a named object is also stored as an attribute of *inst*.
    """
    obj = _create_object(form, odata)
    if odata.Callback:
        # NOTE(review): eval/exec on names taken from the .fd file;
        # getattr/setattr would be the safer modern spelling.
        cbfunc = eval('inst.'+odata.Callback)
        obj.set_call_back(cbfunc, odata.Argument)
    if odata.Name:
        exec 'inst.' + odata.Name + ' = obj\n'
#
# Internal _create_object: Create the object and fill options
#
def _create_object(form, odata):
    """Instantiate one object described by *odata* on *form*.

    Selects the right fm.add_* constructor from the object's Class and
    copies the visual attributes; group markers (BEGIN_GROUP/END_GROUP)
    carry no visual attributes and are skipped.
    """
    crfunc = _select_crfunc(form, odata.Class)
    obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2],
        odata.Box[3], odata.Label)
    if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
        obj.boxtype = odata.Boxtype
        obj.col1 = odata.Colors[0]
        obj.col2 = odata.Colors[1]
        obj.align = odata.Alignment
        obj.lstyle = odata.Style
        obj.lsize = odata.Size
        obj.lcol = odata.Lcol
    return obj
#
# Internal crfunc: helper function that returns correct create function
#
def _select_crfunc(fm, cl):
    """Map an FL object class constant to the matching fm.add_* method.

    Raises 'error' for unknown class constants.
    """
    if cl == FL.BEGIN_GROUP: return fm.bgn_group
    elif cl == FL.END_GROUP: return fm.end_group
    elif cl == FL.BITMAP: return fm.add_bitmap
    elif cl == FL.BOX: return fm.add_box
    elif cl == FL.BROWSER: return fm.add_browser
    elif cl == FL.BUTTON: return fm.add_button
    elif cl == FL.CHART: return fm.add_chart
    elif cl == FL.CHOICE: return fm.add_choice
    elif cl == FL.CLOCK: return fm.add_clock
    elif cl == FL.COUNTER: return fm.add_counter
    elif cl == FL.DIAL: return fm.add_dial
    elif cl == FL.FREE: return fm.add_free
    elif cl == FL.INPUT: return fm.add_input
    elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
    elif cl == FL.MENU: return fm.add_menu
    elif cl == FL.POSITIONER: return fm.add_positioner
    elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
    elif cl == FL.SLIDER: return fm.add_slider
    elif cl == FL.VALSLIDER: return fm.add_valslider
    elif cl == FL.TEXT: return fm.add_text
    elif cl == FL.TIMER: return fm.add_timer
    else:
        raise error, 'Unknown object type: %r' % (cl,)
def test():
import time
t0 = time.time()
if len(sys.argv) == 2:
forms = parse_forms(sys.argv[1])
t1 = time.time()
print 'parse time:', 0.001*(t1-t0), 'sec.'
keys = forms.keys()
keys.sort()
for i in keys:
_printform(forms[i])
elif len(sys.argv) == 3:
form = parse_form(sys.argv[1], sys.argv[2])
t1 = time.time()
print 'parse time:', round(t1-t0, 3), 'sec.'
_printform(form)
else:
print 'Usage: test fdfile [form]'
def _printform(form):
    """Print a human-readable dump of one parsed (form, objects) pair."""
    f = form[0]
    objs = form[1]
    print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
    for i in objs:
        print ' Obj ', i.Name, ' type ', i.Class, i.Type
        print ' Box ', i.Box, ' btype ', i.Boxtype
        print ' Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
        print ' cols ', i.Colors
        print ' cback ', i.Callback, i.Argument
| mit |
a10networks/a10-neutron-lbaas | a10_neutron_lbaas/v2/handler_persist.py | 2 | 2513 | # Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import acos_client.errors as acos_errors
import a10_neutron_lbaas.a10_exceptions as a10_ex
LOG = logging.getLogger(__name__)
class PersistHandler(object):
    """Maps an LBaaS pool's session-persistence settings onto ACOS
    persistence templates.

    The template name is the pool id; cookie-style persistence types
    (HTTP_COOKIE / APP_COOKIE) use a cookie template, SOURCE_IP uses a
    source-IP template.  Unknown types raise UnsupportedFeature.
    """

    def __init__(self, c, context, pool, old_pool=None, deprecated_arg=None):
        self.c = c
        self.context = context
        self.pool = pool
        self.c_pers = None
        self.s_pers = None
        # self.old_pool = old_pool
        # LBaaS persistence type -> ACOS template attribute name.
        self.sp_obj_dict = {
            'HTTP_COOKIE': "cookie_persistence",
            'APP_COOKIE': "cookie_persistence",
            'SOURCE_IP': "src_ip_persistence",
        }
        if pool:
            self.name = pool.id
        if pool and pool.session_persistence:
            self.sp = pool.session_persistence
            if self.sp.type in ('HTTP_COOKIE', 'APP_COOKIE'):
                self.c_pers = self.name
            elif self.sp.type == 'SOURCE_IP':
                self.s_pers = self.name
            else:
                raise a10_ex.UnsupportedFeature()
        else:
            self.sp = None

    def c_persistence(self):
        """Cookie-persistence template name, or None."""
        return self.c_pers

    def s_persistence(self):
        """Source-IP-persistence template name, or None."""
        return self.s_pers

    def create(self):
        """Create the ACOS persistence template; existing templates are OK."""
        if self.sp is None:
            return
        template_name = self.sp_obj_dict.get(self.sp.type)
        if template_name is None:
            return
        try:
            template_api = getattr(self.c.client.slb.template, template_name)
            template_api.create(self.name)
        except acos_errors.Exists:
            pass

    def delete(self):
        """Delete the ACOS persistence template; missing templates are OK."""
        if self.sp is None:
            return
        template_name = self.sp_obj_dict.get(self.sp.type)
        if template_name is None:
            return
        try:
            template_api = getattr(self.c.client.slb.template, template_name)
            template_api.delete(self.name)
        except acos_errors.NotExists:
            pass
| apache-2.0 |
onitake/ansible | lib/ansible/modules/files/patch.py | 33 | 6754 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
# Copyright: (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: patch
author:
- Jakub Jirutka (@jirutka)
- Luis Alberto Perez Lazaro (@luisperlaz)
version_added: '1.9'
description:
- Apply patch files using the GNU patch tool.
short_description: Apply patch files using the GNU patch tool
options:
basedir:
description:
- Path of a base directory in which the patch file will be applied.
May be omitted when C(dest) option is specified, otherwise required.
dest:
description:
- Path of the file on the remote machine to be patched.
- The names of the files to be patched are usually taken from the patch
file, but if there's just one file to be patched it can specified with
this option.
aliases: [ originalfile ]
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
C(remote_src) is 'no', the patch source file is looked up from the
module's I(files) directory.
required: true
aliases: [ patchfile ]
state:
version_added: "2.6"
description:
- Whether the patch should be applied or reverted.
choices: [ absent, present ]
default: present
remote_src:
description:
- If C(no), it will search for src at originating/master machine, if C(yes) it will
go to the remote/target machine for the C(src).
type: bool
default: 'no'
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
that will be stripped from each file name found in the patch file.
For more information see the strip parameter of the GNU patch tool.
default: 0
backup:
version_added: "2.0"
description:
- Passes C(--backup --version-control=numbered) to patch,
producing numbered backup copies.
type: bool
default: 'no'
binary:
version_added: "2.0"
description:
- Setting to C(yes) will disable patch's heuristic for transforming CRLF
line endings into LF. Line endings of src and dest must match. If set to
C(no), C(patch) will replace CRLF in C(src) files on POSIX.
type: bool
default: 'no'
notes:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
EXAMPLES = r'''
- name: Apply patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
- name: Apply patch to multiple files under basedir
patch:
src: /tmp/customize.patch
basedir: /var/www
strip: 1
- name: Revert patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
state: absent
'''
import os
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class PatchError(Exception):
    """Raised when the GNU patch command exits with a non-zero status."""
    pass
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, state='present'):
    """Return True if the patch is already in the desired state.

    patch_func -- callable taking a list of option strings and returning
                  the (rc, stdout, stderr) of running GNU patch.
    Probes with a --dry-run; for state 'present' the probe adds --reverse,
    since a forward patch is "already applied" exactly when it can be
    cleanly reversed.
    """
    args = ['--quiet', '--forward', '--dry-run',
            "--strip=%s" % strip, "--directory='%s'" % basedir,
            "--input='%s'" % patch_file]
    if binary:
        args.append('--binary')
    if dest_file:
        args.append("'%s'" % dest_file)
    if state == 'present':
        args.append('--reverse')
    rc = patch_func(args)[0]
    return rc == 0
def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False, state='present'):
    """Apply (or, for state 'absent', revert) a patch via *patch_func*.

    patch_func -- callable taking a list of option strings and returning
                  the (rc, stdout, stderr) of running GNU patch.
    Raises PatchError with patch's stderr (or stdout) when it exits
    non-zero.
    """
    args = ['--quiet', '--forward', '--batch', '--reject-file=-',
            "--strip=%s" % strip, "--directory='%s'" % basedir,
            "--input='%s'" % patch_file]
    if dry_run:
        args.append('--dry-run')
    if binary:
        args.append('--binary')
    if dest_file:
        args.append("'%s'" % dest_file)
    if backup:
        # Two options carried in one element; the caller joins with spaces.
        args.append('--backup --version-control=numbered')
    if state == 'absent':
        args.append('--reverse')
    rc, out, err = patch_func(args)
    if rc != 0:
        raise PatchError(err or out)
def main():
    """Entry point of the Ansible 'patch' module.

    Validates src/dest/basedir, locates the GNU patch binary, and applies
    (or reverts) the patch unless it is already in the requested state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(type='path', required=True, aliases=['patchfile']),
            dest=dict(type='path', aliases=['originalfile']),
            basedir=dict(type='path'),
            strip=dict(type='int', default=0),
            remote_src=dict(type='bool', default=False),
            # NB: for 'backup' parameter, semantics is slightly different from standard
            # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
            backup=dict(type='bool', default=False),
            binary=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        required_one_of=[['dest', 'basedir']],
        supports_check_mode=True,
    )
    # Create type object as namespace for module params
    p = type('Params', (), module.params)
    if not os.access(p.src, os.R_OK):
        module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
    if p.dest and not os.access(p.dest, os.W_OK):
        module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
    if p.basedir and not os.path.exists(p.basedir):
        module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
    # When only dest was given, patch relative to its directory.
    if not p.basedir:
        p.basedir = os.path.dirname(p.dest)
    patch_bin = module.get_bin_path('patch')
    if patch_bin is None:
        module.fail_json(msg="patch command not found")
    def patch_func(opts):
        # Runs GNU patch; returns (rc, stdout, stderr).
        return module.run_command('%s %s' % (patch_bin, ' '.join(opts)))
    # patch need an absolute file name
    p.src = os.path.abspath(p.src)
    changed = False
    # Idempotence: only run patch when the file is not already in the
    # desired state; check mode maps onto patch's own --dry-run.
    if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, state=p.state):
        try:
            apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
                        dry_run=module.check_mode, backup=p.backup, state=p.state)
            changed = True
        except PatchError as e:
            module.fail_json(msg=to_native(e), exception=format_exc())
    module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
benosment/recipes | functional_tests/test_export_recipe.py | 1 | 9194 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
import os
import shutil
import requests
class RecipeExportTest(FunctionalTest):
    """Browser-driven acceptance test for the recipe export feature.

    Drives a live server through Selenium (self.browser / self.server_url
    come from FunctionalTest, defined in .base, outside this view) and
    downloads the export archive with `requests`.
    """

    def test_can_export_a_recipe(self):
        """Create two recipes, export them as a zip, edit one, re-export.

        NOTE(review): uses fixed /tmp paths for the downloaded archive, so
        parallel test runs would collide — consider tempfile.mkdtemp().
        """
        # Ben goes to the recipe website homepage
        self.browser.get(self.server_url)
        # He notices the page title mention cookbook
        self.assertIn('cookbook', self.browser.title)
        # He is invited to enter his name to create his own cookbook or
        # view other user's cookbook's
        # Ben wants to create his own right now, so he enters his name
        # and then clicks the 'get started button'
        username_input = self.browser.find_element_by_id('id_username')
        username_input.send_keys('Ben')
        username_input.send_keys(Keys.ENTER)
        # Ben goes to a unique URL which includes his name
        ben_url = self.browser.current_url
        self.assertRegex(ben_url, '/users/ben.+')
        # He is invited to click on a link to add a new recipe
        add_recipe_button = self.browser.find_element_by_id('id_add_recipe_button')
        self.assertIn('Add recipe', add_recipe_button.text)
        # He clicks on the link and new page appears
        add_recipe_button.click()
        # When he adds a new recipe, he is taken to a new URL
        self.assertRegex(self.browser.current_url, '/users/.*/add_recipe')
        # He sees a form with a textbox for name, ingredients, directions and servings
        # along with a 'cancel' and 'add' button
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('Add Recipe', header_text)
        name_textbox = self.browser.find_element_by_id('id_title')
        self.assertEqual(name_textbox.get_attribute('placeholder'),
                         'Enter the title of the recipe')
        ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
        directions_textbox = self.browser.find_element_by_id('id_directions')
        servings_textbox = self.browser.find_element_by_id('id_servings')
        add_button = self.browser.find_element_by_id('id_add_button')
        # He types in Grilled Halibut with Mango-Avocado Salsa into the textbox for name
        name_textbox.send_keys('Grilled Halibut with Mango-Avocado Salsa')
        # He types in ingredients:
        ingredients_textbox.send_keys('1 medium ripe avocado, peeled and cut into 1/2" dice')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('1 medium ripe mango, peeled and cut into 1/2" dice')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('1 cup cherry tomatoes, quartered')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('4 large fresh basil leaves, thinly sliced')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('3 tablespoons extra-virgin olive oil, divided, plus more for brushing')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('3 tablespoons fresh lime juice, divided')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('Kosher salt and freshly ground black pepper')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('4 6-ounce halibut or mahi-mahi fillets')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('4 lime wedges')
        # He then types in the following for directions:
        directions_textbox.send_keys('Prepare a grill to medium-high heat. Gently combine the avocado, mango, '
                                     'tomatoes, basil, 1 tablespoon oil, and 1 tablespoon lime juice in a large mixing '
                                     'bowl. Season salsa to taste with salt and pepper and set aside at room '
                                     'temperature, gently tossing occasionally.')
        directions_textbox.send_keys(Keys.ENTER)
        directions_textbox.send_keys('Place fish fillets in a 13x9x2" glass baking dish. Drizzle remaining 2 '
                                     'tablespoon oil and 2 tablespoon lime juice over. Season fish with salt and '
                                     'pepper. Let marinate at room temperature for 10 minutes, turning fish '
                                     'occasionally.')
        directions_textbox.send_keys(Keys.ENTER)
        directions_textbox.send_keys('Brush grill rack with oil. Grill fish until just opaque in center, about 5 '
                                     'minutes per side. Transfer to plates. Spoon mango-avocado salsa over fish. '
                                     'Squeeze a lime wedge over each and serve.')
        # He then types in the servings
        servings_textbox.send_keys('4')
        # Finally, he clicks the add button
        add_button.click()
        # He is returned to the main page
        # He sees that the recipe appears in the list of recipes
        self.check_for_row_in_list_table('Grilled Halibut with Mango-Avocado Salsa')
        # He then goes to add another recipe
        add_recipe_button = self.browser.find_element_by_id('id_add_recipe_button')
        add_recipe_button.click()
        # He then goes to add yet another recipe
        # He sees a form with a textbox for name, ingredients, directions and servings
        # along with a 'cancel' and 'add' button
        name_textbox = self.browser.find_element_by_id('id_title')
        add_button = self.browser.find_element_by_id('id_add_button')
        ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
        directions_textbox = self.browser.find_element_by_id('id_directions')
        # He types in Grilled Halibut with Mango-Avocado Salsa into the textbox for name
        name_textbox.send_keys('Yogurt-Marinated Grilled Chicken')
        ingredients_textbox.send_keys('yogurt')
        directions_textbox.send_keys('grill')
        add_button.click()
        # He sees that both recipes appear in the list of recipes
        self.check_for_row_in_list_table('Grilled Halibut with Mango-Avocado Salsa')
        self.check_for_row_in_list_table('Yogurt-Marinated Grilled Chicken')
        # Ben wants to back up his recipes locally to make sure that all the time
        # that he put into curating the recipes does not get lost.
        # He sees there is an export button (TODO: no export button when no recipes?)
        export_button = self.browser.find_element_by_id('id_export_button')
        # He clicks the export button
        # don't actually click, but use wget/requests to get the file
        export_button_url = export_button.get_attribute('href')
        response = requests.get(export_button_url)
        # He receives a zip file
        zip_content = response.content
        with open('/tmp/recipes.zip', 'wb') as f:
            f.write(zip_content)
        # He unzips the file and sees his two recipes
        shutil.unpack_archive('/tmp/recipes.zip', '/tmp')
        self.assertEqual(len(os.listdir('/tmp/recipes')), 2)
        lines = []
        with open('/tmp/recipes/grilled-halibut-with-mango-avocado-salsa') as f:
            lines = f.readlines()
        # He verifies the content of the recipes
        self.assertIn('4 6-ounce halibut or mahi-mahi fillets\n', lines)
        os.remove('/tmp/recipes.zip')
        shutil.rmtree('/tmp/recipes')
        # He then edits the recipe to add Gruyere cheese
        recipe_link = self.browser.find_element_by_link_text('Grilled Halibut with Mango-Avocado Salsa')
        recipe_link.click()
        edit_button = self.browser.find_element_by_id('id_edit_button')
        self.assertIn('Edit', edit_button.text)
        edit_button.click()
        ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
        ingredients_textbox.send_keys(Keys.ENTER)
        ingredients_textbox.send_keys('Gruyère')
        save_button = self.browser.find_element_by_id('id_save_button')
        save_button.click()
        # He then goes back to the list of all recipes
        back_button = self.browser.find_element_by_id('id_back_button')
        self.assertIn('Back', back_button.text)
        back_button.click()
        # he exports the recipes again
        export_button = self.browser.find_element_by_id('id_export_button')
        export_button_url = export_button.get_attribute('href')
        response = requests.get(export_button_url)
        # He receives a zip file
        zip_content = response.content
        with open('/tmp/recipes.zip', 'wb') as f:
            f.write(zip_content)
        # He unzips the file and sees his two recipes
        shutil.unpack_archive('/tmp/recipes.zip', '/tmp')
        self.assertEqual(len(os.listdir('/tmp/recipes')), 2)
        lines = []
        with open('/tmp/recipes/grilled-halibut-with-mango-avocado-salsa') as f:
            lines = f.readlines()
        # He verifies the content of the recipes
        self.assertIn('Gruyère\n', lines)
        os.remove('/tmp/recipes.zip')
        shutil.rmtree('/tmp/recipes')
| mit |
ecesena/oscars2016 | oscars_gae/lib/rsa/cli.py | 79 | 12016 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Commandline scripts.
These scripts are called by the executables defined in setup.py.
'''
from __future__ import with_statement, print_function
import abc
import sys
from optparse import OptionParser
import rsa
import rsa.bigfile
import rsa.pkcs1
HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
def keygen():
    '''Key generator.

    CLI entry point: generates an RSA keypair of the bit size given as the
    sole positional argument, writing the private key to --out (or stdout)
    and optionally the public key to --pubout, in PEM or DER form.
    '''
    # Parse the CLI options
    parser = OptionParser(usage='usage: %prog [options] keysize',
                          description='Generates a new RSA keypair of "keysize" bits.')
    parser.add_option('--pubout', type='string',
                      help='Output filename for the public key. The public key is '
                      'not saved if this option is not present. You can use '
                      'pyrsa-priv2pub to create the public key file later.')
    parser.add_option('-o', '--out', type='string',
                      help='Output filename for the private key. The key is '
                      'written to stdout if this option is not present.')
    parser.add_option('--form',
                      help='key format of the private and public keys - default PEM',
                      choices=('PEM', 'DER'), default='PEM')
    (cli, cli_args) = parser.parse_args(sys.argv[1:])
    if len(cli_args) != 1:
        parser.print_help()
        raise SystemExit(1)
    try:
        keysize = int(cli_args[0])
    except ValueError:
        parser.print_help()
        print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
        raise SystemExit(1)
    print('Generating %i-bit key' % keysize, file=sys.stderr)
    (pub_key, priv_key) = rsa.newkeys(keysize)
    # Save public key
    if cli.pubout:
        print('Writing public key to %s' % cli.pubout, file=sys.stderr)
        data = pub_key.save_pkcs1(format=cli.form)
        with open(cli.pubout, 'wb') as outfile:
            outfile.write(data)
    # Save private key
    data = priv_key.save_pkcs1(format=cli.form)
    if cli.out:
        print('Writing private key to %s' % cli.out, file=sys.stderr)
        with open(cli.out, 'wb') as outfile:
            outfile.write(data)
    else:
        print('Writing private key to stdout', file=sys.stderr)
        # NOTE(review): save_pkcs1 presumably returns bytes; writing bytes to
        # sys.stdout works on Python 2 but would need sys.stdout.buffer on
        # Python 3 — confirm against the targeted interpreter versions.
        sys.stdout.write(data)
class CryptoOperation(object):
    '''CLI callable that operates with input, output, and a key.

    Subclasses customize the class attributes below (operation wording,
    key class, argument count) and implement perform_operation(); calling
    the instance parses the CLI, loads the key, reads the input, performs
    the operation, and writes the output.
    '''

    # NOTE: this metaclass assignment only takes effect on Python 2; on
    # Python 3 the class would not actually be abstract.
    __metaclass__ = abc.ABCMeta

    keyname = 'public' # or 'private'
    usage = 'usage: %%prog [options] %(keyname)s_key'
    description = None
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
                 'not specified.'
    output_help = 'Name of the file to write the %(operation_past)s file ' \
                  'to. Written to stdout if this option is not present.'
    expected_cli_args = 1
    has_output = True
    key_class = rsa.PublicKey

    def __init__(self):
        # Interpolate the per-subclass wording into the help templates.
        self.usage = self.usage % self.__class__.__dict__
        self.input_help = self.input_help % self.__class__.__dict__
        self.output_help = self.output_help % self.__class__.__dict__

    @abc.abstractmethod
    def perform_operation(self, indata, key, cli_args=None):
        '''Performs the program's operation.

        Implement in a subclass.

        :returns: the data to write to the output.
        '''

    def __call__(self):
        '''Runs the program.'''
        (cli, cli_args) = self.parse_cli()
        key = self.read_key(cli_args[0], cli.keyform)
        indata = self.read_infile(cli.input)
        print(self.operation_progressive.title(), file=sys.stderr)
        outdata = self.perform_operation(indata, key, cli_args)
        if self.has_output:
            self.write_outfile(outdata, cli.output)

    def parse_cli(self):
        '''Parse the CLI options

        :returns: (cli_opts, cli_args)
        '''
        parser = OptionParser(usage=self.usage, description=self.description)
        parser.add_option('-i', '--input', type='string', help=self.input_help)
        if self.has_output:
            parser.add_option('-o', '--output', type='string', help=self.output_help)
        parser.add_option('--keyform',
                          help='Key format of the %s key - default PEM' % self.keyname,
                          choices=('PEM', 'DER'), default='PEM')
        (cli, cli_args) = parser.parse_args(sys.argv[1:])
        if len(cli_args) != self.expected_cli_args:
            parser.print_help()
            raise SystemExit(1)
        return (cli, cli_args)

    def read_key(self, filename, keyform):
        '''Reads a public or private key.'''
        print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
        with open(filename, 'rb') as keyfile:
            keydata = keyfile.read()
        return self.key_class.load_pkcs1(keydata, keyform)

    def read_infile(self, inname):
        '''Read the input file (or stdin when no name is given).'''
        if inname:
            print('Reading input from %s' % inname, file=sys.stderr)
            with open(inname, 'rb') as infile:
                return infile.read()
        print('Reading input from stdin', file=sys.stderr)
        return sys.stdin.read()

    def write_outfile(self, outdata, outname):
        '''Write the output file (or stdout when no name is given).'''
        if outname:
            print('Writing output to %s' % outname, file=sys.stderr)
            with open(outname, 'wb') as outfile:
                outfile.write(outdata)
        else:
            print('Writing output to stdout', file=sys.stderr)
            sys.stdout.write(outdata)
class EncryptOperation(CryptoOperation):
    '''Encrypts a file with a public key.'''

    keyname = 'public'
    description = ('Encrypts a file. The file must be shorter than the key '
                   'length in order to be encrypted. For larger files, use the '
                   'pyrsa-encrypt-bigfile command.')
    operation = 'encrypt'
    operation_past = 'encrypted'
    operation_progressive = 'encrypting'

    def perform_operation(self, indata, pub_key, cli_args=None):
        '''Encrypts files.'''
        return rsa.encrypt(indata, pub_key)
class DecryptOperation(CryptoOperation):
    '''Decrypts a file with a private key.'''

    keyname = 'private'
    description = ('Decrypts a file. The original file must be shorter than '
                   'the key length in order to have been encrypted. For larger '
                   'files, use the pyrsa-decrypt-bigfile command.')
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    key_class = rsa.PrivateKey

    def perform_operation(self, indata, priv_key, cli_args=None):
        '''Decrypts files.'''
        return rsa.decrypt(indata, priv_key)
class SignOperation(CryptoOperation):
    '''Signs a file with a private key.'''

    keyname = 'private'
    usage = 'usage: %%prog [options] private_key hash_method'
    description = ('Signs a file, outputs the signature. Choose the hash '
                   'method from %s' % ', '.join(HASH_METHODS))
    operation = 'sign'
    operation_past = 'signature'
    operation_progressive = 'Signing'
    key_class = rsa.PrivateKey
    expected_cli_args = 2
    output_help = ('Name of the file to write the signature to. Written '
                   'to stdout if this option is not present.')

    def perform_operation(self, indata, priv_key, cli_args):
        '''Signs the input with the private key using the given hash method.'''
        # Second positional CLI argument selects the hash algorithm.
        hash_method = cli_args[1]
        if hash_method not in HASH_METHODS:
            raise SystemExit('Invalid hash method, choose one of %s' %
                             ', '.join(HASH_METHODS))
        return rsa.sign(indata, priv_key, hash_method)
class VerifyOperation(CryptoOperation):
    '''Verify a signature with the public key.'''

    keyname = 'public'
    usage = 'usage: %%prog [options] public_key signature_file'
    description = ('Verifies a signature, exits with status 0 upon success, '
                   'prints an error message and exits with status 1 upon error.')
    operation = 'verify'
    operation_past = 'verified'
    operation_progressive = 'Verifying'
    key_class = rsa.PublicKey
    expected_cli_args = 2
    # Verification produces no output data, only an exit status.
    has_output = False

    def perform_operation(self, indata, pub_key, cli_args):
        '''Verifies the input against the detached signature file.'''
        # Second positional CLI argument names the signature file.
        signature_file = cli_args[1]
        with open(signature_file, 'rb') as sigfile:
            signature = sigfile.read()
        try:
            rsa.verify(indata, signature, pub_key)
        except rsa.VerificationError:
            raise SystemExit('Verification failed.')
        print('Verification OK', file=sys.stderr)
class BigfileOperation(CryptoOperation):
    '''CryptoOperation that doesn't read the entire file into memory.

    Instead of reading input and writing output as whole byte strings,
    subclasses stream between open file objects.
    '''

    def __init__(self):
        CryptoOperation.__init__(self)
        # File handles opened by get_infile/get_outfile, closed in __del__.
        self.file_objects = []

    def __del__(self):
        '''Closes any open file handles.'''
        for fobj in self.file_objects:
            fobj.close()

    def __call__(self):
        '''Runs the program.'''
        (cli, cli_args) = self.parse_cli()
        key = self.read_key(cli_args[0], cli.keyform)
        # Get the file handles
        infile = self.get_infile(cli.input)
        outfile = self.get_outfile(cli.output)
        # Call the operation
        print(self.operation_progressive.title(), file=sys.stderr)
        self.perform_operation(infile, outfile, key, cli_args)

    def get_infile(self, inname):
        '''Returns the input file object (stdin when no name is given).'''
        if inname:
            print('Reading input from %s' % inname, file=sys.stderr)
            fobj = open(inname, 'rb')
            self.file_objects.append(fobj)
        else:
            print('Reading input from stdin', file=sys.stderr)
            fobj = sys.stdin
        return fobj

    def get_outfile(self, outname):
        '''Returns the output file object (stdout when no name is given).'''
        if outname:
            print('Will write output to %s' % outname, file=sys.stderr)
            fobj = open(outname, 'wb')
            self.file_objects.append(fobj)
        else:
            print('Will write output to stdout', file=sys.stderr)
            fobj = sys.stdout
        return fobj
class EncryptBigfileOperation(BigfileOperation):
    '''Encrypts a file to VARBLOCK format.'''

    keyname = 'public'
    description = ('Encrypts a file to an encrypted VARBLOCK file. The file '
            'can be larger than the key length, but the output file is only '
            'compatible with Python-RSA.')
    operation = 'encrypt'
    operation_past = 'encrypted'
    operation_progressive = 'encrypting'

    def perform_operation(self, infile, outfile, pub_key, cli_args=None):
        '''Encrypts infile to outfile in VARBLOCK format (streaming).'''
        return rsa.bigfile.encrypt_bigfile(infile, outfile, pub_key)
class DecryptBigfileOperation(BigfileOperation):
    '''Decrypts a file in VARBLOCK format.'''

    keyname = 'private'
    description = ('Decrypts an encrypted VARBLOCK file that was encrypted '
            'with pyrsa-encrypt-bigfile')
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    key_class = rsa.PrivateKey

    def perform_operation(self, infile, outfile, priv_key, cli_args=None):
        '''Decrypts a VARBLOCK file from infile to outfile (streaming).'''
        return rsa.bigfile.decrypt_bigfile(infile, outfile, priv_key)
# Module-level callables used as the console-script entry points.
encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
encrypt_bigfile = EncryptBigfileOperation()
decrypt_bigfile = DecryptBigfileOperation()
| apache-2.0 |
AltSchool/django | django/contrib/gis/gdal/field.py | 355 | 6739 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    """
    This class wraps an OGR Field, and needs to be instantiated
    from a Feature object.
    """

    def __init__(self, feat, index):
        """
        Initializes on the feature object and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index

        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
        if not fld_ptr:
            raise GDALException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr

        # Setting the class depending upon the OGR Field Type (OFT)
        self.__class__ = OGRFieldTypes[self.type]

        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
            self._double = True

    def __str__(self):
        "Returns the string representation of the Field."
        return str(self.value).strip()

    # #### Field Methods ####
    def as_double(self):
        "Retrieves the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat.ptr, self._index)

    def as_int(self, is_64=False):
        "Retrieves the Field's value as an integer."
        if is_64:
            return capi.get_field_as_integer64(self._feat.ptr, self._index)
        else:
            return capi.get_field_as_integer(self._feat.ptr, self._index)

    def as_string(self):
        "Retrieves the Field's value as a string."
        string = capi.get_field_as_string(self._feat.ptr, self._index)
        return force_text(string, encoding=self._feat.encoding, strings_only=True)

    def as_datetime(self):
        "Retrieves the Field's value as a tuple of date & time components."
        # Note: on success the components are ctypes c_int objects (callers
        # read .value), not plain Python ints -- see the OFTDate* subclasses.
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(
            self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise GDALException('Unable to retrieve date & time information from the field.')

    # #### Field Properties ####
    @property
    def name(self):
        "Returns the name of this Field."
        name = capi.get_field_name(self.ptr)
        return force_text(name, encoding=self._feat.encoding, strings_only=True)

    @property
    def precision(self):
        "Returns the precision of this Field."
        return capi.get_field_precision(self.ptr)

    @property
    def type(self):
        "Returns the OGR type of this Field."
        return capi.get_field_type(self.ptr)

    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)

    @property
    def value(self):
        "Returns the value of this Field."
        # Default is to get the field as a string.
        return self.as_string()

    @property
    def width(self):
        "Returns the width of this Field."
        return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    # _double is flipped to True by Field.__init__ when the underlying
    # field is actually an OFTReal with zero precision.
    _double = False
    _bit64 = False

    @property
    def value(self):
        "Returns an integer contained in this field."
        if self._double:
            # If this is really from an OFTReal field with no precision,
            # read as a double and cast as Python int (to prevent overflow).
            return int(self.as_double())
        else:
            return self.as_int(self._bit64)

    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    @property
    def value(self):
        "Returns a float contained in this field."
        return self.as_double()
# String & Binary fields, just subclasses
# (they inherit Field.value, i.e. the string representation).
class OFTString(Field):
    pass

class OFTWideString(Field):
    pass

class OFTBinary(Field):
    pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
    @property
    def value(self):
        "Returns a Python `date` object for the OFTDate field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return date(yy.value, mm.value, dd.value)
        except (ValueError, GDALException):
            # Unparseable/invalid dates yield None rather than raising.
            return None
class OFTDateTime(Field):
    @property
    def value(self):
        "Returns a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            # Unparseable/invalid datetimes yield None rather than raising.
            return None
class OFTTime(Field):
    @property
    def value(self):
        "Returns a Python `time` object for this OFTTime field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return time(hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            # Unparseable/invalid times yield None rather than raising.
            return None
class OFTInteger64(OFTInteger):
    # Same as OFTInteger but reads via the 64-bit C API entry point.
    _bit64 = True

# List fields are also just subclasses
class OFTIntegerList(Field):
    pass

class OFTRealList(Field):
    pass

class OFTStringList(Field):
    pass

class OFTWideStringList(Field):
    pass

class OFTInteger64List(Field):
    pass
# Class mapping dictionary for OFT Types and reverse mapping.
# Keys are the integer OFT type codes returned by capi.get_field_type.
OGRFieldTypes = {
    0: OFTInteger,
    1: OFTIntegerList,
    2: OFTReal,
    3: OFTRealList,
    4: OFTString,
    5: OFTStringList,
    6: OFTWideString,
    7: OFTWideStringList,
    8: OFTBinary,
    9: OFTDate,
    10: OFTTime,
    11: OFTDateTime,
    # New 64-bit integer types in GDAL 2
    12: OFTInteger64,
    13: OFTInteger64List,
}
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
| bsd-3-clause |
voidcc/POXPOF | pox/openflow/topology.py | 46 | 15169 | # Copyright 2011 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenFlow doesn't know anything about Topology, and Topology doesn't
know anything about OpenFlow. This module knows something about both,
and hooks the two of them together.
Specifically, this module is somewhat like an adapter that listens to
events from other parts of the openflow substem (such as discovery), and
uses them to populate and manipulate Topology.
"""
import itertools
from pox.lib.revent import *
import libopenflow_01 as of
from pox.openflow import *
from pox.core import core
from pox.topology.topology import *
from pox.openflow.discovery import *
from pox.openflow.libopenflow_01 import xid_generator
from pox.openflow.flow_table import FlowTable,FlowTableModification,TableEntry
from pox.lib.util import dpidToStr
from pox.lib.addresses import *
import pickle
import itertools
# After a switch disconnects, it has this many seconds to reconnect in
# order to reactivate the same OpenFlowSwitch object. After this, if
# it reconnects, it will be a new switch object.
RECONNECT_TIMEOUT = 30
log = core.getLogger()
class OpenFlowTopology (object):
  """
  Listens to various OpenFlow-specific events and uses those to manipulate
  Topology accordingly.
  """

  def __init__ (self):
    # Defer wiring up until core's "topology" component is ready; the
    # component is then exposed as self.topology (short_attrs).
    core.listen_to_dependencies(self, ['topology'], short_attrs=True)

  def _handle_openflow_discovery_LinkEvent (self, event):
    """
    The discovery module simply sends out LLDP packets, and triggers
    LinkEvents for discovered switches. It's our job to take these
    LinkEvents and update pox.topology.
    """
    link = event.link
    sw1 = self.topology.getEntityByID(link.dpid1)
    sw2 = self.topology.getEntityByID(link.dpid2)
    # Ignore links whose endpoints we don't (yet) know about.
    if sw1 is None or sw2 is None: return
    if link.port1 not in sw1.ports or link.port2 not in sw2.ports: return
    if event.added:
      sw1.ports[link.port1].addEntity(sw2, single=True)
      sw2.ports[link.port2].addEntity(sw1, single=True)
    elif event.removed:
      sw1.ports[link.port1].entities.discard(sw2)
      sw2.ports[link.port2].entities.discard(sw1)

  def _handle_openflow_ConnectionUp (self, event):
    # Reuse the existing switch entity if this dpid reconnected; otherwise
    # create it and add it to topology (raising SwitchJoin).
    sw = self.topology.getEntityByID(event.dpid)
    add = False
    if sw is None:
      sw = OpenFlowSwitch(event.dpid)
      add = True
    else:
      if sw._connection is not None:
        log.warn("Switch %s connected, but... it's already connected!" %
                 (dpidToStr(event.dpid),))
    sw._setConnection(event.connection, event.ofp)
    log.info("Switch " + dpidToStr(event.dpid) + " connected")
    if add:
      self.topology.addEntity(sw)
      sw.raiseEvent(SwitchJoin, sw)

  def _handle_openflow_ConnectionDown (self, event):
    sw = self.topology.getEntityByID(event.dpid)
    if sw is None:
      log.warn("Switch %s disconnected, but... it doesn't exist!" %
               (dpidToStr(event.dpid),))
    else:
      if sw._connection is None:
        # Fixed log message grammar ("it's wasn't" -> "it wasn't").
        log.warn("Switch %s disconnected, but... it wasn't connected!" %
                 (dpidToStr(event.dpid),))
      sw._connection = None
      # BUGFIX: previously logged the raw integer dpid via str(); use
      # dpidToStr for consistency with every other message in this class.
      log.info("Switch " + dpidToStr(event.dpid) + " disconnected")
class OpenFlowPort (Port):
  """
  A subclass of topology.Port for OpenFlow switch ports.

  Adds the notion of "connected entities", which the default
  ofp_phy_port class does not have.

  Note: Not presently used.
  """
  def __init__ (self, ofp):
    # Passed an ofp_phy_port
    Port.__init__(self, ofp.port_no, ofp.hw_addr, ofp.name)
    self.isController = self.number == of.OFPP_CONTROLLER
    self._update(ofp)
    self.exists = True
    self.entities = set()

  def _update (self, ofp):
    # Name and port number are immutable; only the address, config and
    # state may be refreshed from a new ofp_phy_port.
    assert self.name == ofp.name
    assert self.number == ofp.port_no
    self.hwAddr = EthAddr(ofp.hw_addr)
    self._config = ofp.config
    self._state = ofp.state

  def __contains__ (self, item):
    """ True if this port connects to the specified entity """
    return item in self.entities

  def addEntity (self, entity, single = False):
    # Invariant (not currently enforced?):
    #   len(self.entities) <= 2  ?
    if single:
      # single=True replaces whatever was attached before.
      self.entities = set([entity])
    else:
      self.entities.add(entity)

  def to_ofp_phy_port(self):
    # Reconstructs an ofp_phy_port from the cached attributes.
    return of.ofp_phy_port(port_no = self.number, hw_addr = self.hwAddr,
                           name = self.name, config = self._config,
                           state = self._state)

  def __repr__ (self):
    return "<Port #" + str(self.number) + ">"
class OpenFlowSwitch (EventMixin, Switch):
  """
  OpenFlowSwitches are Topology entities (inheriting from topology.Switch)

  OpenFlowSwitches are persistent; that is, if a switch reconnects, the
  Connection field of the original OpenFlowSwitch object will simply be
  reset to refer to the new connection.

  For now, OpenFlowSwitch is primarily a proxy to its underlying connection
  object. Later, we'll possibly add more explicit operations the client can
  perform.

  Note that for the purposes of the debugger, we can interpose on
  a switch entity by enumerating all listeners for the events listed
  below, and triggering mock events for those listeners.
  """
  _eventMixin_events = set([
    SwitchJoin, # Defined in pox.topology
    SwitchLeave,
    SwitchConnectionUp,
    SwitchConnectionDown,
    PortStatus, # Defined in libopenflow_01
    FlowRemoved,
    PacketIn,
    BarrierIn,
  ])

  def __init__ (self, dpid):
    if not dpid:
      raise AssertionError("OpenFlowSwitch should have dpid")

    Switch.__init__(self, id=dpid)
    EventMixin.__init__(self)
    self.dpid = dpid
    self.ports = {}              # port_no -> OpenFlowPort
    self.flow_table = OFSyncFlowTable(self)
    self.capabilities = 0
    self._connection = None
    self._listeners = []
    self._reconnectTimeout = None # Timer for reconnection
    # xids are partitioned per-switch so replies can be matched up.
    self._xid_generator = xid_generator( ((dpid & 0x7FFF) << 16) + 1)

  def _setConnection (self, connection, ofp=None):
    ''' ofp - a FeaturesReply message '''
    if self._connection: self._connection.removeListeners(self._listeners)
    self._listeners = []
    self._connection = connection
    if self._reconnectTimeout is not None:
      self._reconnectTimeout.cancel()
      self._reconnectTimeout = None
    if connection is None:
      # Start the grace period before the entity is removed from topology.
      # NOTE(review): Timer does not appear in this module's visible
      # imports -- verify it is imported (pox.lib.recoco?).
      self._reconnectTimeout = Timer(RECONNECT_TIMEOUT,
                                     self._timer_ReconnectTimeout)
    if ofp is not None:
      # update capabilities
      self.capabilities = ofp.capabilities

      # update all ports
      untouched = set(self.ports.keys())
      for p in ofp.ports:
        if p.port_no in self.ports:
          self.ports[p.port_no]._update(p)
          untouched.remove(p.port_no)
        else:
          self.ports[p.port_no] = OpenFlowPort(p)
      for p in untouched:
        # Ports absent from the new FeaturesReply are gone.
        self.ports[p].exists = False
        del self.ports[p]
    if connection is not None:
      self._listeners = self.listenTo(connection, prefix="con")
      self.raiseEvent(SwitchConnectionUp(switch = self,
                                         connection = connection))
    else:
      self.raiseEvent(SwitchConnectionDown(self))

  def _timer_ReconnectTimeout (self):
    """ Called if we've been disconnected for RECONNECT_TIMEOUT seconds """
    self._reconnectTimeout = None
    core.topology.removeEntity(self)
    self.raiseEvent(SwitchLeave, self)

  def _handle_con_PortStatus (self, event):
    # Keep self.ports in sync with asynchronous port add/delete/modify.
    p = event.ofp.desc
    if event.ofp.reason == of.OFPPR_DELETE:
      if p.port_no in self.ports:
        self.ports[p.port_no].exists = False
        del self.ports[p.port_no]
    elif event.ofp.reason == of.OFPPR_MODIFY:
      self.ports[p.port_no]._update(p)
    else:
      assert event.ofp.reason == of.OFPPR_ADD
      assert p.port_no not in self.ports
      self.ports[p.port_no] = OpenFlowPort(p)
    self.raiseEvent(event)
    event.halt = False

  def _handle_con_ConnectionDown (self, event):
    self._setConnection(None)

  def _handle_con_PacketIn (self, event):
    # Re-raise connection events on the switch entity itself.
    self.raiseEvent(event)
    event.halt = False

  def _handle_con_BarrierIn (self, event):
    self.raiseEvent(event)
    event.halt = False

  def _handle_con_FlowRemoved (self, event):
    self.raiseEvent(event)
    # NOTE(review): this looks like a bug -- the attribute set in __init__
    # is self.flow_table (not self.flowTable), and no removeFlow() method
    # is visible on OFSyncFlowTable in this file, so this line would raise
    # AttributeError when a FlowRemoved arrives. Verify against FlowTable.
    self.flowTable.removeFlow(event)
    event.halt = False

  def findPortForEntity (self, entity):
    # Linear scan; returns the first port connected to `entity`, or None.
    for p in self.ports.itervalues():
      if entity in p:
        return p
    return None

  @property
  def connected(self):
    return self._connection != None

  def installFlow(self, **kw):
    """ install flow in the local table and the associated switch """
    self.flow_table.install(TableEntry(**kw))

  def serialize (self):
    # Skip over non-serializable data, e.g. sockets
    serializable = OpenFlowSwitch(self.dpid)
    return pickle.dumps(serializable, protocol = 0)

  def send(self, *args, **kw):
    return self._connection.send(*args, **kw)

  def read(self, *args, **kw):
    return self._connection.read(*args, **kw)

  def __repr__ (self):
    return "<%s %s>" % (self.__class__.__name__, dpidToStr(self.dpid))

  @property
  def name(self):
    return repr(self)
class OFSyncFlowTable (EventMixin):
  _eventMixin_events = set([FlowTableModification])
  """
  A flow table that keeps in sync with a switch
  """
  # NOTE(review): the string above is a plain expression statement, not the
  # class docstring (it follows an assignment) -- __doc__ is not set.
  ADD = of.OFPFC_ADD
  REMOVE = of.OFPFC_DELETE
  REMOVE_STRICT = of.OFPFC_DELETE_STRICT
  TIME_OUT = 2        # seconds before a pending op's barrier is retried

  def __init__ (self, switch=None, **kw):
    EventMixin.__init__(self)
    self.flow_table = FlowTable()
    self.switch = switch

    # a list of pending flow table entries : tuples (ADD|REMOVE, entry)
    self._pending = []

    # a map of pending barriers barrier_xid-> ([entry1,entry2])
    self._pending_barrier_to_ops = {}

    # a map of pending barriers per request entry -> (barrier_xid, time)
    self._pending_op_to_barrier = {}

    self.listenTo(switch)

  def install (self, entries=[]):
    """
    asynchronously install entries in the flow table

    will raise a FlowTableModification event when the change has been
    processed by the switch
    """
    self._mod(entries, OFSyncFlowTable.ADD)

  def remove_with_wildcards (self, entries=[]):
    """
    asynchronously remove entries in the flow table

    will raise a FlowTableModification event when the change has been
    processed by the switch
    """
    self._mod(entries, OFSyncFlowTable.REMOVE)

  def remove_strict (self, entries=[]):
    """
    asynchronously remove entries in the flow table.

    will raise a FlowTableModification event when the change has been
    processed by the switch
    """
    self._mod(entries, OFSyncFlowTable.REMOVE_STRICT)

  @property
  def entries (self):
    return self.flow_table.entries

  @property
  def num_pending (self):
    return len(self._pending)

  def __len__ (self):
    return len(self.flow_table)

  def _mod (self, entries, command):
    if isinstance(entries, TableEntry):
      entries = [ entries ]

    for entry in entries:
      # A remove cancels any not-yet-sent ADD it would supersede.
      if(command == OFSyncFlowTable.REMOVE):
        self._pending = [(cmd,pentry) for cmd,pentry in self._pending
                         if not (cmd == OFSyncFlowTable.ADD
                                 and entry.matches_with_wildcards(pentry))]
      elif(command == OFSyncFlowTable.REMOVE_STRICT):
        self._pending = [(cmd,pentry) for cmd,pentry in self._pending
                         if not (cmd == OFSyncFlowTable.ADD
                                 and entry == pentry)]

      self._pending.append( (command, entry) )

    self._sync_pending()

  def _sync_pending (self, clear=False):
    """Push pending ops to the switch, each batch followed by a barrier.

    With clear=True the switch's table is wiped and fully re-sent.
    """
    if not self.switch.connected:
      return False

    # resync the switch
    if clear:
      self._pending_barrier_to_ops = {}
      self._pending_op_to_barrier = {}
      # NOTE(review): lambda(op) / lambda(e) tuple-parameter syntax below is
      # Python 2 only, as is relying on filter/map returning lists.
      self._pending = filter(lambda(op): op[0] == OFSyncFlowTable.ADD,
                             self._pending)

      self.switch.send(of.ofp_flow_mod(command=of.OFPFC_DELETE,
                                       match=of.ofp_match()))
      self.switch.send(of.ofp_barrier_request())

      todo = map(lambda(e): (OFSyncFlowTable.ADD, e),
                 self.flow_table.entries) + self._pending
    else:
      # Only (re)send ops with no outstanding barrier, or whose barrier
      # timed out.
      # NOTE(review): `time` is used here but does not appear in this
      # module's visible imports -- verify `import time` exists.
      todo = [op for op in self._pending
              if op not in self._pending_op_to_barrier
              or (self._pending_op_to_barrier[op][1]
                  + OFSyncFlowTable.TIME_OUT) < time.time() ]

    for op in todo:
      fmod_xid = self.switch._xid_generator()
      flow_mod = op[1].to_flow_mod(xid=fmod_xid, command=op[0],
                                   flags=op[1].flags | of.OFPFF_SEND_FLOW_REM)
      self.switch.send(flow_mod)

    barrier_xid = self.switch._xid_generator()
    self.switch.send(of.ofp_barrier_request(xid=barrier_xid))
    now = time.time()
    self._pending_barrier_to_ops[barrier_xid] = todo

    for op in todo:
      self._pending_op_to_barrier[op] = (barrier_xid, now)

  def _handle_SwitchConnectionUp (self, event):
    # sync all_flows
    self._sync_pending(clear=True)

  def _handle_SwitchConnectionDown (self, event):
    # connection down. too bad for our unconfirmed entries
    self._pending_barrier_to_ops = {}
    self._pending_op_to_barrier = {}

  def _handle_BarrierIn (self, barrier):
    # yeah. barrier in. time to sync some of these flows
    if barrier.xid in self._pending_barrier_to_ops:
      added = []
      removed = []
      #print "barrier in: pending for barrier: %d: %s" % (barrier.xid,
      #    self._pending_barrier_to_ops[barrier.xid])
      for op in self._pending_barrier_to_ops[barrier.xid]:
        (command, entry) = op
        if(command == OFSyncFlowTable.ADD):
          self.flow_table.add_entry(entry)
          added.append(entry)
        else:
          removed.extend(self.flow_table.remove_matching_entries(entry.match,
              entry.priority, strict=command == OFSyncFlowTable.REMOVE_STRICT))
        #print "op: %s, pending: %s" % (op, self._pending)
        if op in self._pending: self._pending.remove(op)
        self._pending_op_to_barrier.pop(op, None)
      del self._pending_barrier_to_ops[barrier.xid]
      self.raiseEvent(FlowTableModification(added = added, removed=removed))
      return EventHalt
    else:
      return EventContinue

  def _handle_FlowRemoved (self, event):
    """
    process a flow removed event -- remove the matching flow from the table.
    """
    flow_removed = event.ofp
    for entry in self.flow_table.entries:
      if (flow_removed.match == entry.match
          and flow_removed.priority == entry.priority):
        self.flow_table.remove_entry(entry)
        self.raiseEvent(FlowTableModification(removed=[entry]))
        return EventHalt
    return EventContinue
def launch ():
  # Component entry point; idempotent -- safe to invoke more than once.
  if not core.hasComponent("openflow_topology"):
    core.register("openflow_topology", OpenFlowTopology())
| apache-2.0 |
MpApQ/kernel_huawei | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	""" This class provides a tree representation of the functions
	call stack. If a function has no parent in the kernel (interrupt,
	syscall, kernel thread...) then it is attached to a virtual parent
	called ROOT.
	"""
	ROOT = None

	def __init__(self, func, time = None, parent = None):
		self._func = func
		self._time = time
		if parent is None:
			self._parent = CallTree.ROOT
		else:
			self._parent = parent
		self._children = []

	def calls(self, func, calltime):
		""" If a function calls another one, call this method to insert it
		into the tree at the appropriate place.
		@return: A reference to the newly created child node.
		"""
		child = CallTree(func, calltime, self)
		self._children.append(child)
		return child

	def getParent(self, func):
		""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		on a parent, then create it as new child of root
		@return: A reference to the parent.
		"""
		tree = self
		while tree != CallTree.ROOT and tree._func != func:
			tree = tree._parent
		if tree == CallTree.ROOT:
			child = CallTree.ROOT.calls(func, None)
			return child
		return tree

	def __repr__(self):
		return self.__toString("", True)

	def __toString(self, branch, lastChild):
		# Render this node, then recurse into children, extending the ASCII
		# branch prefix (" |") for each level of nesting.
		if self._time is not None:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		else:
			s = "%s----%s\n" % (branch, self._func)

		i = 0
		if lastChild:
			# Last sibling: blank out the parent's vertical bar below us.
			branch = branch[:-1] + " "
		while i < len(self._children):
			if i != len(self._children) - 1:
				s += "%s" % self._children[i].__toString(branch +\
					" |", False)
			else:
				s += "%s" % self._children[i].__toString(branch +\
					" |", True)
			i += 1
		return s
class BrokenLineException(Exception):
	"""If the last line is not complete because of the pipe breakage,
	we want to stop the processing and ignore this line.
	"""
	pass

class CommentLineException(Exception):
	""" If the line is a comment (as in the beginning of the trace file),
	just ignore it.
	"""
	pass
def parseLine(line):
	"""Split one raw function-tracer line into (calltime, callee, caller).

	Raises CommentLineException for '#'-prefixed lines and
	BrokenLineException for lines the pattern cannot match.
	"""
	stripped = line.strip()
	if stripped.startswith("#"):
		raise CommentLineException
	match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
	if match is None:
		raise BrokenLineException
	return match.group(1), match.group(2), match.group(3)
def main():
	# Virtual root that adopts every call whose caller isn't on the stack.
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT

	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# Truncated final line (broken pipe): stop processing.
			break
		except CommentLineException:
			continue
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)

	# NOTE: print statement -- this script is Python 2 only.
	print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
parlar/calls2xls | external/CrossMap/usr/lib64/python2.7/site-packages/bx/align/lav.py | 5 | 18238 | """
Support for reading and writing the LAV format produced by the `blastz`_
pairwise aligner.
.. _blastz: http://www.bx.psu.edu/miller_lab/
"""
from bx.align import *
import bx.seq
import sys,math,StringIO
import itertools
from bx import interval_index_file
class Reader(object):
"""Iterate over all lav blocks in a file in order"""
    def __init__(self,file,path_subs=None,fail_to_ns=False):
        # file: open handle positioned at the start of a LAV stream.
        self.file = file
        self.lineNumber = 0

        self.path_subs = path_subs     # list of (prefix,replacement) to allow
        if (self.path_subs == None):   # .. redirection of sequence file paths
            self.path_subs = []        # .. on different machines
        self.fail_to_ns = fail_to_ns   # True => if sequences fail to open,
                                       # create a fake file of all Ns

        self.d_stanza_text = None

        # Per-sequence state, filled in by the s- and h-stanza parsers and
        # consumed by open_seqs()/build_alignment().
        self.seq1_filename = None
        self.seq1_file = None
        self.seq1_header = None
        self.seq1_start = None
        self.seq1_end = None
        self.seq1_strand = None
        self.seq1_contig = None
        self.seq1_src = None
        self.seq1_gap = None

        self.seq2_filename = None
        self.seq2_file = None
        self.seq2_header = None
        self.seq2_start = None
        self.seq2_end = None
        self.seq2_strand = None
        self.seq2_contig = None
        self.seq2_src = None
        self.seq2_gap = None
    def next(self):
        """Return the next alignment in the file, or None at #:eof.

        Skips/records d-, s- and h-stanzas; an a-stanza terminates the
        scan and is converted via build_alignment().
        """
        while (True):
            line = self.fetch_line(strip=None,requireLine=False)
            assert (line), "unexpected end of file (missing #:eof)"
            line = line.rstrip()
            if (line == ""):      # (allow blank lines between stanzas)
                continue
            if (line == "#:eof"):
                line = self.file.readline().rstrip()
                assert (not line), "extra line after #:eof (line %d, \"%s\")" \
                                 % (self.lineNumber,line)
                return None
            if (line == "#:lav"):
                continue
            if (line.startswith("d {")):
                self.d_stanza_text = self.parse_unknown_stanza()
                continue
            if (line.startswith("s {")):
                self.parse_s_stanza()
                continue
            if (line.startswith("h {")):
                self.parse_h_stanza()
                continue
            if (line.startswith("a {")):
                (score,pieces) = self.parse_a_stanza()
                break
            if (line.endswith("{")):
                # Unrecognized stanza: consume and discard it.
                self.parse_unknown_stanza()
                continue
            assert (False), "incomprehensible line (line %d, \"%s\")" \
                          % (self.lineNumber,line)

        return self.build_alignment(score,pieces)
    def __iter__(self):
        # Iteration is delegated to ReaderIter (defined elsewhere in file).
        return ReaderIter(self)

    def close(self):
        self.file.close()
def open_seqs(self):
if (self.seq1_file != None) and (self.seq2_file != None):
return
if (self.seq1_file == None):
if (self.seq1_strand == "+"): revcomp = False
else: revcomp = "-5'"
if (self.seq1_contig == 1): contig = None
else: contig = self.seq1_contig
try:
f = file(self.seq1_filename,"rb")
except:
if (self.fail_to_ns):
f = StringIO.StringIO(">seq1\n" + ("n" * (self.seq1_end - self.seq1_start)))
revcomp = False
contig = 1
else:
assert (False), "failed to open %s" % self.seq1_filename
self.seq1_file = bx.seq.seq_file(f,revcomp=revcomp,contig=contig)
self.seq1_gap = self.seq1_file.gap
try:
name1 = self.header_to_src_name(self.seq1_header)
except ValueError:
try:
name1 = self.path_to_src_name(self.seq1_filename)
except ValueError:
name1 = "seq1"
(species1,chrom1) = src_split(name1)
self.seq1_src = src_merge(species1,chrom1,contig)
if (contig != None): chrom1 += "[%s]" % contig
if (self.seq2_file == None):
if (self.seq2_strand == "+"): revcomp = False
else: revcomp = "-5'"
if (self.seq2_contig == 1): contig = None
else: contig = self.seq2_contig
try:
f = file(self.seq2_filename,"rb")
except:
if (self.fail_to_ns):
f = StringIO.StringIO(">seq2\n" + ("n" * (self.seq2_end - self.seq2_start)))
revcomp = False
contig = 1
else:
assert (False), "failed to open %s" % self.seq1_filename
self.seq2_file = bx.seq.seq_file(f,revcomp=revcomp,contig=contig)
self.seq2_gap = self.seq2_file.gap
try:
name2 = self.header_to_src_name(self.seq2_header)
except ValueError:
try:
name2 = self.path_to_src_name(self.seq2_filename)
except ValueError:
name2 = "seq2"
(species2,chrom2) = src_split(name2)
self.seq2_src = src_merge(species2,chrom2,contig)
if (contig != None): chrom2 += "[%s]" % contig
length1 = self.seq1_file.length
length2 = self.seq2_file.length
assert (species1 != species2) or (chrom1 != chrom2) or (length1 == length2), \
"conflicting lengths for %s (%d and %d)" % (self.seq1_src,length1,length2)
self.species_to_lengths = {}
self.species_to_lengths[species1] = {}
self.species_to_lengths[species2] = {} # (OK if it clobbers line above)
self.species_to_lengths[species1][chrom1] = self.seq1_file.length
self.species_to_lengths[species2][chrom2] = self.seq2_file.length
    def close_seqs(self):
        """Close either sequence file that is currently open."""
        if (self.seq1_file != None):
            self.seq1_file.close()
            self.seq1_file = None
        if (self.seq2_file != None):
            self.seq2_file.close()
            self.seq2_file = None
    def parse_s_stanza(self):
        """Parse an s-stanza: two sequence-description lines plus '}'.

        A new s-stanza invalidates any currently open sequence files.
        """
        self.close_seqs()
        line = self.fetch_line(report=" in s-stanza")
        (self.seq1_filename,
         self.seq1_start,
         self.seq1_end,
         self.seq1_strand,
         self.seq1_contig) = self.parse_s_seq(line)

        line = self.fetch_line(report=" in s-stanza")
        (self.seq2_filename,
         self.seq2_start,
         self.seq2_end,
         self.seq2_strand,
         self.seq2_contig) = self.parse_s_seq(line)

        line = self.fetch_line(report=" in s-stanza")
        assert (line == "}"), "improper s-stanza terminator (line %d, \"%s\")" \
                            % (self.lineNumber,line)
    def parse_s_seq(self,line):
        """Parse one s-stanza sequence line.

        Returns (filename, start, end, strand, contig) with start
        converted to origin-0.  A trailing '-' on the filename marks
        reverse-complement and is stripped.
        """
        fields = line.split()
        filename = fields[0].strip('"')
        start = int(fields[1]) - 1
        end = int(fields[2])
        contig = int(fields[4])
        # Field 3 is 1 for reverse strand, 0 for forward.
        if (fields[3] == "1"): strand = "-"
        else: strand = "+"
        if (filename.endswith("-")):
            assert (strand == "-"), "strand mismatch in \"%s\"" % line
            filename = filename[:-1]
        filename = do_path_subs(filename,self.path_subs)
        return (filename,start,end,strand,contig)
    def parse_h_stanza(self):
        """Parse an h-stanza: two quoted FASTA-style headers plus '}'.

        Only the first whitespace-delimited token of each header is kept;
        empty headers default to "seq1"/"seq2".
        """
        line = self.fetch_line(strip='"',report=" in h-stanza")
        self.seq1_header = line
        self.seq1_header_prefix = ""
        if (line.startswith(">")):
            self.seq1_header = line[1:].strip()
            self.seq1_header_prefix = ">"
        self.seq1_header = self.seq1_header.split(None,1)
        if (len(self.seq1_header) > 0): self.seq1_header = self.seq1_header[0]
        else: self.seq1_header = "seq1"

        line = self.fetch_line(strip='"',report=" in h-stanza")
        self.seq2_header = line
        self.seq2_header_prefix = ""
        if (line.startswith(">")):
            self.seq2_header = line[1:].strip()
            self.seq2_header_prefix = ">"
        self.seq2_header = self.seq2_header.split(None,1)
        if (len(self.seq2_header) > 0): self.seq2_header = self.seq2_header[0]
        else: self.seq2_header = "seq2"

        line = self.fetch_line(report=" in h-stanza")
        assert (line == "}"), "improper h-stanza terminator (line %d, \"%s\")" \
                            % (self.lineNumber,line)
    def parse_a_stanza(self):
        """returns the pair (score,pieces)
        where pieces is a list of ungapped segments (start1,start2,length,pctId)
        with start1,start2 origin-0"""
        # 's' line -- score, 1 field
        line = self.fetch_line(report=" in a-stanza")
        fields = line.split()
        assert (fields[0] == "s"), "s line expected in a-stanza (line %d, \"%s\")" \
                                 % (self.lineNumber,line)
        try:    score = int(fields[1])
        except: score = float(fields[1])

        # 'b' line -- begin positions in seqs, 2 fields
        line = self.fetch_line(report=" in a-stanza")
        fields = line.split()
        assert (fields[0] == "b"), "b line expected in a-stanza (line %d, \"%s\")" \
                                 % (self.lineNumber,line)
        beg1 = int(fields[1]) - 1
        beg2 = int(fields[2]) - 1

        # 'e' line -- end positions in seqs, 2 fields
        line = self.fetch_line(report=" in a-stanza")
        fields = line.split()
        assert (fields[0] == "e"), "e line expected in a-stanza (line %d, \"%s\")" \
                                 % (self.lineNumber,line)
        len1 = int(fields[1]) - beg1
        len2 = int(fields[2]) - beg2

        # 'l' lines -- one ungapped segment each; pieces are shifted by
        # the s-stanza's origin so coordinates are file-absolute.
        pieces = []
        while (True):
            line = self.fetch_line(report=" in a-stanza")
            fields = line.split()
            if (fields[0] != "l"):
                break
            start1  = int(fields[1]) - 1
            start2  = int(fields[2]) - 1
            length  = int(fields[3]) - start1
            length2 = int(fields[4]) - start2
            try:    pctId = int(fields[5])
            except: pctId = float(fields[5])
            assert (length2 == length), "length mismatch in a-stanza"
            pieces.append((start1+self.seq1_start,start2+self.seq2_start,length,pctId))
        assert (line == "}"), "improper a-stanza terminator (line %d, \"%s\")" \
                            % (self.lineNumber,line)
        return (score,pieces)
def parse_unknown_stanza(self):
    """Collect the body of an unrecognized stanza verbatim, up to (and
    excluding) the closing "}", returning it as text with one leading
    space per line and a trailing newline."""
    body = []
    while True:
        line = self.fetch_line()
        assert (line), "unexpected end of file (missing #:eof)"
        if line == "}":
            break
        body.append(line)
    return " " + "\n ".join(body) + "\n"
def fetch_line(self,strip=True,requireLine=True,report=""):
    """Read the next line from self.file, bumping self.lineNumber.

    strip=None  -> return the raw line (newline included)
    strip=True  -> whitespace-stripped (default)
    strip=<str> -> whitespace-stripped, then stripped of those characters
    With requireLine (default), an empty result (blank line or EOF) is an
    assertion failure; report is appended to the failure message.
    """
    raw = self.file.readline()
    if strip is None:
        line = raw
    elif strip is True:
        line = raw.strip()
    else:
        line = raw.strip().strip(strip)
    self.lineNumber += 1
    if requireLine:
        assert line, \
            "unexpected blank line or end of file%s (line %d)" \
            % (report,self.lineNumber)
    return line
def d_stanza(self):
    """Render the saved d-stanza text, or "" when none was parsed."""
    text = self.d_stanza_text
    if text is None:
        return ""
    return "d {\n%s}" % text
def s_stanza(self):
    """Render the lav s-stanza describing the two sequence files.

    Returns "" when no sequence information was parsed.  Positions are
    reported 1-based (lav convention); strand is encoded as "0" for
    forward and "1" for reverse.
    """
    if (self.seq1_filename == None): return ""
    if (self.seq1_strand == "-"): seq1_strand = "1"
    else: seq1_strand = "0"
    if (self.seq2_strand == "-"): seq2_strand = "1"
    else: seq2_strand = "0"
    # BUG FIX: the first line must report seq1's start; it previously
    # used self.seq2_start.
    s = " \"%s\" %d %d %s %d\n"\
        % (self.seq1_filename,self.seq1_start+1,self.seq1_end,
           seq1_strand,self.seq1_contig)
    s += " \"%s\" %d %d %s %d\n"\
        % (self.seq2_filename,self.seq2_start+1,self.seq2_end,
           seq2_strand,self.seq2_contig)
    return "s {\n%s}" % s
def h_stanza(self):
    """Render the lav h-stanza (quoted fasta-style headers for both
    sequences), or "" when no header information was parsed."""
    if self.seq1_header is None:
        return ""
    body = " \"%s%s\"\n" % (self.seq1_header_prefix,self.seq1_header)
    body += " \"%s%s\"\n" % (self.seq2_header_prefix,self.seq2_header)
    return "h {\n%s}" % body
def build_alignment(self,score,pieces):
    """converts a score and pieces to an alignment

    pieces are file-absolute, origin-0 ungapped segments as returned by
    parse_a_stanza; between consecutive segments, gap characters are
    inserted on whichever sequence was skipped.  Assumes pieces are
    non-overlapping and increasing in both sequences -- TODO confirm
    the lav source guarantees this.
    """
    # build text
    self.open_seqs()
    text1 = text2 = ""
    end1 = end2 = None
    for (start1,start2,length,pctId) in pieces:
        if (end1 != None):
            if (start1 == end1): # insertion in sequence 2
                # sequence 1 gets gap chars; sequence 2 gets the inserted bases
                text1 += self.seq1_gap * (start2-end2)
                text2 += self.seq2_file.get(end2,start2-end2)
            else: # insertion in sequence 1
                text1 += self.seq1_file.get(end1,start1-end1)
                text2 += self.seq2_gap * (start1-end1)
        text1 += self.seq1_file.get(start1,length)
        text2 += self.seq2_file.get(start2,length)
        end1 = start1 + length
        end2 = start2 + length
    # create alignment (overall extent = first piece start to last piece end)
    start1 = pieces[0][0]
    start2 = pieces[0][1]
    end1 = pieces[-1][0] + pieces[-1][2]
    end2 = pieces[-1][1] + pieces[-1][2]
    size1 = end1 - start1
    size2 = end2 - start2
    a = Alignment(score=score,species_to_lengths=self.species_to_lengths)
    #if (self.seq1_strand == "-"): start1 = self.seq1_file.length - end1
    a.add_component(Component(self.seq1_src,start1,size1,self.seq1_strand,text=text1))
    #if (self.seq2_strand == "-"): start2 = self.seq2_file.length - end2
    a.add_component(Component(self.seq2_src,start2,size2,self.seq2_strand,text=text2))
    return a
def path_to_src_name(self,path_name):
    """Convert a sequence file path to a src name.

    e.g. ".../hg18/seq/chr13.nib" -> "hg18.chr13".  Raises ValueError
    for an empty/None path.
    """
    if path_name is None or path_name == "":
        raise ValueError
    # drop a recognized sequence-file extension, if present
    for ext in (".nib", ".fa", ".fasta"):
        if path_name.endswith(ext):
            path_name = path_name[:-len(ext)]
    cut = path_name.rfind("/")
    if cut == -1:
        return path_name
    name = path_name[cut+1:]
    path_name = path_name[:cut]
    # a trailing ".../seq" directory is not part of the species name
    if path_name.endswith("/seq"):
        path_name = path_name[:-4]
    cut = path_name.rfind("/")
    if cut != -1:
        path_name = path_name[cut+1:]
    return path_name + "." + name
def header_to_src_name(self,header):
    """Convert a fasta header to a src name.

    e.g. "hg18.chr13:115404472-117281897" -> "hg18.chr13".  Raises
    ValueError for empty input, a "/" in the header, or an empty
    species/chrom component.
    """
    if header is None or header == "":
        raise ValueError
    colon_pos = header.rfind(":")
    if colon_pos != -1:
        header = header[:colon_pos]
    if "/" in header:
        raise ValueError
    if "." not in header:
        return header
    parts = header.split(".")
    if parts[0] == "" or parts[1] == "":
        raise ValueError
    return ".".join(parts)
class ReaderIter(object):
def __init__(self,reader):
self.reader = reader
def __iter__(self):
return self
def next(self):
v = self.reader.next()
if (not v): raise StopIteration
return v
class LavAsPiecesReader(Reader):
    """Iterate over all lav blocks in a file in order, returning alignments
    as score and pieces, as returned by Reader.parse_a_stanza"""
    def build_alignment(self,score,pieces):
        # Override: skip Alignment construction (and sequence-file access)
        # and expose the raw (score, pieces) tuple instead.
        return (score,pieces)
class Writer(object):
    """Write pairwise alignments to a file in lav format.

    blockHash is a hash from (src1,strand1,src2,strand2) to a list of
    blocks; the blocks are collected on each call to write(), but the
    actual writing does not occur until close(), so that all blocks for
    the same sequence pair and strands share a single s/h stanza.
    """
    def __init__(self,file,attributes=None):
        # FIX: avoid a shared mutable default argument for attributes
        if attributes is None: attributes = {}
        self.file = file
        self.fname1 = None
        self.fname2 = None
        self.block = 0
        self.blockHash = {} # (see class docstring)
        if ("name_format_1" in attributes):
            self.fname1 = attributes["name_format_1"]
        if ("name_format_2" in attributes):
            self.fname2 = attributes["name_format_2"]
        if ("d_stanza" in attributes):
            # FIX: write_lav_marker is a method; the old module-level call
            # write_lav_marker(self) raised NameError
            self.write_lav_marker()
            print >>self.file,"d {"
            print >>self.file,attributes["d_stanza"]
            print >>self.file,"}"
    def write(self,alignment):
        """Queue one 2-component alignment for output at close() time."""
        if (len(alignment.components) != 2):
            # FIX: raising a plain string is not legal; raise a real exception
            raise ValueError("%d-component alignment is not compatible with lav" % \
                  len(alignment.components))
        c1 = alignment.components[0]
        c2 = alignment.components[1]
        key = (c1.src,c1.strand,c2.src,c2.strand)
        if (key not in self.blockHash): self.blockHash[key] = []
        self.blockHash[key].append(alignment)
        self.block += 1
    def close(self):
        """Emit all queued blocks (grouped per sequence pair, in chromosome
        order) followed by the lav trailer, then close the file."""
        keys = [key for key in self.blockHash]
        keys = sort_keys_by_chrom (keys)
        for key in keys:
            (src1,strand1,src2,strand2) = key
            # lengths come from the first alignment of the group
            alignment = self.blockHash[key][0]
            self.src1 = src1
            self.strand1 = strand1
            self.length1 = alignment.src_size(src1)
            self.src2 = src2
            self.strand2 = strand2
            self.length2 = alignment.src_size(src2)
            self.write_s_stanza()
            self.write_h_stanza()
            for alignment in self.blockHash[key]:
                self.write_a_stanza(alignment)
        self.write_trailer()
        if (self.file != sys.stdout): self.file.close()
    def write_s_stanza(self):
        # sequence file names, 1-based extents, strand flag, contig number
        self.write_lav_marker()
        (strand1,flag1) = minus_or_nothing(self.strand1)
        (strand2,flag2) = minus_or_nothing(self.strand2)
        fname1 = build_filename(self.fname1,self.src1)
        fname2 = build_filename(self.fname2,self.src2)
        print >>self.file,"s {"
        print >>self.file," \"%s%s\" 1 %d %d 1" \
            % (fname1,strand1,self.length1,flag1)
        print >>self.file," \"%s%s\" 1 %d %d 1" \
            % (fname2,strand2,self.length2,flag2)
        print >>self.file,"}"
    def write_h_stanza(self):
        # fasta-style headers, noting reverse complement where applicable
        strand1 = rc_or_nothing(self.strand1)
        strand2 = rc_or_nothing(self.strand2)
        print >>self.file,"h {"
        print >>self.file," \"> %s%s\"" % (self.src1,strand1)
        print >>self.file," \"> %s%s\"" % (self.src2,strand2)
        print >>self.file,"}"
    def write_a_stanza(self,alignment):
        """Write one alignment as an a-stanza, decomposing the gapped
        texts into ungapped pieces with integer percent identity."""
        c1 = alignment.components[0]
        pos1 = c1.start
        text1 = c1.text.upper()
        c2 = alignment.components[1]
        pos2 = c2.start
        text2 = c2.text.upper()
        # collect ungapped pieces
        pieces = []
        piece1 = None
        for ix in range(len(text1)):
            ch1 = text1[ix]
            ch2 = text2[ix]
            nonGap = (ch1 != "-") and (ch2 != "-")
            if (nonGap):
                if (piece1 == None): # new piece starts
                    (piece1,piece2,idCount) = (pos1,pos2,0)
                if (ch1 == ch2): idCount += 1
            elif (piece1 != None): # new gap starts
                size = pos1 - piece1
                # rounded integer percent identity (relies on python 2
                # floor division)
                pctId = (200*idCount + size) / (2*size)
                pieces.append((piece1,piece2,size,pctId))
                piece1 = None
            if (ch1 != "-"): pos1 += 1
            if (ch2 != "-"): pos2 += 1
        if (piece1 != None):
            size = pos1 - piece1
            pctId = (200*idCount + size) / (2*size)
            pieces.append((piece1,piece2,size,pctId))
        # write the block
        (start1,start2,size,pctId) = pieces[-1] # get end of final piece
        end1 = start1 + size
        end2 = start2 + size
        (start1,start2,size,pctId) = pieces[0] # get start of first piece
        score = int(round(alignment.score))
        print >>self.file,"a {"
        print >>self.file," s %s" % score
        print >>self.file," b %d %d" % (start1+1,start2+1)
        print >>self.file," e %d %d" % (end1, end2)
        for (start1,start2,size,pctId) in pieces:
            print >>self.file," l %d %d %d %d %d" \
                % (start1+1,start2+1,start1+size,start2+size,pctId)
        print >>self.file,"}"
    def write_lav_marker(self):
        # stanza-group separator required before each s-stanza
        print >>self.file,"#:lav"
    def write_trailer(self):
        print >>self.file,"#:eof"
def sort_keys_by_chrom (keys):
    """Sort (src1,strand1,src2,strand2) keys into chromosome order
    (numeric where possible, via chrom_key) using decorate-sort-undecorate."""
    decorated = sorted(
        (chrom_key(src1),strand1,chrom_key(src2),strand2,
         (src1,strand1,src2,strand2))
        for (src1,strand1,src2,strand2) in keys)
    return [entry[-1] for entry in decorated]
def chrom_key (src):
    # Sort key for a src name ("species.chrom"): the chromosome, as an int
    # when possible so that chr2 sorts before chr10 (a string sort would not).
    (species,chrom) = src_split(src)
    if (chrom.startswith("chr")): chrom = chrom[3:]
    try: chrom = int(chrom)
    except ValueError: pass
    return chrom
def build_filename(fmt,src):
    """Expand a filename format for a src name ("species.chrom").

    fmt may be None (return src unchanged), contain no "%s" (returned
    as-is), one "%s" (filled with the chromosome), or two (filled with
    species and chromosome).
    """
    if fmt is None:
        return src
    placeholders = fmt.count("%s")
    if placeholders == 0:
        return fmt
    (species,chrom) = src_split(src)
    if placeholders == 1:
        return fmt % chrom
    return fmt % (species,chrom)
def minus_or_nothing(strand):
    """Encode a strand for the s-stanza: "-" -> ("-", 1), else ("", 0)."""
    return ("-",1) if strand == "-" else ("",0)
def rc_or_nothing(strand):
    """Header suffix for the h-stanza: note reverse complement for "-"."""
    if strand != "-":
        return ""
    return " (reverse complement)"
def do_path_subs(path,path_subs):
    """Rewrite path using the first (prefix, replacement) pair whose
    prefix matches; return path unchanged when none match."""
    for (old_prefix, new_prefix) in path_subs:
        if path.startswith(old_prefix):
            return new_prefix + path[len(old_prefix):]
    return path
| mit |
rajrohith/blobstore | azure/storage/_common_conversion.py | 1 | 3663 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import hashlib
import hmac
import sys
from dateutil.tz import tzutc
from io import (IOBase, SEEK_SET)
from ._error import (
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM,
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM,
)
from .models import (
_unicode_type,
)
# Python 2/3 compatibility shim: _str coerces a value to the native string
# type.  On Python 2, unicode values are UTF-8 encoded to byte strings; on
# Python 3, str is already text, so _str is simply the builtin.
if sys.version_info < (3,):
    def _str(value):
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return str(value)
else:
    _str = str
def _to_str(value):
return _str(value) if value is not None else None
def _int_to_str(value):
return str(int(value)) if value is not None else None
def _bool_to_str(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _to_utc_datetime(value):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _datetime_to_utc_string(value):
    """Format value as an RFC 1123 date string ('%a, %d %b %Y %H:%M:%S GMT').

    Returns None for None.  Timezone-aware values are converted to UTC
    first; naive values are assumed to already be UTC.
    """
    # Azure expects the date value passed in to be UTC.
    # Azure will always return values as UTC.
    # If a date is passed in without timezone info, it is assumed to be UTC.
    if value is None:
        return None
    if value.tzinfo:
        value = value.astimezone(tzutc())
    return value.strftime('%a, %d %b %Y %H:%M:%S GMT')
def _encode_base64(data):
    """Base64-encode data and return the result as text; text input
    (_unicode_type) is UTF-8 encoded before encoding."""
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64encode(raw).decode('utf-8')
def _decode_base64_to_bytes(data):
    """Decode base64 input to raw bytes; text input (_unicode_type) is
    UTF-8 encoded before decoding."""
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64decode(raw)
def _decode_base64_to_text(data):
    """Decode base64 input and interpret the decoded bytes as UTF-8 text."""
    return _decode_base64_to_bytes(data).decode('utf-8')
def _sign_string(key, string_to_sign, key_is_base64=True):
    """Return the base64-encoded HMAC-SHA256 of string_to_sign under key.

    key may be base64 text (default) or raw key material; text inputs
    are UTF-8 encoded before use.
    """
    if key_is_base64:
        key = _decode_base64_to_bytes(key)
    else:
        if isinstance(key, _unicode_type):
            key = key.encode('utf-8')
    if isinstance(string_to_sign, _unicode_type):
        string_to_sign = string_to_sign.encode('utf-8')
    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
    digest = signed_hmac_sha256.digest()
    encoded_digest = _encode_base64(digest)
    return encoded_digest
def _get_content_md5(data):
md5 = hashlib.md5()
if isinstance(data, bytes):
md5.update(data)
elif hasattr(data, 'read'):
pos = 0
try:
pos = data.tell()
except:
pass
for chunk in iter(lambda: data.read(4096), b""):
md5.update(chunk)
try:
data.seek(pos, SEEK_SET)
except (AttributeError, IOError):
raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data'))
else:
raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data'))
return base64.b64encode(md5.digest()).decode('utf-8')
def _lower(text):
    """Return text lower-cased."""
    return text.lower()
| apache-2.0 |
bolkedebruin/airflow | airflow/providers/amazon/aws/sensors/s3_prefix.py | 2 | 3412 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class S3PrefixSensor(BaseSensorOperator):
    """
    Waits for a prefix to exist. A prefix is the first part of a key,
    thus enabling checking of constructs similar to glob airfl* or
    SQL LIKE 'airfl%'. There is the possibility to precise a delimiter to
    indicate the hierarchy or keys, meaning that the match will stop at that
    delimiter. Current code accepts sane delimiters, i.e. characters that
    are NOT special characters in the Python regex engine.

    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param prefix: The prefix being waited on. Relative path from bucket root level.
    :type prefix: str
    :param delimiter: The delimiter intended to show hierarchy.
        Defaults to '/'.
    :type delimiter: str
    :param aws_conn_id: a reference to the s3 connection
    :type aws_conn_id: str
    :param verify: Whether or not to verify SSL certificates for S3 connection.
        By default SSL certificates are verified.
        You can provide the following values:

        - ``False``: do not validate SSL certificates. SSL will still be used
                 (unless use_ssl is False), but SSL certificates will not be
                 verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
                 You can specify this argument if you want to use a different
                 CA cert bundle than the one used by botocore.
    :type verify: bool or str
    """
    template_fields = ('prefix', 'bucket_name')

    @apply_defaults
    def __init__(self,
                 bucket_name,
                 prefix,
                 delimiter='/',
                 aws_conn_id='aws_default',
                 verify=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Store target location and connection settings; the S3 hook itself
        # is built lazily in poke() so no AWS access happens at DAG parse time.
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.delimiter = delimiter
        # Convenience full URL of the watched location (informational only).
        self.full_url = "s3://" + bucket_name + '/' + prefix
        self.aws_conn_id = aws_conn_id
        self.verify = verify

    def poke(self, context):
        """Return True once any key under the prefix exists in the bucket."""
        self.log.info('Poking for prefix : %s in bucket s3://%s', self.prefix, self.bucket_name)
        # Imported here to keep the hook dependency out of module import.
        from airflow.providers.amazon.aws.hooks.s3 import S3Hook
        hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
        return hook.check_for_prefix(
            prefix=self.prefix,
            delimiter=self.delimiter,
            bucket_name=self.bucket_name)
| apache-2.0 |
johnazariah/trieste | shipyard_api.py | 1 | 5244 | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import azure.common
import azure.storage.blob as azure_storage_blob
import azure.storage.file as azure_storage_file
import azure.storage.queue as azure_storage_queue
import azure.storage.table as azure_storage_table
import azure.batch.batch_auth as batch_auth
import azure.batch.batch_service_client as batch_service_client
import sys
sys.path.append('batch_shipyard')
import batch_shipyard.convoy.fleet as convoy_fleet # noqa
class ShipyardApi:
    """Base wrapper wiring Azure Batch and Storage clients from a
    batch-shipyard style configuration dict."""
    def _get_batch_client(self, config_batch_credentials):
        # Build a Batch service client from shared-key credentials.
        batch_credentials = batch_auth.SharedKeyCredentials(
            config_batch_credentials['account'],
            config_batch_credentials['account_key'])
        return batch_service_client.BatchServiceClient(
            batch_credentials,
            base_url=config_batch_credentials['account_service_url'])
    def __init__(self, config):
        self.config = config
        # NOTE(review): presumably suppresses interactive confirmation in
        # batch-shipyard -- confirm against convoy.fleet.
        self.config["_auto_confirm"] = False
        self.batch_client = self._get_batch_client(
            config['credentials']['batch'])
        # Tag requests with the bundled batch-shipyard release version.
        self.batch_client.config.add_user_agent(
            'batch-shipyard/{}'.format('2.0.0rc2'))
        # NOTE(review): '__storage_account_name__' looks like a template
        # placeholder key -- confirm the config is preprocessed before use.
        config_storage_credentials = \
            config['credentials']['storage']['__storage_account_name__']
        parameters = {
            'account_name': config_storage_credentials['account'],
            'account_key': config_storage_credentials['account_key'],
            'endpoint_suffix': config_storage_credentials['endpoint']
        }
        self.blob_client = azure_storage_blob.BlockBlobService(**parameters)
        self.queue_client = azure_storage_queue.QueueService(**parameters)
        self.table_client = azure_storage_table.TableService(**parameters)
class ClusterApi(ShipyardApi):
    """Pool ("cluster") management operations built on batch-shipyard."""
    def __init__(self, shipyard_config):
        super(ClusterApi, self).__init__(shipyard_config)
    def create_cluster(self, id, vm_count):
        """Create a pool named id (default: the configured pool id) with
        vm_count compute nodes."""
        # FIX: the pool id was assigned twice, clobbering the configured id
        # with None when no override was supplied; only override when given
        # (matches JobApi.submit_job).
        if id is not None:
            self.config["pool_specification"]["id"] = id
        self.config["pool_specification"]["vm_count"] = vm_count
        convoy_fleet.populate_global_settings(self.config, True)
        convoy_fleet.adjust_general_settings(self.config)
        convoy_fleet.action_pool_add(
            self.batch_client,
            self.blob_client,
            self.queue_client,
            self.table_client,
            self.config)
    def list_clusters(self):
        """Return an iterator over all Batch pools in the account."""
        return self.batch_client.pool.list()
    def delete_cluster(self, id):
        """Delete the pool named id (default: the configured pool id)."""
        self.batch_client.pool.delete(id or self.config["pool_specification"]["id"])
class JobApi(ShipyardApi):
    """Job management operations built on batch-shipyard."""
    def __init__(self, shipyard_config):
        super(JobApi, self).__init__(shipyard_config)
    def submit_job(self, id, cluster_id, recreate):
        """Submit the configured job, optionally overriding the job id and
        the target pool id; recreate is forwarded to batch-shipyard."""
        if id is not None:
            self.config["job_specifications"][0]["id"] = id
        if cluster_id is not None:
            self.config["pool_specification"]["id"] = cluster_id
        convoy_fleet.populate_global_settings(self.config, False)
        convoy_fleet.adjust_general_settings(self.config)
        convoy_fleet.action_jobs_add(
            self.batch_client,
            self.blob_client,
            self.config,
            recreate)
    def list_jobs(self):
        """Return an iterator over all Batch jobs in the account."""
        return self.batch_client.job.list()
    def list_tasks_for_job(self, job_id):
        """List tasks of job_id (default: the first configured job's id)."""
        # FIX: "job_specifications" is a list (see submit_job); the old
        # fallback indexed it with the string "id" and raised TypeError.
        return self.batch_client.task.list(
            job_id or self.config["job_specifications"][0]["id"])
    def stream_file(self, id, task_id, cluster_id):
        """Stream stderr.txt of task_id in job id to the console."""
        if id is not None:
            self.config["job_specifications"][0]["id"] = id
        if cluster_id is not None:
            self.config["pool_specification"]["id"] = cluster_id
        convoy_fleet.populate_global_settings(self.config, False)
        convoy_fleet.adjust_general_settings(self.config)
        convoy_fleet.action_data_stream(
            self.batch_client,
            self.config,
            "{},{},stderr.txt".format(id, task_id),
            True)
    def delete_job(self, id):
        """Delete the job named id (default: the first configured job's id)."""
        # FIX: same list-indexing fix as list_tasks_for_job; dead trailing
        # "pass" removed.
        self.batch_client.job.delete(
            id or self.config["job_specifications"][0]["id"])
| mit |
cnworks/moving-wall | handDepth.py | 1 | 5659 | import cv2
import numpy as np
import serial
import math
def writeSerial(pattern):
    """Emit the cube pattern as one comma-separated, newline-terminated line.

    Serial output is currently disabled (commented out); the line is
    printed for debugging instead.
    """
    #ser = serial.Serial('/dev/ttyACM0',9600)
    # join once instead of concatenating and trimming a trailing comma
    write = ",".join(str(value) for value in pattern) + "\n"
    print(write)
    #ser.write(write)
def findHandROI(frame1):
    """Detect candidate hand regions in a BGR frame.

    Skin-color masks the frame in HSV space, then runs a Haar cascade on
    the masked image; returns the detections as (x, y, w, h) rectangles.
    """
    #convert to hsv for better work with color
    converted = cv2.cvtColor(frame1, cv2.COLOR_BGR2HSV)
    #bounds depend heavily on lighting situation
    #lower and upper bound for skin color
    lower = np.array([0, 10, 60], dtype = "uint8")
    #lower = np.array([0, 10, 60], dtype = "uint8")
    #upper = np.array([20, 150, 255], dtype = "uint8")
    upper = np.array([10, 150, 255], dtype = "uint8")
    skinMask = cv2.inRange(converted, lower, upper)
    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame1, frame1, mask = skinMask)
    #description of haar cascades of hands
    # NOTE(review): the cascade is reloaded from disk on every call --
    # consider loading it once at module level.
    hand_cascade = cv2.CascadeClassifier('cascades/hand2.xml')
    #detect hands with created cascade identifier from file
    hands = hand_cascade.detectMultiScale(skin, 1.3, 5)
    return hands
def findDepth(frame1, frame2, x, y, w, h):
    """Estimate the average distance to features inside the ROI (x,y,w,h).

    Matches ORB features between the two camera frames and triangulates
    each good match.  Relies on module globals: orb (detector) and
    width1/width2 (frame widths set by the main loop).  Assumes a 6 cm
    camera baseline and, per the inline comments, a 50-degree horizontal
    field of view with a 65-degree offset -- TODO confirm calibration.
    Returns the mean distance over ROI matches (numpy nan when none).
    """
    #find features and their descriptors in both images
    kp1, des1 = orb.detectAndCompute(frame1,None)
    kp2, des2 = orb.detectAndCompute(frame2,None)
    FLANN_INDEX_LSH = 6
    #initialize params for matching
    index_params= dict(algorithm = FLANN_INDEX_LSH,
                       table_number = 6, # 12 #6
                       key_size = 12, # 20 #12
                       multi_probe_level = 1) #2 #1
    #optionaly change cicle count for better precision
    search_params = dict(checks=100)
    #initialize matcher with created parameters
    flann = cv2.FlannBasedMatcher(index_params,search_params)
    #search for matches using the descriptors of both images
    matches = flann.knnMatch(des1,des2,k=2)
    # Need to draw only good matches, so create a mask
    matchesMask = [[0,0] for i in xrange(len(matches))]
    pts1 = []
    pts2 = []
    distance = []
    # ratio test as per Lowe's paper and check if point is in roi
    for i,(m_n) in enumerate(matches):
        if len(m_n) != 2:
            continue
        m, n = m_n
        if m.distance < 0.7*n.distance:
            #calculate angles of both cameras
            x1,y1 = kp1[m.queryIdx].pt
            x2,y2 = kp2[m.trainIdx].pt
            if x1 > x and x1 < x+w and y1 > y and y1 < y+h:
                #angle of sight / image width * coordinates
                angle1 = 50.0 / width1 * x1 + 65
                angle2 = 50.0 / width2 * (width2 -x2) + 65
                if angle1 + angle2 < 180:
                    angle1 = math.radians(angle1)
                    angle2 = math.radians(ang2) if False else math.radians(angle2)
                    #calculate distance with distance between cameras of 6cm and using triangulation
                    distance.append(6 * ((math.sin(angle1)*math.sin(angle2))/math.sin(angle1+angle2)))
                    matchesMask[i]=[1,0]
                    #for every match save coordinates in both images
                    pts1.append(kp1[m.queryIdx].pt)
                    pts2.append(kp2[m.trainIdx].pt)
    draw_params = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask,
                       flags = 0)
    imgMatchs = cv2.drawMatchesKnn(frame1,kp1,frame2,kp2,matches,None,**draw_params)
    cv2.imshow('matches',imgMatchs)
    return np.average(distance)
############# PROGRAMM START ##############
# initialize camera and capture session (two cameras for stereo depth)
vc1 = cv2.VideoCapture(2)
vc2 = cv2.VideoCapture(1)
#initialize feature detector with orb
orb = cv2.ORB_create()
# if session is open read first frame or set vc status to False
if vc1.isOpened():
    vcstat, img1 = vc1.read()
else:
    vcstat = False
while vcstat:
    vcstat, img1 = vc1.read()
    _, img2 = vc2.read()
    #get resolutions of images
    height1, width1,_ = img1.shape
    height2, width2,_ = img2.shape
    # downscale to half size to speed up detection and matching
    frame1 = cv2.resize(img1, (int(0.5 * width1), int(0.5 * height1)), interpolation = cv2.INTER_AREA)
    frame2 = cv2.resize(img2, (int(0.5 * width2), int(0.5 * height2)), interpolation = cv2.INTER_AREA)
    hands = findHandROI(frame1)
    # split the frame into a 4x4 grid; cubeData holds one value per cell
    # NOTE(review): blockw/blockh use the full-size dims while hand coords
    # come from the half-size frame -- confirm the intended scaling.
    blockw = width1 / 4
    blockh = height1 / 4
    cubeData = [0] * 16
    for (x,y,w,h) in hands:
        hand = cv2.rectangle(frame1, (x,y),(x+w,y+h),(0,255,0),3)
        distance = findDepth(frame1, frame2, x, y, w, h)
        # grid cell containing the hand's center
        xblock = int((x+w/2)/ blockw)
        yblcok = int((y+h/2) / blockh)
        cubeIndex = 0
        # advance cubeIndex to the hand's cell, zeroing untouched cells
        # on the way (cells already set to 1 are left alone)
        for i in range(yblcok*4+xblock):
            if cubeData[cubeIndex] != 1:
                cubeData[cubeIndex]=0
            cubeIndex+=1
        if np.isnan(distance) != True and distance<=4000:
            #TODO: define accurate range for hand distance
            # map the distance onto the actuator range 150..450
            cubeData[cubeIndex]=int(distance/4000*300 +150)
        else:
            cubeData[cubeIndex]=0
        cubeIndex+=1
    if len(cubeData) == 16:
        writeSerial(cubeData)
    cv2.imshow('Hand ROI', frame1)
    # waitKey(0) waits for a keypress; waitKey(1) continues immediately
    k = cv2.waitKey(1)
    if k == 27:
        break  # exit the capture loop on ESC
vc1.release()
vc2.release()
cv2.destroyAllWindows()
crawfordsm/pysalt | slottools/slotreadtimefix.py | 1 | 6586 | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""SLOTREADTIME is a tool to fix the UTC time. The time in the binary files is
the readtime and not the UTC time. The actually time of the exposure is
7xEXPTIME less than the time of the read out as the image is shifted down 7
places on the CCD before being read out.
This program will read in a file, and for each extension correct the UTC-TIME
keyword by the time it takes to shift the exposure so the UTC-TIME corresponds
to the start of the exposure and adds a keyword with READTIME.
It will not run on files with the READTIME header keyword already in place.
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import time, datetime
from pyraf import iraf
from iraf import pysalt
import saltsafekey as saltkey
import saltsafeio as saltio
import salttime
import slottool
from saltsafelog import logging, history
from salterror import SaltError, SaltIOError
debug=True
def slotreadtimefix(images,outimages, outpref,
                    clobber=False, logfile='slot.log',verbose=True):
    """Correct the UTC time in SALT slot-mode FITS files.

    For each input file, every science extension's UTC-OBS is shifted
    back by 7 x (EXPTIME + frame-transfer time) so it reflects the start
    of the exposure rather than the readout, and the original readout
    time is recorded in a new READTIME keyword (see readtimefix).

    images/outimages/outpref -- input list and output list or prefix
    clobber -- overwrite existing output files
    logfile, verbose -- logging control
    """
    with logging(logfile,debug) as log:
        # Check the input images
        infiles = saltio.argunpack ('Input',images)
        # create list of output files
        outfiles=saltio.listparse('Outfile', outimages, outpref,infiles,'')
        #verify that the input and output lists are the same length
        saltio.comparelists(infiles,outfiles,'Input','output')
        for img, oimg in zip(infiles, outfiles):
            #check to see if the out image already exists
            if not clobber and os.path.isfile(oimg):
                raise SaltIOError('%s alraedy exists' % oimg)
            #open the file
            struct=saltio.openfits(img)
            #log the message
            log.message('Updateing times in %s' % img, with_header=False, with_stdout=verbose)
            #now for each science frame, correct the readtime.
            #Assumes SALT format and that the first extension
            #is empty
            for i in range(1,len(struct)):
                try:
                    struct[i]=readtimefix(struct[i])
                except SaltIOError,e :
                    raise SaltError('%s %s' % (img,e))
            #Add history keywords
            # housekeeping keywords
            fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
            saltkey.housekeeping(struct[0],"SLOTREAD", 'READTIME added', hist)
            #write to the output
            saltio.writefits(struct, oimg, clobber)
    return
def readtimefix(hdu, dsteps=7, transtime=4e-3):
    """Update the hdu with the correct time for when the exposure started
    and add the READTIME keyword

    dsteps--the number of readouts to correct for
    transtime--the transfer time between each frame

    Raises SaltIOError if the extension already carries READTIME.
    Returns the modified hdu.
    """
    #check for if the data has already been processed
    if saltkey.found('READTIME', hdu):
        raise SaltIOError(' has already been processed')
    #determine the UTC time
    utctime=saltkey.get('UTC-OBS', hdu)
    # NOTE(review): timeobs is read but never used below
    timeobs=saltkey.get('TIME-OBS', hdu)
    dateobs=saltkey.get('DATE-OBS', hdu)
    exptime=float(saltkey.get('EXPTIME', hdu))
    #add the readtime header (preserves the original readout time)
    saltkey.new("READTIME",utctime,'Time of the readout of the frame', hdu)
    #correct the utctime--first switch to datetime to properly handle
    #dates around changes in hours
    y,m,d=dateobs.split('-')
    H,M,S=utctime.split(':')
    s=int(float(S))
    ms=int(1e6*(float(S)-s))
    newtime=datetime.datetime(int(y),int(m),int(d),int(H),int(M),s,ms)
    #correct the datetime: shift back by dsteps exposures + transfers
    dtime=dsteps*(exptime+transtime)
    s=int(dtime)
    ms=int(1e6*(dtime-s))
    newtime=newtime-datetime.timedelta(0, s, ms)
    #update the headkeywords
    # NOTE(review): UTC-OBS is written twice (header.update and
    # saltkey.put) -- presumably redundant; confirm before removing.
    hdu.header.update("UTC-OBS", str(newtime.time()))
    saltkey.put("UTC-OBS", str(newtime.time()), hdu)
    saltkey.put("TIME-OBS", str(newtime.time()), hdu)
    saltkey.put("DATE-OBS", str(newtime.date()), hdu)
    return hdu
# -----------------------------------------------------------
# main code
# Register this module as the PyRAF task "slotreadtimefix" in slottools.
parfile = iraf.osfn("slottools$slotreadtimefix.par")
t = iraf.IrafTaskFactory(taskname="slotreadtimefix",value=parfile,function=slotreadtimefix,pkgname='slottools')
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/skimage/feature/__init__.py | 29 | 1887 | from ._canny import canny
from ._daisy import daisy
from ._hog import hog
from .texture import (greycomatrix, greycoprops,
local_binary_pattern,
multiblock_lbp,
draw_multiblock_lbp)
from .peak import peak_local_max
from .corner import (corner_kitchen_rosenfeld, corner_harris,
corner_shi_tomasi, corner_foerstner, corner_subpix,
corner_peaks, corner_fast, structure_tensor,
structure_tensor_eigvals, hessian_matrix,
hessian_matrix_eigvals, hessian_matrix_det)
from .corner_cy import corner_moravec, corner_orientations
from .template import match_template
from .register_translation import register_translation
from .brief import BRIEF
from .censure import CENSURE
from .orb import ORB
from .match import match_descriptors
from .util import plot_matches
from .blob import blob_dog, blob_log, blob_doh
# Explicit public API of skimage.feature (names re-exported above).
__all__ = ['canny',
           'daisy',
           'hog',
           'greycomatrix',
           'greycoprops',
           'local_binary_pattern',
           'multiblock_lbp',
           'draw_multiblock_lbp',
           'peak_local_max',
           'structure_tensor',
           'structure_tensor_eigvals',
           'hessian_matrix',
           'hessian_matrix_det',
           'hessian_matrix_eigvals',
           'corner_kitchen_rosenfeld',
           'corner_harris',
           'corner_shi_tomasi',
           'corner_foerstner',
           'corner_subpix',
           'corner_peaks',
           'corner_moravec',
           'corner_fast',
           'corner_orientations',
           'match_template',
           'register_translation',
           'BRIEF',
           'CENSURE',
           'ORB',
           'match_descriptors',
           'plot_matches',
           'blob_dog',
           'blob_doh',
           'blob_log']
| mit |
xisisu/RT-Xen | tools/python/xen/xend/server/vusbif.py | 43 | 5097 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009, FUJITSU LABORATORIES LTD.
# Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
#============================================================================
"""Support for virtual USB host controllers.
"""
import re
import string
import types
from xen.xend import sxp
from xen.xend.XendError import VmError
from xen.xend.XendLogging import log
from xen.xend.server.DevController import DevController
from xen.xend.server.DevConstants import xenbusState
from xen.xend.xenstore.xstransact import xstransact
from xen.util import vusb_util
class VUSBController(DevController):
    """Device controller for virtual USB host controllers (VUSB).

    Bridges xend device configuration and the xenstore frontend/backend
    entries for a guest's virtual USB controller and its ports.
    """
    def __init__(self, vm):
        """Create a VUSB device controller bound to the given VM.
        """
        DevController.__init__(self, vm)
    def sxprs(self):
        """@see DevController.sxprs

        Builds one s-expression per device: backend id/state/path from the
        frontend area, controller properties (usb-ver, num-ports) from the
        backend area, plus a ['port', ['1', bus], ...] list of port bindings.
        """
        devslist = []
        for devid in self.deviceIDs():
            vusb_config = []
            backid = self.readFrontend(devid, 'backend-id')
            vusb_config.append(['backend-id', backid])
            state = self.readFrontend(devid, 'state')
            vusb_config.append(['state', state])
            backpath = self.readFrontend(devid, 'backend')
            vusb_config.append(['backend', backpath])
            usbver = self.readBackend(devid, 'usb-ver')
            vusb_config.append(['usb-ver', usbver])
            numports = self.readBackend(devid, 'num-ports')
            vusb_config.append(['num-ports', numports])
            portpath = "port/"
            ports = ['port']
            # Ports are numbered from 1 in xenstore (keys 'port/1'..'port/N').
            for i in range(1, int(numports) + 1):
                bus = self.readBackend(devid, portpath + '%i' % i)
                ports.append(['%i' % i, str(bus)])
            vusb_config.append(ports)
            devslist.append([devid, vusb_config])
        return devslist
    def getDeviceDetails(self, config):
        """@see DevController.getDeviceDetails

        Translates the user-facing config keys ('port-1'..'port-N') into
        the backend xenstore layout ('port/1'..'port/N'). Returns the
        (devid, backend-dict, frontend-dict) triple expected by xend.
        """
        back = {}
        devid = self.allocateDeviceID()
        usbver = config.get('usb-ver', '')
        numports = config.get('num-ports', '')
        back['usb-ver'] = str(usbver)
        back['num-ports'] = str(numports)
        # NOTE(review): 'port-%i' keys are read with [] — a config missing a
        # port entry raises KeyError here; presumably callers always supply
        # all ports. No frontend keys are needed for this device class.
        for i in range(1, int(numports) + 1):
            back['port/%i' % i] = config['port-%i' % i]
        return (devid, back, {})
    def getDeviceConfiguration(self, devid, transaction = None):
        """@see DevController.configuration

        Reads controller properties and per-port bindings back out of the
        backend area, optionally inside an existing xenstore transaction.
        """
        config = DevController.getDeviceConfiguration(self, devid, transaction)
        if transaction is None:
            hcinfo = self.readBackend(devid, 'usb-ver', 'num-ports')
        else:
            hcinfo = self.readBackendTxn(transaction, devid,
                                         'usb-ver', 'num-ports')
        (usbver, numports) = hcinfo
        config['usb-ver'] = str(usbver)
        config['num-ports'] = str(numports)
        for i in range(1, int(numports) + 1):
            if transaction is None:
                config['port-%i' % i] = self.readBackend(devid, 'port/%i' % i)
            else:
                config['port-%i' % i] = self.readBackendTxn(transaction, devid,
                                                            'port/%i' % i)
        return config
    def reconfigureDevice(self, devid, config):
        """@see DevController.reconfigureDevice

        For each port whose binding changed: unbind the previously assigned
        physical USB device (if any), write the new binding to xenstore,
        then bind the new physical device (if any). Empty string means
        "no device assigned to this port".
        """
        cur_config = self.getDeviceConfiguration(devid)
        numports = cur_config['num-ports']
        for i in range(1, int(numports) + 1):
            if config.has_key('port-%i' % i):
                if not config['port-%i' % i] == cur_config['port-%i' % i]:
                    if not cur_config['port-%i' % i] == "":
                        vusb_util.unbind_usb_device(cur_config['port-%i' % i])
                    self.writeBackend(devid, 'port/%i' % i,
                                      config['port-%i' % i])
                    if not config['port-%i' % i] == "":
                        vusb_util.bind_usb_device(config['port-%i' % i])
        return self.readBackend(devid, 'uuid')
    def waitForBackend(self, devid):
        # VUSB has no hotplug scripts to wait for; always report success.
        return (0, "ok - no hotplug")
    def waitForBackend_destroy(self, backpath):
        # Nothing to tear down at the hotplug level.
        return 0
    def migrate(self, deviceConfig, network, dst, step, domName):
        # Passthrough USB devices are tied to the physical host.
        raise VmError('Migration not permitted with assigned USB device.')
| gpl-2.0 |
hmronline/home-assistant | tests/helpers/test_template.py | 10 | 17327 | """Test Home Assistant template helper methods."""
# pylint: disable=too-many-public-methods
import unittest
from unittest.mock import patch
from homeassistant.components import group
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
class TestUtilTemplate(unittest.TestCase):
    """Test the Template helper (rendering, filters, distance/closest)."""
    def setUp(self): # pylint: disable=invalid-name
        """Setup the tests."""
        self.hass = get_test_home_assistant()
    def tearDown(self): # pylint: disable=invalid-name
        """Stop down stuff we started."""
        self.hass.stop()
    def test_referring_states_by_entity_id(self):
        """A state can be referenced as states.<domain>.<object_id>."""
        self.hass.states.set('test.object', 'happy')
        self.assertEqual(
            'happy',
            template.render(self.hass, '{{ states.test.object.state }}'))
    def test_iterating_all_states(self):
        """Iterating `states` yields every state, sorted by entity id."""
        self.hass.states.set('test.object', 'happy')
        self.hass.states.set('sensor.temperature', 10)
        self.assertEqual(
            '10happy',
            template.render(
                self.hass,
                '{% for state in states %}{{ state.state }}{% endfor %}'))
    def test_iterating_domain_states(self):
        """Iterating states.<domain> yields only that domain's states."""
        self.hass.states.set('test.object', 'happy')
        self.hass.states.set('sensor.back_door', 'open')
        self.hass.states.set('sensor.temperature', 10)
        self.assertEqual(
            'open10',
            template.render(
                self.hass,
                """
{% for state in states.sensor %}{{ state.state }}{% endfor %}
                """))
    def test_float(self):
        """The float() helper converts state strings to numbers."""
        self.hass.states.set('sensor.temperature', '12')
        self.assertEqual(
            '12.0',
            template.render(
                self.hass,
                '{{ float(states.sensor.temperature.state) }}'))
        self.assertEqual(
            'True',
            template.render(
                self.hass,
                '{{ float(states.sensor.temperature.state) > 11 }}'))
    def test_rounding_value(self):
        """The round filter rounds state values (with optional precision)."""
        self.hass.states.set('sensor.temperature', 12.78)
        self.assertEqual(
            '12.8',
            template.render(
                self.hass,
                '{{ states.sensor.temperature.state | round(1) }}'))
        self.assertEqual(
            '128',
            template.render(
                self.hass,
                '{{ states.sensor.temperature.state | multiply(10) | round }}'
            ))
    def test_rounding_value_get_original_value_on_error(self):
        """round on a non-numeric value passes the value through unchanged."""
        self.assertEqual(
            'None',
            template.render(
                self.hass,
                '{{ None | round }}'
            ))
        self.assertEqual(
            'no_number',
            template.render(
                self.hass,
                '{{ "no_number" | round }}'
            ))
    def test_multiply(self):
        """The multiply filter scales numbers and passes non-numbers through."""
        tests = {
            None: 'None',
            10: '100',
            '"abcd"': 'abcd'
        }
        for inp, out in tests.items():
            self.assertEqual(
                out,
                template.render(self.hass,
                                '{{ %s | multiply(10) | round }}' % inp))
    def test_passing_vars_as_keywords(self):
        """Extra keyword arguments become template variables."""
        self.assertEqual(
            '127', template.render(self.hass, '{{ hello }}', hello=127))
    def test_passing_vars_as_vars(self):
        """A dict argument supplies template variables."""
        self.assertEqual(
            '127', template.render(self.hass, '{{ hello }}', {'hello': 127}))
    def test_render_with_possible_json_value_with_valid_json(self):
        """Valid JSON input is exposed to the template as value_json."""
        self.assertEqual(
            'world',
            template.render_with_possible_json_value(
                self.hass, '{{ value_json.hello }}', '{"hello": "world"}'))
    def test_render_with_possible_json_value_with_invalid_json(self):
        """Invalid JSON input renders to an empty string."""
        self.assertEqual(
            '',
            template.render_with_possible_json_value(
                self.hass, '{{ value_json }}', '{ I AM NOT JSON }'))
    def test_render_with_possible_json_value_with_template_error(self):
        """A broken template falls back to returning the raw value."""
        self.assertEqual(
            'hello',
            template.render_with_possible_json_value(
                self.hass, '{{ value_json', 'hello'))
    def test_render_with_possible_json_value_with_template_error_value(self):
        """A broken template returns the explicit fallback value if given."""
        self.assertEqual(
            '-',
            template.render_with_possible_json_value(
                self.hass, '{{ value_json', 'hello', '-'))
    def test_raise_exception_on_error(self):
        """Invalid template syntax raises TemplateError."""
        with self.assertRaises(TemplateError):
            template.render(self.hass, '{{ invalid_syntax')
    def test_if_state_exists(self):
        """An existing state object is truthy inside a template."""
        self.hass.states.set('test.object', 'available')
        self.assertEqual(
            'exists',
            template.render(
                self.hass,
                """
{% if states.test.object %}exists{% else %}not exists{% endif %}
                """))
    def test_is_state(self):
        """is_state() compares an entity's state against a value."""
        self.hass.states.set('test.object', 'available')
        self.assertEqual(
            'yes',
            template.render(
                self.hass,
                """
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
                """))
    def test_is_state_attr(self):
        """is_state_attr() compares an entity's attribute against a value."""
        self.hass.states.set('test.object', 'available', {'mode': 'on'})
        self.assertEqual(
            'yes',
            template.render(
                self.hass,
                """
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
                """))
    def test_states_function(self):
        """states() returns the state string, or 'unknown' when missing."""
        self.hass.states.set('test.object', 'available')
        self.assertEqual(
            'available',
            template.render(self.hass, '{{ states("test.object") }}'))
        self.assertEqual(
            'unknown',
            template.render(self.hass, '{{ states("test.object2") }}'))
    @patch('homeassistant.core.dt_util.utcnow', return_value=dt_util.utcnow())
    @patch('homeassistant.helpers.template.TemplateEnvironment.'
           'is_safe_callable', return_value=True)
    def test_now(self, mock_is_safe, mock_utcnow):
        """The now variable reflects the (mocked, frozen) current time."""
        self.assertEqual(
            dt_util.utcnow().isoformat(),
            template.render(self.hass, '{{ now.isoformat() }}'))
    @patch('homeassistant.core.dt_util.utcnow', return_value=dt_util.utcnow())
    @patch('homeassistant.helpers.template.TemplateEnvironment.'
           'is_safe_callable', return_value=True)
    def test_utcnow(self, mock_is_safe, mock_utcnow):
        """The utcnow variable reflects the (mocked, frozen) current time."""
        self.assertEqual(
            dt_util.utcnow().isoformat(),
            template.render(self.hass, '{{ utcnow.isoformat() }}'))
    def test_utcnow_is_exactly_now(self):
        """utcnow and now compare equal (HA stores time in UTC internally)."""
        self.assertEqual(
            'True',
            template.render(self.hass, '{{ utcnow == now }}'))
    def test_distance_function_with_1_state(self):
        """distance(state) measures from home; ~187 km to the test coords."""
        self.hass.states.set('test.object', 'happy', {
            'latitude': 32.87336,
            'longitude': -117.22943,
        })
        self.assertEqual(
            '187',
            template.render(
                self.hass, '{{ distance(states.test.object) | round }}'))
    def test_distance_function_with_2_states(self):
        """distance(a, b) measures between two located states."""
        self.hass.states.set('test.object', 'happy', {
            'latitude': 32.87336,
            'longitude': -117.22943,
        })
        self.hass.states.set('test.object_2', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        self.assertEqual(
            '187',
            template.render(
                self.hass,
                '{{ distance(states.test.object, states.test.object_2)'
                '| round }}'))
    def test_distance_function_with_1_coord(self):
        """distance(lat, lon) measures from home to a raw coordinate."""
        self.assertEqual(
            '187',
            template.render(
                self.hass, '{{ distance("32.87336", "-117.22943") | round }}'))
    def test_distance_function_with_2_coords(self):
        """distance(lat1, lon1, lat2, lon2) measures between raw coords."""
        self.assertEqual(
            '187',
            template.render(
                self.hass,
                '{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
                % (self.hass.config.latitude, self.hass.config.longitude)))
    def test_distance_function_with_1_state_1_coord(self):
        """distance() accepts a state and a raw coordinate in either order."""
        self.hass.states.set('test.object_2', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        self.assertEqual(
            '187',
            template.render(
                self.hass,
                '{{ distance("32.87336", "-117.22943", states.test.object_2) '
                '| round }}'))
        self.assertEqual(
            '187',
            template.render(
                self.hass,
                '{{ distance(states.test.object_2, "32.87336", "-117.22943") '
                '| round }}'))
    def test_distance_function_return_None_if_invalid_state(self):
        """distance() yields None for a state without full location info."""
        self.hass.states.set('test.object_2', 'happy', {
            'latitude': 10,
        })
        self.assertEqual(
            'None',
            template.render(
                self.hass,
                '{{ distance(states.test.object_2) | round }}'))
    def test_distance_function_return_None_if_invalid_coord(self):
        """distance() yields None for unparsable or incomplete coordinates."""
        self.assertEqual(
            'None',
            template.render(
                self.hass,
                '{{ distance("123", "abc") }}'))
        self.assertEqual(
            'None',
            template.render(
                self.hass,
                '{{ distance("123") }}'))
        self.hass.states.set('test.object_2', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        self.assertEqual(
            'None',
            template.render(
                self.hass,
                '{{ distance("123", states.test_object_2) }}'))
    def test_closest_function_home_vs_domain(self):
        """closest(states.<domain>) only considers that domain's states."""
        self.hass.states.set('test_domain.object', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('not_test_domain.but_closer', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        self.assertEqual(
            'test_domain.object',
            template.render(self.hass,
                            '{{ closest(states.test_domain).entity_id }}'))
    def test_closest_function_home_vs_all_states(self):
        """closest(states) considers every located state."""
        self.hass.states.set('test_domain.object', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('test_domain_2.and_closer', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        self.assertEqual(
            'test_domain_2.and_closer',
            template.render(self.hass,
                            '{{ closest(states).entity_id }}'))
    def test_closest_function_home_vs_group_entity_id(self):
        """closest("group.x") only considers the group's members."""
        self.hass.states.set('test_domain.object', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('not_in_group.but_closer', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        group.Group(self.hass, 'location group', ['test_domain.object'])
        self.assertEqual(
            'test_domain.object',
            template.render(self.hass,
                            '{{ closest("group.location_group").entity_id }}'))
    def test_closest_function_home_vs_group_state(self):
        """closest(states.group.x) only considers the group's members."""
        self.hass.states.set('test_domain.object', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('not_in_group.but_closer', 'happy', {
            'latitude': self.hass.config.latitude,
            'longitude': self.hass.config.longitude,
        })
        group.Group(self.hass, 'location group', ['test_domain.object'])
        self.assertEqual(
            'test_domain.object',
            template.render(
                self.hass,
                '{{ closest(states.group.location_group).entity_id }}'))
    def test_closest_function_to_coord(self):
        """closest(lat, lon, states) measures from a raw coordinate."""
        self.hass.states.set('test_domain.closest_home', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('test_domain.closest_zone', 'happy', {
            'latitude': self.hass.config.latitude + 0.2,
            'longitude': self.hass.config.longitude + 0.2,
        })
        self.hass.states.set('zone.far_away', 'zoning', {
            'latitude': self.hass.config.latitude + 0.3,
            'longitude': self.hass.config.longitude + 0.3,
        })
        self.assertEqual(
            'test_domain.closest_zone',
            template.render(
                self.hass,
                '{{ closest("%s", %s, states.test_domain).entity_id }}'
                % (self.hass.config.latitude + 0.3,
                   self.hass.config.longitude + 0.3))
        )
    def test_closest_function_to_entity_id(self):
        """closest("zone.x", states) measures from a zone entity id."""
        self.hass.states.set('test_domain.closest_home', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('test_domain.closest_zone', 'happy', {
            'latitude': self.hass.config.latitude + 0.2,
            'longitude': self.hass.config.longitude + 0.2,
        })
        self.hass.states.set('zone.far_away', 'zoning', {
            'latitude': self.hass.config.latitude + 0.3,
            'longitude': self.hass.config.longitude + 0.3,
        })
        self.assertEqual(
            'test_domain.closest_zone',
            template.render(
                self.hass,
                '{{ closest("zone.far_away", states.test_domain).entity_id }}')
        )
    def test_closest_function_to_state(self):
        """closest(state, states) measures from a state object."""
        self.hass.states.set('test_domain.closest_home', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.hass.states.set('test_domain.closest_zone', 'happy', {
            'latitude': self.hass.config.latitude + 0.2,
            'longitude': self.hass.config.longitude + 0.2,
        })
        self.hass.states.set('zone.far_away', 'zoning', {
            'latitude': self.hass.config.latitude + 0.3,
            'longitude': self.hass.config.longitude + 0.3,
        })
        self.assertEqual(
            'test_domain.closest_zone',
            template.render(
                self.hass,
                '{{ closest(states.zone.far_away, '
                'states.test_domain).entity_id }}')
        )
    def test_closest_function_invalid_state(self):
        """closest() yields None when the origin state does not exist."""
        self.hass.states.set('test_domain.closest_home', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        for state in ('states.zone.non_existing', '"zone.non_existing"'):
            self.assertEqual(
                'None',
                template.render(
                    self.hass, '{{ closest(%s, states) }}' % state))
    def test_closest_function_state_with_invalid_location(self):
        """closest() yields None when the origin state has bad coordinates."""
        self.hass.states.set('test_domain.closest_home', 'happy', {
            'latitude': 'invalid latitude',
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.assertEqual(
            'None',
            template.render(
                self.hass,
                '{{ closest(states.test_domain.closest_home, '
                'states) }}'))
    def test_closest_function_invalid_coordinates(self):
        """closest() yields None for unparsable raw coordinates."""
        self.hass.states.set('test_domain.closest_home', 'happy', {
            'latitude': self.hass.config.latitude + 0.1,
            'longitude': self.hass.config.longitude + 0.1,
        })
        self.assertEqual(
            'None',
            template.render(self.hass,
                            '{{ closest("invalid", "coord", states) }}'))
    def test_closest_function_no_location_states(self):
        """closest() yields None when no state carries a location."""
        self.assertEqual('None',
                         template.render(self.hass, '{{ closest(states) }}'))
| mit |
naousse/odoo | addons/procurement_jit/__openerp__.py | 312 | 2085 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP addon manifest for the procurement_jit module.
    'name': 'Just In Time Scheduling',
    'version': '1.0',
    'category': 'Base',
    'description': """
This module allows Just In Time computation of procurement orders.
==================================================================
If you install this module, you will not have to run the regular procurement
scheduler anymore (but you still need to run the minimum order point rule
scheduler, or for example let it run daily).
All procurement orders will be processed immediately, which could in some
cases entail a small performance impact.
It may also increase your stock size because products are reserved as soon
as possible and the scheduler time range is not taken into account anymore.
In that case, you can not use priorities any more on the different picking.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/manufacturing',
    'depends': ['procurement'],  # extends the base procurement scheduler
    'data': [],
    'demo': [],
    'test': ['test/procurement_jit.yml'],
    'installable': True,
    'auto_install': False,  # must be installed explicitly
}
| agpl-3.0 |
ajoaoff/django | tests/gis_tests/test_geoip2.py | 75 | 6007 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip2 import HAS_GEOIP2
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.test import mock
from django.utils import six
if HAS_GEOIP2:
from django.contrib.gis.geoip2 import GeoIP2, GeoIP2Exception
# Note: Requires both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoLite2-City.mmdb'
# and 'GeoLite2-Country.mmdb').
@skipUnless(HAS_GEOIP2 and getattr(settings, "GEOIP_PATH", None),
            "GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
    """Tests for django.contrib.gis.geoip2.GeoIP2 against real datasets."""
    # Fixed address/hostname pair known to resolve to Houston, TX, US.
    addr = '128.249.1.1'
    fqdn = 'tmc.edu'
    def test01_init(self):
        "GeoIP initialization."
        g1 = GeoIP2() # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP2(path, 0) # Passing in data path explicitly.
        g3 = GeoIP2.open(path, 0) # MaxMind Python API syntax.
        for g in (g1, g2, g3):
            self.assertTrue(g._country)
            self.assertTrue(g._city)
        # Only passing in the location of one database.
        city = os.path.join(path, 'GeoLite2-City.mmdb')
        cntry = os.path.join(path, 'GeoLite2-Country.mmdb')
        g4 = GeoIP2(city, country='')
        self.assertIsNone(g4._country)
        g5 = GeoIP2(cntry, city='')
        self.assertIsNone(g5._city)
        # Improper parameters.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIP2Exception, GeoIP2, cache=bad)
            if isinstance(bad, six.string_types):
                e = GeoIP2Exception
            else:
                e = TypeError
            self.assertRaises(e, GeoIP2, bad, 0)
    def test02_bad_query(self):
        "GeoIP query parameter checking."
        cntry_g = GeoIP2(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIP2Exception, cntry_g.city, 'tmc.edu')
        self.assertRaises(GeoIP2Exception, cntry_g.coords, 'tmc.edu')
        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP2)
    @mock.patch('socket.gethostbyname')
    def test03_country(self, gethostbyname):
        "GeoIP country querying methods."
        # DNS is mocked so the fqdn resolves deterministically offline.
        gethostbyname.return_value = '128.249.1.1'
        g = GeoIP2(city='<foo>')
        for query in (self.fqdn, self.addr):
            self.assertEqual(
                'US',
                g.country_code(query),
                'Failed for func country_code and query %s' % query
            )
            self.assertEqual(
                'United States',
                g.country_name(query),
                'Failed for func country_name and query %s' % query
            )
            self.assertEqual(
                {'country_code': 'US', 'country_name': 'United States'},
                g.country(query)
            )
    @skipUnless(HAS_GEOS, "Geos is required")
    @mock.patch('socket.gethostbyname')
    def test04_city(self, gethostbyname):
        "GeoIP city querying methods."
        gethostbyname.return_value = '128.249.1.1'
        g = GeoIP2(country='<foo>')
        for query in (self.fqdn, self.addr):
            # Country queries should still work.
            self.assertEqual(
                'US',
                g.country_code(query),
                'Failed for func country_code and query %s' % query
            )
            self.assertEqual(
                'United States',
                g.country_name(query),
                'Failed for func country_name and query %s' % query
            )
            self.assertEqual(
                {'country_code': 'US', 'country_name': 'United States'},
                g.country(query)
            )
            # City information dictionary.
            d = g.city(query)
            self.assertEqual('US', d['country_code'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])
            geom = g.geos(query)
            self.assertIsInstance(geom, GEOSGeometry)
            lon, lat = (-95.4010, 29.7079)
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)
    @mock.patch('socket.gethostbyname')
    def test05_unicode_response(self, gethostbyname):
        "GeoIP strings should be properly encoded (#16553)."
        gethostbyname.return_value = '194.27.42.76'
        g = GeoIP2()
        d = g.city("nigde.edu.tr")
        self.assertEqual('Niğde', d['city'])
        d = g.country('200.26.205.1')
        # Some databases have only unaccented countries
        self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
    def test06_ipv6_query(self):
        "GeoIP can lookup IPv6 addresses."
        g = GeoIP2()
        d = g.city('2002:81ed:c9a5::81ed:c9a5') # IPv6 address for www.nhm.ku.edu
        self.assertEqual('US', d['country_code'])
        self.assertEqual('Lawrence', d['city'])
        self.assertEqual('KS', d['region'])
    def test_repr(self):
        "repr() should include the binary format version and both db paths."
        path = settings.GEOIP_PATH
        g = GeoIP2(path=path)
        meta = g._reader.metadata()
        version = '%s.%s' % (meta.binary_format_major_version, meta.binary_format_minor_version)
        country_path = g._country_file
        city_path = g._city_file
        expected = '<GeoIP2 [v%(version)s] _country_file="%(country)s", _city_file="%(city)s">' % {
            'version': version,
            'country': country_path,
            'city': city_path,
        }
        self.assertEqual(repr(g), expected)
| bsd-3-clause |
ryuunosukeyoshi/PartnerPoi-Bot | cogs/betterterminal.py | 1 | 14515 | from subprocess import Popen, CalledProcessError, PIPE, STDOUT
from re import sub
from os.path import exists
from os import makedirs, getcwd, chdir, listdir, popen as ospopen
from getpass import getuser
from platform import uname, python_version
from discord.ext import commands
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
__author__ = 'Sentry#4141'
class BetterTerminal:
    """Repl like Terminal in discord.

    Owner-only cog: opens a per-channel shell session; every message
    prefixed with the configured prefix is executed on the host via the
    system shell and the output is paginated back into the channel.
    """
    def __init__(self, bot):
        self.bot = bot
        # Settings are persisted in data/betterterminal/settings.json and
        # mirrored onto the instance for quick access.
        self.settings = dataIO.load_json('data/betterterminal/settings.json')
        self.prefix = self.settings['prefix']
        self.cc = self.settings['cc']
        self.os = self.settings['os']
        self.cos = self.settings['cos']
        self.enabled = self.settings['enabled']
        # channel id -> author id of the user who opened the session
        self.sessions = {}
    @commands.command(pass_context=True, hidden=True)
    @checks.is_owner()
    async def cmddebug(self, ctx):
        """This command is for debugging only"""
        try:
            commithash = ospopen('git rev-parse --verify HEAD').read()[:7]
        except:
            commithash = 'None'
        # Adjacent string literals are concatenated into one message.
        text = str('```'
                   'Bot Information\n\n'
                   'Bot name: {}\n'
                   'Bot displayname: {}\n\n'
                   'Operating System: {}\n'
                   'OS Version: {}\n'
                   'Architecture: {}\n\n'
                   'Python Version: {}\n'
                   'Commit {}\n'
                   '```'.format(ctx.message.server.me.name,
                                ctx.message.server.me.display_name,
                                uname()[0], uname()[3], uname()[4],
                                python_version(), commithash)
                   )
        # Split the text into <2000-char pages at newline boundaries
        # (Discord's message length limit), neutralizing mass mentions.
        result = []
        in_text = text
        shorten_by = 12
        page_length = 2000
        num_mentions = text.count("@here") + text.count("@everyone")
        shorten_by += num_mentions
        page_length -= shorten_by
        while len(in_text) > page_length:
            closest_delim = max([in_text.rfind(d, 0, page_length)
                                 for d in ["\n"]])
            closest_delim = closest_delim if closest_delim != -1 else page_length
            to_send = in_text[:closest_delim].replace(
                "@everyone", "@\u200beveryone").replace("@here", "@\u200bhere")
            result.append(to_send)
            in_text = in_text[closest_delim:]
        result.append(in_text.replace(
            "@everyone", "@\u200beveryone").replace("@here", "@\u200bhere"))
        for page in result:
            await self.bot.say(page)
    @commands.group(pass_context=True, hidden=True)
    @checks.is_owner()
    async def system(self, ctx):
        """Returns system information"""
        await self.bot.say('{} is running on {} {} using {}'
                           ''.format(ctx.message.server.me.display_name,
                                     uname()[0], uname()[2], python_version()))
    @commands.command(pass_context=True)
    @checks.is_owner()
    async def cmd(self, ctx):
        """Starts up the prompt"""
        if ctx.message.channel.id in self.sessions:
            await self.bot.say('Already running a Terminal session '
                               'in this channel. Exit it with `exit()` or `quit`')
            return
        # Rereading the values that were already read in __init__ to ensure its always up to date
        try:
            self.settings = dataIO.load_json('data/betterterminal/settings.json')
        except:
            # Pretend its the worst case and reset the settings
            check_folder()
            check_file()
            self.settings = dataIO.load_json('data/betterterminal/settings.json')
        self.prefix = self.settings['prefix']
        self.cc = self.settings['cc']
        self.os = self.settings['os']
        self.sessions.update({ctx.message.channel.id:ctx.message.author.id})
        await self.bot.say('Enter commands after {} to execute them.'
                           ' `exit()` or `quit` to exit.'.format(self.prefix.replace("`", "\\`")))
    @commands.group(pass_context=True)
    @checks.is_owner()
    async def cmdsettings(self, ctx):
        """Settings for BetterTerminal"""
        if ctx.invoked_subcommand is None:
            # No subcommand given: show this group's help text.
            pages = self.bot.formatter.format_help_for(ctx, ctx.command)
            for page in pages:
                await self.bot.send_message(ctx.message.channel, page)
    @cmdsettings.group(name="customcom", pass_context=True)
    @checks.is_owner()
    async def _cc(self, ctx):
        """Custom commands for BetterTerminal"""
        await self.bot.say('This feature is WIP')
        # The bare triple-quoted string below is parked (dead) code for the
        # eventual implementation; it is evaluated as an expression and
        # discarded at runtime.
        """
        if ctx.invoked_subcommand is None:
            pages = self.bot.formatter.format_help_for(ctx, ctx.command)
            for page in pages:
                await self.bot.send_message(ctx.message.channel, page)
        """
    @cmdsettings.command(name="os", pass_context=True)
    @checks.is_owner()
    async def _os(self, ctx, os: str = None):
        """Set the prompt type of BetterTerminal to emulate another Operatingsystem.
        these 'emulations' arent 100% accurate on other Operatingsystems"""
        # NOTE(review): the parameter shadows the builtin-style name 'os';
        # harmless here because the module was imported piecewise, but
        # confusing.
        if os is None:
            pages = self.bot.formatter.format_help_for(ctx, ctx.command)
            for page in pages:
                await self.bot.send_message(ctx.message.channel, page)
            if self.cos == 'default':
                await self.bot.say('```\nCurrent prompt type: {}[{}] ```\n'.format(self.cos, uname()[0].lower()))
            else:
                await self.bot.say('```\nCurrent prompt type: {} ```\n'.format(self.cos))
            return
        if not os.lower() in self.os and os != 'default':
            await self.bot.say('Invalid prompt type.\nThe following once are valid:\n\n{}'.format(", ".join(self.os)))
            return
        os = os.lower()
        self.cos = os
        self.settings['cos'] = os
        dataIO.save_json('data/betterterminal/settings.json', self.settings)
        await self.bot.say('Changed prompt type to {} '.format(self.cos.replace("`", "\\`")))
    @cmdsettings.command(name="prefix", pass_context=True)
    @checks.is_owner()
    async def _prefix(self, ctx, prefix: str = None):
        """Set the prefix for the Terminal"""
        if prefix is None:
            pages = self.bot.formatter.format_help_for(ctx, ctx.command)
            for page in pages:
                await self.bot.send_message(ctx.message.channel, page)
            await self.bot.say('```\nCurrent prefix: {} ```\n'.format(self.prefix))
            return
        self.prefix = prefix
        self.settings['prefix'] = prefix
        dataIO.save_json('data/betterterminal/settings.json', self.settings)
        await self.bot.say('Changed prefix to {} '.format(self.prefix.replace("`", "\\`")))
    async def on_message(self, message): # This is where the magic starts
        # SECURITY: executes arbitrary shell commands on the host; the guard
        # below restricts this to the bot owner in an active session channel.
        if message.channel.id in self.sessions and self.enabled and message.author.id == self.bot.settings.owner: # I REPEAT DO NOT DELETE
            #TODO:
            # Whitelist & Blacklists that cant be modified by the bot
            if not dataIO.is_valid_json("data/betterterminal/settings.json"):
                check_folder()
                check_file()
            self.settings = dataIO.load_json('data/betterterminal/settings.json')
            self.prefix = self.settings['prefix']
            self.cc = self.settings['cc']
            self.os = self.settings['os']
            self.cos = self.settings['cos']
            self.enabled = self.settings['enabled']
            if message.content.startswith(self.prefix) or message.content.startswith('debugprefixcmd'):
                if message.content.startswith(self.prefix):
                    command = message.content[len(self.prefix):]
                else:
                    command = message.content[len('debugprefixcmd'):]
                # check if the message starts with the command prefix
                if message.attachments:
                    command += ' ' + message.attachments[0]['url']
                if not command: # if you have entered nothing it will just ignore
                    return
                # Expand custom commands, preferring the entry matching the
                # host OS and falling back to the 'linux' variant.
                if command in self.cc:
                    if self.cc[command][uname()[0].lower()]:
                        command = self.cc[command][uname()[0].lower()]
                    else:
                        command = self.cc[command]['linux']
                if command == 'exit()' or command == 'quit': # commands used for quitting cmd, same as for repl
                    await self.bot.send_message(message.channel, 'Exiting.')
                    self.sessions.pop(message.channel.id)
                    return
                # NOTE(review): this compares the imported `commands` module
                # (discord.ext.commands) to 'exit' — always False, so this
                # branch is dead; it was presumably meant to be `command`.
                # Even if reached, sending "" would be rejected by Discord.
                elif commands == 'exit':
                    await self.bot.send_message(message.channel, "")
                if command.lower().find("apt-get install") != -1 and command.lower().find("-y") == -1:
                    command = "{} -y".format(command) # forces apt-get to not ask for a prompt
                # 'cd' must be handled in-process: a subprocess cwd change
                # would not persist between commands.
                if command.startswith('cd ') and command.split('cd ')[1]:
                    path = command.split('cd ')[1]
                    try:
                        chdir(path)
                        return
                    except:
                        if path in listdir() or path.startswith('/'):
                            shell = 'cd: {}: Permission denied'.format(path)
                        else:
                            shell = 'cd: {}: No such file or directory'.format(path)
                else:
                    # NOTE(review): Popen never raises CalledProcessError
                    # (that is check_call/check_output), so `error` stays
                    # False in practice; a failing command's stderr is still
                    # captured via stderr=STDOUT.
                    try:
                        output = Popen(command, shell=True, stdout=PIPE,
                                       stderr=STDOUT).communicate()[0]
                        error = False
                    except CalledProcessError as err:
                        output = err.output
                        error = True
                    shell = output.decode('utf_8')
                    if shell == "" and not error:
                        return
                shell = sub('/bin/sh: .: ', '', shell)
                if "\n" in shell[:-2]:
                    shell = '\n' + shell
                # Pick the prompt template: 'default' maps to the host OS.
                if self.cos == 'default':
                    cos = uname()[0].lower()
                else:
                    cos = self.cos
                if cos in self.os:
                    path = getcwd()
                    username = getuser()
                    system = uname()[1]
                    user = self.os[cos].format(
                        user=username, system=system, path=path)
                else:
                    path = getcwd()
                    username = getuser()
                    system = uname()[1]
                    user = self.os['linux'].format(user=username, system=system, path=path)
                # Paginate prompt + output into <2000-char Discord messages,
                # neutralizing mass mentions (same scheme as cmddebug).
                result = []
                in_text = text = user + shell
                shorten_by = 12
                page_length = 2000
                num_mentions = text.count("@here") + text.count("@everyone")
                shorten_by += num_mentions
                page_length -= shorten_by
                while len(in_text) > page_length:
                    closest_delim = max([in_text.rfind(d, 0, page_length)
                                         for d in ["\n"]])
                    closest_delim = closest_delim if closest_delim != -1 else page_length
                    to_send = in_text[:closest_delim].replace(
                        "@everyone", "@\u200beveryone").replace("@here", "@\u200bhere")
                    result.append(to_send)
                    in_text = in_text[closest_delim:]
                result.append(in_text.replace(
                    "@everyone", "@\u200beveryone").replace("@here", "@\u200bhere"))
                #result = list(pagify(user + shell, shorten_by=12))
                # NOTE(review): `num % 1 == 0` is always true, so every page
                # after the first waits for a 'more' confirmation.
                for num, output in enumerate(result):
                    if num % 1 == 0 and num != 0:
                        note = await self.bot.send_message(message.channel,
                                                           'There are still {} pages left.\n'
                                                           'Type `more` to continue.'
                                                           ''.format(len(result) - (num+1)))
                        msg = await self.bot.wait_for_message(check=lambda m:
                                                              m.channel == message.channel and
                                                              m.author == message.author and
                                                              m.content == 'more',
                                                              timeout=10)
                        try:
                            await self.bot.delete_message(note)
                        except:
                            pass
                        if msg is None:
                            return
                        else:
                            if output:
                                await self.bot.send_message(message.channel,
                                                            '```Bash\n{}```'.format(output))
                    else:
                        if output:
                            await self.bot.send_message(message.channel,
                                                        '```Bash\n{}```'.format(output))
def check_folder():
    """Ensure the cog's data directory exists, creating it if needed."""
    if not exists("data/betterterminal"):
        print("[Terminal]Creating data/betterterminal folder...")
    # exist_ok avoids the race where the folder appears between the
    # exists() check above and the creation attempt (TOCTOU).
    makedirs("data/betterterminal", exist_ok=True)
def check_file():
    """Write a default settings.json when the current one is missing or invalid."""
    defaults = {
        "prefix": ">",
        "cc": {
            "test": {
                "linux": ('printf "Hello.\n'
                          'This is a custom command made using the magic of python."'),
                "windows": ('echo Hello. '
                            'This is a custom command made using the magic of python.'),
            }
        },
        "os": {
            "windows": "{path}>",
            "linux": "{user}@{system}:{path} $ ",
        },
        "cos": "default",
        "enabled": True,
    }
    # Nothing to do when a valid settings file is already in place.
    if dataIO.is_valid_json("data/betterterminal/settings.json"):
        return
    print("[BetterTerminal]Creating default settings.json...")
    dataIO.save_json("data/betterterminal/settings.json", defaults)
def setup(bot):
    """Entry point used by the bot loader: prepare data files, then register the cog."""
    for prepare in (check_folder, check_file):
        prepare()
    bot.add_cog(BetterTerminal(bot))
| gpl-3.0 |
j0nathan33/CouchPotatoServer | libs/suds/xsd/sxbase.py | 193 | 19777 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbase} module provides I{base} classes that represent
schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.sax.element import Element
from suds.sax import Namespace
log = getLogger(__name__)
class SchemaObject(object):
    """
    A schema object is an extension to object with schema awareness.
    @ivar root: The XML root element.
    @type root: L{Element}
    @ivar schema: The schema containing this object.
    @type schema: L{schema.Schema}
    @ivar form_qualified: A flag that indicates that @elementFormDefault
        has a value of I{qualified}.
    @type form_qualified: boolean
    @ivar nillable: A flag that indicates that @nillable
        has a value of I{true}.
    @type nillable: boolean
    @ivar default: The default value.
    @type default: object
    @ivar rawchildren: A raw list of all children.
    @type rawchildren: [L{SchemaObject},...]
    """
    @classmethod
    def prepend(cls, d, s, filter=Filter()):
        """
        Prepend schema object's from B{s}ource list to
        the B{d}estination list while applying the filter.
        @param d: The destination list.
        @type d: list
        @param s: The source list.
        @type s: list
        @param filter: A filter that allows items to be prepended.
        @type filter: L{Filter}
        """
        # NOTE(review): the default Filter() instance is shared across calls;
        # assumed stateless -- confirm before mutating it anywhere.
        i = 0
        for x in s:
            if x in filter:
                d.insert(i, x)
                i += 1
    @classmethod
    def append(cls, d, s, filter=Filter()):
        """
        Append schema object's from B{s}ource list to
        the B{d}estination list while applying the filter.
        @param d: The destination list.
        @type d: list
        @param s: The source list.
        @type s: list
        @param filter: A filter that allows items to be appended.
        @type filter: L{Filter}
        """
        for item in s:
            if item in filter:
                d.append(item)
    def __init__(self, schema, root):
        """
        @param schema: The containing schema.
        @type schema: L{schema.Schema}
        @param root: The xml root node.
        @type root: L{Element}
        """
        self.schema = schema
        self.root = root
        self.id = objid(self)
        self.name = root.get('name')
        self.qname = (self.name, schema.tns[1])
        self.min = root.get('minOccurs')
        self.max = root.get('maxOccurs')
        self.type = root.get('type')
        self.ref = root.get('ref')
        self.form_qualified = schema.form_qualified
        self.nillable = False
        self.default = root.get('default')
        self.rawchildren = []
        # Resolution cache keyed by the nobuiltin flag (see resolve()).
        self.cache = {}
    def attributes(self, filter=Filter()):
        """
        Get only the attribute content.
        @param filter: A filter to constrain the result.
        @type filter: L{Filter}
        @return: A list of tuples (attr, ancestry)
        @rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
        """
        result = []
        for child, ancestry in self:
            if child.isattr() and child in filter:
                result.append((child, ancestry))
        return result
    def children(self, filter=Filter()):
        """
        Get only the I{direct} or non-attribute content.
        @param filter: A filter to constrain the result.
        @type filter: L{Filter}
        @return: A list tuples: (child, ancestry)
        @rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
        """
        result = []
        for child, ancestry in self:
            if not child.isattr() and child in filter:
                result.append((child, ancestry))
        return result
    def get_attribute(self, name):
        """
        Get (find) a I{non-attribute} attribute by name.
        @param name: A attribute name.
        @type name: str
        @return: A tuple: the requested (attribute, ancestry).
        @rtype: (L{SchemaObject}, [L{SchemaObject},..])
        """
        for child, ancestry in self.attributes():
            if child.name == name:
                return (child, ancestry)
        return (None, [])
    def get_child(self, name):
        """
        Get (find) a I{non-attribute} child by name.
        @param name: A child name.
        @type name: str
        @return: A tuple: the requested (child, ancestry).
        @rtype: (L{SchemaObject}, [L{SchemaObject},..])
        """
        for child, ancestry in self.children():
            # An <xs:any/> child matches any requested name.
            if child.any() or child.name == name:
                return (child, ancestry)
        return (None, [])
    def namespace(self, prefix=None):
        """
        Get this properties namespace
        @param prefix: The default prefix.
        @type prefix: str
        @return: The schema's target namespace
        @rtype: (I{prefix},I{URI})
        """
        ns = self.schema.tns
        if ns[0] is None:
            ns = (prefix, ns[1])
        return ns
    def default_namespace(self):
        """Get the default namespace declared on the root element."""
        return self.root.defaultNamespace()
    def unbounded(self):
        """
        Get whether this node is unbounded I{(a collection)}
        @return: True if unbounded, else False.
        @rtype: boolean
        """
        max = self.max
        if max is None:
            max = '1'
        if max.isdigit():
            return (int(max) > 1)
        else:
            return ( max == 'unbounded' )
    def optional(self):
        """
        Get whether this type is optional.
        @return: True if optional, else False
        @rtype: boolean
        """
        min = self.min
        if min is None:
            min = '1'
        return ( min == '0' )
    def required(self):
        """
        Get whether this type is required.
        @return: True if required, else False
        @rtype: boolean
        """
        return ( not self.optional() )
    def resolve(self, nobuiltin=False):
        """
        Resolve and return the nodes true self.
        @param nobuiltin: Flag indicates that resolution must
            not continue to include xsd builtins.
        @return: The resolved (true) type.
        @rtype: L{SchemaObject}
        """
        # Subclasses populate self.cache; the base object resolves to itself.
        return self.cache.get(nobuiltin, self)
    def sequence(self):
        """
        Get whether this is an <xs:sequence/>
        @return: True if <xs:sequence/>, else False
        @rtype: boolean
        """
        return False
    def xslist(self):
        """
        Get whether this is an <xs:list/>
        @return: True if any, else False
        @rtype: boolean
        """
        return False
    def all(self):
        """
        Get whether this is an <xs:all/>
        @return: True if any, else False
        @rtype: boolean
        """
        return False
    def choice(self):
        """
        Get whether this is n <xs:choice/>
        @return: True if any, else False
        @rtype: boolean
        """
        return False
    def any(self):
        """
        Get whether this is an <xs:any/>
        @return: True if any, else False
        @rtype: boolean
        """
        return False
    def builtin(self):
        """
        Get whether this is a schema-instance (xs) type.
        @return: True if any, else False
        @rtype: boolean
        """
        return False
    def enum(self):
        """
        Get whether this is a simple-type containing an enumeration.
        @return: True if any, else False
        @rtype: boolean
        """
        return False
    def isattr(self):
        """
        Get whether the object is a schema I{attribute} definition.
        @return: True if an attribute, else False.
        @rtype: boolean
        """
        return False
    def extension(self):
        """
        Get whether the object is an extension of another type.
        @return: True if an extension, else False.
        @rtype: boolean
        """
        return False
    def restriction(self):
        """
        Get whether the object is an restriction of another type.
        @return: True if an restriction, else False.
        @rtype: boolean
        """
        return False
    def mixed(self):
        """
        Get whether this I{mixed} content.
        """
        return False
    def find(self, qref, classes=()):
        """
        Find a referenced type in self or children.
        @param qref: A qualified reference.
        @type qref: qref
        @param classes: A list of classes used to qualify the match.
        @type classes: [I{class},...]
        @return: The referenced type.
        @rtype: L{SchemaObject}
        @see: L{qualify()}
        """
        if not len(classes):
            classes = (self.__class__,)
        if self.qname == qref and self.__class__ in classes:
            return self
        for c in self.rawchildren:
            p = c.find(qref, classes)
            if p is not None:
                return p
        return None
    def translate(self, value, topython=True):
        """
        Translate a value (type) to/from a python type.
        @param value: A value to translate.
        @return: The converted I{language} type.
        """
        return value
    def childtags(self):
        """
        Get a list of valid child tag names.
        @return: A list of child tag names.
        @rtype: [str,...]
        """
        return ()
    def dependencies(self):
        """
        Get a list of dependancies for dereferencing.
        @return: A merge dependancy index and a list of dependancies.
        @rtype: (int, [L{SchemaObject},...])
        """
        return (None, [])
    def autoqualified(self):
        """
        The list of I{auto} qualified attribute values.
        Qualification means to convert values into I{qref}.
        @return: A list of attibute names.
        @rtype: list
        """
        return ['type', 'ref']
    def qualify(self):
        """
        Convert attribute values, that are references to other
        objects, into I{qref}. Qualfied using default document namespace.
        Since many wsdls are written improperly: when the document does
        not define a default namespace, the schema target namespace is used
        to qualify references.
        """
        defns = self.root.defaultNamespace()
        if Namespace.none(defns):
            defns = self.schema.tns
        for a in self.autoqualified():
            ref = getattr(self, a)
            if ref is None:
                continue
            if isqref(ref):
                # Already qualified.
                continue
            qref = qualify(ref, self.root, defns)
            log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
            setattr(self, a, qref)
    def merge(self, other):
        """
        Merge another object as needed.
        Only attributes that are still None on self are copied from other.
        """
        other.qualify()
        for n in ('name',
                  'qname',
                  'min',
                  'max',
                  'default',
                  'type',
                  'nillable',
                  'form_qualified',):
            if getattr(self, n) is not None:
                continue
            v = getattr(other, n)
            if v is None:
                continue
            setattr(self, n, v)
    def content(self, collection=None, filter=Filter(), history=None):
        """
        Get a I{flattened} list of this nodes contents.
        @param collection: A list to fill.
        @type collection: list
        @param filter: A filter used to constrain the result.
        @type filter: L{Filter}
        @param history: The history list used to prevent cyclic dependency.
        @type history: list
        @return: The filled list.
        @rtype: list
        """
        if collection is None:
            collection = []
        if history is None:
            history = []
        if self in history:
            # Cycle detected; stop descending.
            return collection
        history.append(self)
        if self in filter:
            collection.append(self)
        for c in self.rawchildren:
            # Copy history so sibling branches do not see each other.
            c.content(collection, filter, history[:])
        return collection
    def str(self, indent=0, history=None):
        """
        Get a string representation of this object.
        @param indent: The indent.
        @type indent: int
        @return: A string.
        @rtype: str
        """
        if history is None:
            history = []
        if self in history:
            # Cycle detected; elide the repeated subtree.
            return '%s ...' % Repr(self)
        history.append(self)
        tab = '%*s'%(indent*3, '')
        result = []
        result.append('%s<%s' % (tab, self.id))
        for n in self.description():
            if not hasattr(self, n):
                continue
            v = getattr(self, n)
            if v is None:
                continue
            result.append(' %s="%s"' % (n, v))
        if len(self):
            result.append('>')
            for c in self.rawchildren:
                result.append('\n')
                result.append(c.str(indent+1, history[:]))
                if c.isattr():
                    result.append('@')
            result.append('\n%s' % tab)
            result.append('</%s>' % self.__class__.__name__)
        else:
            result.append(' />')
        return ''.join(result)
    def description(self):
        """
        Get the names used for str() and repr() description.
        @return: A dictionary of relavent attributes.
        @rtype: [str,...]
        """
        return ()
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        return unicode(self.str())
    def __repr__(self):
        s = []
        s.append('<%s' % self.id)
        for n in self.description():
            if not hasattr(self, n):
                continue
            v = getattr(self, n)
            if v is None:
                continue
            s.append(' %s="%s"' % (n, v))
        s.append(' />')
        myrep = ''.join(s)
        return myrep.encode('utf-8')
    def __len__(self):
        n = 0
        for x in self: n += 1
        return n
    def __iter__(self):
        return Iter(self)
    def __getitem__(self, index):
        """
        Get the I{index}-th (child, ancestry) tuple yielded by iteration.
        Returns None when the index is out of range (preserving the
        historical no-IndexError behavior).
        """
        i = 0
        for c in self:
            if i == index:
                return c
            # BUGFIX: the counter was never advanced, so any index > 0
            # exhausted the loop and always returned None.
            i += 1
class Iter:
    """
    The content iterator - used to iterate the L{Content} children. The iterator
    provides a I{view} of the children that is free of container elements
    such as <sequence/> and <choice/>.
    @ivar stack: A stack used to control nesting.
    @type stack: list
    """
    class Frame:
        """ A content iterator frame. """
        def __init__(self, sx):
            """
            @param sx: A schema object.
            @type sx: L{SchemaObject}
            """
            self.sx = sx
            # Direct (raw) children of the framed schema object.
            self.items = sx.rawchildren
            # Index of the next child to hand out via next().
            self.index = 0
        def next(self):
            """
            Get the I{next} item in the frame's collection.
            @return: The next item or None
            @rtype: L{SchemaObject}
            """
            # Returns None (rather than raising) when exhausted; the outer
            # Iter.next() treats None as the signal to pop this frame.
            if self.index < len(self.items):
                result = self.items[self.index]
                self.index += 1
                return result
    def __init__(self, sx):
        """
        @param sx: A schema object.
        @type sx: L{SchemaObject}
        """
        self.stack = []
        self.push(sx)
    def push(self, sx):
        """
        Create a frame and push the specified object.
        @param sx: A schema object to push.
        @type sx: L{SchemaObject}
        """
        self.stack.append(Iter.Frame(sx))
    def pop(self):
        """
        Pop the I{top} frame.
        @return: The popped frame.
        @rtype: L{Frame}
        @raise StopIteration: when stack is empty.
        """
        if len(self.stack):
            return self.stack.pop()
        else:
            raise StopIteration()
    def top(self):
        """
        Get the I{top} frame.
        @return: The top frame.
        @rtype: L{Frame}
        @raise StopIteration: when stack is empty.
        """
        if len(self.stack):
            return self.stack[-1]
        else:
            raise StopIteration()
    def next(self):
        """
        Get the next item.
        @return: A tuple: the next (child, ancestry).
        @rtype: (L{SchemaObject}, [L{SchemaObject},..])
        @raise StopIteration: A the end.
        """
        # NOTE: Python 2 iterator protocol (next, not __next__).
        frame = self.top()
        while True:
            result = frame.next()
            if result is None:
                # Current frame exhausted; drop it and continue in the
                # parent frame (pop() raises StopIteration when empty).
                self.pop()
                return self.next()
            if isinstance(result, Content):
                # Real document content: report it with the ancestry of
                # container frames currently on the stack.
                ancestry = [f.sx for f in self.stack]
                return (result, ancestry)
            # A container node (e.g. <sequence/>); descend into it.
            self.push(result)
            return self.next()
    def __iter__(self):
        return self
class XBuiltin(SchemaObject):
    """
    An XSD builtin type node (<xs:*/>).
    """
    def __init__(self, schema, name):
        """
        @param schema: The containing schema.
        @type schema: L{schema.Schema}
        @param name: The builtin type name.
        @type name: str
        """
        SchemaObject.__init__(self, schema, Element(name))
        self.name = name
        # Builtin types are always nillable.
        self.nillable = True
    def namespace(self, prefix=None):
        # Builtins always live in the XSD namespace; the prefix hint
        # is ignored.
        return Namespace.xsdns
    def builtin(self):
        return True
    def resolve(self, nobuiltin=False):
        # A builtin is already fully resolved.
        return self
class Content(SchemaObject):
    """
    This class represents those schema objects that represent
    real XML document content (as opposed to structural containers
    such as <sequence/> and <choice/>, which the L{Iter} view skips).
    """
    # Redundant `pass` removed: a docstring alone is a valid class body.
class NodeFinder:
    """
    Find nodes based on flexable criteria. The I{matcher}
    may be any object that implements a match(n) method.
    @ivar matcher: An object used as criteria for match.
    @type matcher: I{any}.match(n)
    @ivar limit: Limit the number of matches. 0=unlimited.
    @type limit: int
    """
    def __init__(self, matcher, limit=0):
        """
        @param matcher: An object used as criteria for match.
        @type matcher: I{any}.match(n)
        @param limit: Limit the number of matches. 0=unlimited.
        @type limit: int
        """
        self.matcher = matcher
        self.limit = limit
    def find(self, node, list):
        """
        Traverse the tree looking for matches.
        @param node: A node to match on.
        @type node: L{SchemaObject}
        @param list: A list to fill.
        @type list: list
        """
        if self.matcher.match(node):
            list.append(node)
            self.limit -= 1
            if self.limit == 0:
                return
        for c in node.rawchildren:
            self.find(c, list)
            # BUGFIX: propagate limit exhaustion up the recursion.
            # Previously only the frame in which the counter hit zero
            # returned; sibling branches kept matching, driving the
            # counter negative so `== 0` never fired again and the
            # traversal effectively became unlimited.
            if self.limit == 0:
                return
JacobStevenR/scrapy | scrapy/core/scheduler.py | 117 | 4301 | import os
import json
import logging
from os.path import join, exists
from queuelib import PriorityQueue
from scrapy.utils.reqser import request_to_dict, request_from_dict
from scrapy.utils.misc import load_object
from scrapy.utils.job import job_dir
logger = logging.getLogger(__name__)
class Scheduler(object):
    """
    Default Scrapy scheduler.
    Pending requests are kept in a memory priority queue and, when a job
    directory is configured, mirrored to disk-backed queues so a crawl
    can be paused and resumed across runs.
    """
    def __init__(self, dupefilter, jobdir=None, dqclass=None, mqclass=None, logunser=False, stats=None):
        # Duplicate filter; requests it has already seen are dropped.
        self.df = dupefilter
        # Directory for the disk queues; None disables disk persistence.
        self.dqdir = self._dqdir(jobdir)
        # Queue classes for the per-priority disk and memory queues.
        self.dqclass = dqclass
        self.mqclass = mqclass
        # Whether to log requests that cannot be serialized to disk.
        self.logunser = logunser
        self.stats = stats
    @classmethod
    def from_crawler(cls, crawler):
        """Build a scheduler configured from the crawler's settings."""
        settings = crawler.settings
        dupefilter_cls = load_object(settings['DUPEFILTER_CLASS'])
        dupefilter = dupefilter_cls.from_settings(settings)
        dqclass = load_object(settings['SCHEDULER_DISK_QUEUE'])
        mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE'])
        logunser = settings.getbool('LOG_UNSERIALIZABLE_REQUESTS')
        return cls(dupefilter, job_dir(settings), dqclass, mqclass, logunser, crawler.stats)
    def has_pending_requests(self):
        return len(self) > 0
    def open(self, spider):
        """Set up the queues when the spider is opened."""
        self.spider = spider
        self.mqs = PriorityQueue(self._newmq)
        # Disk queue only when a job directory was configured.
        self.dqs = self._dq() if self.dqdir else None
        return self.df.open()
    def close(self, reason):
        """Persist active disk-queue priorities and close the dupefilter."""
        if self.dqs:
            prios = self.dqs.close()
            # Remember which priority levels were active so _dq() can
            # reopen them when the crawl is resumed.
            with open(join(self.dqdir, 'active.json'), 'w') as f:
                json.dump(prios, f)
        return self.df.close(reason)
    def enqueue_request(self, request):
        # Drop duplicates unless the request explicitly opts out.
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        # Prefer the disk queue; fall back to memory when there is no
        # disk queue or the request cannot be serialized.
        dqok = self._dqpush(request)
        if dqok:
            self.stats.inc_value('scheduler/enqueued/disk', spider=self.spider)
        else:
            self._mqpush(request)
            self.stats.inc_value('scheduler/enqueued/memory', spider=self.spider)
        self.stats.inc_value('scheduler/enqueued', spider=self.spider)
        return True
    def next_request(self):
        # Memory queue takes precedence over the disk queue.
        request = self.mqs.pop()
        if request:
            self.stats.inc_value('scheduler/dequeued/memory', spider=self.spider)
        else:
            request = self._dqpop()
            if request:
                self.stats.inc_value('scheduler/dequeued/disk', spider=self.spider)
        if request:
            self.stats.inc_value('scheduler/dequeued', spider=self.spider)
        return request
    def __len__(self):
        return len(self.dqs) + len(self.mqs) if self.dqs else len(self.mqs)
    def _dqpush(self, request):
        """Try to push to the disk queue; return True on success, None otherwise."""
        if self.dqs is None:
            return
        try:
            reqd = request_to_dict(request, self.spider)
            # PriorityQueue pops the lowest value first, hence the negation.
            self.dqs.push(reqd, -request.priority)
        except ValueError as e: # non serializable request
            if self.logunser:
                logger.error("Unable to serialize request: %(request)s - reason: %(reason)s",
                             {'request': request, 'reason': e},
                             exc_info=True, extra={'spider': self.spider})
            return
        else:
            return True
    def _mqpush(self, request):
        self.mqs.push(request, -request.priority)
    def _dqpop(self):
        if self.dqs:
            d = self.dqs.pop()
            if d:
                return request_from_dict(d, self.spider)
    def _newmq(self, priority):
        # Factory for a per-priority memory queue.
        return self.mqclass()
    def _newdq(self, priority):
        # Factory for a per-priority on-disk queue file.
        return self.dqclass(join(self.dqdir, 'p%s' % priority))
    def _dq(self):
        """Open the disk priority queue, resuming previously saved priorities."""
        activef = join(self.dqdir, 'active.json')
        if exists(activef):
            with open(activef) as f:
                prios = json.load(f)
        else:
            prios = ()
        q = PriorityQueue(self._newdq, startprios=prios)
        if q:
            logger.info("Resuming crawl (%(queuesize)d requests scheduled)",
                        {'queuesize': len(q)}, extra={'spider': self.spider})
        return q
    def _dqdir(self, jobdir):
        """Return (creating if needed) the requests.queue directory, or None."""
        if jobdir:
            dqdir = join(jobdir, 'requests.queue')
            if not exists(dqdir):
                os.makedirs(dqdir)
            return dqdir
| bsd-3-clause |
dcroc16/skunk_works | google_appengine/lib/django-1.3/tests/regressiontests/forms/localflavor/pl.py | 87 | 21811 | from django.contrib.localflavor.pl.forms import (PLProvinceSelect,
PLCountySelect, PLPostalCodeField, PLNIPField, PLPESELField, PLREGONField)
from utils import LocalFlavorTestCase
class PLLocalFlavorTests(LocalFlavorTestCase):
    def test_PLProvinceSelect(self):
        """Rendering PLProvinceSelect marks the given voivodeship as selected."""
        f = PLProvinceSelect()
        # Expected markup must match the widget output byte-for-byte.
        out = u'''<select name="voivodeships">
<option value="lower_silesia">Lower Silesia</option>
<option value="kuyavia-pomerania">Kuyavia-Pomerania</option>
<option value="lublin">Lublin</option>
<option value="lubusz">Lubusz</option>
<option value="lodz">Lodz</option>
<option value="lesser_poland">Lesser Poland</option>
<option value="masovia">Masovia</option>
<option value="opole">Opole</option>
<option value="subcarpatia">Subcarpatia</option>
<option value="podlasie">Podlasie</option>
<option value="pomerania" selected="selected">Pomerania</option>
<option value="silesia">Silesia</option>
<option value="swietokrzyskie">Swietokrzyskie</option>
<option value="warmia-masuria">Warmia-Masuria</option>
<option value="greater_poland">Greater Poland</option>
<option value="west_pomerania">West Pomerania</option>
</select>'''
        self.assertEqual(f.render('voivodeships', 'pomerania'), out)
def test_PLCountrySelect(self):
f = PLCountySelect()
out = u'''<select name="administrativeunit">
<option value="wroclaw">Wroc\u0142aw</option>
<option value="jeleniagora">Jelenia G\xf3ra</option>
<option value="legnica">Legnica</option>
<option value="boleslawiecki">boles\u0142awiecki</option>
<option value="dzierzoniowski">dzier\u017coniowski</option>
<option value="glogowski">g\u0142ogowski</option>
<option value="gorowski">g\xf3rowski</option>
<option value="jaworski">jaworski</option>
<option value="jeleniogorski">jeleniog\xf3rski</option>
<option value="kamiennogorski">kamiennog\xf3rski</option>
<option value="klodzki">k\u0142odzki</option>
<option value="legnicki">legnicki</option>
<option value="lubanski">luba\u0144ski</option>
<option value="lubinski">lubi\u0144ski</option>
<option value="lwowecki">lw\xf3wecki</option>
<option value="milicki">milicki</option>
<option value="olesnicki">ole\u015bnicki</option>
<option value="olawski">o\u0142awski</option>
<option value="polkowicki">polkowicki</option>
<option value="strzelinski">strzeli\u0144ski</option>
<option value="sredzki">\u015bredzki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="trzebnicki">trzebnicki</option>
<option value="walbrzyski">wa\u0142brzyski</option>
<option value="wolowski">wo\u0142owski</option>
<option value="wroclawski">wroc\u0142awski</option>
<option value="zabkowicki">z\u0105bkowicki</option>
<option value="zgorzelecki">zgorzelecki</option>
<option value="zlotoryjski">z\u0142otoryjski</option>
<option value="bydgoszcz">Bydgoszcz</option>
<option value="torun">Toru\u0144</option>
<option value="wloclawek">W\u0142oc\u0142awek</option>
<option value="grudziadz">Grudzi\u0105dz</option>
<option value="aleksandrowski">aleksandrowski</option>
<option value="brodnicki">brodnicki</option>
<option value="bydgoski">bydgoski</option>
<option value="chelminski">che\u0142mi\u0144ski</option>
<option value="golubsko-dobrzynski">golubsko-dobrzy\u0144ski</option>
<option value="grudziadzki">grudzi\u0105dzki</option>
<option value="inowroclawski">inowroc\u0142awski</option>
<option value="lipnowski">lipnowski</option>
<option value="mogilenski">mogile\u0144ski</option>
<option value="nakielski">nakielski</option>
<option value="radziejowski">radziejowski</option>
<option value="rypinski">rypi\u0144ski</option>
<option value="sepolenski">s\u0119pole\u0144ski</option>
<option value="swiecki">\u015bwiecki</option>
<option value="torunski">toru\u0144ski</option>
<option value="tucholski">tucholski</option>
<option value="wabrzeski">w\u0105brzeski</option>
<option value="wloclawski">wroc\u0142awski</option>
<option value="zninski">\u017ani\u0144ski</option>
<option value="lublin">Lublin</option>
<option value="biala-podlaska">Bia\u0142a Podlaska</option>
<option value="chelm">Che\u0142m</option>
<option value="zamosc">Zamo\u015b\u0107</option>
<option value="bialski">bialski</option>
<option value="bilgorajski">bi\u0142gorajski</option>
<option value="chelmski">che\u0142mski</option>
<option value="hrubieszowski">hrubieszowski</option>
<option value="janowski">janowski</option>
<option value="krasnostawski">krasnostawski</option>
<option value="krasnicki">kra\u015bnicki</option>
<option value="lubartowski">lubartowski</option>
<option value="lubelski">lubelski</option>
<option value="leczynski">\u0142\u0119czy\u0144ski</option>
<option value="lukowski">\u0142ukowski</option>
<option value="opolski">opolski</option>
<option value="parczewski">parczewski</option>
<option value="pulawski">pu\u0142awski</option>
<option value="radzynski">radzy\u0144ski</option>
<option value="rycki">rycki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wlodawski">w\u0142odawski</option>
<option value="zamojski">zamojski</option>
<option value="gorzow-wielkopolski">Gorz\xf3w Wielkopolski</option>
<option value="zielona-gora">Zielona G\xf3ra</option>
<option value="gorzowski">gorzowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="miedzyrzecki">mi\u0119dzyrzecki</option>
<option value="nowosolski">nowosolski</option>
<option value="slubicki">s\u0142ubicki</option>
<option value="strzelecko-drezdenecki">strzelecko-drezdenecki</option>
<option value="sulecinski">sule\u0144ci\u0144ski</option>
<option value="swiebodzinski">\u015bwiebodzi\u0144ski</option>
<option value="wschowski">wschowski</option>
<option value="zielonogorski">zielonog\xf3rski</option>
<option value="zaganski">\u017caga\u0144ski</option>
<option value="zarski">\u017carski</option>
<option value="lodz">\u0141\xf3d\u017a</option>
<option value="piotrkow-trybunalski">Piotrk\xf3w Trybunalski</option>
<option value="skierniewice">Skierniewice</option>
<option value="belchatowski">be\u0142chatowski</option>
<option value="brzezinski">brzezi\u0144ski</option>
<option value="kutnowski">kutnowski</option>
<option value="laski">\u0142aski</option>
<option value="leczycki">\u0142\u0119czycki</option>
<option value="lowicki">\u0142owicki</option>
<option value="lodzki wschodni">\u0142\xf3dzki wschodni</option>
<option value="opoczynski">opoczy\u0144ski</option>
<option value="pabianicki">pabianicki</option>
<option value="pajeczanski">paj\u0119cza\u0144ski</option>
<option value="piotrkowski">piotrkowski</option>
<option value="poddebicki">podd\u0119bicki</option>
<option value="radomszczanski">radomszcza\u0144ski</option>
<option value="rawski">rawski</option>
<option value="sieradzki">sieradzki</option>
<option value="skierniewicki">skierniewicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wielunski">wielu\u0144ski</option>
<option value="wieruszowski">wieruszowski</option>
<option value="zdunskowolski">zdu\u0144skowolski</option>
<option value="zgierski">zgierski</option>
<option value="krakow">Krak\xf3w</option>
<option value="tarnow">Tarn\xf3w</option>
<option value="nowy-sacz">Nowy S\u0105cz</option>
<option value="bochenski">boche\u0144ski</option>
<option value="brzeski">brzeski</option>
<option value="chrzanowski">chrzanowski</option>
<option value="dabrowski">d\u0105browski</option>
<option value="gorlicki">gorlicki</option>
<option value="krakowski">krakowski</option>
<option value="limanowski">limanowski</option>
<option value="miechowski">miechowski</option>
<option value="myslenicki">my\u015blenicki</option>
<option value="nowosadecki">nowos\u0105decki</option>
<option value="nowotarski">nowotarski</option>
<option value="olkuski">olkuski</option>
<option value="oswiecimski">o\u015bwi\u0119cimski</option>
<option value="proszowicki">proszowicki</option>
<option value="suski">suski</option>
<option value="tarnowski">tarnowski</option>
<option value="tatrzanski">tatrza\u0144ski</option>
<option value="wadowicki">wadowicki</option>
<option value="wielicki">wielicki</option>
<option value="warszawa">Warszawa</option>
<option value="ostroleka">Ostro\u0142\u0119ka</option>
<option value="plock">P\u0142ock</option>
<option value="radom">Radom</option>
<option value="siedlce">Siedlce</option>
<option value="bialobrzeski">bia\u0142obrzeski</option>
<option value="ciechanowski">ciechanowski</option>
<option value="garwolinski">garwoli\u0144ski</option>
<option value="gostyninski">gostyni\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="grojecki">gr\xf3jecki</option>
<option value="kozienicki">kozenicki</option>
<option value="legionowski">legionowski</option>
<option value="lipski">lipski</option>
<option value="losicki">\u0142osicki</option>
<option value="makowski">makowski</option>
<option value="minski">mi\u0144ski</option>
<option value="mlawski">m\u0142awski</option>
<option value="nowodworski">nowodworski</option>
<option value="ostrolecki">ostro\u0142\u0119cki</option>
<option value="ostrowski">ostrowski</option>
<option value="otwocki">otwocki</option>
<option value="piaseczynski">piaseczy\u0144ski</option>
<option value="plocki">p\u0142ocki</option>
<option value="plonski">p\u0142o\u0144ski</option>
<option value="pruszkowski">pruszkowski</option>
<option value="przasnyski">przasnyski</option>
<option value="przysuski">przysuski</option>
<option value="pultuski">pu\u0142tuski</option>
<option value="radomski">radomski</option>
<option value="siedlecki">siedlecki</option>
<option value="sierpecki">sierpecki</option>
<option value="sochaczewski">sochaczewski</option>
<option value="sokolowski">soko\u0142owski</option>
<option value="szydlowiecki">szyd\u0142owiecki</option>
<option value="warszawski-zachodni">warszawski zachodni</option>
<option value="wegrowski">w\u0119growski</option>
<option value="wolominski">wo\u0142omi\u0144ski</option>
<option value="wyszkowski">wyszkowski</option>
<option value="zwolenski">zwole\u0144ski</option>
<option value="zurominski">\u017curomi\u0144ski</option>
<option value="zyrardowski">\u017cyrardowski</option>
<option value="opole">Opole</option>
<option value="brzeski">brzeski</option>
<option value="glubczycki">g\u0142ubczyski</option>
<option value="kedzierzynsko-kozielski">k\u0119dzierzy\u0144ski-kozielski</option>
<option value="kluczborski">kluczborski</option>
<option value="krapkowicki">krapkowicki</option>
<option value="namyslowski">namys\u0142owski</option>
<option value="nyski">nyski</option>
<option value="oleski">oleski</option>
<option value="opolski">opolski</option>
<option value="prudnicki">prudnicki</option>
<option value="strzelecki">strzelecki</option>
<option value="rzeszow">Rzesz\xf3w</option>
<option value="krosno">Krosno</option>
<option value="przemysl">Przemy\u015bl</option>
<option value="tarnobrzeg">Tarnobrzeg</option>
<option value="bieszczadzki">bieszczadzki</option>
<option value="brzozowski">brzozowski</option>
<option value="debicki">d\u0119bicki</option>
<option value="jaroslawski">jaros\u0142awski</option>
<option value="jasielski">jasielski</option>
<option value="kolbuszowski">kolbuszowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="leski">leski</option>
<option value="lezajski">le\u017cajski</option>
<option value="lubaczowski">lubaczowski</option>
<option value="lancucki">\u0142a\u0144cucki</option>
<option value="mielecki">mielecki</option>
<option value="nizanski">ni\u017ca\u0144ski</option>
<option value="przemyski">przemyski</option>
<option value="przeworski">przeworski</option>
<option value="ropczycko-sedziszowski">ropczycko-s\u0119dziszowski</option>
<option value="rzeszowski">rzeszowski</option>
<option value="sanocki">sanocki</option>
<option value="stalowowolski">stalowowolski</option>
<option value="strzyzowski">strzy\u017cowski</option>
<option value="tarnobrzeski">tarnobrzeski</option>
<option value="bialystok">Bia\u0142ystok</option>
<option value="lomza">\u0141om\u017ca</option>
<option value="suwalki">Suwa\u0142ki</option>
<option value="augustowski">augustowski</option>
<option value="bialostocki">bia\u0142ostocki</option>
<option value="bielski">bielski</option>
<option value="grajewski">grajewski</option>
<option value="hajnowski">hajnowski</option>
<option value="kolnenski">kolne\u0144ski</option>
<option value="\u0142omzynski">\u0142om\u017cy\u0144ski</option>
<option value="moniecki">moniecki</option>
<option value="sejnenski">sejne\u0144ski</option>
<option value="siemiatycki">siematycki</option>
<option value="sokolski">sok\xf3lski</option>
<option value="suwalski">suwalski</option>
<option value="wysokomazowiecki">wysokomazowiecki</option>
<option value="zambrowski">zambrowski</option>
<option value="gdansk">Gda\u0144sk</option>
<option value="gdynia">Gdynia</option>
<option value="slupsk">S\u0142upsk</option>
<option value="sopot">Sopot</option>
<option value="bytowski">bytowski</option>
<option value="chojnicki">chojnicki</option>
<option value="czluchowski">cz\u0142uchowski</option>
<option value="kartuski">kartuski</option>
<option value="koscierski">ko\u015bcierski</option>
<option value="kwidzynski">kwidzy\u0144ski</option>
<option value="leborski">l\u0119borski</option>
<option value="malborski">malborski</option>
<option value="nowodworski">nowodworski</option>
<option value="gdanski">gda\u0144ski</option>
<option value="pucki">pucki</option>
<option value="slupski">s\u0142upski</option>
<option value="starogardzki">starogardzki</option>
<option value="sztumski">sztumski</option>
<option value="tczewski">tczewski</option>
<option value="wejherowski">wejcherowski</option>
<option value="katowice" selected="selected">Katowice</option>
<option value="bielsko-biala">Bielsko-Bia\u0142a</option>
<option value="bytom">Bytom</option>
<option value="chorzow">Chorz\xf3w</option>
<option value="czestochowa">Cz\u0119stochowa</option>
<option value="dabrowa-gornicza">D\u0105browa G\xf3rnicza</option>
<option value="gliwice">Gliwice</option>
<option value="jastrzebie-zdroj">Jastrz\u0119bie Zdr\xf3j</option>
<option value="jaworzno">Jaworzno</option>
<option value="myslowice">Mys\u0142owice</option>
<option value="piekary-slaskie">Piekary \u015al\u0105skie</option>
<option value="ruda-slaska">Ruda \u015al\u0105ska</option>
<option value="rybnik">Rybnik</option>
<option value="siemianowice-slaskie">Siemianowice \u015al\u0105skie</option>
<option value="sosnowiec">Sosnowiec</option>
<option value="swietochlowice">\u015awi\u0119toch\u0142owice</option>
<option value="tychy">Tychy</option>
<option value="zabrze">Zabrze</option>
<option value="zory">\u017bory</option>
<option value="bedzinski">b\u0119dzi\u0144ski</option>
<option value="bielski">bielski</option>
<option value="bierunsko-ledzinski">bieru\u0144sko-l\u0119dzi\u0144ski</option>
<option value="cieszynski">cieszy\u0144ski</option>
<option value="czestochowski">cz\u0119stochowski</option>
<option value="gliwicki">gliwicki</option>
<option value="klobucki">k\u0142obucki</option>
<option value="lubliniecki">lubliniecki</option>
<option value="mikolowski">miko\u0142owski</option>
<option value="myszkowski">myszkowski</option>
<option value="pszczynski">pszczy\u0144ski</option>
<option value="raciborski">raciborski</option>
<option value="rybnicki">rybnicki</option>
<option value="tarnogorski">tarnog\xf3rski</option>
<option value="wodzislawski">wodzis\u0142awski</option>
<option value="zawiercianski">zawiercia\u0144ski</option>
<option value="zywiecki">\u017cywiecki</option>
<option value="kielce">Kielce</option>
<option value="buski">buski</option>
<option value="jedrzejowski">j\u0119drzejowski</option>
<option value="kazimierski">kazimierski</option>
<option value="kielecki">kielecki</option>
<option value="konecki">konecki</option>
<option value="opatowski">opatowski</option>
<option value="ostrowiecki">ostrowiecki</option>
<option value="pinczowski">pi\u0144czowski</option>
<option value="sandomierski">sandomierski</option>
<option value="skarzyski">skar\u017cyski</option>
<option value="starachowicki">starachowicki</option>
<option value="staszowski">staszowski</option>
<option value="wloszczowski">w\u0142oszczowski</option>
<option value="olsztyn">Olsztyn</option>
<option value="elblag">Elbl\u0105g</option>
<option value="bartoszycki">bartoszycki</option>
<option value="braniewski">braniewski</option>
<option value="dzialdowski">dzia\u0142dowski</option>
<option value="elblaski">elbl\u0105ski</option>
<option value="elcki">e\u0142cki</option>
<option value="gizycki">gi\u017cycki</option>
<option value="goldapski">go\u0142dapski</option>
<option value="ilawski">i\u0142awski</option>
<option value="ketrzynski">k\u0119trzy\u0144ski</option>
<option value="lidzbarski">lidzbarski</option>
<option value="mragowski">mr\u0105gowski</option>
<option value="nidzicki">nidzicki</option>
<option value="nowomiejski">nowomiejski</option>
<option value="olecki">olecki</option>
<option value="olsztynski">olszty\u0144ski</option>
<option value="ostrodzki">ostr\xf3dzki</option>
<option value="piski">piski</option>
<option value="szczycienski">szczycie\u0144ski</option>
<option value="wegorzewski">w\u0119gorzewski</option>
<option value="poznan">Pozna\u0144</option>
<option value="kalisz">Kalisz</option>
<option value="konin">Konin</option>
<option value="leszno">Leszno</option>
<option value="chodzieski">chodziejski</option>
<option value="czarnkowsko-trzcianecki">czarnkowsko-trzcianecki</option>
<option value="gnieznienski">gnie\u017anie\u0144ski</option>
<option value="gostynski">gosty\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="jarocinski">jaroci\u0144ski</option>
<option value="kaliski">kaliski</option>
<option value="kepinski">k\u0119pi\u0144ski</option>
<option value="kolski">kolski</option>
<option value="koninski">koni\u0144ski</option>
<option value="koscianski">ko\u015bcia\u0144ski</option>
<option value="krotoszynski">krotoszy\u0144ski</option>
<option value="leszczynski">leszczy\u0144ski</option>
<option value="miedzychodzki">mi\u0119dzychodzki</option>
<option value="nowotomyski">nowotomyski</option>
<option value="obornicki">obornicki</option>
<option value="ostrowski">ostrowski</option>
<option value="ostrzeszowski">ostrzeszowski</option>
<option value="pilski">pilski</option>
<option value="pleszewski">pleszewski</option>
<option value="poznanski">pozna\u0144ski</option>
<option value="rawicki">rawicki</option>
<option value="slupecki">s\u0142upecki</option>
<option value="szamotulski">szamotulski</option>
<option value="sredzki">\u015bredzki</option>
<option value="sremski">\u015bremski</option>
<option value="turecki">turecki</option>
<option value="wagrowiecki">w\u0105growiecki</option>
<option value="wolsztynski">wolszty\u0144ski</option>
<option value="wrzesinski">wrzesi\u0144ski</option>
<option value="zlotowski">z\u0142otowski</option>
<option value="bialogardzki">bia\u0142ogardzki</option>
<option value="choszczenski">choszcze\u0144ski</option>
<option value="drawski">drawski</option>
<option value="goleniowski">goleniowski</option>
<option value="gryficki">gryficki</option>
<option value="gryfinski">gryfi\u0144ski</option>
<option value="kamienski">kamie\u0144ski</option>
<option value="kolobrzeski">ko\u0142obrzeski</option>
<option value="koszalinski">koszali\u0144ski</option>
<option value="lobeski">\u0142obeski</option>
<option value="mysliborski">my\u015bliborski</option>
<option value="policki">policki</option>
<option value="pyrzycki">pyrzycki</option>
<option value="slawienski">s\u0142awie\u0144ski</option>
<option value="stargardzki">stargardzki</option>
<option value="szczecinecki">szczecinecki</option>
<option value="swidwinski">\u015bwidwi\u0144ski</option>
<option value="walecki">wa\u0142ecki</option>
</select>'''
self.assertEqual(f.render('administrativeunit', 'katowice'), out)
def test_PLPostalCodeField(self):
    """PLPostalCodeField accepts XX-XXX codes and rejects malformed ones."""
    format_errors = [u'Enter a postal code in the format XX-XXX.']
    accepted = {'41-403': '41-403'}
    rejected = {'43--434': format_errors}
    self.assertFieldOutput(PLPostalCodeField, accepted, rejected)
def test_PLNIPField(self):
    """PLNIPField strips dashes from valid NIP numbers and validates both
    the accepted dash layouts and the checksum digit."""
    bad_format = [u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.']
    bad_checksum = [u'Wrong checksum for the Tax Number (NIP).']
    accepted = {
        '64-62-414-124': '6462414124',
        '646-241-41-24': '6462414124',
    }
    rejected = {
        '43-343-234-323': bad_format,
        '646-241-41-23': bad_checksum,
    }
    self.assertFieldOutput(PLNIPField, accepted, rejected)
def test_PLPESELField(self):
    """PLPESELField enforces the 11-digit format and the PESEL checksum."""
    checksum_errors = [u'Wrong checksum for the National Identification Number.']
    format_errors = [u'National Identification Number consists of 11 digits.']
    accepted = {'80071610614': '80071610614'}
    rejected = {
        '80071610610': checksum_errors,
        '80': format_errors,
        '800716106AA': format_errors,
    }
    self.assertFieldOutput(PLPESELField, accepted, rejected)
def test_PLREGONField(self):
    """PLREGONField accepts checksummed 9- and 14-digit REGON numbers."""
    checksum_errors = [u'Wrong checksum for the National Business Register Number (REGON).']
    format_errors = [u'National Business Register Number (REGON) consists of 9 or 14 digits.']
    accepted = {
        '12345678512347': '12345678512347',
        '590096454': '590096454',
    }
    rejected = {
        '123456784': checksum_errors,
        '12345678412342': checksum_errors,
        '590096453': checksum_errors,
        '590096': format_errors,
    }
    self.assertFieldOutput(PLREGONField, accepted, rejected)
| mit |
binhqnguyen/lena | nsc/scons-local-1.2.0.d20090223/SCons/CacheDir.py | 19 | 8006 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/CacheDir.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """
CacheDir support
"""
import os.path
import stat
import string
import sys
import SCons.Action
# Module-level switches, reassigned from outside this module:
cache_enabled = True   # master switch consulted by CacheDir.is_enabled()
cache_debug = False    # False: no trace; '-': trace to stdout; other true value: trace file name (see CacheDir.CacheDebug)
cache_force = False    # when true, CacheDir.push_if_forced() actually pushes
cache_show = False     # when true, retrieve() shows the build action for cache hits instead of the retrieval message
def CacheRetrieveFunc(target, source, env):
    """Copy target[0] out of the derived-file cache.

    Returns 0 on a successful retrieval and 1 on a cache miss (the
    action-status convention: non-zero means "not done").
    """
    node = target[0]
    node_fs = node.fs
    cache = env.get_CacheDir()
    cachedir, cachefile = cache.cachepath(node)
    if not node_fs.exists(cachefile):
        cache.CacheDebug('CacheRetrieve(%s): %s not in cache\n', node, cachefile)
        return 1
    cache.CacheDebug('CacheRetrieve(%s): retrieving from %s\n', node, cachefile)
    if SCons.Action.execute_actions:
        # Recreate symlinks as symlinks, copy regular files, then make the
        # retrieved file writable while keeping the cached permission bits.
        if node_fs.islink(cachefile):
            node_fs.symlink(node_fs.readlink(cachefile), node.path)
        else:
            env.copy_from_cache(cachefile, node.path)
        cached_stat = node_fs.stat(cachefile)
        node_fs.chmod(node.path,
                      stat.S_IMODE(cached_stat[stat.ST_MODE]) | stat.S_IWRITE)
    return 0
def CacheRetrieveString(target, source, env):
    """Describe a pending cache retrieval of target[0].

    Returns the message string when the file is present in the cache,
    otherwise None (meaning there is nothing to show).
    """
    node = target[0]
    cache = env.get_CacheDir()
    cachedir, cachefile = cache.cachepath(node)
    if not node.fs.exists(cachefile):
        return None
    return "Retrieved `%s' from cache" % node.path
# Action pair used by the build engine: the first prints the
# "Retrieved ... from cache" message, the Silent variant runs without output.
CacheRetrieve = SCons.Action.Action(CacheRetrieveFunc, CacheRetrieveString)
CacheRetrieveSilent = SCons.Action.Action(CacheRetrieveFunc, None)
def CachePushFunc(target, source, env):
    """Copy the built file target[0] into the derived-file cache.

    Failures to push are reported as warnings, never build errors: an
    unusable cache does not affect the correctness of the build.
    NOTE(review): SCons.Errors / SCons.Warnings are not imported in this
    module; presumably resolved as attributes of the SCons package pulled
    in by ``import SCons.Action`` -- confirm.
    """
    t = target[0]
    if t.nocache:
        # Node was explicitly marked NoCache().
        return
    fs = t.fs
    cd = env.get_CacheDir()
    cachedir, cachefile = cd.cachepath(t)
    if fs.exists(cachefile):
        # Don't bother copying it if it's already there.  Note that
        # usually this "shouldn't happen" because if the file already
        # existed in cache, we'd have retrieved the file from there,
        # not built it.  This can happen, though, in a race, if some
        # other person running the same build pushes their copy to
        # the cache after we decide we need to build it but before our
        # build completes.
        cd.CacheDebug('CachePush(%s): %s already exists in cache\n', t, cachefile)
        return

    cd.CacheDebug('CachePush(%s): pushing to %s\n', t, cachefile)

    # Write to a pid-suffixed temp file first, then rename into place, so
    # a concurrent reader never observes a half-written cache entry.
    tempfile = cachefile+'.tmp'+str(os.getpid())
    errfmt = "Unable to copy %s to cache. Cache file is %s"

    if not fs.isdir(cachedir):
        try:
            fs.makedirs(cachedir)
        except EnvironmentError:
            # We may have received an exception because another process
            # has beaten us creating the directory.
            if not fs.isdir(cachedir):
                msg = errfmt % (str(target), cachefile)
                raise SCons.Errors.EnvironmentError, msg

    try:
        if fs.islink(t.path):
            fs.symlink(fs.readlink(t.path), tempfile)
        else:
            fs.copy2(t.path, tempfile)
        fs.rename(tempfile, cachefile)
        # Mirror the source file's permission bits, forcing writability so
        # a later retrieval can chmod/overwrite the entry.
        st = fs.stat(t.path)
        fs.chmod(cachefile, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
    except EnvironmentError:
        # It's possible someone else tried writing the file at the
        # same time we did, or else that there was some problem like
        # the CacheDir being on a separate file system that's full.
        # In any case, inability to push a file to cache doesn't affect
        # the correctness of the build, so just print a warning.
        msg = errfmt % (str(target), cachefile)
        SCons.Warnings.warn(SCons.Warnings.CacheWriteErrorWarning, msg)
CachePush = SCons.Action.Action(CachePushFunc, None)
class CacheDir:
    """A derived-file cache rooted at a single directory.

    Cache entries are named by the node's cache build signature and sharded
    into one-character subdirectories taken from the signature's first
    (upper-cased) hex digit.
    """

    def __init__(self, path):
        """Remember *path* as the cache root.

        If no MD5-capable hash module can be imported, warn and disable
        this cache by recording a path of None.
        """
        try:
            import hashlib
        except ImportError:
            msg = "No hashlib or MD5 module available, CacheDir() not supported"
            SCons.Warnings.warn(SCons.Warnings.NoMD5ModuleWarning, msg)
            self.path = None
        else:
            self.path = path
        # Last-seen value of the module-level cache_debug setting, and the
        # stream the debug trace is currently written to (None when off).
        self.current_cache_debug = None
        self.debugFP = None

    def CacheDebug(self, fmt, target, cachefile):
        """Write one formatted trace line if cache debugging is enabled.

        The destination is re-resolved whenever the module-level
        cache_debug setting changes: '-' means stdout, any other true
        value is opened as a file name, a false value disables tracing.
        """
        if cache_debug != self.current_cache_debug:
            if cache_debug == '-':
                self.debugFP = sys.stdout
            elif cache_debug:
                # NOTE(review): a previously opened debug file is never
                # closed here; harmless for a long-lived singleton -- confirm.
                self.debugFP = open(cache_debug, 'w')
            else:
                self.debugFP = None
            self.current_cache_debug = cache_debug
        if self.debugFP:
            self.debugFP.write(fmt % (target, os.path.split(cachefile)[1]))

    def is_enabled(self):
        """Return true when caching is globally enabled and a path is set."""
        return (cache_enabled and not self.path is None)

    def cachepath(self, node):
        """Return the (directory, file) cache locations for *node*.

        Returns (None, None) when the cache is disabled.  The file name is
        the node's cache build signature; the directory is the signature's
        first hex digit, upper-cased, under the cache root.
        """
        if not self.is_enabled():
            return None, None

        sig = node.get_cachedir_bsig()
        subdir = string.upper(sig[0])
        dir = os.path.join(self.path, subdir)
        return dir, os.path.join(dir, sig)

    def retrieve(self, node):
        """Try to satisfy *node* from the cache; return True on success.

        This method is called from multiple threads in a parallel build,
        so only do thread safe stuff here. Do thread unsafe stuff in
        built().

        Note that there's a special trick here with the execute flag
        (one that's not normally done for other actions).  Basically
        if the user requested a no_exec (-n) build, then
        SCons.Action.execute_actions is set to 0 and when any action
        is called, it does its showing but then just returns zero
        instead of actually calling the action execution operation.
        The problem for caching is that if the file does NOT exist in
        cache then the CacheRetrieveString won't return anything to
        show for the task, but the Action.__call__ won't call
        CacheRetrieveFunc; instead it just returns zero, which makes
        the code below think that the file *was* successfully
        retrieved from the cache, therefore it doesn't do any
        subsequent building.  However, the CacheRetrieveString didn't
        print anything because it didn't actually exist in the cache,
        and no more build actions will be performed, so the user just
        sees nothing.  The fix is to tell Action.__call__ to always
        execute the CacheRetrieveFunc and then have the latter
        explicitly check SCons.Action.execute_actions itself.
        """
        if not self.is_enabled():
            return False

        env = node.get_build_env()
        if cache_show:
            # --cache-show: on a hit, display the normal build action
            # (without executing it) instead of the retrieval message.
            if CacheRetrieveSilent(node, [], env, execute=1) == 0:
                node.build(presub=0, execute=0)
                return True
        else:
            if CacheRetrieve(node, [], env, execute=1) == 0:
                return True

        return False

    def push(self, node):
        """Unconditionally push *node* into the cache (no-op when disabled)."""
        if not self.is_enabled():
            return
        return CachePush(node, [], node.get_build_env())

    def push_if_forced(self, node):
        """Push *node* only when the module-level cache_force flag is set."""
        if cache_force:
            return self.push(node)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
plissonf/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
    """Option coercer: pass the literal 'auto' through, else convert to float."""
    return "auto" if val == "auto" else float(val)
def type_auto_or_int(val):
    """Option coercer: pass the literal 'auto' through, else convert to int."""
    return "auto" if val == "auto" else int(val)
def compute_time(t_start, delta):
    """Convert a measured duration into float seconds.

    Parameters
    ----------
    t_start : datetime.datetime
        Unused; kept only for backward compatibility with existing callers.
    delta : datetime.timedelta
        The measured duration.

    Returns
    -------
    float
        Duration in seconds including fractional microseconds.  Uses
        timedelta.total_seconds(), so (unlike the previous
        ``seconds + microseconds/1e6`` form) the ``days`` component is
        no longer silently dropped.
    """
    return delta.total_seconds()
def bench_scikit_transformer(X, transfomer):
    """Time fit() then transform() of a fresh clone of *transfomer* on X.

    Returns (time_to_fit, time_to_transform) in seconds.  The estimator is
    cloned so repeated benchmark runs never reuse fitted state, and the
    garbage collector is run first to reduce timing noise.
    """
    gc.collect()
    estimator = clone(transfomer)

    def _timed_call(method):
        # Wall-clock a single call of *method* on X, in seconds.
        started = datetime.now()
        method(X)
        return compute_time(started, datetime.now() - started)

    time_to_fit = _timed_call(estimator.fit)
    time_to_transform = _timed_call(estimator.transform)
    return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
                            random_state=None):
    """Build a random (n_samples, n_features) matrix with n_nonzeros
    Gaussian-valued entries at uniformly drawn coordinates.

    Returns the same data twice: as a dense ndarray and as a CSR matrix.
    """
    rng = np.random.RandomState(random_state)
    # Draw order matters for seeded reproducibility: values, rows, columns.
    values = rng.randn(n_nonzeros)
    row_idx = rng.randint(n_samples, size=n_nonzeros)
    col_idx = rng.randint(n_features, size=n_nonzeros)
    data_coo = sp.coo_matrix((values, (row_idx, col_idx)),
                             shape=(n_samples, n_features))
    return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
    """Print one benchmark table row: name | fit time | transform time."""
    name_cell = clf_type.ljust(30)
    fit_cell = ("%.4fs" % time_fit).center(12)
    transform_cell = ("%.4fs" % time_transform).center(12)
    print("%s | %s | %s" % (name_cell, fit_cell, transform_cell))
if __name__ == "__main__":
    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")

    op.add_option("--n-features",
                  dest="n_features", default=10 ** 4, type=int,
                  help="Number of features in the benchmarks")

    op.add_option("--n-components",
                  dest="n_components", default="auto",
                  help="Size of the random subspace."
                       " ('auto' or int > 0)")

    # Fixed help text: this option is the ratio of non-zero entries, not a
    # feature count (the previous string was copy-pasted from --n-features).
    op.add_option("--ratio-nonzeros",
                  dest="ratio_nonzeros", default=10 ** -3, type=float,
                  help="Ratio of non-zero entries per feature in the benchmarks")

    op.add_option("--n-samples",
                  dest="n_samples", default=500, type=int,
                  help="Number of samples in the benchmarks")

    op.add_option("--random-seed",
                  dest="random_seed", default=13, type=int,
                  help="Seed used by the random number generators.")

    op.add_option("--density",
                  dest="density", default=1 / 3,
                  help="Density used by the sparse random projection."
                       " ('auto' or float (0.0, 1.0]")

    op.add_option("--eps",
                  dest="eps", default=0.5, type=float,
                  help="See the documentation of the underlying transformers.")

    op.add_option("--transformers",
                  dest="selected_transformers",
                  default='GaussianRandomProjection,SparseRandomProjection',
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. Available: "
                       "GaussianRandomProjection,SparseRandomProjection")

    op.add_option("--dense",
                  dest="dense",
                  default=False,
                  action="store_true",
                  help="Set input space as a dense matrix.")

    (opts, args) = op.parse_args()
    if len(args) > 0:
        # op.error() prints the usage message and exits the process itself,
        # so no explicit sys.exit() is needed (the old one was unreachable).
        op.error("this script takes no arguments.")

    opts.n_components = type_auto_or_int(opts.n_components)
    opts.density = type_auto_or_float(opts.density)
    selected_transformers = opts.selected_transformers.split(',')

    ###########################################################################
    # Generate dataset
    ###########################################################################
    n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)

    print('Dataset statics')
    print("===========================")
    print('n_samples \t= %s' % opts.n_samples)
    print('n_features \t= %s' % opts.n_features)
    if opts.n_components == "auto":
        print('n_components \t= %s (auto)' %
              johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
                                            eps=opts.eps))
    else:
        print('n_components \t= %s' % opts.n_components)
    print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
    print('n_nonzeros \t= %s per feature' % n_nonzeros)
    print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
    print('')

    ###########################################################################
    # Set transformer input
    ###########################################################################
    transformers = {}

    ###########################################################################
    # Set GaussianRandomProjection input
    gaussian_matrix_params = {
        "n_components": opts.n_components,
        "random_state": opts.random_seed
    }
    transformers["GaussianRandomProjection"] = \
        GaussianRandomProjection(**gaussian_matrix_params)

    ###########################################################################
    # Set SparseRandomProjection input
    sparse_matrix_params = {
        "n_components": opts.n_components,
        "random_state": opts.random_seed,
        "density": opts.density,
        "eps": opts.eps,
    }
    transformers["SparseRandomProjection"] = \
        SparseRandomProjection(**sparse_matrix_params)

    ###########################################################################
    # Perform benchmark
    ###########################################################################
    time_fit = collections.defaultdict(list)
    time_transform = collections.defaultdict(list)

    print('Benchmarks')
    print("===========================")
    print("Generate dataset benchmarks... ", end="")
    X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
                                                opts.n_features,
                                                n_nonzeros,
                                                random_state=opts.random_seed)
    X = X_dense if opts.dense else X_sparse
    print("done")

    for name in selected_transformers:
        print("Perform benchmarks for %s..." % name)

        for iteration in xrange(opts.n_times):
            print("\titer %s..." % iteration, end="")
            # Bug fix: benchmark the selected input representation X
            # (dense or sparse per --dense); the code previously always
            # passed X_dense, making the --dense flag a no-op.
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
              transformers[name])
            time_fit[name].append(time_to_fit)
            time_transform[name].append(time_to_transform)
            print("done")

    print("")

    ###########################################################################
    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)
    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))
    print("")
    print("Transformer performance:")
    print("===========================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")
    print("%s | %s | %s" % ("Transformer".ljust(30),
                            "fit".center(12),
                            "transform".center(12)))
    print(31 * "-" + ("|" + "-" * 14) * 2)

    for name in sorted(selected_transformers):
        print_row(name,
                  np.mean(time_fit[name]),
                  np.mean(time_transform[name]))

    print("")
    print("")
| bsd-3-clause |
mttr/django | django/forms/utils.py | 169 | 5975 | from __future__ import unicode_literals
import json
import sys
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.

    The returned string will contain a leading space followed by key="value",
    XML-style pairs.  A boolean True value renders the bare attribute name,
    while a boolean False value drops the attribute entirely.  It is assumed
    that the keys do not need to be XML-escaped.  An empty dictionary yields
    the empty string.

    The result is passed through 'mark_safe' (by way of 'format_html_join').
    """
    key_value_attrs = [(attr, value) for attr, value in attrs.items()
                       if not isinstance(value, bool)]
    boolean_attrs = [(attr,) for attr, value in attrs.items()
                     if isinstance(value, bool) and value]
    return (
        format_html_join('', ' {}="{}"', sorted(key_value_attrs)) +
        format_html_join('', ' {}', sorted(boolean_attrs))
    )
@html_safe
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.

    The dictionary keys are the field names, and the values are the errors.
    """
    def as_data(self):
        # Map each field to its underlying list of ValidationError instances.
        return {f: e.as_data() for f, e in self.items()}

    def as_json(self, escape_html=False):
        # JSON object of field -> [{'message', 'code'}, ...]; pass
        # escape_html=True when the result will be inserted into markup.
        return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})

    def as_ul(self):
        # HTML <ul class="errorlist"> rendering; empty string when no errors.
        if not self:
            return ''
        return format_html(
            '<ul class="errorlist">{}</ul>',
            format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
        )

    def as_text(self):
        # Plain-text rendering: a '* field' line followed by its error lines.
        output = []
        for field, errors in self.items():
            output.append('* %s' % field)
            output.append('\n'.join(' * %s' % e for e in errors))
        return '\n'.join(output)

    def __str__(self):
        # HTML is the default string form (used directly in templates).
        return self.as_ul()
@html_safe
@python_2_unicode_compatible
class ErrorList(UserList, list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def __init__(self, initlist=None, error_class=None):
        super(ErrorList, self).__init__(initlist)

        # Extra CSS classes are appended to the default 'errorlist' class.
        if error_class is None:
            self.error_class = 'errorlist'
        else:
            self.error_class = 'errorlist {}'.format(error_class)

    def as_data(self):
        # Underlying ValidationError instances, for programmatic inspection.
        return ValidationError(self.data).error_list

    def get_json_data(self, escape_html=False):
        # List of {'message', 'code'} dicts; messages are HTML-escaped on
        # request so the result can be embedded in markup directly.
        errors = []
        for error in self.as_data():
            message = list(error)[0]
            errors.append({
                'message': escape(message) if escape_html else message,
                'code': error.code or '',
            })
        return errors

    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))

    def as_ul(self):
        # HTML <ul> rendering; empty string when there are no errors.
        if not self.data:
            return ''

        return format_html(
            '<ul class="{}">{}</ul>',
            self.error_class,
            format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
        )

    def as_text(self):
        return '\n'.join('* %s' % e for e in self)

    def __str__(self):
        # HTML is the default string form (used directly in templates).
        return self.as_ul()

    def __repr__(self):
        return repr(list(self))

    def __contains__(self, item):
        return item in list(self)

    def __eq__(self, other):
        return list(self) == other

    def __ne__(self, other):
        return list(self) != other

    def __getitem__(self, i):
        # Indexing yields rendered message strings, not ValidationErrors.
        error = self.data[i]
        if isinstance(error, ValidationError):
            return list(error)[0]
        return force_text(error)

    def __reduce_ex__(self, *args, **kwargs):
        # The `list` reduce function returns an iterator as the fourth element
        # that is normally used for repopulating. Since we only inherit from
        # `list` for `isinstance` backward compatibility (Refs #17413) we
        # nullify this iterator as it would otherwise result in duplicate
        # entries. (Refs #23594)
        info = super(UserList, self).__reduce_ex__(*args, **kwargs)
        return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.

    Raises ValidationError (code 'ambiguous_timezone') when the naive value
    cannot be interpreted in the current time zone (e.g. it falls inside a
    DST transition); six.reraise preserves the original traceback.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    # Aware values (or naive values with USE_TZ off) pass through untouched.
    return value
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    needs_conversion = (settings.USE_TZ and value is not None
                        and timezone.is_aware(value))
    if not needs_conversion:
        return value
    return timezone.make_naive(value, timezone.get_current_timezone())
| bsd-3-clause |
jmesteve/openerp | openerp/addons/sale/__init__.py | 72 | 1268 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import sale
import res_partner
import wizard
import report
import edi
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
todaychi/hue | desktop/core/ext-py/pyopenssl/setup.py | 10 | 3052 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Jean-Paul Calderone 2008-2015, All rights reserved
#
"""
Installation script for the OpenSSL module
"""
from setuptools import setup
# XXX Deduplicate this
__version__ = '0.15.1'
setup(name='pyOpenSSL', version=__version__,
packages = ['OpenSSL'],
package_dir = {'OpenSSL': 'OpenSSL'},
py_modules = ['OpenSSL.__init__',
'OpenSSL.tsafe',
'OpenSSL.rand',
'OpenSSL.crypto',
'OpenSSL.SSL',
'OpenSSL.version',
'OpenSSL.test.__init__',
'OpenSSL.test.util',
'OpenSSL.test.test_crypto',
'OpenSSL.test.test_rand',
'OpenSSL.test.test_ssl',
'OpenSSL.test.test_tsafe',
'OpenSSL.test.test_util',],
description = 'Python wrapper module around the OpenSSL library',
author = 'Jean-Paul Calderone',
author_email = 'exarkun@twistedmatrix.com',
maintainer = 'Jean-Paul Calderone',
maintainer_email = 'exarkun@twistedmatrix.com',
url = 'https://github.com/pyca/pyopenssl',
license = 'APL2',
install_requires=["cryptography>=0.7", "six>=1.5.2"],
long_description = """\
High-level wrapper around a subset of the OpenSSL library, includes
* SSL.Connection objects, wrapping the methods of Python's portable
sockets
* Callbacks written in Python
* Extensive error-handling mechanism, mirroring OpenSSL's error codes
... and much more ;)""",
classifiers = [
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
# General classifiers to indicate "this project supports Python 2" and
# "this project supports Python 3".
'Programming Language :: Python :: 2',
# In particular, this makes pyOpenSSL show up on
# https://pypi.python.org/pypi?:action=browse&c=533&show=all and is in
# accordance with
# http://docs.python.org/2/howto/pyporting.html#universal-bits-of-advice
'Programming Language :: Python :: 3',
# More specific classifiers to indicate more precisely which versions
# of those languages the project supports.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
],
test_suite="OpenSSL")
| apache-2.0 |
laborautonomo/Mailpile | mailpile/plugins/migrate.py | 3 | 10057 | import mailpile.config
from mailpile.commands import Command
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mail_source.mbox import MboxMailSource
from mailpile.mail_source.maildir import MaildirMailSource
from mailpile.plugins import PluginManager
from mailpile.util import *
from mailpile.vcard import *
_plugins = PluginManager(builtin=__file__)
# We might want to do this differently at some point, but
# for now it's fine.
def migrate_routes(session):
    """Migrate legacy route strings into structured config.routes entries.

    Converts prefs.default_route and each profile's route string into a
    messageroute dict, stores it under a content-derived name in
    session.config.routes, and points prefs.default_messageroute /
    profile.messageroute at that name.  Always returns True.
    """
    # Migration from route string to messageroute structure
    def route_parse(route):
        # '|command ...' means local delivery through a pipe; anything else
        # is expected to look like proto://user:password@host:port/ .
        if route.startswith('|'):
            command = route[1:].strip()
            return {
                "name": command.split()[0],
                "protocol": "local",
                "command": command
            }
        else:
            # re.split with capturing groups returns the groups interleaved
            # with the (empty) surrounding text, hence the >= 5 length check.
            # NOTE(review): a password containing '@' or ':' will not match
            # this pattern -- presumably such routes were never supported;
            # confirm before relying on this.
            res = re.split(
                "([\w]+)://([^:]+):([^@]+)@([\w\d.]+):([\d]+)[/]{0,1}", route)
            if len(res) >= 5:
                return {
                    "name": _("%(user)s on %(host)s"
                              ) % {"user": res[2], "host": res[4]},
                    "protocol": res[1],
                    "username": res[2],
                    "password": res[3],
                    "host": res[4],
                    "port": res[5]
                }
            else:
                session.ui.warning(_('Could not migrate route: %s') % route)
                return None

    def make_route_name(route_dict):
        # This will always return the same hash, no matter how Python
        # decides to order the dict internally.
        return md5_hex(str(sorted(list(route_dict.iteritems()))))[:8]

    if session.config.prefs.get('default_route'):
        route_dict = route_parse(session.config.prefs.default_route)
        if route_dict:
            route_name = make_route_name(route_dict)
            session.config.routes[route_name] = route_dict
            session.config.prefs.default_messageroute = route_name

    for profile in session.config.profiles:
        if profile.get('route'):
            route_dict = route_parse(profile.route)
            if route_dict:
                route_name = make_route_name(route_dict)
                session.config.routes[route_name] = route_dict
                profile.messageroute = route_name

    return True
def migrate_mailboxes(session):
    """Migrate pre-mail-source mailboxes from sys.mailbox to mail sources.

    Mailboxes are classified by inspecting their path (mbox file, Maildir,
    Mac Mail.app maildir, Thunderbird tree) and then handed over to a
    newly-created mail source of the matching protocol.  Always returns
    True (individual failures are not tracked here).
    """
    config = session.config
    def _common_path(paths):
        # Return the deepest directory that is a prefix of all `paths`.
        common_head, junk = os.path.split(paths[0])
        for path in paths:
            head, junk = os.path.split(path)
            while (common_head and common_head != '/' and
                   head and head != '/' and
                   head != common_head):
                # First we try shortening the target path...
                while head and head != '/' and head != common_head:
                    head, junk = os.path.split(head)
                # If that failed, lop one off the common path and try again
                if head != common_head:
                    common_head, junk = os.path.split(common_head)
                    head, junk = os.path.split(path)
        return common_head
    # Buckets for each mailbox flavor we know how to classify.
    mboxes = []
    maildirs = []
    macmaildirs = []
    thunderbird = []
    # Tag IDs used to auto-tag migrated mailboxes by role.
    spam_tids = [tag._key for tag in config.get_tags(type='spam')]
    trash_tids = [tag._key for tag in config.get_tags(type='trash')]
    inbox_tids = [tag._key for tag in config.get_tags(type='inbox')]
    # Iterate through config.sys.mailbox, sort mailboxes by type
    for mbx_id, path, src in config.get_mailboxes():
        # Skip placeholders, already-migrated mailboxes and editable ones.
        if (path == '/dev/null' or
            path.startswith('src:') or
            src is not None or
            config.is_editable_mailbox(mbx_id)):
            continue
        elif os.path.exists(os.path.join(path, 'Info.plist')):
            # Info.plist marks a Mac Mail.app maildir.
            macmaildirs.append((mbx_id, path))
        elif os.path.isdir(path):
            maildirs.append((mbx_id, path))
        elif 'thunderbird' in path.lower():
            thunderbird.append((mbx_id, path))
        else:
            mboxes.append((mbx_id, path))
    # macmail: library/mail/v2
    # NOTE(review): macmaildirs is collected above but never migrated by
    # the loops below -- presumably unfinished; confirm before relying on
    # Mac Mail.app migration.
    if thunderbird:
        # Create basic mail source...
        if 'tbird' not in config.sources:
            config.sources['tbird'] = {
                'name': 'Thunderbird',
                'protocol': 'mbox',
            }
            config.sources.tbird.discovery.create_tag = True
        config.sources.tbird.discovery.policy = 'read'
        config.sources.tbird.discovery.process_new = True
        tbird_src = MboxMailSource(session, config.sources.tbird)
        # Configure discovery policy?
        root = _common_path([path for mbx_id, path in thunderbird])
        if 'thunderbird' in root.lower():
            # FIXME: This is wrong, we should create a mailbox entry
            # with the policy 'watch'.
            tbird_src.my_config.discovery.path = root
        # Take over all the mailboxes
        for mbx_id, path in thunderbird:
            mbx = tbird_src.take_over_mailbox(mbx_id)
            # Guess the mailbox role from its path and tag accordingly.
            if 'inbox' in path.lower():
                mbx.apply_tags.extend(inbox_tids)
            elif 'spam' in path.lower() or 'junk' in path.lower():
                mbx.apply_tags.extend(spam_tids)
            elif 'trash' in path.lower():
                mbx.apply_tags.extend(trash_tids)
        # Discovery was only needed during takeover; disable it again.
        tbird_src.my_config.discovery.policy = 'unknown'
    # Same takeover dance for plain mbox files and Maildirs.
    for name, mailboxes, proto, description, cls in (
        ('mboxes', mboxes, 'mbox', 'Unix mbox files', MboxMailSource),
        ('maildirs', maildirs, 'maildir', 'Maildirs', MaildirMailSource),
    ):
        if mailboxes:
            # Create basic mail source...
            if name not in config.sources:
                config.sources[name] = {
                    'name': description,
                    'protocol': proto
                }
                config.sources[name].discovery.create_tag = False
            config.sources[name].discovery.policy = 'read'
            config.sources[name].discovery.process_new = True
            config.sources[name].discovery.apply_tags = inbox_tids[:]
            src = cls(session, config.sources[name])
            for mbx_id, path in mailboxes:
                mbx = src.take_over_mailbox(mbx_id)
            config.sources[name].discovery.policy = 'unknown'
    return True
def migrate_profiles(session):
    """Convert legacy config.profiles entries into 'profile' VCards."""
    config = session.config
    vcards = config.vcards
    for profile in config.profiles:
        if not profile.email:
            continue
        existing = vcards.get_vcard(profile.email)
        if existing and existing.email == profile.email:
            # Reuse the matching card; pull it from the index while we
            # update it below.
            vcards.deindex_vcard(existing)
            card = existing
        else:
            # No usable card yet: create a fresh one for this profile.
            card = MailpileVCard(
                VCardLine(name='EMAIL', value=profile.email, type='PREF'),
                VCardLine(name='FN', value=profile.name or profile.email))
        card.kind = 'profile'
        if profile.signature:
            card.signature = profile.signature
        if profile.messageroute:
            card.route = profile.messageroute
        vcards.add_vcards(card)
    # The legacy profile list has been fully converted; drop it.
    config.profiles = {}
    return True
def migrate_cleanup(session):
    """Prune stale autotaggers, profiles and duplicate VCards.

    VCards carrying key (crypto) information are preferred; the rest are
    de-duplicated on a name/email fingerprint and losers are deleted.
    Returns True.
    """
    config = session.config
    # Clean the autotaggers: keep only entries with a trained tagger.
    autotaggers = [t for t in config.prefs.autotag.values() if t.tagger]
    config.prefs.autotag = autotaggers
    # Clean the profiles: keep only entries with an email or a name.
    profiles = [p for p in config.profiles.values() if p.email or p.name]
    config.profiles = profiles
    # Clean the vcards:
    # - Prefer vcards with valid key info
    # - De-dupe everything based on name/email combinations
    def cardprint(vc):
        # Fingerprint used for de-duplication: full name + sorted emails.
        emails = set([v.value for v in vc.get_all('email')])
        return '/'.join([vc.fn] + sorted(list(emails)))
    # NOTE: vcards and all_vcards deliberately alias the SAME set object;
    # all_vcards keeps a handle on the full population for the final diff.
    vcards = all_vcards = set(config.vcards.values())
    keepers = set()
    # Pass 0: anything with non-empty key data survives unconditionally.
    for vc in vcards:
        keys = vc.get_all('key')
        for k in keys:
            try:
                # Key values look like '...data:<mime>,<fingerprint>';
                # malformed values are simply skipped.
                mime, fp = k.value.split('data:')[1].split(',')
                if fp:
                    keepers.add(vc)
            except (ValueError, IndexError):
                pass
    # Two de-dupe passes: the first seeds `prints` from the key-holders so
    # they win ties; the second run is a fixpoint sweep over the result.
    for p in (1, 2):
        prints = set([cardprint(vc) for vc in keepers])
        for vc in vcards:
            cp = cardprint(vc)
            if cp not in prints:
                keepers.add(vc)
                prints.add(cp)
    vcards = keepers
    # NOTE(review): this reset of `keepers` is never read again --
    # presumably leftover from an earlier version.
    keepers = set()
    # Deleted!!  Everything not kept is removed from the vcard store.
    config.vcards.del_vcards(*list(all_vcards - vcards))
    return True
# Migrations run automatically before/after first-time setup.
MIGRATIONS_BEFORE_SETUP = [migrate_routes]
MIGRATIONS_AFTER_SETUP = [migrate_profiles, migrate_cleanup]
# All migrations by name, for explicit `setup/migrate <name>` invocation.
# NOTE(review): 'sources' (migrate_mailboxes) is only reachable by name;
# it is not in either automatic list above -- confirm this is intended.
MIGRATIONS = {
    'routes': migrate_routes,
    'sources': migrate_mailboxes,
    'profiles': migrate_profiles,
    'cleanup': migrate_cleanup
}
class Migrate(Command):
    """Perform any needed migrations"""
    SYNOPSIS = (None, 'setup/migrate', None,
                '[' + '|'.join(sorted(MIGRATIONS.keys())) + ']')
    ORDER = ('Internals', 0)

    def command(self, before_setup=True, after_setup=True):
        """Run the requested migrations and report success/failure counts.

        Positional args name entries in MIGRATIONS; with no args, the
        before-setup and/or after-setup defaults are run (controlled by
        the keyword flags).  Raises UsageError on an unknown name and
        refuses to run in lockdown mode.  Config is saved afterwards.
        """
        session = self.session
        err = cnt = 0

        if self.session.config.sys.lockdown:
            return self._error(_('In lockdown, doing nothing.'))

        # Resolve explicitly requested migrations by name.
        migrations = []
        for a in self.args:
            if a in MIGRATIONS:
                migrations.append(MIGRATIONS[a])
            else:
                raise UsageError(_('Unknown migration: %s (available: %s)'
                                   ) % (a, ', '.join(MIGRATIONS.keys())))

        # No explicit selection: fall back to the automatic lists.
        if not migrations:
            migrations = ((before_setup and MIGRATIONS_BEFORE_SETUP or []) +
                          (after_setup and MIGRATIONS_AFTER_SETUP or []))

        for mig in migrations:
            try:
                if mig(session):
                    cnt += 1
                else:
                    err += 1
            except Exception:
                # Log-and-continue so one failing migration does not block
                # the rest.  (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                self._ignore_exception()
                err += 1

        self._background_save(config=True)
        return self._success(_('Performed %d migrations, failed %d.'
                               ) % (cnt, err))
_plugins.register_commands(Migrate)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.