| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
hubert667/AIR | build/celery/build/lib.linux-i686-2.7/celery/worker/consumer.py | 2 | 26417 | # -*- coding: utf-8 -*-
"""
celery.worker.consumer
~~~~~~~~~~~~~~~~~~~~~~
This module contains the components responsible for consuming messages
from the broker, processing the messages and keeping the broker connections
up and running.
"""
from __future__ import absolute_import
import errno
import kombu
import logging
import os
import socket
from collections import defaultdict
from functools import partial
from heapq import heappush
from operator import itemgetter
from time import sleep
from billiard.common import restart_state
from billiard.exceptions import RestartFreqExceeded
from kombu.async.semaphore import DummyLock
from kombu.common import QoS, ignore_errors
from kombu.syn import _detect_environment
from kombu.utils.compat import get_errno
from kombu.utils.encoding import safe_repr, bytes_t
from kombu.utils.limits import TokenBucket
from celery import bootsteps
from celery.app.trace import build_tracer
from celery.canvas import signature
from celery.exceptions import InvalidTaskError
from celery.five import items, values
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.utils.text import truncate
from celery.utils.timeutils import humanize_seconds, rate
from . import heartbeat, loops, pidbox
from .state import task_reserved, maybe_shutdown, revoked, reserved_requests
try:
buffer_t = buffer
except NameError: # pragma: no cover
    # Py3 does not have buffer, but we only need it for isinstance checks.
class buffer_t(object): # noqa
pass
__all__ = [
'Consumer', 'Connection', 'Events', 'Heart', 'Control',
'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body',
]
CLOSE = bootsteps.CLOSE
logger = get_logger(__name__)
debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning,
logger.error, logger.critical)
CONNECTION_RETRY = """\
consumer: Connection to broker lost. \
Trying to re-establish the connection...\
"""
CONNECTION_RETRY_STEP = """\
Trying again {when}...\
"""
CONNECTION_ERROR = """\
consumer: Cannot connect to %s: %s.
%s
"""
CONNECTION_FAILOVER = """\
Will retry using next failover.\
"""
UNKNOWN_FORMAT = """\
Received and deleted unknown message. Wrong destination?!?
The full contents of the message body was: %s
"""
#: Error message for when an unregistered task is received.
UNKNOWN_TASK_ERROR = """\
Received unregistered task of type %s.
The message has been ignored and discarded.
Did you remember to import the module containing this task?
Or maybe you are using relative imports?
Please see http://bit.ly/gLye1c for more information.
The full contents of the message body was:
%s
"""
#: Error message for when an invalid task message is received.
INVALID_TASK_ERROR = """\
Received invalid task message: %s
The message has been ignored and discarded.
Please ensure your message conforms to the task
message protocol as described here: http://bit.ly/hYj41y
The full contents of the message body was:
%s
"""
MESSAGE_REPORT = """\
body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3}}}\
"""
MINGLE_GET_FIELDS = itemgetter('clock', 'revoked')
def dump_body(m, body):
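    # Show a truncated repr of the (possibly decoded) body, but report
    # the raw message body length.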
if isinstance(body, buffer_t):
body = bytes_t(body)
return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024),
len(m.body))
class Consumer(object):
Strategies = dict
    #: Set when the consumer is shutting down.
in_shutdown = False
#: Optional callback called the first time the worker
#: is ready to receive tasks.
init_callback = None
#: The current worker pool instance.
pool = None
#: A timer used for high-priority internal tasks, such
#: as sending heartbeats.
timer = None
restart_count = -1 # first start is the same as a restart
class Blueprint(bootsteps.Blueprint):
name = 'Consumer'
default_steps = [
'celery.worker.consumer:Connection',
'celery.worker.consumer:Mingle',
'celery.worker.consumer:Events',
'celery.worker.consumer:Gossip',
'celery.worker.consumer:Heart',
'celery.worker.consumer:Control',
'celery.worker.consumer:Tasks',
'celery.worker.consumer:Evloop',
'celery.worker.consumer:Agent',
]
def shutdown(self, parent):
self.send_all(parent, 'shutdown')
def __init__(self, on_task_request,
init_callback=noop, hostname=None,
pool=None, app=None,
timer=None, controller=None, hub=None, amqheartbeat=None,
worker_options=None, disable_rate_limits=False,
initial_prefetch_count=2, prefetch_multiplier=1, **kwargs):
self.app = app
self.controller = controller
self.init_callback = init_callback
self.hostname = hostname or socket.gethostname()
self.pid = os.getpid()
self.pool = pool
self.timer = timer
self.strategies = self.Strategies()
conninfo = self.app.connection()
self.connection_errors = conninfo.connection_errors
self.channel_errors = conninfo.channel_errors
self._restart_state = restart_state(maxR=5, maxT=1)
self._does_info = logger.isEnabledFor(logging.INFO)
self.on_task_request = on_task_request
self.on_task_message = set()
self.amqheartbeat_rate = self.app.conf.BROKER_HEARTBEAT_CHECKRATE
self.disable_rate_limits = disable_rate_limits
self.initial_prefetch_count = initial_prefetch_count
self.prefetch_multiplier = prefetch_multiplier
        # This contains a token bucket for each task type by name, used for
        # rate limits, or None if rate limits are disabled for that task.
self.task_buckets = defaultdict(lambda: None)
self.reset_rate_limits()
self.hub = hub
if self.hub:
self.amqheartbeat = amqheartbeat
if self.amqheartbeat is None:
self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT
else:
self.amqheartbeat = 0
if not hasattr(self, 'loop'):
self.loop = loops.asynloop if hub else loops.synloop
if _detect_environment() == 'gevent':
# there's a gevent bug that causes timeouts to not be reset,
# so if the connection timeout is exceeded once, it can NEVER
# connect again.
self.app.conf.BROKER_CONNECTION_TIMEOUT = None
self.steps = []
self.blueprint = self.Blueprint(
app=self.app, on_close=self.on_close,
)
self.blueprint.apply(self, **dict(worker_options or {}, **kwargs))
def bucket_for_task(self, type):
limit = rate(getattr(type, 'rate_limit', None))
return TokenBucket(limit, capacity=1) if limit else None
def reset_rate_limits(self):
self.task_buckets.update(
(n, self.bucket_for_task(t)) for n, t in items(self.app.tasks)
)
def _update_prefetch_count(self, index=0):
"""Update prefetch count after pool/shrink grow operations.
Index must be the change in number of processes as a postive
(increasing) or negative (decreasing) number.
.. note::
Currently pool grow operations will end up with an offset
of +1 if the initial size of the pool was 0 (e.g.
``--autoscale=1,0``).
"""
num_processes = self.pool.num_processes
if not self.initial_prefetch_count or not num_processes:
return # prefetch disabled
self.initial_prefetch_count = (
self.pool.num_processes * self.prefetch_multiplier
)
return self._update_qos_eventually(index)
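    # Applies the prefetch change: decrement for shrink (index < 0),
    # increment for grow, scaled by the prefetch multiplier.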
def _update_qos_eventually(self, index):
return (self.qos.decrement_eventually if index < 0
else self.qos.increment_eventually)(
abs(index) * self.prefetch_multiplier)
def _limit_task(self, request, bucket, tokens):
if not bucket.can_consume(tokens):
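            # Not enough tokens yet: reschedule this check for when the
            # bucket is expected to have refilled enough.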
hold = bucket.expected_time(tokens)
self.timer.call_after(
hold, self._limit_task, (request, bucket, tokens),
)
else:
task_reserved(request)
self.on_task_request(request)
def start(self):
blueprint, loop = self.blueprint, self.loop
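        # Keep restarting the consumer blueprint until shutdown is
        # requested, recovering from connection errors in between.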
while blueprint.state != CLOSE:
self.restart_count += 1
maybe_shutdown()
try:
blueprint.start(self)
except self.connection_errors as exc:
if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE:
raise # Too many open files
maybe_shutdown()
try:
self._restart_state.step()
except RestartFreqExceeded as exc:
crit('Frequent restarts detected: %r', exc, exc_info=1)
sleep(1)
if blueprint.state != CLOSE and self.connection:
warn(CONNECTION_RETRY, exc_info=True)
try:
self.connection.collect()
except Exception:
pass
self.on_close()
blueprint.restart(self)
def register_with_event_loop(self, hub):
self.blueprint.send_all(
self, 'register_with_event_loop', args=(hub, ),
description='Hub.register',
)
def shutdown(self):
self.in_shutdown = True
self.blueprint.shutdown(self)
def stop(self):
self.blueprint.stop(self)
def on_ready(self):
callback, self.init_callback = self.init_callback, None
if callback:
callback(self)
def loop_args(self):
return (self, self.connection, self.task_consumer,
self.blueprint, self.hub, self.qos, self.amqheartbeat,
self.app.clock, self.amqheartbeat_rate)
def on_decode_error(self, message, exc):
"""Callback called if an error occurs while decoding
a message received.
Simply logs the error and acknowledges the message so it
doesn't enter a loop.
:param message: The message with errors.
:param exc: The original exception instance.
"""
crit("Can't decode message body: %r (type:%r encoding:%r raw:%r')",
exc, message.content_type, message.content_encoding,
dump_body(message, message.body), exc_info=1)
message.ack()
def on_close(self):
# Clear internal queues to get rid of old messages.
# They can't be acked anyway, as a delivery tag is specific
# to the current channel.
if self.controller and self.controller.semaphore:
self.controller.semaphore.clear()
if self.timer:
self.timer.clear()
reserved_requests.clear()
if self.pool and self.pool.flush:
self.pool.flush()
def connect(self):
"""Establish the broker connection.
Will retry establishing the connection if the
        :setting:`BROKER_CONNECTION_RETRY` setting is enabled.
"""
conn = self.app.connection(heartbeat=self.amqheartbeat)
# Callback called for each retry while the connection
# can't be established.
def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
if getattr(conn, 'alt', None) and interval == 0:
next_step = CONNECTION_FAILOVER
error(CONNECTION_ERROR, conn.as_uri(), exc,
next_step.format(when=humanize_seconds(interval, 'in', ' ')))
        # remember that the connection is lazy, so it won't be
        # established until needed.
if not self.app.conf.BROKER_CONNECTION_RETRY:
# retry disabled, just call connect directly.
conn.connect()
return conn
conn = conn.ensure_connection(
_error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES,
callback=maybe_shutdown,
)
if self.hub:
conn.transport.register_with_event_loop(conn.connection, self.hub)
return conn
def add_task_queue(self, queue, exchange=None, exchange_type=None,
routing_key=None, **options):
cset = self.task_consumer
queues = self.app.amqp.queues
        # Must use `in` here, as __missing__ will automatically
# create queues when CELERY_CREATE_MISSING_QUEUES is enabled.
# (Issue #1079)
if queue in queues:
q = queues[queue]
else:
exchange = queue if exchange is None else exchange
exchange_type = ('direct' if exchange_type is None
else exchange_type)
q = queues.select_add(queue,
exchange=exchange,
exchange_type=exchange_type,
routing_key=routing_key, **options)
if not cset.consuming_from(queue):
cset.add_queue(q)
cset.consume()
info('Started consuming from %s', queue)
def cancel_task_queue(self, queue):
info('Cancelling queue %s', queue)
self.app.amqp.queues.deselect(queue)
self.task_consumer.cancel_by_queue(queue)
def apply_eta_task(self, task):
"""Method called by the timer to apply a task with an
ETA/countdown."""
task_reserved(task)
self.on_task_request(task)
self.qos.decrement_eventually()
def _message_report(self, body, message):
return MESSAGE_REPORT.format(dump_body(message, body),
safe_repr(message.content_type),
safe_repr(message.content_encoding),
safe_repr(message.delivery_info))
def on_unknown_message(self, body, message):
warn(UNKNOWN_FORMAT, self._message_report(body, message))
message.reject_log_error(logger, self.connection_errors)
def on_unknown_task(self, body, message, exc):
error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True)
message.reject_log_error(logger, self.connection_errors)
def on_invalid_task(self, body, message, exc):
error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True)
message.reject_log_error(logger, self.connection_errors)
def update_strategies(self):
loader = self.app.loader
for name, task in items(self.app.tasks):
self.strategies[name] = task.start_strategy(self.app, self)
task.__trace__ = build_tracer(name, task, loader, self.hostname,
app=self.app)
def create_task_handler(self):
strategies = self.strategies
on_unknown_message = self.on_unknown_message
on_unknown_task = self.on_unknown_task
on_invalid_task = self.on_invalid_task
callbacks = self.on_task_message
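        # The above are cached as locals so the closure below avoids
        # repeated attribute lookups on the hot path.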
def on_task_received(body, message):
try:
name = body['task']
except (KeyError, TypeError):
return on_unknown_message(body, message)
try:
strategies[name](message, body,
message.ack_log_error,
message.reject_log_error,
callbacks)
except KeyError as exc:
on_unknown_task(body, message, exc)
except InvalidTaskError as exc:
on_invalid_task(body, message, exc)
return on_task_received
def __repr__(self):
return '<Consumer: {self.hostname} ({state})>'.format(
self=self, state=self.blueprint.human_state(),
)
class Connection(bootsteps.StartStopStep):
def __init__(self, c, **kwargs):
c.connection = None
def start(self, c):
c.connection = c.connect()
info('Connected to %s', c.connection.as_uri())
def shutdown(self, c):
# We must set self.connection to None here, so
# that the green pidbox thread exits.
connection, c.connection = c.connection, None
if connection:
ignore_errors(connection, connection.close)
def info(self, c, params='N/A'):
if c.connection:
params = c.connection.info()
params.pop('password', None) # don't send password.
return {'broker': params}
class Events(bootsteps.StartStopStep):
requires = (Connection, )
def __init__(self, c, send_events=None, **kwargs):
self.send_events = True
self.groups = None if send_events else ['worker']
c.event_dispatcher = None
def start(self, c):
# flush events sent while connection was down.
prev = self._close(c)
dis = c.event_dispatcher = c.app.events.Dispatcher(
c.connect(), hostname=c.hostname,
enabled=self.send_events, groups=self.groups,
)
if prev:
dis.extend_buffer(prev)
dis.flush()
def stop(self, c):
pass
def _close(self, c):
if c.event_dispatcher:
dispatcher = c.event_dispatcher
# remember changes from remote control commands:
self.groups = dispatcher.groups
# close custom connection
if dispatcher.connection:
ignore_errors(c, dispatcher.connection.close)
ignore_errors(c, dispatcher.close)
c.event_dispatcher = None
return dispatcher
def shutdown(self, c):
self._close(c)
class Heart(bootsteps.StartStopStep):
requires = (Events, )
def __init__(self, c, without_heartbeat=False, **kwargs):
self.enabled = not without_heartbeat
c.heart = None
def start(self, c):
c.heart = heartbeat.Heart(c.timer, c.event_dispatcher)
c.heart.start()
def stop(self, c):
c.heart = c.heart and c.heart.stop()
shutdown = stop
class Mingle(bootsteps.StartStopStep):
label = 'Mingle'
requires = (Events, )
compatible_transports = set(['amqp', 'redis'])
def __init__(self, c, without_mingle=False, **kwargs):
self.enabled = not without_mingle and self.compatible_transport(c.app)
def compatible_transport(self, app):
with app.connection() as conn:
return conn.transport.driver_type in self.compatible_transports
def start(self, c):
info('mingle: searching for neighbors')
I = c.app.control.inspect(timeout=1.0, connection=c.connection)
replies = I.hello(c.hostname, revoked._data) or {}
replies.pop(c.hostname, None)
if replies:
info('mingle: sync with %s nodes',
len([reply for reply, value in items(replies) if value]))
for reply in values(replies):
if reply:
try:
other_clock, other_revoked = MINGLE_GET_FIELDS(reply)
except KeyError: # reply from pre-3.1 worker
pass
else:
c.app.clock.adjust(other_clock)
revoked.update(other_revoked)
info('mingle: sync complete')
else:
info('mingle: all alone')
class Tasks(bootsteps.StartStopStep):
requires = (Mingle, )
def __init__(self, c, **kwargs):
c.task_consumer = c.qos = None
def start(self, c):
c.update_strategies()
c.task_consumer = c.app.amqp.TaskConsumer(
c.connection, on_decode_error=c.on_decode_error,
)
c.qos = QoS(c.task_consumer.qos, c.initial_prefetch_count)
c.qos.update() # set initial prefetch count
def stop(self, c):
if c.task_consumer:
debug('Cancelling task consumer...')
ignore_errors(c, c.task_consumer.cancel)
def shutdown(self, c):
if c.task_consumer:
self.stop(c)
debug('Closing consumer channel...')
ignore_errors(c, c.task_consumer.close)
c.task_consumer = None
def info(self, c):
return {'prefetch_count': c.qos.value if c.qos else 'N/A'}
class Agent(bootsteps.StartStopStep):
conditional = True
requires = (Connection, )
def __init__(self, c, **kwargs):
self.agent_cls = self.enabled = c.app.conf.CELERYD_AGENT
def create(self, c):
agent = c.agent = self.instantiate(self.agent_cls, c.connection)
return agent
class Control(bootsteps.StartStopStep):
requires = (Tasks, )
def __init__(self, c, **kwargs):
self.is_green = c.pool is not None and c.pool.is_green
self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c)
self.start = self.box.start
self.stop = self.box.stop
self.shutdown = self.box.shutdown
def include_if(self, c):
return c.app.conf.CELERY_ENABLE_REMOTE_CONTROL
class Gossip(bootsteps.ConsumerStep):
label = 'Gossip'
requires = (Mingle, )
_cons_stamp_fields = itemgetter(
'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver',
)
compatible_transports = set(['amqp', 'redis'])
def __init__(self, c, without_gossip=False, interval=5.0, **kwargs):
self.enabled = not without_gossip and self.compatible_transport(c.app)
self.app = c.app
c.gossip = self
self.Receiver = c.app.events.Receiver
self.hostname = c.hostname
self.full_hostname = '.'.join([self.hostname, str(c.pid)])
self.timer = c.timer
if self.enabled:
self.state = c.app.events.State(
on_node_join=self.on_node_join,
on_node_leave=self.on_node_leave,
)
if c.hub:
c._mutex = DummyLock()
self.update_state = self.state.event
self.interval = interval
self._tref = None
self.consensus_requests = defaultdict(list)
self.consensus_replies = {}
self.event_handlers = {
'worker.elect': self.on_elect,
'worker.elect.ack': self.on_elect_ack,
}
self.clock = c.app.clock
self.election_handlers = {
'task': self.call_task
}
def compatible_transport(self, app):
with app.connection() as conn:
return conn.transport.driver_type in self.compatible_transports
def election(self, id, topic, action=None):
self.consensus_replies[id] = []
self.dispatcher.send(
'worker-elect',
id=id, topic=topic, action=action, cver=1,
)
def call_task(self, task):
try:
signature(task, app=self.app).apply_async()
except Exception as exc:
error('Could not call task: %r', exc, exc_info=1)
def on_elect(self, event):
try:
(id_, clock, hostname, pid,
topic, action, _) = self._cons_stamp_fields(event)
except KeyError as exc:
return error('election request missing field %s', exc, exc_info=1)
heappush(
self.consensus_requests[id_],
(clock, '%s.%s' % (hostname, pid), topic, action),
)
self.dispatcher.send('worker-elect-ack', id=id_)
def start(self, c):
super(Gossip, self).start(c)
self.dispatcher = c.event_dispatcher
def on_elect_ack(self, event):
id = event['id']
try:
replies = self.consensus_replies[id]
except KeyError:
return # not for us
alive_workers = self.state.alive_workers()
replies.append(event['hostname'])
if len(replies) >= len(alive_workers):
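            # Every known alive worker has voted: order the election
            # requests by logical clock so all nodes agree on a winner.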
_, leader, topic, action = self.clock.sort_heap(
self.consensus_requests[id],
)
if leader == self.full_hostname:
info('I won the election %r', id)
try:
handler = self.election_handlers[topic]
except KeyError:
error('Unknown election topic %r', topic, exc_info=1)
else:
handler(action)
else:
info('node %s elected for %r', leader, id)
self.consensus_requests.pop(id, None)
self.consensus_replies.pop(id, None)
def on_node_join(self, worker):
debug('%s joined the party', worker.hostname)
def on_node_leave(self, worker):
debug('%s left', worker.hostname)
def on_node_lost(self, worker):
info('missed heartbeat from %s', worker.hostname)
def register_timer(self):
if self._tref is not None:
self._tref.cancel()
self._tref = self.timer.call_repeatedly(self.interval, self.periodic)
def periodic(self):
workers = self.state.workers
dirty = set()
for worker in values(workers):
if not worker.alive:
dirty.add(worker)
self.on_node_lost(worker)
for worker in dirty:
workers.pop(worker.hostname, None)
def get_consumers(self, channel):
self.register_timer()
ev = self.Receiver(channel, routing_key='worker.#')
return [kombu.Consumer(
channel,
queues=[ev.queue],
on_message=partial(self.on_message, ev.event_from_message),
no_ack=True
)]
def on_message(self, prepare, message):
_type = message.delivery_info['routing_key']
try:
handler = self.event_handlers[_type]
except KeyError:
pass
else:
return handler(message.payload)
hostname = (message.headers.get('hostname') or
message.payload['hostname'])
if hostname != self.hostname:
type, event = prepare(message.payload)
obj, subject = self.update_state(event)
else:
self.clock.forward()
class Evloop(bootsteps.StartStopStep):
label = 'event loop'
last = True
def start(self, c):
self.patch_all(c)
c.loop(*c.loop_args())
def patch_all(self, c):
c.qos._mutex = DummyLock()
| gpl-3.0 |
omar-AM/python-pgp | pgp/test_keys.py | 2 | 23039 | # python-pgp A Python OpenPGP implementation
# Copyright (C) 2014 Richard Mitchell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for creating keys and packets for test purposes.
To test all our code branches, we need to create some quite deformed
keys.
"""
import math
import time
from Crypto import Random
from Crypto.Hash import MD5
from Crypto.Hash import SHA
from Crypto.PublicKey import DSA
from Crypto.PublicKey import ElGamal
from Crypto.PublicKey import RSA
from Crypto.Util.number import bytes_to_long
from pgp import utils
TEST_ELG_K = bytes_to_long(
b'+|\xb2\x9e\xd5\x83o\xd0\xa4.D\xbfy\x1d\xa6\xeax\xe7\x1b\xc1\x92\x81)w'
b'\xf0\xa0 \x00\xdd7\xebH\r\xe2`G\x9d\x10Z\xb4*\xf3+w\x03\xa7\xa9tP\xaa'
b'\xc7\x1f\x11\xc5\x1f5`\x80\xec\x9c\xbf\xfeg\xc8\xd7|4\xd3\xd3\xdb\n\xd5'
b'J\xf7:\x8a\x99\x11\x9ciN\x8d\xbaV\xab\x9d\x8e;\xf4\xaa\xad\xd2\xd5H\xc1'
b'\x91v\x0c\xe1\x10\x959\x00BD\xf6\xa4\x02\xa4\xbb-\xad+x\xa21\xc6\x82'
b'\xfesQ\xce\xbb\xe4W\x02\xea\xdd'
)
"""This is a precalculated value of K for ElGamal signing when we don't
care about security - like in testing. Since the minimum ELG size is
1024, we can use this for all key sizes.
"""
def make_key_objects(pub_algorithm_type, key_size):
if pub_algorithm_type == 17:
secret_key = DSA.generate(key_size)
elif pub_algorithm_type in (1, 3):
secret_key = RSA.generate(key_size)
elif pub_algorithm_type == 20:
# TODO: This should not be allowed except for testing purposes.
# XXX: This can take a really long time
secret_key = ElGamal.generate(key_size, Random.new().read)
else:
# TODO: complete
raise ValueError
public_key = secret_key.publickey()
return secret_key, public_key
def make_key(user_id, pub_algorithm_type, key_size):
"""Returns a tuple of a bytearray representing the transferrable
public key and the secret key object.
"""
secret_key, public_key = make_key_objects(pub_algorithm_type, key_size)
packets = []
public_key_packet = make_public_key_packet(4, int(time.time()), 0,
public_key, pub_algorithm_type)
packets.append(public_key_packet)
user_id_packet = make_user_id_packet(4, user_id)
packets.append(user_id_packet)
user_id_selfsig_subpackets = [
make_creation_time_subpacket(int(time.time())),
make_expiration_time_subpacket(0),
make_issuer_key_subpacket(public_key_packet['key_id']),
]
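    # Self-certify the user ID binding (signature type 0x10) using
    # SHA-256 (OpenPGP hash algorithm 8).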
user_id_selfsig = make_signature_packet(
secret_key, public_key_packet, user_id_packet, 4,
0x10, pub_algorithm_type, 8,
subpackets=user_id_selfsig_subpackets)
packets.append(user_id_selfsig)
result = bytearray()
for packet in packets:
result.extend(packet_to_bytes(packet))
return result, secret_key
def make_signature_subpacket(type_, data, critical=False, hashed=True):
return {'type': type_,
'critical': critical,
'hashed': hashed,
'data': data}
def make_creation_time_subpacket(t, critical=False, hashed=True):
data = utils.int_to_4byte(t)
return make_signature_subpacket(2, data, critical, hashed)
def make_expiration_time_subpacket(t, critical=False, hashed=True):
data = utils.int_to_4byte(t)
return make_signature_subpacket(3, data, critical, hashed)
def make_exportable_subpacket(bool_, critical=False, hashed=True):
data = bytearray([int(bool_)])
return make_signature_subpacket(4, data, critical, hashed)
def make_trust_signature_subpacket(depth, amount, critical=False,
hashed=True):
data = bytearray([depth, amount])
return make_signature_subpacket(5, data, critical, hashed)
def make_regex_subpacket(regex, critical=False, hashed=True):
data = bytearray(regex.encode('ascii'))
data.append(0x00)
return make_signature_subpacket(6, data, critical, hashed)
def make_revocable_subpacket(bool_, critical=False, hashed=True):
data = bytearray([int(bool_)])
return make_signature_subpacket(7, data, critical, hashed)
def make_key_expiration_time_subpacket(t, critical=False, hashed=True):
data = utils.int_to_4byte(t)
return make_signature_subpacket(9, data, critical, hashed)
def make_preferred_sym_algorithms_subpacket(types, critical=False,
hashed=True):
data = bytearray(types)
return make_signature_subpacket(11, data, critical, hashed)
def make_revocation_key_subpacket(fingerprint, pub_algorithm_type,
sensitive=False, critical=False,
hashed=True):
data = bytearray([0x80 + (0x40 if sensitive else 0x00),
pub_algorithm_type])
data.extend(utils.hex_to_bytes(fingerprint, 20))
return make_signature_subpacket(12, data, critical, hashed)
def make_issuer_key_subpacket(key_id, critical=False, hashed=True):
data = utils.hex_to_bytes(key_id, 8)
return make_signature_subpacket(16, data, critical, hashed)
def make_notation_subpacket(namespace, name, value, is_text, critical=False,
hashed=True):
data = bytearray([
0x80 if is_text else 0x00,
0x00,
0x00,
0x00
])
name_with_namespace = u'{0}@{1}'.format(name, namespace)
value_bytes = value
if is_text:
value_bytes = value.encode('utf8')
name_with_namespace_bytes = name_with_namespace.encode('utf8')
data.extend(utils.int_to_2byte(len(name_with_namespace_bytes)))
data.extend(utils.int_to_2byte(len(value_bytes)))
data.extend(bytearray(name_with_namespace_bytes))
data.extend(bytearray(value_bytes))
return make_signature_subpacket(20, data, critical, hashed)
def make_preferred_hash_algorithms_subpacket(types, critical=False,
hashed=True):
data = bytearray(types)
return make_signature_subpacket(21, data, critical, hashed)
def make_preferred_compression_algorithms_subpacket(types, critical=False,
hashed=True):
data = bytearray(types)
return make_signature_subpacket(22, data, critical, hashed)
def make_key_server_prefs_subpacket(no_modify, critical=False, hashed=True):
data = bytearray([0x80 if no_modify else 0x00])
return make_signature_subpacket(23, data, critical, hashed)
def make_preferred_key_server_subpacket(uri, critical=False, hashed=True):
data = bytearray(uri.encode('utf8'))
return make_signature_subpacket(24, data, critical, hashed)
def make_primary_user_id_subpacket(primary, critical=False, hashed=True):
data = bytearray([int(primary)])
return make_signature_subpacket(25, data, critical, hashed)
def make_policy_uri_subpacket(uri, critical=False, hashed=True):
data = bytearray(uri.encode('utf8'))
return make_signature_subpacket(26, data, critical, hashed)
def make_flags_subpacket(may_certify, may_sign, may_encrypt_comms,
may_encrypt_storage, may_have_been_split,
may_be_used_for_auth, may_be_shared, critical=False,
hashed=True):
data = bytearray([
(0x01 if may_certify else 0x00) +
(0x02 if may_sign else 0x00) +
(0x04 if may_encrypt_comms else 0x00) +
(0x08 if may_encrypt_storage else 0x00) +
(0x10 if may_have_been_split else 0x00) +
(0x20 if may_be_used_for_auth else 0x00) +
(0x80 if may_be_shared else 0x00)
])
return make_signature_subpacket(27, data, critical, hashed)
def make_user_id_subpacket(user_id, critical=False, hashed=True):
data = bytearray(user_id.encode('utf8'))
return make_signature_subpacket(28, data, critical, hashed)
def make_revocation_reason_subpacket(revocation_code, revocation_string,
critical=False, hashed=True):
data = bytearray([revocation_code])
data.extend(revocation_string.encode('utf8'))
return make_signature_subpacket(29, data, critical, hashed)
def make_features_subpacket(supports_modification_detection, critical=False,
hashed=True):
data = bytearray([
0x01 if supports_modification_detection else 0x00
])
return make_signature_subpacket(30, data, critical, hashed)
def make_target_subpacket(pub_algorithm_type, hash_algorithm_type, digest,
critical=False, hashed=True):
data = bytearray([pub_algorithm_type, hash_algorithm_type])
data.extend(digest)
return make_signature_subpacket(31, data, critical, hashed)
def make_embedded_signature_subpacket(signature, critical=False, hashed=True):
data = signature_to_bytes(signature)
return make_signature_subpacket(32, data, critical, hashed)
def make_signature_packet(secret_key, public_key_packet, packet, version,
type_, pub_algorithm_type, hash_algorithm_type,
creation_time=None, expiration_time=None,
key_id=None, subpackets=None):
subpackets = subpackets or []
signature = {
'type': 2,
'version': version,
'sig_version': version,
'sig_type': type_,
'pub_algorithm_type': pub_algorithm_type,
'hash_algorithm_type': hash_algorithm_type,
'subpackets': subpackets
}
hashed_subpacket_data = bytearray()
if version in (2, 3):
if None in (creation_time, key_id):
raise TypeError(
                'Creation time and key ID must be provided for version 2 '
                'and 3 signatures.')
signature['creation_time'] = creation_time
signature['key_id'] = key_id
elif version >= 4:
if (creation_time, key_id) != (None, None):
raise TypeError(
'Version 4 signatures must store creation time and key '
'ID in subpackets.')
for subpacket in subpackets:
if subpacket['hashed']:
hashed_subpacket_data.extend(subpacket_to_bytes(subpacket))
hash_ = utils.hash_packet_for_signature(
packet_to_content_bytes(public_key_packet),
packet['type'],
packet_to_content_bytes(packet),
type_,
version,
hash_algorithm_type,
creation_time,
pub_algorithm_type,
hashed_subpacket_data
)
digest = bytearray(hash_.digest())
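    # 'hash2' stores the leftmost two octets of the digest as the
    # quick-check field from RFC 4880.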
signature['hash2'] = digest[:2]
k = None
if pub_algorithm_type == 20:
k = TEST_ELG_K
values = utils.sign_hash(pub_algorithm_type, secret_key, hash_, k=k)
signature['values'] = values
return signature
def subpacket_to_bytes(subpacket):
data_len = len(subpacket['data']) + 1 # For the type
result = bytearray()
packet_length_bytes, _ = utils.new_packet_length_to_bytes(data_len, False)
result.extend(packet_length_bytes)
raw = subpacket['type'] + (0x80 if subpacket['critical'] else 0x00)
result.append(raw)
result.extend(subpacket['data'])
return result
def signature_to_bytes(signature):
result = bytearray()
sig_version = signature['sig_version']
result.append(sig_version)
if sig_version >= 4:
result.append(signature['sig_type'])
result.append(signature['pub_algorithm_type'])
result.append(signature['hash_algorithm_type'])
hashed_subpacket_data = bytearray()
unhashed_subpacket_data = bytearray()
for sp in signature['subpackets']:
subpacket_data = subpacket_to_bytes(sp)
if sp['hashed']:
hashed_subpacket_data.extend(subpacket_data)
else:
unhashed_subpacket_data.extend(subpacket_data)
result.extend(utils.int_to_2byte(len(hashed_subpacket_data)))
result.extend(hashed_subpacket_data)
result.extend(utils.int_to_2byte(len(unhashed_subpacket_data)))
result.extend(unhashed_subpacket_data)
elif sig_version in (2, 3):
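        # 0x05 is the length of the hashed material that follows:
        # one-octet signature type plus four-octet creation time.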
result.append(0x05)
result.append(signature['sig_type'])
result.extend(utils.int_to_4byte(signature['creation_time']))
result.extend(signature['key_id'])
result.append(signature['pub_algorithm_type'])
result.append(signature['hash_algorithm_type'])
result.extend(signature['hash2'])
for value in signature['values']:
if value is None:
continue
result.extend(utils.int_to_mpi(value))
return result
def make_fingerprint(pubkey):
# Derived from 'python-pgpdump'.
# https://github.com/toofishes/python-pgpdump
# Copyright (C) 2011-2014, Dan McGee.
# All rights reserved.
#
# Derived from 'pgpdump'. http://www.mew.org/~kazu/proj/pgpdump/
# Copyright (C) 1998, Kazuhiko Yamamoto.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
if pubkey['version'] < 4:
md5 = MD5.new()
# Key type must be RSA for v2 and v3 public keys
if pubkey['pub_algorithm_type'] in (1, 2, 3):
key_id = ('%X' % pubkey['modulus'])[-8:].zfill(8)
pubkey['key_id'] = key_id.encode('ascii')
md5.update(utils.int_to_bytes(pubkey['modulus']))
md5.update(utils.int_to_bytes(pubkey['exponent']))
elif pubkey['pub_algorithm_type'] == 16:
# Of course, there are ELG keys in the wild too. This formula
# for calculating key_id and fingerprint is derived from an old
# key and there is a test case based on it.
key_id = ('%X' % pubkey['prime'])[-8:].zfill(8)
pubkey['key_id'] = key_id.encode('ascii')
md5.update(utils.int_to_bytes(pubkey['prime']))
md5.update(utils.int_to_bytes(pubkey['group_gen']))
fingerprint = md5.hexdigest().upper().encode('ascii')
elif pubkey['version'] >= 4:
sha1 = SHA.new()
# TODO this is the same as hash_key
pubkey_data = public_key_to_bytes(pubkey)
pubkey_length = len(pubkey_data)
seed_bytes = (0x99, (pubkey_length >> 8) & 0xff, pubkey_length & 0xff)
sha1.update(bytearray(seed_bytes))
sha1.update(pubkey_data)
fingerprint = sha1.hexdigest().upper().encode('ascii')
return fingerprint
def make_public_key_packet(version, creation_time, expiration_days,
public_key, pub_algorithm_type, instance=None):
pubkey = instance or {
'type': 6,
'version': version,
}
pubkey['creation_time'] = creation_time
pubkey['pub_algorithm_type'] = pub_algorithm_type
if version in (2, 3):
pubkey['expiration_days'] = expiration_days
if pub_algorithm_type not in (1, 2, 3):
raise ValueError(('Invalid algorithm type for version {0} '
'public key').format(version))
if pub_algorithm_type in (1, 2, 3):
pubkey['modulus'] = public_key.n
pubkey['bitlen'] = \
int(math.ceil(math.log(pubkey['modulus'], 2)))
pubkey['exponent'] = public_key.e
elif pub_algorithm_type == 17:
pubkey['prime'] = public_key.p
pubkey['bitlen'] = \
int(math.ceil(math.log(pubkey['prime'], 2)))
pubkey['group_order'] = public_key.q
pubkey['group_gen'] = public_key.g
pubkey['key_value'] = public_key.y
elif pub_algorithm_type in (16, 20):
pubkey['prime'] = public_key.p
pubkey['bitlen'] = \
int(math.ceil(math.log(pubkey['prime'], 2)))
pubkey['group_gen'] = public_key.g
pubkey['key_value'] = public_key.y
pubkey['fingerprint'] = make_fingerprint(pubkey)
pubkey['key_id'] = pubkey['fingerprint'][-16:]
return pubkey
def public_key_to_bytes(pubkey):
result = bytearray([pubkey['version']])
result.extend(utils.int_to_4byte(pubkey['creation_time']))
if pubkey['version'] in (2, 3):
result.extend(utils.int_to_2byte(pubkey['expiration_days']))
result.append(pubkey['pub_algorithm_type'])
if pubkey['pub_algorithm_type'] in (1, 2, 3):
result.extend(utils.int_to_mpi(pubkey['modulus']))
result.extend(utils.int_to_mpi(pubkey['exponent']))
elif pubkey['pub_algorithm_type'] == 17:
result.extend(utils.int_to_mpi(pubkey['prime']))
result.extend(utils.int_to_mpi(pubkey['group_order']))
result.extend(utils.int_to_mpi(pubkey['group_gen']))
result.extend(utils.int_to_mpi(pubkey['key_value']))
elif pubkey['pub_algorithm_type'] in (16, 20):
result.extend(utils.int_to_mpi(pubkey['prime']))
result.extend(utils.int_to_mpi(pubkey['group_gen']))
result.extend(utils.int_to_mpi(pubkey['key_value']))
return result
def make_public_subkey_packet(version, creation_time, expiration_days,
public_key, pub_algorithm_type):
subkey = {
'type': 14,
'version': version,
}
return make_public_key_packet(version, creation_time, expiration_days,
public_key, pub_algorithm_type,
instance=subkey)
def public_subkey_to_bytes(packet):
return public_key_to_bytes(packet)
def packet_content_to_packet_bytes(version, type_, data):
data_length = len(data)
packet_type = type_
tag = 0x80
result = bytearray()
if version >= 4:
# "An implementation MAY use Partial Body Lengths for data packets, be
# they literal, compressed, or encrypted."
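        # Types 8, 9, 11 and 18 are compressed, symmetrically encrypted,
        # literal and integrity-protected data packets respectively.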
        remaining = data_length
        offset = 0
        allow_partial = type_ in (8, 9, 11, 18)
        # The tag octet is written once; subsequent partial chunks carry
        # only their length octets followed by the chunk data.
        tag += 0x40 + packet_type
        result = bytearray([tag])
        while remaining:
            packet_length_bytes, remaining = utils.new_packet_length_to_bytes(
                data_length - offset,
                allow_partial)
            result.extend(packet_length_bytes)
            chunk_end = data_length - remaining
            result.extend(data[offset:chunk_end])
            offset = chunk_end
else:
tag += (packet_type << 2)
result = bytearray([tag])
result.extend(utils.old_packet_length_to_bytes(data_length))
result.extend(data)
return result
def make_user_id_packet(version, user_id):
packet = {
'type': 13,
'version': version,
}
packet['user'] = user_id
return packet
def user_id_to_bytes(packet):
return bytearray(packet['user'].encode('utf8'))
def make_user_attribute_subpacket(image_data):
subpacket = {}
subpacket['subtype'] = 1
subpacket['header_version'] = 1
subpacket['header_length'] = 16
subpacket['image_format'] = 1
subpacket['image_data'] = image_data
return subpacket
def make_user_attribute_packet(version, subpackets):
packet = {
'type': 17,
'version': version,
'subpackets': subpackets,
}
return packet
def user_attribute_subpacket_to_bytes(subpacket):
    # The first two octets encode the header length (0x10, 0x00 == 16)
    # little-endian.
result = bytearray([
subpacket['header_length'] & 0xff,
(subpacket['header_length'] >> 8) & 0xff,
subpacket['header_version'],
subpacket['image_format'],
])
result.extend([0] * (subpacket['header_length'] - 4))
result.extend(subpacket['image_data'])
return result
def user_attribute_to_bytes(packet):
result = bytearray()
for subpacket in packet['subpackets']:
sub_data = user_attribute_subpacket_to_bytes(subpacket)
packet_length_bytes, _ = utils.new_packet_length_to_bytes(
len(sub_data),
allow_partial=False)
result.extend(packet_length_bytes)
result.append(subpacket['subtype'])
result.extend(sub_data)
return result
def packet_to_content_bytes(packet):
if packet is None:
return None
if packet['type'] == 2:
content = signature_to_bytes(packet)
elif packet['type'] == 6:
content = public_key_to_bytes(packet)
elif packet['type'] == 13:
content = user_id_to_bytes(packet)
elif packet['type'] == 14:
content = public_subkey_to_bytes(packet)
    elif packet['type'] == 17:
        content = user_attribute_to_bytes(packet)
    else:
        raise ValueError('unsupported packet type: %r' % packet['type'])
    return content
def packet_to_bytes(packet):
content = packet_to_content_bytes(packet)
return packet_content_to_packet_bytes(
packet['version'],
packet['type'],
content
)
| gpl-3.0 |
Jannes123/django-oscar | src/oscar/apps/checkout/utils.py | 37 | 7910 | class CheckoutSessionData(object):
"""
    Responsible for marshalling all the checkout session data.
Multi-stage checkouts often require several forms to be submitted and their
data persisted until the final order is placed. This class helps store and
organise checkout form data until it is required to write out the final
order.
"""
SESSION_KEY = 'checkout_data'
def __init__(self, request):
self.request = request
if self.SESSION_KEY not in self.request.session:
self.request.session[self.SESSION_KEY] = {}
def _check_namespace(self, namespace):
"""
Ensure a namespace within the session dict is initialised
"""
if namespace not in self.request.session[self.SESSION_KEY]:
self.request.session[self.SESSION_KEY][namespace] = {}
def _get(self, namespace, key, default=None):
"""
Return a value from within a namespace
"""
self._check_namespace(namespace)
if key in self.request.session[self.SESSION_KEY][namespace]:
return self.request.session[self.SESSION_KEY][namespace][key]
return default
def _set(self, namespace, key, value):
"""
Set a namespaced value
"""
self._check_namespace(namespace)
self.request.session[self.SESSION_KEY][namespace][key] = value
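        # Mutating a nested dict does not mark the Django session as
        # dirty, so flag the modification explicitly.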
self.request.session.modified = True
def _unset(self, namespace, key):
"""
Remove a namespaced value
"""
self._check_namespace(namespace)
if key in self.request.session[self.SESSION_KEY][namespace]:
del self.request.session[self.SESSION_KEY][namespace][key]
self.request.session.modified = True
def _flush_namespace(self, namespace):
"""
Flush a namespace
"""
self.request.session[self.SESSION_KEY][namespace] = {}
self.request.session.modified = True
def flush(self):
"""
Flush all session data
"""
self.request.session[self.SESSION_KEY] = {}
# Guest checkout
# ==============
def set_guest_email(self, email):
self._set('guest', 'email', email)
def get_guest_email(self):
return self._get('guest', 'email')
# Shipping address
# ================
# Options:
# 1. No shipping required (eg digital products)
# 2. Ship to new address (entered in a form)
# 3. Ship to an addressbook address (address chosen from list)
def reset_shipping_data(self):
self._flush_namespace('shipping')
def ship_to_user_address(self, address):
"""
Use an user address (from an address book) as the shipping address.
"""
self.reset_shipping_data()
self._set('shipping', 'user_address_id', address.id)
def ship_to_new_address(self, address_fields):
"""
Use a manually entered address as the shipping address
"""
self._unset('shipping', 'new_address_fields')
phone_number = address_fields.get('phone_number')
if phone_number:
# Phone number is stored as a PhoneNumber instance. As we store
# strings in the session, we need to serialize it.
address_fields = address_fields.copy()
address_fields['phone_number'] = phone_number.as_international
self._set('shipping', 'new_address_fields', address_fields)
def new_shipping_address_fields(self):
"""
Return shipping address fields
"""
return self._get('shipping', 'new_address_fields')
def shipping_user_address_id(self):
"""
Return user address id
"""
return self._get('shipping', 'user_address_id')
# Legacy accessor
user_address_id = shipping_user_address_id
def is_shipping_address_set(self):
"""
Test whether a shipping address has been stored in the session.
This can be from a new address or re-using an existing address.
"""
new_fields = self.new_shipping_address_fields()
has_new_address = new_fields is not None
user_address_id = self.shipping_user_address_id()
has_old_address = user_address_id is not None and user_address_id > 0
return has_new_address or has_old_address
# Shipping method
# ===============
def use_free_shipping(self):
"""
Set "free shipping" code to session
"""
self._set('shipping', 'method_code', '__free__')
def use_shipping_method(self, code):
"""
Set shipping method code to session
"""
self._set('shipping', 'method_code', code)
def shipping_method_code(self, basket):
"""
Return the shipping method code
"""
return self._get('shipping', 'method_code')
def is_shipping_method_set(self, basket):
"""
Test if a valid shipping method is stored in the session
"""
return self.shipping_method_code(basket) is not None
# Billing address fields
# ======================
#
# There are 3 common options:
# 1. Billing address is entered manually through a form
# 2. Billing address is selected from address book
# 3. Billing address is the same as the shipping address
def bill_to_new_address(self, address_fields):
"""
Store address fields for a billing address.
"""
self._flush_namespace('billing')
self._set('billing', 'new_address_fields', address_fields)
def bill_to_user_address(self, address):
"""
Set an address from a user's address book as the billing address
        :param address: The address object
"""
self._flush_namespace('billing')
self._set('billing', 'user_address_id', address.id)
def bill_to_shipping_address(self):
"""
Record fact that the billing address is to be the same as
the shipping address.
"""
self._flush_namespace('billing')
self._set('billing', 'billing_address_same_as_shipping', True)
# Legacy method name
billing_address_same_as_shipping = bill_to_shipping_address
def is_billing_address_same_as_shipping(self):
return self._get('billing', 'billing_address_same_as_shipping', False)
def billing_user_address_id(self):
"""
Return the ID of the user address being used for billing
"""
return self._get('billing', 'user_address_id')
def new_billing_address_fields(self):
"""
Return fields for a billing address
"""
return self._get('billing', 'new_address_fields')
def is_billing_address_set(self):
"""
Test whether a billing address has been stored in the session.
This can be from a new address or re-using an existing address.
"""
if self.is_billing_address_same_as_shipping():
return True
new_fields = self.new_billing_address_fields()
has_new_address = new_fields is not None
user_address_id = self.billing_user_address_id()
has_old_address = user_address_id is not None and user_address_id > 0
return has_new_address or has_old_address
# Payment methods
# ===============
def pay_by(self, method):
self._set('payment', 'method', method)
def payment_method(self):
return self._get('payment', 'method')
# Submission methods
# ==================
def set_order_number(self, order_number):
self._set('submission', 'order_number', order_number)
def get_order_number(self):
return self._get('submission', 'order_number')
def set_submitted_basket(self, basket):
self._set('submission', 'basket_id', basket.id)
def get_submitted_basket_id(self):
return self._get('submission', 'basket_id')
| bsd-3-clause |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/urbansim/datasets/plan_type_group_dataset.py | 2 | 1696 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.datasets.dataset import Dataset
from urbansim.datasets.resource_creator_plantypegroups import ResourceCreatorPlanTypeGroups
from opus_core.misc import DebugPrinter
class PlanTypeGroupDataset(Dataset):
"""Set of plan type groups."""
id_name_default = "group_id"
def __init__(self,
resources=None,
in_storage=None,
out_storage=None,
in_table_name=None,
out_table_name=None,
attributes=None,
id_name=None,
nchunks=None,
other_in_table_names=None,
debuglevel=0
):
debug = DebugPrinter(debuglevel)
debug.print_debug("Creating PlanTypeGroupDataset object.",2)
resources = ResourceCreatorPlanTypeGroups().get_resources_for_dataset(
resources = resources,
in_storage = in_storage,
out_storage = out_storage,
in_table_name = in_table_name,
out_table_name = out_table_name,
attributes = attributes,
id_name = id_name,
id_name_default = self.id_name_default,
nchunks = nchunks,
debug = debug,
)
Dataset.__init__(self,resources = resources)
if isinstance(other_in_table_names,list):
for place_name in other_in_table_names: #load other tables
ds = Dataset(resources = resources)
ds.load_dataset(in_table_name=place_name)
self.connect_datasets(ds)
| gpl-2.0 |
gsnedders/presto-testo | wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/lib/python/autobahn/case/case9_5_3.py | 14 | 1112 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_5_1 import Case9_5_1
class Case9_5_3(Case9_5_1):
DESCRIPTION = """Send text message message with payload of length 1 * 2**20 (1M). Sent out data in chops of 256 octets."""
EXPECTATION = """Receive echo'ed text message (with payload as sent)."""
def setChopSize(self):
self.chopsize = 256
| bsd-3-clause |
tungvx/deploy | .google_appengine/lib/webapp2/tests/extras_protorpc_test.py | 3 | 6332 | # -*- coding: utf-8 -*-
import webapp2
import test_base
from protorpc import messages
from protorpc import remote
from protorpc.webapp import service_handlers
from webapp2_extras import protorpc
# Hello service ---------------------------------------------------------------
class HelloRequest(messages.Message):
my_name = messages.StringField(1, required=True)
class HelloResponse(messages.Message):
hello = messages.StringField(1, required=True)
class HelloService(remote.Service):
@remote.method(HelloRequest, HelloResponse)
def hello(self, request):
return HelloResponse(hello='Hello, %s!' %
request.my_name)
@remote.method(HelloRequest, HelloResponse)
def hello_error(self, request):
raise ValueError()
class AhoyService(remote.Service):
@remote.method(HelloRequest, HelloResponse)
def ahoy(self, request):
return HelloResponse(hello='Ahoy, %s!' %
request.my_name)
class HolaService(remote.Service):
@remote.method(HelloRequest, HelloResponse)
def hola(self, request):
return HelloResponse(hello='Hola, %s!' %
request.my_name)
service_mappings = protorpc.service_mapping([
('/hello', HelloService),
AhoyService,
])
app = webapp2.WSGIApplication(service_mappings)
service_mappings2 = protorpc.service_mapping({
'/hola': HolaService,
})
app2 = webapp2.WSGIApplication(service_mappings2)
# Tests -----------------------------------------------------------------------
class TestProtoRPC(test_base.BaseTestCase):
def test_example(self):
req = webapp2.Request.blank('/hello.hello')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, '{"hello": "Hello, bob!"}')
def test_run_services(self):
import os
os.environ['REQUEST_METHOD'] = 'POST'
os.environ['PATH_INFO'] = '/hello.hello'
protorpc.run_services([('/hello', HelloService)])
def test_ahoy(self):
req = webapp2.Request.blank('/extras_protorpc_test/AhoyService.ahoy')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, '{"hello": "Ahoy, bob!"}')
def test_hola(self):
req = webapp2.Request.blank('/hola.hola')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app2)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, '{"hello": "Hola, bob!"}')
def test_unrecognized_rpc_format(self):
# No content type
req = webapp2.Request.blank('/hello.hello')
req.method = 'POST'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 400)
# Invalid content type
req = webapp2.Request.blank('/hello.hello')
req.method = 'POST'
req.headers['Content-Type'] = 'text/xml'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 415)
# Bad request method
req = webapp2.Request.blank('/hello.hello')
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 405)
def test_invalid_method(self):
# Bad request method
req = webapp2.Request.blank('/hello.ahoy')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 400)
def test_invalid_json(self):
# Bad request method
req = webapp2.Request.blank('/hello.hello')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '"my_name": "bob"'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
def test_response_error(self):
# Bad request method
req = webapp2.Request.blank('/hello.hello_error')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
def test_invalid_paths(self):
# Not starting with slash.
#self.assertRaises(ValueError, protorpc.service_mapping, [
# ('hello', HelloService),
#])
# Trailing slash.
self.assertRaises(ValueError, protorpc.service_mapping, [
('/hello/', HelloService),
])
# Double paths.
self.assertRaises(protorpc.service_handlers.ServiceConfigurationError,
protorpc.service_mapping, [
('/hello', HelloService),
('/hello', HelloService),
]
)
def test_lazy_services(self):
service_mappings = protorpc.service_mapping([
('/bonjour', 'resources.protorpc_services.BonjourService'),
'resources.protorpc_services.CiaoService',
])
app = webapp2.WSGIApplication(service_mappings)
# Bonjour
req = webapp2.Request.blank('/bonjour.bonjour')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, '{"hello": "Bonjour, bob!"}')
# Ciao
req = webapp2.Request.blank('/resources/protorpc_services/CiaoService.ciao')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = '{"my_name": "bob"}'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, '{"hello": "Ciao, bob!"}')
if __name__ == '__main__':
test_base.main()
| apache-2.0 |
miptliot/edx-platform | lms/djangoapps/instructor_task/tests/test_base.py | 3 | 16734 | """
Base test classes for LMS instructor-initiated background tasks
"""
import json
# pylint: disable=attribute-defined-outside-init
import os
import shutil
from tempfile import mkdtemp
from uuid import uuid4
import unicodecsv
from celery.states import FAILURE, SUCCESS
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from mock import Mock, patch
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.model_data import StudentModule
from courseware.tests.tests import LoginEnrollmentTestCase
from lms.djangoapps.instructor_task.api_helper import encode_problem_and_student_input
from lms.djangoapps.instructor_task.models import PROGRESS, QUEUING, ReportStore
from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
from lms.djangoapps.instructor_task.views import instructor_task_status
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from openedx.core.lib.url_utils import quote_slashes
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'test_course'
TEST_COURSE_NUMBER = '1.23x'
TEST_COURSE_KEY = SlashSeparatedCourseKey(TEST_COURSE_ORG, TEST_COURSE_NUMBER, TEST_COURSE_NAME)
TEST_CHAPTER_NAME = "Section"
TEST_SECTION_NAME = "Subsection"
TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'
OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'
class InstructorTaskTestCase(CacheIsolationTestCase):
"""
Tests API and view methods that involve the reporting of status for background tasks.
"""
def setUp(self):
super(InstructorTaskTestCase, self).setUp()
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
self.problem_url = InstructorTaskTestCase.problem_location("test_urlname")
@staticmethod
def problem_location(problem_url_name):
"""
Create an internal location for a test problem.
"""
return TEST_COURSE_KEY.make_usage_key('problem', problem_url_name)
def _create_entry(self, task_state=QUEUING, task_output=None, student=None):
"""Creates a InstructorTask entry for testing."""
task_id = str(uuid4())
progress_json = json.dumps(task_output) if task_output is not None else None
task_input, task_key = encode_problem_and_student_input(self.problem_url, student)
instructor_task = InstructorTaskFactory.create(course_id=TEST_COURSE_KEY,
requester=self.instructor,
task_input=json.dumps(task_input),
task_key=task_key,
task_id=task_id,
task_state=task_state,
task_output=progress_json)
return instructor_task
def _create_failure_entry(self):
"""Creates a InstructorTask entry representing a failed task."""
# view task entry for task failure
progress = {'message': TEST_FAILURE_MESSAGE,
'exception': TEST_FAILURE_EXCEPTION,
}
return self._create_entry(task_state=FAILURE, task_output=progress)
def _create_success_entry(self, student=None):
"""Creates a InstructorTask entry representing a successful task."""
return self._create_progress_entry(student, task_state=SUCCESS)
def _create_progress_entry(self, student=None, task_state=PROGRESS):
"""Creates a InstructorTask entry representing a task in progress."""
progress = {'attempted': 3,
'succeeded': 2,
'total': 5,
'action_name': 'rescored',
}
return self._create_entry(task_state=task_state, task_output=progress, student=student)
class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Base test class for InstructorTask-related tests that require
the setup of a course.
"""
course = None
current_user = None
def initialize_course(self, course_factory_kwargs=None):
"""
Create a course in the store, with a chapter and section.
Arguments:
course_factory_kwargs (dict): kwargs dict to pass to
CourseFactory.create()
"""
self.module_store = modulestore()
# Create the course
course_args = {
"org": TEST_COURSE_ORG,
"number": TEST_COURSE_NUMBER,
"display_name": TEST_COURSE_NAME
}
if course_factory_kwargs is not None:
course_args.update(course_factory_kwargs)
self.course = CourseFactory.create(**course_args)
self.add_course_content()
def add_course_content(self):
"""
Add a chapter and a sequential to the current course.
"""
# Add a chapter to the course
self.chapter = ItemFactory.create(
parent_location=self.course.location,
display_name=TEST_CHAPTER_NAME,
)
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(
parent_location=self.chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=TEST_SECTION_NAME,
)
@staticmethod
def get_user_email(username):
"""Generate email address based on username"""
return u'{0}@test.com'.format(username)
def login_username(self, username):
"""Login the user, given the `username`."""
if self.current_user != username:
self.logout()
user_email = User.objects.get(username=username).email
self.login(user_email, "test")
self.current_user = username
def _create_user(self, username, email=None, is_staff=False, mode='honor', enrollment_active=True):
"""Creates a user and enrolls them in the test course."""
if email is None:
email = InstructorTaskCourseTestCase.get_user_email(username)
thisuser = UserFactory.create(username=username, email=email, is_staff=is_staff)
CourseEnrollmentFactory.create(user=thisuser, course_id=self.course.id, mode=mode, is_active=enrollment_active)
return thisuser
def create_instructor(self, username, email=None):
"""Creates an instructor for the test course."""
return self._create_user(username, email, is_staff=True)
def create_student(self, username, email=None, mode='honor', enrollment_active=True):
"""Creates a student for the test course."""
return self._create_user(username, email, is_staff=False, mode=mode, enrollment_active=enrollment_active)
@staticmethod
def get_task_status(task_id):
"""Use api method to fetch task status, using mock request."""
mock_request = Mock()
mock_request.GET = mock_request.POST = {'task_id': task_id}
response = instructor_task_status(mock_request)
status = json.loads(response.content)
return status
def create_task_request(self, requester_username):
"""Generate request that can be used for submitting tasks"""
request = Mock()
request.user = User.objects.get(username=requester_username)
request.get_host = Mock(return_value="testhost")
request.META = {'REMOTE_ADDR': '0:0:0:0', 'SERVER_NAME': 'testhost'}
request.is_secure = Mock(return_value=False)
return request
class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase):
"""
Base test class for InstructorTask-related tests that require
the setup of a course and problem in order to access StudentModule state.
"""
@staticmethod
def problem_location(problem_url_name, course_key=None):
"""
Create an internal location for a test problem.
"""
if "i4x:" in problem_url_name:
return Location.from_deprecated_string(problem_url_name)
elif course_key:
return course_key.make_usage_key('problem', problem_url_name)
else:
return TEST_COURSE_KEY.make_usage_key('problem', problem_url_name)
def _option_problem_factory_args(self, correct_answer=OPTION_1, num_inputs=1, num_responses=2):
"""
Returns the factory args for the option problem type.
"""
return {
'question_text': 'The correct answer is {0}'.format(correct_answer),
'options': [OPTION_1, OPTION_2],
'correct_option': correct_answer,
'num_responses': num_responses,
'num_inputs': num_inputs,
}
def define_option_problem(self, problem_url_name, parent=None, **kwargs):
"""Create the problem definition so the answer is Option 1"""
if parent is None:
parent = self.problem_section
factory = OptionResponseXMLFactory()
factory_args = self._option_problem_factory_args()
problem_xml = factory.build_xml(**factory_args)
ItemFactory.create(parent_location=parent.location,
parent=parent,
category="problem",
display_name=problem_url_name,
data=problem_xml,
**kwargs)
def redefine_option_problem(self, problem_url_name, correct_answer=OPTION_1, num_inputs=1, num_responses=2):
"""Change the problem definition so the answer is Option 2"""
factory = OptionResponseXMLFactory()
factory_args = self._option_problem_factory_args(correct_answer, num_inputs, num_responses)
problem_xml = factory.build_xml(**factory_args)
location = InstructorTaskTestCase.problem_location(problem_url_name)
item = self.module_store.get_item(location)
with self.module_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, location.course_key):
item.data = problem_xml
self.module_store.update_item(item, self.user.id)
self.module_store.publish(location, self.user.id)
def get_student_module(self, username, descriptor):
"""Get StudentModule object for test course, given the `username` and the problem's `descriptor`."""
return StudentModule.objects.get(course_id=self.course.id,
student=User.objects.get(username=username),
module_type=descriptor.location.category,
module_state_key=descriptor.location,
)
def submit_student_answer(self, username, problem_url_name, responses):
"""
Use ajax interface to submit a student answer.
Assumes the input list of responses has two values.
"""
def get_input_id(response_id):
"""Creates input id using information about the test course and the current problem."""
# Note that this is a capa-specific convention. The form is a version of the problem's
# URL, modified so that it can be easily stored in html, prepended with "input-" and
# appended with a sequence identifier for the particular response the input goes to.
course_key = self.course.id
return u'input_i4x-{0}-{1}-problem-{2}_{3}'.format(
course_key.org.replace(u'.', u'_'),
course_key.course.replace(u'.', u'_'),
problem_url_name,
response_id
)
# make sure that the requested user is logged in, so that the ajax call works
# on the right problem:
self.login_username(username)
# make ajax call:
modx_url = reverse('xblock_handler', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(
InstructorTaskModuleTestCase.problem_location(problem_url_name, self.course.id).to_deprecated_string()
),
'handler': 'xmodule_handler',
'suffix': 'problem_check',
})
# assign correct identifier to each response.
resp = self.client.post(modx_url, {
get_input_id(u'{}_1'.format(index)): response for index, response in enumerate(responses, 2)
})
return resp
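# Illustrative note (a sketch, not part of the original tests): with the test
# course key 'edx/1.23x/test_course' and a hypothetical problem named 'H1P1',
# get_input_id(u'2_1') above would build an id such as
# 'input_i4x-edx-1_23x-problem-H1P1_2_1', i.e. 'input_i4x-', the org and
# course with '.' replaced by '_', the problem url name, and the
# '<response>_<input>' suffix identifying which answer field the value fills.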
class TestReportMixin(object):
"""
Cleans up after tests that place files in the reports directory.
"""
def setUp(self):
def clean_up_tmpdir():
"""Remove temporary directory created for instructor task models."""
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
super(TestReportMixin, self).setUp()
# Ensure that working with the temp directories in tests is thread safe
# by creating a unique temporary directory for each testcase.
self.tmp_dir = mkdtemp()
mock_grades_download = {'STORAGE_TYPE': 'localfs', 'BUCKET': 'test-grades', 'ROOT_PATH': self.tmp_dir}
self.grades_patch = patch.dict('django.conf.settings.GRADES_DOWNLOAD', mock_grades_download)
self.grades_patch.start()
self.addCleanup(self.grades_patch.stop)
mock_fin_report = {'STORAGE_TYPE': 'localfs', 'BUCKET': 'test-financial-reports', 'ROOT_PATH': self.tmp_dir}
self.reports_patch = patch.dict('django.conf.settings.FINANCIAL_REPORTS', mock_fin_report)
self.reports_patch.start()
self.addCleanup(self.reports_patch.stop)
self.addCleanup(clean_up_tmpdir)
def verify_rows_in_csv(self, expected_rows, file_index=0, verify_order=True, ignore_other_columns=False):
"""
Verify that the last ReportStore CSV contains the expected content.
Arguments:
expected_rows (iterable): An iterable of dictionaries,
where each dict represents a row of data in the last
ReportStore CSV. Each dict maps keys from the CSV
header to values in that row's corresponding cell.
file_index (int): Describes which report store file to
open. Files are ordered by last modified date, and 0
corresponds to the most recently modified file.
verify_order (boolean): When True (default), we verify that
both the content and order of `expected_rows` matches
the actual csv rows. When False, we only verify that
the content matches.
ignore_other_columns (boolean): When True, we verify that `expected_rows`
contain data which is the subset of actual csv rows.
"""
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(self.course.id)[file_index][0]
report_path = report_store.path_to(self.course.id, report_csv_filename)
with report_store.storage.open(report_path) as csv_file:
# Expand the dict reader generator so we don't lose its content
csv_rows = [row for row in unicodecsv.DictReader(csv_file)]
if ignore_other_columns:
csv_rows = [
{key: row.get(key) for key in expected_rows[index].keys()} for index, row in enumerate(csv_rows)
]
if verify_order:
self.assertEqual(csv_rows, expected_rows)
else:
self.assertItemsEqual(csv_rows, expected_rows)
def get_csv_row_with_headers(self):
"""
Helper function to return a list of the column names from the CSV file (the first row)
"""
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(self.course.id)[0][0]
report_path = report_store.path_to(self.course.id, report_csv_filename)
with report_store.storage.open(report_path) as csv_file:
rows = unicodecsv.reader(csv_file, encoding='utf-8')
return rows.next()
| agpl-3.0 |
kmoocdev2/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_fix_not_found.py | 87 | 2147 | """
Tests for the fix_not_found management command
"""
from django.core.management import CommandError, call_command
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class TestFixNotFound(ModuleStoreTestCase):
"""
Tests for the fix_not_found management command
"""
def test_no_args(self):
"""
Test fix_not_found command with no arguments
"""
with self.assertRaisesRegexp(CommandError, "Error: too few arguments"):
call_command('fix_not_found')
def test_fix_not_found_non_split(self):
"""
The management command doesn't work on non split courses
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
with self.assertRaisesRegexp(CommandError, "The owning modulestore does not support this command."):
call_command("fix_not_found", unicode(course.id))
def test_fix_not_found(self):
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
ItemFactory.create(category='chapter', parent_location=course.location)
# get course again in order to update its children list
course = self.store.get_course(course.id)
# create a dangling usage key that we'll add to the course's children list
dangling_pointer = course.id.make_usage_key('chapter', 'DanglingPointer')
course.children.append(dangling_pointer)
self.store.update_item(course, self.user.id)
# the course block should now point to two children, one of which
# doesn't actually exist
self.assertEqual(len(course.children), 2)
self.assertIn(dangling_pointer, course.children)
call_command("fix_not_found", unicode(course.id))
# make sure the dangling pointer was removed from
# the course block's children
course = self.store.get_course(course.id)
self.assertEqual(len(course.children), 1)
self.assertNotIn(dangling_pointer, course.children)
| agpl-3.0 |
mvaled/OpenUpgrade | addons/gamification/models/badge.py | 287 | 13760 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from datetime import date
import logging
_logger = logging.getLogger(__name__)
class gamification_badge_user(osv.Model):
"""User having received a badge"""
_name = 'gamification.badge.user'
_description = 'Gamification user badge'
_order = "create_date desc"
_rec_name = "badge_name"
_columns = {
'user_id': fields.many2one('res.users', string="User", required=True, ondelete="cascade"),
'sender_id': fields.many2one('res.users', string="Sender", help="The user who has sent the badge"),
'badge_id': fields.many2one('gamification.badge', string='Badge', required=True, ondelete="cascade"),
'challenge_id': fields.many2one('gamification.challenge', string='Challenge originating', help="If this badge was rewarded through a challenge"),
'comment': fields.text('Comment'),
'badge_name': fields.related('badge_id', 'name', type="char", string="Badge Name"),
'create_date': fields.datetime('Created', readonly=True),
'create_uid': fields.many2one('res.users', string='Creator', readonly=True),
}
def _send_badge(self, cr, uid, ids, context=None):
"""Send a notification to a user for receiving a badge
Does not verify constraints on badge granting.
The users are added to the owner_ids (create badge_user if needed)
The stats counters are incremented
:param ids: list(int) of badge users that will receive the badge
"""
res = True
temp_obj = self.pool.get('email.template')
user_obj = self.pool.get('res.users')
template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_badge_received', context)
for badge_user in self.browse(cr, uid, ids, context=context):
body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.badge.user', badge_user.id, context=context)
res = user_obj.message_post(
cr, uid, badge_user.user_id.id,
body=body_html,
subtype='gamification.mt_badge_granted',
partner_ids=[badge_user.user_id.partner_id.id],
context=context)
return res
def create(self, cr, uid, vals, context=None):
self.pool.get('gamification.badge').check_granting(cr, uid, badge_id=vals.get('badge_id'), context=context)
return super(gamification_badge_user, self).create(cr, uid, vals, context=context)
class gamification_badge(osv.Model):
"""Badge object that users can send and receive"""
CAN_GRANT = 1
NOBODY_CAN_GRANT = 2
USER_NOT_VIP = 3
BADGE_REQUIRED = 4
TOO_MANY = 5
_name = 'gamification.badge'
_description = 'Gamification badge'
_inherit = ['mail.thread']
def _get_owners_info(self, cr, uid, ids, name, args, context=None):
"""Return:
the list of unique res.users ids having received this badge
the total number of times this badge was granted
the total number of users this badge was granted to
"""
result = dict((res_id, {'stat_count': 0, 'stat_count_distinct': 0, 'unique_owner_ids': []}) for res_id in ids)
cr.execute("""
SELECT badge_id, count(user_id) as stat_count,
count(distinct(user_id)) as stat_count_distinct,
array_agg(distinct(user_id)) as unique_owner_ids
FROM gamification_badge_user
WHERE badge_id in %s
GROUP BY badge_id
""", (tuple(ids),))
for (badge_id, stat_count, stat_count_distinct, unique_owner_ids) in cr.fetchall():
result[badge_id] = {
'stat_count': stat_count,
'stat_count_distinct': stat_count_distinct,
'unique_owner_ids': unique_owner_ids,
}
return result
def _get_badge_user_stats(self, cr, uid, ids, name, args, context=None):
"""Return stats related to badge users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool.get('gamification.badge.user')
first_month_day = date.today().replace(day=1).strftime(DF)
for bid in ids:
result[bid] = {
'stat_my': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid)], context=context, count=True),
'stat_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_date', '>=', first_month_day)], context=context, count=True),
'stat_my_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True),
'stat_my_monthly_sending': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_uid', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True)
}
return result
def _remaining_sending_calc(self, cr, uid, ids, name, args, context=None):
"""Computes the number of badges remaining the user can send
0 if not allowed or no remaining
integer if limited sending
-1 if infinite (should not be displayed)
"""
result = dict.fromkeys(ids, False)
for badge in self.browse(cr, uid, ids, context=context):
if self._can_grant_badge(cr, uid, badge.id, context) != 1:
# if the user cannot grant this badge at all, result is 0
result[badge.id] = 0
elif not badge.rule_max:
# if there is no limitation, -1 is returned which means 'infinite'
result[badge.id] = -1
else:
result[badge.id] = badge.rule_max_number - badge.stat_my_monthly_sending
return result
_columns = {
'name': fields.char('Badge', required=True, translate=True),
'description': fields.text('Description'),
'image': fields.binary("Image", help="This field holds the image used for the badge, limited to 256x256"),
'rule_auth': fields.selection([
('everyone', 'Everyone'),
('users', 'A selected list of users'),
('having', 'People having some badges'),
('nobody', 'No one, assigned through challenges'),
],
string="Allowance to Grant",
help="Who can grant this badge",
required=True),
'rule_auth_user_ids': fields.many2many('res.users', 'rel_badge_auth_users',
string='Authorized Users',
help="Only these people can give this badge"),
'rule_auth_badge_ids': fields.many2many('gamification.badge',
'gamification_badge_rule_badge_rel', 'badge1_id', 'badge2_id',
string='Required Badges',
help="Only the people having these badges can give this badge"),
'rule_max': fields.boolean('Monthly Limited Sending',
help="Check to set a monthly limit per person of sending this badge"),
'rule_max_number': fields.integer('Limitation Number',
help="The maximum number of time this badge can be sent per month per person."),
'stat_my_monthly_sending': fields.function(_get_badge_user_stats,
type="integer",
string='My Monthly Sending Total',
multi='badge_users',
help="The number of time the current user has sent this badge this month."),
'remaining_sending': fields.function(_remaining_sending_calc, type='integer',
string='Remaining Sending Allowed', help="If a maximum is set"),
'challenge_ids': fields.one2many('gamification.challenge', 'reward_id',
string="Reward of Challenges"),
'goal_definition_ids': fields.many2many('gamification.goal.definition', 'badge_unlocked_definition_rel',
string='Rewarded by',
help="The users that have succeeded theses goals will receive automatically the badge."),
'owner_ids': fields.one2many('gamification.badge.user', 'badge_id',
string='Owners', help='The list of instances of this badge granted to users'),
'active': fields.boolean('Active'),
'unique_owner_ids': fields.function(_get_owners_info,
string='Unique Owners',
help="The list of unique users having received this badge.",
multi='unique_users',
type="many2many", relation="res.users"),
'stat_count': fields.function(_get_owners_info, string='Total',
type="integer",
multi='unique_users',
help="The number of time this badge has been received."),
'stat_count_distinct': fields.function(_get_owners_info,
type="integer",
string='Number of users',
multi='unique_users',
help="The number of time this badge has been received by unique users."),
'stat_this_month': fields.function(_get_badge_user_stats,
type="integer",
string='Monthly total',
multi='badge_users',
help="The number of time this badge has been received this month."),
'stat_my': fields.function(_get_badge_user_stats, string='My Total',
type="integer",
multi='badge_users',
help="The number of time the current user has received this badge."),
'stat_my_this_month': fields.function(_get_badge_user_stats,
type="integer",
string='My Monthly Total',
multi='badge_users',
help="The number of time the current user has received this badge this month."),
}
_defaults = {
'rule_auth': 'everyone',
'active': True,
}
def check_granting(self, cr, uid, badge_id, context=None):
"""Check the user 'uid' can grant the badge 'badge_id' and raise the appropriate exception
if not
Do not check for SUPERUSER_ID
"""
status_code = self._can_grant_badge(cr, uid, badge_id, context=context)
if status_code == self.CAN_GRANT:
return True
elif status_code == self.NOBODY_CAN_GRANT:
raise osv.except_osv(_('Warning!'), _('This badge cannot be sent by users.'))
elif status_code == self.USER_NOT_VIP:
raise osv.except_osv(_('Warning!'), _('You are not in the user allowed list.'))
elif status_code == self.BADGE_REQUIRED:
raise osv.except_osv(_('Warning!'), _('You do not have the required badges.'))
elif status_code == self.TOO_MANY:
raise osv.except_osv(_('Warning!'), _('You have already sent this badge too many times this month.'))
else:
_logger.exception("Unknown badge status code: %d" % int(status_code))
return False
def _can_grant_badge(self, cr, uid, badge_id, context=None):
"""Check if a user can grant a badge to another user
:param uid: the id of the res.users trying to send the badge
:param badge_id: the granted badge id
:return: integer representing the permission.
"""
if uid == SUPERUSER_ID:
return self.CAN_GRANT
badge = self.browse(cr, uid, badge_id, context=context)
if badge.rule_auth == 'nobody':
return self.NOBODY_CAN_GRANT
elif badge.rule_auth == 'users' and uid not in [user.id for user in badge.rule_auth_user_ids]:
return self.USER_NOT_VIP
elif badge.rule_auth == 'having':
# search() returns gamification.badge.user record ids, so map them to the
# badge ids they refer to before comparing against the required badges.
badge_user_obj = self.pool.get('gamification.badge.user')
badge_user_ids = badge_user_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
all_user_badges = [bu.badge_id.id for bu in badge_user_obj.browse(cr, uid, badge_user_ids, context=context)]
for required_badge in badge.rule_auth_badge_ids:
if required_badge.id not in all_user_badges:
return self.BADGE_REQUIRED
if badge.rule_max and badge.stat_my_monthly_sending >= badge.rule_max_number:
return self.TOO_MANY
# badge.rule_auth == 'everyone' -> no check
return self.CAN_GRANT
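# Summary of the outcomes above (illustrative, mirrors the constants defined
# on this class):
#   rule_auth == 'nobody'                          -> NOBODY_CAN_GRANT (2)
#   rule_auth == 'users' and uid not whitelisted   -> USER_NOT_VIP (3)
#   rule_auth == 'having' and a badge is missing   -> BADGE_REQUIRED (4)
#   monthly sending limit reached                  -> TOO_MANY (5)
#   otherwise (including SUPERUSER_ID)             -> CAN_GRANT (1)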
def check_progress(self, cr, uid, context=None):
try:
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'badge_hidden')
except ValueError:
return True
badge_user_obj = self.pool.get('gamification.badge.user')
if not badge_user_obj.search(cr, uid, [('user_id', '=', uid), ('badge_id', '=', res_id)], context=context):
values = {
'user_id': uid,
'badge_id': res_id,
}
badge_user_obj.create(cr, SUPERUSER_ID, values, context=context)
return True
| agpl-3.0 |
hstefan/grpc | test/http2_test/http2_base_server.py | 13 | 9323 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import messages_pb2
import struct
import h2
import h2.connection
import twisted
import twisted.internet
import twisted.internet.protocol
_READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5
_MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
def __init__(self):
self._conn = h2.connection.H2Connection(client_side=False)
self._recv_buffer = {}
self._handlers = {}
self._handlers['ConnectionMade'] = self.on_connection_made_default
self._handlers['DataReceived'] = self.on_data_received_default
self._handlers['WindowUpdated'] = self.on_window_update_default
self._handlers['RequestReceived'] = self.on_request_received_default
self._handlers['SendDone'] = self.on_send_done_default
self._handlers['ConnectionLost'] = self.on_connection_lost
self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default
self._stream_status = {}
self._send_remaining = {}
self._outstanding_pings = 0
def set_handlers(self, handlers):
self._handlers = handlers
def connectionMade(self):
self._handlers['ConnectionMade']()
def connectionLost(self, reason):
self._handlers['ConnectionLost'](reason)
def on_connection_made_default(self):
logging.info('Connection Made')
self._conn.initiate_connection()
self.transport.setTcpNoDelay(True)
self.transport.write(self._conn.data_to_send())
def on_connection_lost(self, reason):
logging.info('Disconnected %s' % reason)
def dataReceived(self, data):
try:
events = self._conn.receive_data(data)
except h2.exceptions.ProtocolError:
# this try/except block catches exceptions due to race between sending
# GOAWAY and processing a response in flight.
return
data = self._conn.data_to_send()  # data_to_send() drains the buffer, so call it only once
if data:
self.transport.write(data)
for event in events:
if isinstance(event, h2.events.RequestReceived) and self._handlers.has_key('RequestReceived'):
logging.info('RequestReceived Event for stream: %d' % event.stream_id)
self._handlers['RequestReceived'](event)
elif isinstance(event, h2.events.DataReceived) and self._handlers.has_key('DataReceived'):
logging.info('DataReceived Event for stream: %d' % event.stream_id)
self._handlers['DataReceived'](event)
elif isinstance(event, h2.events.WindowUpdated) and self._handlers.has_key('WindowUpdated'):
logging.info('WindowUpdated Event for stream: %d' % event.stream_id)
self._handlers['WindowUpdated'](event)
elif isinstance(event, h2.events.PingAcknowledged) and self._handlers.has_key('PingAcknowledged'):
logging.info('PingAcknowledged Event')
self._handlers['PingAcknowledged'](event)
self.transport.write(self._conn.data_to_send())
def on_ping_acknowledged_default(self, event):
logging.info('ping acknowledged')
self._outstanding_pings -= 1
def on_data_received_default(self, event):
self._conn.acknowledge_received_data(len(event.data), event.stream_id)
self._recv_buffer[event.stream_id] += event.data
def on_request_received_default(self, event):
self._recv_buffer[event.stream_id] = ''
self._stream_id = event.stream_id
self._stream_status[event.stream_id] = True
self._conn.send_headers(
stream_id=event.stream_id,
headers=[
(':status', '200'),
('content-type', 'application/grpc'),
('grpc-encoding', 'identity'),
('grpc-accept-encoding', 'identity,deflate,gzip'),
],
)
self.transport.write(self._conn.data_to_send())
def on_window_update_default(self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
# try to resume sending on all active streams (update might be for connection)
for stream_id in self._send_remaining:
self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
def send_reset_stream(self):
self._conn.reset_stream(self._stream_id)
self.transport.write(self._conn.data_to_send())
def setup_send(self, data_to_send, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
logging.info('Setting up data to send for stream_id: %d' % stream_id)
self._send_remaining[stream_id] = len(data_to_send)
self._send_offset = 0
self._data_to_send = data_to_send
self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
def default_send(self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
if not self._send_remaining.has_key(stream_id):
# not setup to send data yet
return
while self._send_remaining[stream_id] > 0:
lfcw = self._conn.local_flow_control_window(stream_id)
padding_bytes = pad_length + 1 if pad_length is not None else 0
if lfcw - padding_bytes <= 0:
logging.info('Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet' % (stream_id, lfcw, padding_bytes))
break
chunk_size = min(lfcw - padding_bytes, read_chunk_size)
bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes' %
(lfcw, self._send_offset, self._send_offset + bytes_to_send + padding_bytes,
stream_id, padding_bytes))
# The receiver might allow sending frames larger than the http2 minimum
# max frame size (16384), but this test should never send more than 16384
# for simplicity (which is always legal).
if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
raise ValueError("overload: sending %d" % (bytes_to_send + padding_bytes))
data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send]
try:
self._conn.send_data(stream_id, data, end_stream=False, pad_length=pad_length)
except h2.exceptions.ProtocolError:
logging.info('Stream %d is closed' % stream_id)
break
self._send_remaining[stream_id] -= bytes_to_send
self._send_offset += bytes_to_send
if self._send_remaining[stream_id] == 0:
self._handlers['SendDone'](stream_id)
def default_ping(self):
logging.info('sending ping')
self._outstanding_pings += 1
self._conn.ping(b'\x00'*8)
self.transport.write(self._conn.data_to_send())
def on_send_done_default(self, stream_id):
if self._stream_status[stream_id]:
self._stream_status[stream_id] = False
self.default_send_trailer(stream_id)
else:
logging.error('Stream %d is already closed' % stream_id)
def default_send_trailer(self, stream_id):
logging.info('Sending trailer for stream id %d' % stream_id)
self._conn.send_headers(stream_id,
headers=[ ('grpc-status', '0') ],
end_stream=True
)
self.transport.write(self._conn.data_to_send())
@staticmethod
def default_response_data(response_size):
sresp = messages_pb2.SimpleResponse()
sresp.payload.body = b'\x00'*response_size
serialized_resp_proto = sresp.SerializeToString()
response_data = b'\x00' + struct.pack('i', len(serialized_resp_proto))[::-1] + serialized_resp_proto
return response_data
def parse_received_data(self, stream_id):
""" returns a grpc framed string of bytes containing response proto of the size
asked in request """
recv_buffer = self._recv_buffer[stream_id]
grpc_msg_size = struct.unpack('i',recv_buffer[1:5][::-1])[0]
if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size:
return None
req_proto_str = recv_buffer[5:5+grpc_msg_size]
sr = messages_pb2.SimpleRequest()
sr.ParseFromString(req_proto_str)
logging.info('Parsed simple request for stream %d' % stream_id)
return sr
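# Minimal sketch of the gRPC framing used by default_response_data() and
# parse_received_data() above (assuming uncompressed messages, so the flag
# byte is 0x00, and a little-endian host, where struct.pack('i', n)[::-1]
# reverses the native int into big-endian network order):
#
#   payload = b'hello'
#   framed = b'\x00' + struct.pack('i', len(payload))[::-1] + payload
#   assert struct.unpack('i', framed[1:5][::-1])[0] == len(payload)
#   assert framed[_GRPC_HEADER_SIZE:] == payload  # header is 5 bytes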
| bsd-3-clause |
chemiron/aiopool | aiopool/fork.py | 1 | 6082 | import asyncio
import logging
import os
import signal
from struct import Struct
import time
from .base import (WorkerProcess, ChildProcess,
IDLE_CHECK, IDLE_TIME)
MSG_HEAD = 0x0
MSG_PING = 0x1
MSG_PONG = 0x2
MSG_CLOSE = 0x3
PACK_MSG = Struct('!BB').pack
UNPACK_MSG = Struct('!BB').unpack
logger = logging.getLogger(__name__)
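# Illustrative round trip of the 2-byte control frames exchanged over the
# parent/child pipes below (a sketch, not part of the original module):
#
#   frame = PACK_MSG(MSG_HEAD, MSG_PING)   # -> b'\x00\x01'
#   hdr, msg = UNPACK_MSG(frame)
#   assert hdr == MSG_HEAD and msg == MSG_PING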
class ConnectionClosedError(Exception):
pass
@asyncio.coroutine
def connect_write_pipe(file):
loop = asyncio.get_event_loop()
transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
return PipeWriter(transport)
@asyncio.coroutine
def connect_read_pipe(file):
loop = asyncio.get_event_loop()
pipe_reader = PipeReader(loop=loop)
transport, _ = yield from loop.connect_read_pipe(
lambda: PipeReadProtocol(pipe_reader), file)
pipe_reader.transport = transport
return pipe_reader
class PipeWriter:
def __init__(self, transport):
self.transport = transport
def _send(self, msg):
self.transport.write(PACK_MSG(MSG_HEAD, msg))
def ping(self):
self._send(MSG_PING)
def pong(self):
self._send(MSG_PONG)
def stop(self):
self._send(MSG_CLOSE)
def close(self):
if self.transport is not None:
self.transport.close()
class PipeReadProtocol(asyncio.Protocol):
def __init__(self, reader):
self.reader = reader
def data_received(self, data):
self.reader.feed(data)
def connection_lost(self, exc):
self.reader.close()
class PipeReader:
closed = False
transport = None
def __init__(self, loop):
self.loop = loop
self._waiters = asyncio.Queue()
def close(self):
self.closed = True
while not self._waiters.empty():
waiter = self._waiters.get_nowait()
if not waiter.done():
waiter.set_exception(ConnectionClosedError())
if self.transport is not None:
self.transport.close()
def feed(self, data):
asyncio.async(self._feed_waiter(data))
@asyncio.coroutine
def _feed_waiter(self, data):
waiter = yield from self._waiters.get()
waiter.set_result(data)
@asyncio.coroutine
def read(self):
if self.closed:
raise ConnectionClosedError()
waiter = asyncio.Future(loop=self.loop)
yield from self._waiters.put(waiter)
data = yield from waiter
hdr, msg = UNPACK_MSG(data)
if hdr == MSG_HEAD:
return msg
class ForkChild(ChildProcess):
_heartbeat_task = None
def __init__(self, parent_read, parent_write, loader, **options):
ChildProcess.__init__(self, loader, **options)
self.parent_read = parent_read
self.parent_write = parent_write
@asyncio.coroutine
def on_start(self):
self._heartbeat_task = asyncio.Task(self.heartbeat())
def stop(self):
if self._heartbeat_task is not None:
self._heartbeat_task.cancel()
ChildProcess.stop(self)
@asyncio.coroutine
def heartbeat(self):
# setup pipes
reader = yield from connect_read_pipe(
os.fdopen(self.parent_read, 'rb'))
writer = yield from connect_write_pipe(
os.fdopen(self.parent_write, 'wb'))
while True:
try:
msg = yield from reader.read()
except ConnectionClosedError:
logger.info('Parent is dead, {} stopping...'
''.format(os.getpid()))
break
if msg == MSG_PING:
writer.pong()
elif msg == MSG_CLOSE:  # msg is the unpacked int payload; it has no .tp attribute
break
reader.close()
writer.close()
self.stop()
class ForkWorker(WorkerProcess):
pid = ping = None
reader = writer = None
chat_task = heartbeat_task = None
def start_child(self):
parent_read, child_write = os.pipe()
child_read, parent_write = os.pipe()
pid = os.fork()
if pid:
# parent
os.close(parent_read)
os.close(parent_write)
asyncio.async(self.connect(pid, child_write, child_read))
else:
# child
os.close(child_write)
os.close(child_read)
# cleanup after fork
asyncio.set_event_loop(None)
# setup process
process = ForkChild(parent_read, parent_write, self.loader)
process.start()
def kill_child(self):
self.chat_task.cancel()
self.heartbeat_task.cancel()
self.reader.close()
self.writer.close()
try:
os.kill(self.pid, signal.SIGTERM)
os.waitpid(self.pid, 0)
except ProcessLookupError:
pass
@asyncio.coroutine
def heartbeat(self, writer):
idle_time = self.options.get('idle_time', IDLE_TIME)
idle_check = self.options.get('idle_check', IDLE_CHECK)
while True:
yield from asyncio.sleep(idle_check)
if (time.monotonic() - self.ping) < idle_time:
writer.ping()
else:
self.restart()
return
@asyncio.coroutine
def chat(self, reader):
while True:
try:
msg = yield from reader.read()
except ConnectionClosedError:
self.restart()
return
if msg == MSG_PONG:
self.ping = time.monotonic()
@asyncio.coroutine
def connect(self, pid, up_write, down_read):
# setup pipes
reader = yield from connect_read_pipe(
os.fdopen(down_read, 'rb'))
writer = yield from connect_write_pipe(
os.fdopen(up_write, 'wb'))
# store info
self.pid = pid
self.ping = time.monotonic()
self.reader = reader
self.writer = writer
self.chat_task = asyncio.Task(self.chat(reader))
self.heartbeat_task = asyncio.Task(self.heartbeat(writer))
| mit |
Shaps/ansible | lib/ansible/module_utils/facts/virtual/hpux.py | 199 | 2486 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def get_virtual_facts(self):
virtual_facts = {}
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = self.module.run_command("/usr/sbin/vecheck")
if rc == 0:
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
virtual_facts['virtualization_type'] = 'host'
virtual_facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = self.module.run_command("/usr/sbin/parstatus")
if rc == 0:
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HP nPar'
return virtual_facts
class HPUXVirtualCollector(VirtualCollector):
_fact_class = HPUXVirtual
_platform = 'HP-UX'
| gpl-3.0 |
jsilter/scipy | scipy/weave/catalog.py | 97 | 34645 | """ Track relationships between compiled extension functions & code fragments
catalog keeps track of which compiled (or even standard) functions are
related to which code fragments. It also stores these relationships
to disk so they are remembered between Python sessions. When
a = 1
compiler.inline('printf("printed from C: %d",a);',['a'] )
is called, inline() first looks to see if it has seen the code
'printf("printed from C");' before. If not, it calls
catalog.get_functions('printf("printed from C: %d", a);')
which returns a list of all the function objects that have been compiled
for the code fragment. Multiple functions can occur because the code
could be compiled for different types for 'a' (although not likely in
this case). The catalog first looks in its cache and quickly returns
a list of the functions if possible. If the cache lookup fails, it then
looks through possibly multiple catalog files on disk and fills its
cache with all the functions that match the code fragment.
In the case where the code fragment hasn't been compiled, inline() compiles
the code and then adds it to the catalog:
function = <code to compile function>
catalog.add_function('printf("printed from C: %d", a);',function)
add_function() adds function to the front of the cache. function,
along with the path information to its module, is also stored in a
persistent catalog for future use by python sessions.
"""
from __future__ import absolute_import, print_function
import os
import sys
import stat
import pickle
import socket
import tempfile
import warnings
try:
# importing dbhash is necessary because this regularly fails on Python 2.x
# installs (due to known bsddb issues). While importing shelve doesn't
# fail, it won't work correctly if dbhash import fails. So in that case we
# want to use _dumb_shelve
import dbhash
import shelve
dumb = 0
except ImportError:
from . import _dumb_shelve as shelve
dumb = 1
def getmodule(object):
""" Discover the name of the module where object was defined.
This is an augmented version of inspect.getmodule that can discover
the parent module for extension functions.
"""
import inspect
value = inspect.getmodule(object)
if value is None:
# walk trough all modules looking for function
for name,mod in sys.modules.items():
# try except used because of some comparison failures
# in wxPoint code. Need to review this
try:
if mod and object in mod.__dict__.values():
value = mod
# if it is a built-in module, keep looking to see
# if a non-builtin also has it. Otherwise quit and
# consider the module found. (ain't perfect, but will
# have to do for now).
if str(mod) not in '(built-in)':
break
except (TypeError, KeyError, ImportError):
pass
return value
def expr_to_filename(expr):
""" Convert an arbitrary expr string to a valid file name.
The name is based on the SHA-256 checksum of the string.
Something a little more human-readable would be nice, but
the computer doesn't seem to care.
"""
from hashlib import sha256
base = 'sc_'
# 32 chars is enough for unique filenames; too long names don't work for
# MSVC (see gh-3216). Don't use md5, gives a FIPS warning.
return base + sha256(expr).hexdigest()[:32]
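# For example (illustrative; the digest value depends on the expression text):
#
#   >>> expr_to_filename('printf("from C: %d", a);')  # doctest: +SKIP
#   'sc_' followed by the first 32 hex chars of the expression's SHA-256 digest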
def unique_file(d,expr):
""" Generate a unqiue file name based on expr in directory d
This is meant for use with building extension modules, so
a file name is considered unique if no file with any of the
extensions '.cpp', '.o', '.so', 'module.so', '.py', or '.pyd'
already exists for it in directory d. The fully qualified path to the
new name is returned. You'll need to append your own
extension to it before creating files.
"""
files = os.listdir(d)
# base = 'scipy_compile'
base = expr_to_filename(expr)
for i in xrange(1000000):
fname = base + repr(i)
if not (fname+'.cpp' in files or
fname+'.o' in files or
fname+'.so' in files or
fname+'module.so' in files or
fname+'.py' in files or
fname+'.pyd' in files):
break
return os.path.join(d,fname)
def is_writable(dir):
"""Determine whether a given directory is writable in a portable manner.
Parameters
----------
dir : str
A string representing a path to a directory on the filesystem.
Returns
-------
res : bool
True or False.
"""
if not os.path.isdir(dir):
return False
# Do NOT use a hardcoded name here due to the danger from race conditions
# on NFS when multiple processes are accessing the same base directory in
# parallel. We use both hostname and process id for the prefix in an
# attempt to ensure that there can really be no name collisions (tempfile
# appends 6 random chars to this prefix).
prefix = 'dummy_%s_%s_' % (socket.gethostname(),os.getpid())
try:
tmp = tempfile.TemporaryFile(prefix=prefix,dir=dir)
except OSError:
return False
# The underlying file is destroyed upon closing the file object (under
# *nix, it was unlinked at creation time)
tmp.close()
return True
def whoami():
"""return a string identifying the user."""
return os.environ.get("USER") or os.environ.get("USERNAME") or "unknown"
def _create_dirs(path):
""" create provided path, ignore errors """
try:
os.makedirs(path, mode=0o700)
except OSError:
pass
def default_dir_posix(tmp_dir=None):
"""
Create or find default catalog store for posix systems
The 'tmp_dir' argument exists to make this function easy to test.
"""
path_candidates = []
python_name = "python%d%d_compiled" % tuple(sys.version_info[:2])
if tmp_dir:
home_dir = tmp_dir
else:
home_dir = os.path.expanduser('~')
tmp_dir = tmp_dir or tempfile.gettempdir()
xdg_cache = (os.environ.get("XDG_CACHE_HOME", None) or
os.path.join(home_dir, '.cache'))
xdg_temp_dir = os.path.join(xdg_cache, 'scipy', python_name)
path_candidates.append(xdg_temp_dir)
home_temp_dir_name = '.' + python_name
home_temp_dir = os.path.join(home_dir, home_temp_dir_name)
path_candidates.append(home_temp_dir)
temp_dir_name = repr(os.getuid()) + '_' + python_name
temp_dir_path = os.path.join(tmp_dir, temp_dir_name)
path_candidates.append(temp_dir_path)
for path in path_candidates:
_create_dirs(path)
if check_dir(path):
return path
# since we got here, both dirs are not useful
tmp_dir_path = find_valid_temp_dir(temp_dir_name, tmp_dir)
if not tmp_dir_path:
tmp_dir_path = create_temp_dir(temp_dir_name, tmp_dir=tmp_dir)
return tmp_dir_path
def default_dir_win(tmp_dir=None):
"""
Create or find default catalog store for Windows systems
The 'tmp_dir' argument exists to make this function easy to test.
"""
def create_win_temp_dir(prefix, inner_dir=None, tmp_dir=None):
"""
create temp dir starting with 'prefix' in 'tmp_dir' or
'tempfile.gettempdir'; if 'inner_dir' is specified, it should be
created inside
"""
tmp_dir_path = find_valid_temp_dir(prefix, tmp_dir)
if tmp_dir_path:
if inner_dir:
tmp_dir_path = os.path.join(tmp_dir_path, inner_dir)
if not os.path.isdir(tmp_dir_path):
os.mkdir(tmp_dir_path, 0o700)
else:
tmp_dir_path = create_temp_dir(prefix, inner_dir, tmp_dir)
return tmp_dir_path
python_name = "python%d%d_compiled" % tuple(sys.version_info[:2])
tmp_dir = tmp_dir or tempfile.gettempdir()
temp_dir_name = "%s" % whoami()
temp_root_dir = os.path.join(tmp_dir, temp_dir_name)
temp_dir_path = os.path.join(temp_root_dir, python_name)
_create_dirs(temp_dir_path)
if check_dir(temp_dir_path) and check_dir(temp_root_dir):
return temp_dir_path
else:
if check_dir(temp_root_dir):
return create_win_temp_dir(python_name, tmp_dir=temp_root_dir)
else:
return create_win_temp_dir(temp_dir_name, python_name, tmp_dir)
def default_dir():
""" Return a default location to store compiled files and catalogs.
XX is the Python version number in all paths listed below
On windows, the default location is the temporary directory
returned by gettempdir()/pythonXX.
On Unix, ~/.pythonXX_compiled is the default location. If it doesn't
exist, it is created. The directory is marked rwx------.
If for some reason it isn't possible to build a default directory
in the user's home, /tmp/<uid>_pythonXX_compiled is used. If it
doesn't exist, it is created. The directory is marked rwx------
to try and keep people from being able to sneak a bad module
in on you. If the directory already exists in /tmp/ and is not
secure, new one is created.
"""
# Use a cached value for fast return if possible
if hasattr(default_dir, "cached_path") and \
check_dir(default_dir.cached_path):
return default_dir.cached_path
if sys.platform == 'win32':
path = default_dir_win()
else:
path = default_dir_posix()
# Cache the default dir path so that this function returns quickly after
# being called once (nothing in it should change after the first call)
default_dir.cached_path = path
return path
def check_dir(im_dir):
"""
Check if dir is safe; if it is, return True.
These checks make sense only on posix:
* directory has correct owner
* directory has correct permissions (0700)
* directory is not a symlink
"""
def check_is_dir():
return os.path.isdir(im_dir)
def check_permissions():
""" If on posix, permissions should be 0700. """
writable = is_writable(im_dir)
if sys.platform != 'win32':
try:
im_dir_stat = os.stat(im_dir)
except OSError:
return False
writable &= stat.S_IMODE(im_dir_stat.st_mode) == 0o0700
return writable
def check_ownership():
""" Intermediate dir owner should be same as owner of process. """
if sys.platform != 'win32':
try:
im_dir_stat = os.stat(im_dir)
except OSError:
return False
proc_uid = os.getuid()
return proc_uid == im_dir_stat.st_uid
return True
def check_is_symlink():
""" Check if intermediate dir is symlink. """
try:
return not os.path.islink(im_dir)
except OSError:
return False
checks = [check_is_dir, check_permissions,
check_ownership, check_is_symlink]
for check in checks:
if not check():
return False
return True
def create_temp_dir(prefix, inner_dir=None, tmp_dir=None):
"""
Create intermediate dirs <tmp>/<prefix+random suffix>/<inner_dir>/
argument 'tmp_dir' is used in unit tests
"""
if not tmp_dir:
tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
else:
tmp_dir_path = tempfile.mkdtemp(prefix=prefix, dir=tmp_dir)
if inner_dir:
tmp_dir_path = os.path.join(tmp_dir_path, inner_dir)
os.mkdir(tmp_dir_path, 0o700)
return tmp_dir_path
def intermediate_dir_prefix():
""" Prefix of root intermediate dir (<tmp>/<root_im_dir>). """
return "%s-%s-" % ("scipy", whoami())
def find_temp_dir(prefix, tmp_dir=None):
""" Find temp dirs in 'tmp_dir' starting with 'prefix'"""
matches = []
tmp_dir = tmp_dir or tempfile.gettempdir()
for tmp_file in os.listdir(tmp_dir):
if tmp_file.startswith(prefix):
matches.append(os.path.join(tmp_dir, tmp_file))
return matches
def find_valid_temp_dir(prefix, tmp_dir=None):
"""
Try to look for existing temp dirs.
If there is one suitable found, return it, otherwise return None.
"""
matches = find_temp_dir(prefix, tmp_dir)
for match in matches:
if check_dir(match):
# as soon as we find correct dir, we can stop searching
return match
def py_intermediate_dir():
"""
Name of intermediate dir for current python interpreter:
<temp dir>/<name>/pythonXY_intermediate/
"""
name = "python%d%d_intermediate" % tuple(sys.version_info[:2])
return name
def create_intermediate_dir(tmp_dir=None):
py_im_dir = py_intermediate_dir()
return create_temp_dir(intermediate_dir_prefix(), py_im_dir, tmp_dir)
def intermediate_dir(tmp_dir=None):
"""
Temporary directory for storing .cpp and .o files during builds.
First, try to find the dir and if it exists, verify it is safe.
Otherwise, create it.
"""
im_dir = find_valid_temp_dir(intermediate_dir_prefix(), tmp_dir)
py_im_dir = py_intermediate_dir()
if im_dir is None:
im_dir = create_intermediate_dir(tmp_dir)
else:
im_dir = os.path.join(im_dir, py_im_dir)
if not os.path.isdir(im_dir):
os.mkdir(im_dir, 0o700)
return im_dir
def default_temp_dir():
path = os.path.join(default_dir(),'temp')
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
if not is_writable(path):
warnings.warn('Default directory is not write accessible.\n'
'default: %s' % path)
return path
def os_dependent_catalog_name():
""" Generate catalog name dependent on OS and Python version being used.
This allows multiple platforms to have catalog files in the
same directory without stepping on each other. For now, it
bases the name of the value returned by sys.platform and the
version of python being run. If this isn't enough to discriminate
on some platforms, we can try to add other info. It has
occurred to me that if we get fancy enough to optimize for different
architectures, then chip type might be added to the catalog name also.
"""
version = '%d%d' % sys.version_info[:2]
return sys.platform+version+'compiled_catalog'
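# For example, CPython 2.7 on Linux would typically yield
# 'linux2' + '27' + 'compiled_catalog' == 'linux227compiled_catalog'
# (illustrative; the exact sys.platform string varies by OS and Python build).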
def catalog_path(module_path):
""" Return the full path name for the catalog file in the given directory.
module_path can either be a file name or a path name. If it is a
file name, the catalog file name in its parent directory is returned.
If it is a directory, the catalog file in that directory is returned.
If module_path doesn't exist, None is returned. Note though, that the
catalog file does *not* have to exist, only its parent. '~', shell
variables, and relative ('.' and '..') paths are all acceptable.
catalog file names are os dependent (based on sys.platform), so this
should support multiple platforms sharing the same disk space
(NFS mounts). See os_dependent_catalog_name() for more info.
"""
module_path = os.path.expanduser(module_path)
module_path = os.path.expandvars(module_path)
module_path = os.path.abspath(module_path)
if not os.path.exists(module_path):
catalog_file = None
elif not os.path.isdir(module_path):
module_path,dummy = os.path.split(module_path)
catalog_file = os.path.join(module_path,os_dependent_catalog_name())
else:
catalog_file = os.path.join(module_path,os_dependent_catalog_name())
return catalog_file
def get_catalog(module_path,mode='r'):
""" Return a function catalog (shelve object) from the path module_path
If module_path is a directory, the function catalog returned is
from that directory. If module_path is an actual module_name,
then the function catalog returned is from its parent directory.
mode uses the standard 'c' = create, 'n' = new, 'r' = read,
'w' = write file open modes available for anydbm databases.
Well... it should be. Stuck with dumbdbm for now and the modes
almost don't matter. We do some checking for 'r' mode, but that
is about it.
See catalog_path() for more information on module_path.
"""
if mode not in ['c','r','w','n']:
msg = " mode must be 'c', 'n', 'r', or 'w'. See anydbm for more info"
raise ValueError(msg)
catalog_file = catalog_path(module_path)
if (catalog_file is not None) \
and ((dumb and os.path.exists(catalog_file+'.dat'))
or os.path.exists(catalog_file)):
sh = shelve.open(catalog_file,mode)
else:
if mode == 'r':
sh = None
else:
sh = shelve.open(catalog_file,mode)
return sh
class catalog(object):
""" Stores information about compiled functions both in cache and on disk.
catalog stores (code, list_of_function) pairs so that all the functions
that have been compiled for code are available for calling (usually in
inline or blitz).
catalog keeps a dictionary of previously accessed code values cached
for quick access. It also handles the looking up of functions compiled
in previously called Python sessions on disk in function catalogs.
catalog searches the directories in the PYTHONCOMPILED environment
variable in order loading functions that correspond to the given code
fragment. A default directory is also searched for catalog functions.
On unix, the default directory is usually '~/.pythonxx_compiled' where
xx is the version of Python used. On windows, it is the directory
returned by tempfile.gettempdir(). Functions from catalogs closer to
the front of the search path are guaranteed to be closer to the front
of the function list, so they will be called first. See
get_cataloged_functions() for more info on how the search order is
traversed.
Catalog also handles storing information about compiled functions to
a catalog. When writing this information, the first writable catalog
file in PYTHONCOMPILED path is used. If a writable catalog is not
found, it is written to the catalog in the default directory. This
directory should always be writable.
"""
def __init__(self,user_path_list=None):
""" Create a catalog for storing/searching for compiled functions.
user_path_list contains directories that should be searched
first for function catalogs. They will come before the path
            entries in the PYTHONCOMPILED environment variable.
"""
if isinstance(user_path_list, str):
self.user_path_list = [user_path_list]
elif user_path_list:
self.user_path_list = user_path_list
else:
self.user_path_list = []
self.cache = {}
self.module_dir = None
self.paths_added = 0
# unconditionally append the default dir for auto-generated compiled
# extension modules, so that pickle.load()s don't fail.
sys.path.append(default_dir())
def set_module_directory(self,module_dir):
""" Set the path that will replace 'MODULE' in catalog searches.
            You should call clear_module_directory() when you're finished
working with it.
"""
self.module_dir = module_dir
def get_module_directory(self):
""" Return the path used to replace the 'MODULE' in searches.
"""
return self.module_dir
def clear_module_directory(self):
""" Reset 'MODULE' path to None so that it is ignored in searches.
"""
self.module_dir = None
def get_environ_path(self):
""" Return list of paths from 'PYTHONCOMPILED' environment variable.
On Unix the path in PYTHONCOMPILED is a ':' separated list of
directories. On Windows, a ';' separated list is used.
"""
paths = []
if 'PYTHONCOMPILED' in os.environ:
path_string = os.environ['PYTHONCOMPILED']
paths = path_string.split(os.path.pathsep)
return paths
def build_search_order(self):
""" Returns a list of paths that are searched for catalogs.
Values specified in the catalog constructor are searched first,
then values found in the PYTHONCOMPILED environment variable.
The directory returned by default_dir() is always returned at
the end of the list.
There is a 'magic' path name called 'MODULE' that is replaced
by the directory defined by set_module_directory(). If the
module directory hasn't been set, 'MODULE' is ignored.
"""
paths = self.user_path_list + self.get_environ_path()
search_order = []
for path in paths:
if path == 'MODULE':
if self.module_dir:
search_order.append(self.module_dir)
else:
search_order.append(path)
search_order.append(default_dir())
return search_order
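    # Sketch of the 'MODULE' placeholder (directory names are illustrative):
    #
    #     c = catalog(['MODULE', '/opt/catalogs'])
    #     c.set_module_directory('/home/user/project')
    #     c.build_search_order()
    #     # -> ['/home/user/project', '/opt/catalogs',
    #     #     <PYTHONCOMPILED entries...>, default_dir()]
    #     c.clear_module_directory()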
def get_catalog_files(self):
""" Returns catalog file list in correct search order.
            Some of the catalog files may not currently exist.
However, all will be valid locations for a catalog
to be created (if you have write permission).
"""
files = map(catalog_path,self.build_search_order())
files = filter(lambda x: x is not None,files)
return files
def get_existing_files(self):
""" Returns all existing catalog file list in correct search order.
"""
files = self.get_catalog_files()
# open every stinking file to check if it exists.
# This is because anydbm doesn't provide a consistent naming
# convention across platforms for its files
existing_files = []
for file in files:
cat = get_catalog(os.path.dirname(file),'r')
if cat is not None:
existing_files.append(file)
cat.close()
# This is the non-portable (and much faster) old code
# existing_files = filter(os.path.exists,files)
return existing_files
def get_writable_file(self,existing_only=0):
""" Return the name of the first writable catalog file.
Its parent directory must also be writable. This is so that
compiled modules can be written to the same directory.
"""
# note: both file and its parent directory must be writeable
if existing_only:
files = self.get_existing_files()
else:
files = self.get_catalog_files()
# filter for (file exists and is writable) OR directory is writable
def file_test(x):
from os import access, F_OK, W_OK
return (access(x,F_OK) and access(x,W_OK) or
access(os.path.dirname(x),W_OK))
writable = filter(file_test,files)
if writable:
file = writable[0]
else:
file = None
return file
def get_writable_dir(self):
""" Return the parent directory of first writable catalog file.
The returned directory has write access.
"""
return os.path.dirname(self.get_writable_file())
def unique_module_name(self,code,module_dir=None):
""" Return full path to unique file name that in writable location.
The directory for the file is the first writable directory in
the catalog search path. The unique file name is derived from
            the code fragment. If module_dir is specified, it is used
to replace 'MODULE' in the search path.
"""
if module_dir is not None:
self.set_module_directory(module_dir)
try:
d = self.get_writable_dir()
finally:
if module_dir is not None:
self.clear_module_directory()
return unique_file(d, code)
def path_key(self,code):
""" Return key for path information for functions associated with code.
"""
return '__path__' + code
def configure_path(self,cat,code):
""" Add the python path for the given code to the sys.path
unconfigure_path() should be called as soon as possible after
imports associated with code are finished so that sys.path
is restored to normal.
"""
try:
paths = cat[self.path_key(code)]
self.paths_added = len(paths)
sys.path = paths + sys.path
except:
self.paths_added = 0
def unconfigure_path(self):
""" Restores sys.path to normal after calls to configure_path()
Remove the previously added paths from sys.path
"""
sys.path = sys.path[self.paths_added:]
self.paths_added = 0
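    # The two calls are meant to bracket catalog imports, roughly (sketch):
    #
    #     self.configure_path(cat, code)   # prepend stored module paths
    #     try:
    #         functions = cat[code]        # unpickling may import modules
    #     finally:
    #         self.unconfigure_path()      # always restore sys.path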
def get_cataloged_functions(self,code):
""" Load all functions associated with code from catalog search path.
Sometimes there can be trouble loading a function listed in a
catalog file because the actual module that holds the function
has been moved or deleted. When this happens, that catalog file
is "repaired", meaning the entire entry for this function is
removed from the file. This only affects the catalog file that
has problems -- not the others in the search path.
The "repair" behavior may not be needed, but I'll keep it for now.
"""
mode = 'r'
cat = None
function_list = []
for path in self.build_search_order():
cat = get_catalog(path,mode)
if cat is not None and code in cat:
# set up the python path so that modules for this
# function can be loaded.
self.configure_path(cat,code)
try:
function_list += cat[code]
except: # SystemError and ImportError so far seen
# problems loading a function from the catalog. Try to
# repair the cause.
cat.close()
self.repair_catalog(path,code)
self.unconfigure_path()
if cat is not None:
# ensure that the catalog is properly closed
cat.close()
return function_list
def repair_catalog(self,catalog_path,code):
""" Remove entry for code from catalog_path
Occasionally catalog entries could get corrupted. An example
would be when a module that had functions in the catalog was
deleted or moved on the disk. The best current repair method is
just to trash the entire catalog entry for this piece of code.
            This may lose function entries that are valid, but that's life.
            catalog_path must be writable for repair. If it isn't, the
            function exits with a warning.
"""
writable_cat = None
if (catalog_path is not None) and (not os.path.exists(catalog_path)):
return
try:
writable_cat = get_catalog(catalog_path,'w')
except:
warnings.warn('Unable to repair catalog entry\n %s\n in\n %s' %
(code, catalog_path))
# shelve doesn't guarantee flushing, so it's safest to explicitly
# close the catalog
writable_cat.close()
return
if code in writable_cat:
del writable_cat[code]
# it is possible that the path key doesn't exist (if the function
# registered was a built-in function), so we have to check if the path
# exists before arbitrarily deleting it.
path_key = self.path_key(code)
if path_key in writable_cat:
del writable_cat[path_key]
writable_cat.close()
def get_functions_fast(self,code):
""" Return list of functions for code from the cache.
Return an empty list if the code entry is not found.
"""
return self.cache.get(code,[])
def get_functions(self,code,module_dir=None):
""" Return the list of functions associated with this code fragment.
The cache is first searched for the function. If an entry
in the cache is not found, then catalog files on disk are
searched for the entry. This is slooooow, but only happens
once per code object. All the functions found in catalog files
on a cache miss are loaded into the cache to speed up future calls.
The search order is as follows:
1. user specified path (from catalog initialization)
2. directories from the PYTHONCOMPILED environment variable
3. The temporary directory on your platform.
The path specified by module_dir will replace the 'MODULE'
place holder in the catalog search path. See build_search_order()
for more info on the search path.
"""
        # 1. Fast!! try the cache first.
if code in self.cache:
return self.cache[code]
# 2. Slow!! read previously compiled functions from disk.
try:
self.set_module_directory(module_dir)
function_list = self.get_cataloged_functions(code)
# put function_list in cache to save future lookups.
if function_list:
self.cache[code] = function_list
# return function_list, empty or otherwise.
finally:
self.clear_module_directory()
return function_list
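    # Illustrative lookup pattern ('/proj' is an assumed module directory):
    #
    #     funcs = c.get_functions(code)            # slow: reads disk catalogs
    #     funcs = c.get_functions(code)            # fast: served from c.cache
    #     funcs = c.get_functions(code, '/proj')   # '/proj' replaces 'MODULE'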
def add_function(self,code,function,module_dir=None):
""" Adds a function to the catalog.
The function is added to the cache as well as the first
writable file catalog found in the search path. If no
code entry exists in the cache, the on disk catalogs
are loaded into the cache and function is added to the
beginning of the function list.
The path specified by module_dir will replace the 'MODULE'
place holder in the catalog search path. See build_search_order()
for more info on the search path.
"""
# 1. put it in the cache.
if code in self.cache:
if function not in self.cache[code]:
self.cache[code].insert(0,function)
else:
                # if it is already in the cache, then it has also
                # been persisted
return
else:
# Load functions and put this one up front
self.cache[code] = self.get_functions(code)
self.fast_cache(code,function)
# 2. Store the function entry to disk.
try:
self.set_module_directory(module_dir)
self.add_function_persistent(code,function)
finally:
self.clear_module_directory()
def add_function_persistent(self,code,function):
""" Store the code->function relationship to disk.
Two pieces of information are needed for loading functions
from disk -- the function pickle (which conveniently stores
the module name, etc.) and the path to its module's directory.
The latter is needed so that the function can be loaded no
matter what the user's Python path is.
"""
# add function to data in first writable catalog
mode = 'c' # create if doesn't exist, otherwise, use existing
cat_dir = self.get_writable_dir()
cat = get_catalog(cat_dir,mode)
if cat is None:
cat_dir = default_dir()
cat = get_catalog(cat_dir,mode)
if cat is None:
cat_dir = default_dir()
cat_file = catalog_path(cat_dir)
warnings.warn('problems with default catalog -- removing')
import glob
files = glob.glob(cat_file+'*')
for f in files:
os.remove(f)
cat = get_catalog(cat_dir,mode)
if cat is None:
raise ValueError('Failed to access a catalog for storing functions')
# Prabhu was getting some corrupt catalog errors. I'll put a try/except
# to protect against this, but should really try and track down the issue.
function_list = [function]
try:
function_list = function_list + cat.get(code,[])
except pickle.UnpicklingError:
pass
cat[code] = function_list
# now add needed path information for loading function
module = getmodule(function)
try:
# built in modules don't have the __file__ extension, so this
# will fail. Just pass in this case since path additions aren't
# needed for built-in modules.
mod_path,f = os.path.split(os.path.abspath(module.__file__))
pkey = self.path_key(code)
cat[pkey] = [mod_path] + cat.get(pkey,[])
except:
pass
cat.close()
def fast_cache(self,code,function):
""" Move function to the front of the cache entry for code
If future calls to the function have the same type signature,
this will speed up access significantly because the first
function call is correct.
Note: The cache added to the inline_tools module is significantly
faster than always calling get_functions, so this isn't
as necessary as it used to be. Still, it's probably worth
doing.
"""
try:
if self.cache[code][0] == function:
return
except: # KeyError, IndexError
pass
try:
self.cache[code].remove(function)
except ValueError:
pass
# put new function at the beginning of the list to search.
self.cache[code].insert(0,function)
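# End-to-end sketch of the caching discipline (fast_impl is hypothetical):
#
#     c = catalog()
#     c.add_function(code, fast_impl)    # cached and persisted to disk
#     c.fast_cache(code, fast_impl)      # keep the matching impl up front
#     assert c.get_functions(code)[0] is fast_impl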
| bsd-3-clause |
sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/django/conf/locale/az/formats.py | 1059 | 1267 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y', # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
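# Hedged round-trip check with the standard library (values illustrative):
#
#     from datetime import datetime
#     datetime.strptime('25.10.2006 14:30', '%d.%m.%Y %H:%M')
#     # -> datetime.datetime(2006, 10, 25, 14, 30)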
| bsd-3-clause |
keimlink/django-cms | cms/tests/test_cache.py | 24 | 14080 | # -*- coding: utf-8 -*-
from django.template import Template, RequestContext
from django.conf import settings
from sekizai.context import SekizaiContext
from cms.api import add_plugin, create_page
from cms.cache import _get_cache_version, invalidate_cms_page_cache
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import NoCachePlugin, SekizaiPlugin
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting
class CacheTestCase(CMSTestCase):
def tearDown(self):
from django.core.cache import cache
super(CacheTestCase, self).tearDown()
cache.clear()
def setUp(self):
from django.core.cache import cache
super(CacheTestCase, self).setUp()
cache.clear()
def test_cache_placeholder(self):
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
rctx = RequestContext(request)
with self.assertNumQueries(5):
template.render(rctx)
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = False
rctx = RequestContext(request)
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
with self.assertNumQueries(1):
template.render(rctx)
# toolbar
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
rctx = RequestContext(request)
with self.assertNumQueries(3):
template.render(rctx)
page1.publish('en')
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
middleware = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(CMS_PAGE_CACHE=False, MIDDLEWARE_CLASSES=middleware):
with self.assertNumQueries(FuzzyInt(13, 17)):
self.client.get('/en/')
with self.assertNumQueries(FuzzyInt(5, 9)):
self.client.get('/en/')
with self.settings(CMS_PAGE_CACHE=False, MIDDLEWARE_CLASSES=middleware, CMS_PLACEHOLDER_CACHE=False):
with self.assertNumQueries(FuzzyInt(7, 11)):
self.client.get('/en/')
def test_no_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(NoCachePlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
rctx = RequestContext(request)
with self.assertNumQueries(3):
template.render(rctx)
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
rctx = RequestContext(request)
with self.assertNumQueries(1):
template.render(rctx)
add_plugin(placeholder1, "NoCachePlugin", 'en')
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
rctx = RequestContext(request)
with self.assertNumQueries(4):
render = template.render(rctx)
with self.assertNumQueries(FuzzyInt(14, 18)):
response = self.client.get('/en/')
resp1 = response.content.decode('utf8').split("$$$")[1]
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
template = Template("{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}")
rctx = RequestContext(request)
with self.assertNumQueries(4):
render2 = template.render(rctx)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(8, 13)):
response = self.client.get('/en/')
resp2 = response.content.decode('utf8').split("$$$")[1]
self.assertNotEqual(render, render2)
self.assertNotEqual(resp1, resp2)
plugin_pool.unregister_plugin(NoCachePlugin)
def test_cache_page(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
mw_classes = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(MIDDLEWARE_CLASSES=mw_classes):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Create a request object
request = self.get_request(page1.get_path(), 'en')
# Ensure that user is NOT authenticated
self.assertFalse(request.user.is_authenticated())
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 22)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are cached by
# asserting that they require fewer queries.
#
with self.assertNumQueries(0):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that the cache is invalidated on unpublishing the page
#
old_version = _get_cache_version()
page1.unpublish('en')
self.assertGreater(_get_cache_version(), old_version)
#
# Test that this means the page is actually not cached.
#
page1.publish('en')
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that the above behavior is different when CMS_PAGE_CACHE is
# set to False (disabled)
#
with self.settings(CMS_PAGE_CACHE=False):
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are still requires DB
# access.
#
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
def test_invalidate_restart(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
mw_classes = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(MIDDLEWARE_CLASSES=mw_classes):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Create a request object
request = self.get_request(page1.get_path(), 'en')
# Ensure that user is NOT authenticated
self.assertFalse(request.user.is_authenticated())
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are cached by
# asserting that they require fewer queries.
#
with self.assertNumQueries(0):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
old_plugins = plugin_pool.plugins
plugin_pool.clear()
plugin_pool.discover_plugins()
plugin_pool.plugins = old_plugins
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
def test_sekizai_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(SekizaiPlugin)
add_plugin(placeholder1, "SekizaiPlugin", 'en')
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
page1.publish('en')
response = self.client.get('/en/')
self.assertContains(response, 'alert(')
response = self.client.get('/en/')
self.assertContains(response, 'alert(')
def test_cache_invalidation(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
mw_classes = [mw for mw in settings.MIDDLEWARE_CLASSES if mw not in exclude]
with self.settings(MIDDLEWARE_CLASSES=mw_classes):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder = page1.placeholders.get(slot="body")
add_plugin(placeholder, "TextPlugin", 'en', body="First content")
page1.publish('en')
response = self.client.get('/en/')
self.assertContains(response, 'First content')
response = self.client.get('/en/')
self.assertContains(response, 'First content')
add_plugin(placeholder, "TextPlugin", 'en', body="Second content")
page1.publish('en')
response = self.client.get('/en/')
self.assertContains(response, 'Second content')
def test_render_placeholder_cache(self):
"""
Regression test for #4223
Assert that placeholder cache is cleared correctly when a plugin is saved
"""
invalidate_cms_page_cache()
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder
###
# add the test plugin
##
test_plugin = add_plugin(ph1, u"TextPlugin", u"en", body="Some text")
test_plugin.save()
# asserting initial text
context = SekizaiContext()
context['request'] = self.get_request()
text = render_placeholder(ph1, context)
self.assertEqual(text, "Some text")
# deleting local plugin cache
del ph1._plugins_cache
test_plugin.body = 'Other text'
test_plugin.save()
        # plugin text has changed, so the placeholder rendering changes too
text = render_placeholder(ph1, context)
self.assertEqual(text, "Other text")
| bsd-3-clause |
ioannistsanaktsidis/inspire-next | inspire/base/format_elements/bfe_inspire_url.py | 2 | 1497 | # -*- coding: utf-8 -*-
##
## This file is part of INSPIRE.
## Copyright (C) 2015 CERN.
##
## INSPIRE is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
"""BibFormat element - Prints full-text URLs
"""
__revision__ = "$Id$"
def format_element(bfo, style, separator='; '):
"""Default format for formatting full-text URLs.
@param separator: the separator between urls.
@param style: CSS class of the link
"""
urls_u = bfo.fields("8564_u")
if style != "":
style = 'class="'+style+'"'
    urls = ['<a ' + style +
            ' href="' + url + '">' + url + '</a>'
            for url in urls_u]
return separator.join(urls)
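# Expected shape of the output for one 8564_u value (sketch; URL assumed):
#
#     format_element(bfo, style='ext')
#     # -> '<a class="ext" href="http://example.org/x.pdf">http://example.org/x.pdf</a>'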
def escape_values(bfo):
"""
Check if output of this element should be escaped.
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 |
hypebeast/etapi | etapi/utils.py | 1 | 1765 | # -*- coding: utf-8 -*-
'''Helper utilities and decorators.'''
from datetime import datetime
from flask import flash
def flash_errors(form, category="warning"):
'''Flash all errors for a form.'''
for field, errors in form.errors.items():
for error in errors:
flash("{0} - {1}"
.format(getattr(form, field).label.text, error), category)
def pretty_date(dt, default=None):
"""
Returns string representing "time since" e.g.
3 days ago, 5 hours ago etc.
Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/
"""
if default is None:
default = 'just now'
now = datetime.utcnow()
diff = now - dt
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if not period:
continue
if period == 1:
return u'%d %s ago' % (period, singular)
else:
return u'%d %s ago' % (period, plural)
return default
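# Illustrative behaviour (relies on the datetime import above; timedelta
# would additionally need to be imported for the first line):
#
#     pretty_date(datetime.utcnow() - timedelta(hours=5))  # -> '5 hours ago'
#     pretty_date(datetime.utcnow())                       # -> 'just now'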
def pretty_seconds_to_hhmmss(seconds):
if not seconds:
return None
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d h %d m %s s" % (h, m, s)
def pretty_seconds_to_hhmm(seconds):
if not seconds:
return None
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d h %d m" % (h, m)
def pretty_seconds_to_hh(seconds):
if not seconds:
return None
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d h" % (h)
| bsd-3-clause |
DBeath/flask-feedrsub | tests/period_test.py | 1 | 1488 | from datetime import datetime
from dateutil.relativedelta import relativedelta
from feedrsub.database import db
from feedrsub.models.period import PERIOD, Period
from feedrsub.models.populate_db import populate_periods
def test_populate_periods(session):
populate_periods()
daily = Period.query.filter_by(name=PERIOD.DAILY).first()
assert daily.name == PERIOD.DAILY
immediate = Period.query.filter_by(name=PERIOD.IMMEDIATE).first()
assert immediate.name == PERIOD.IMMEDIATE
weekly = Period.query.filter_by(name=PERIOD.WEEKLY).first()
assert weekly.name == PERIOD.WEEKLY
monthly = Period.query.filter_by(name=PERIOD.MONTHLY).first()
assert monthly.name == PERIOD.MONTHLY
def test_period_creation(session):
period_desc = "A Yearly period"
period_name = "YEARLY"
period = Period(period_name, period_desc)
db.session.add(period)
db.session.commit()
yearly = Period.query.filter_by(name=period_name).first()
assert yearly.name == period_name
assert yearly.description == period_desc
def test_get_from_date_with_name(session):
now = datetime.utcnow()
past = now - relativedelta(days=1)
from_date = Period.get_from_date(PERIOD.DAILY, now)
assert from_date == past
def test_get_from_date_with_period(session):
now = datetime.utcnow()
past = now - relativedelta(days=1)
period = Period(name=PERIOD.DAILY)
from_date = Period.get_from_date(period, now)
assert from_date == past
| mit |
nigelb/Static-UPnP | examples/Chromecast/StaticUPnP_StaticServices.py | 1 | 3345 | # static_upnp responds to upnp search requests with statically configures responses.
# Copyright (C) 2016 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import socket
from dnslib import DNSQuestion, QTYPE
from static_upnp.chromecast_helpers import get_chromecast_uuid, get_date, get_chromecast_mdns_response
from static_upnp.chromecast_helpers import get_service_descriptor, get_chromecast_friendly_name
from static_upnp.mDNS import StaticMDNDService
from static_upnp.static import StaticService
OK = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age={max_age}
DATE: {date}
EXT:
LOCATION: http://{ip}:{port}/ssdp/device-desc.xml
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
01-NLS: 161d2e68-1dd2-11b2-9fd5-f9d9dc2ad10b
SERVER: Linux/3.8.13+, UPnP/1.0, Portable SDK for UPnP devices/1.6.18
X-User-Agent: redsonic
ST: {st}
USN: {usn}
BOOTID.UPNP.ORG: 4
CONFIGID.UPNP.ORG: 2
"""
NOTIFY = """NOTIFY * HTTP/1.1
HOST: 239.255.255.250:1900
CACHE-CONTROL: max-age=1800
LOCATION: http://{ip}:{port}/ssdp/device-desc.xml
NT: {st}
NTS: {nts}
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
01-NLS: 161d2e68-1dd2-11b2-9fd5-f9d9dc2ad10b
SERVER: Linux/3.8.13+, UPnP/1.0, Portable SDK for UPnP devices/1.6.18
X-User-Agent: redsonic
USN: {uuid}
"""
chromecast_ip = socket.gethostbyname_ex("Chromecast")[2][0]
chromecast_port = 8008
chromecast_service_descriptor = get_service_descriptor(chromecast_ip, chromecast_port)
chromecast_uuid = get_chromecast_uuid(chromecast_service_descriptor)
chromecast_friendly_name = get_chromecast_friendly_name(chromecast_service_descriptor)
chromecast_bs = "XXXXXXXXXXXX"
chromecast_cd = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
services = [
StaticService({
"ip": chromecast_ip,
"port": chromecast_port,
"uuid": chromecast_uuid,
"max_age": "1800",
"date": get_date
}, 1024,
OK=OK,
NOTIFY=NOTIFY,
services=[
{
"st": "upnp:rootdevice",
"usn": "uuid:{uuid}::{st}"
},
{
"st": "uuid:{uuid}",
"usn": "uuid:{uuid}"
},
{
"st": "urn:dial-multiscreen-org:device:dial:1",
"usn": "uuid:{uuid}::{st}"
},
{
"st": "urn:dial-multiscreen-org:service:dial:1",
"usn": "uuid:{uuid}::{st}"
},
])
]
mdns_services=[StaticMDNDService(
response_generator=lambda query: get_chromecast_mdns_response(query, chromecast_ip, chromecast_uuid, chromecast_friendly_name, chromecast_bs, chromecast_cd),
dns_question=DNSQuestion(qname="_googlecast._tcp.local", qtype=QTYPE.PTR, qclass=32769)
)]
| gpl-2.0 |
Tranzystorek/servo | tests/wpt/css-tests/tools/webdriver/webdriver/__init__.py | 149 | 1055 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from client import Cookies, Element, Find, Session, Timeouts, Window
from error import (
ElementNotSelectableException,
ElementNotVisibleException,
InvalidArgumentException,
InvalidCookieDomainException,
InvalidElementCoordinatesException,
InvalidElementStateException,
InvalidSelectorException,
InvalidSessionIdException,
JavascriptErrorException,
MoveTargetOutOfBoundsException,
NoSuchAlertException,
NoSuchElementException,
NoSuchFrameException,
NoSuchWindowException,
ScriptTimeoutException,
SessionNotCreatedException,
StaleElementReferenceException,
TimeoutException,
UnableToSetCookieException,
UnexpectedAlertOpenException,
UnknownCommandException,
UnknownErrorException,
UnknownMethodException,
UnsupportedOperationException,
WebDriverException)
| mpl-2.0 |
UWPCE-PythonCert/IntroPython2016 | students/liverpoolforever/session04/dict_lab.py | 3 | 1402 | ''' Dictionary Lab '''
inputDict = {'name':'Chris','city':'Seattle','cake':'Chocolate'}
def print_dict(dict):
for key,value in dict.items():
print(key,value)
def delete_item(key):
    # specify the key to delete
del inputDict[key]
print(inputDict)
def add_an_item(key,value):
inputDict[key] = value
print(inputDict)
def display_keys(dict):
list_of_keys = dict.keys()
print(list_of_keys)
def display_values(dict):
    list_of_values = dict.values()
print(list_of_values)
def check_key_exists(key):
# call display keys
# list_of_keys = list(dict.keys())
if key in inputDict:
print("{:s} exists in dictonary".format(key))
def new_dict(dict):
    count_dict = {}
    for key, value in dict.items():
        # reset the count for each key so totals don't accumulate
        counter = 0
        for char in value:
            if char == 't':
                counter = counter + 1
        count_dict[key] = counter
    print(count_dict)
def main():
# iterate key, value pairs
print_dict(inputDict)
# display keys
display_keys(inputDict)
# display values
display_values(inputDict)
# remove a key
delete_item('cake')
    # add a key
add_an_item('fruit','Mango')
# check if fruit exists
check_key_exists('fruit')
# new dict
new_dict(inputDict)
if __name__ == '__main__':
main() | unlicense |
avanov/django | django/db/models/sql/where.py | 439 | 8054 | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.db.models.sql.datastructures import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with as_sql() and either
relabeled_clone() method or relabel_aliases() and clone() methods and
contains_aggregate attribute.
"""
default = AND
def split_having(self, negated=False):
"""
Returns two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
if not self.contains_aggregate:
return self, None
in_negated = negated ^ self.negated
# If the effective connector is OR and this node contains an aggregate,
# then we need to push the whole branch to HAVING clause.
may_need_split = (
(in_negated and self.connector == AND) or
(not in_negated and self.connector == OR))
if may_need_split and self.contains_aggregate:
return None, self
where_parts = []
having_parts = []
for c in self.children:
if hasattr(c, 'split_having'):
where_part, having_part = c.split_having(in_negated)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
return where_node, having_node
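    # Conceptual example (hypothetical models): for a queryset such as
    #     Book.objects.annotate(n=Count('authors')).filter(n__gt=2, title='x')
    # split_having() routes the aggregate lookup n__gt=2 into the returned
    # HAVING node and keeps title='x' in the WHERE node.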
def as_sql(self, compiler, connection):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns '', [] if this node matches everything,
None, [] if this node is empty, and raises EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check if this node matches nothing or everything.
# First check the amount of full nodes and empty nodes
# to make this node empty/full.
# Now, check if this node is full/empty using the
# counts.
if empty_needed == 0:
if self.negated:
return '', []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return '', []
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = 'NOT (%s)' % sql_string
elif len(result) > 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def relabel_aliases(self, change_map):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, 'relabel_aliases'):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, 'relabeled_clone'):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
"""
Creates a clone of the tree. Must only be called on root nodes (nodes
with empty subtree_parents). Childs must be either (Contraint, lookup,
value) tuples, or objects supporting .clone().
"""
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
class NothingNode(object):
"""
A node that matches nothing.
"""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere(object):
# The contents are a black box - assume no aggregates are used.
contains_aggregate = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint(object):
# Even if aggregates would be used in a subquery, the outer query isn't
# interested about those.
contains_aggregate = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
# QuerySet was sent
if hasattr(query, 'values'):
if query._db and connection.alias != query._db:
raise ValueError("Can't do subqueries with queries on different DBs.")
# Do not override already existing values.
if query._fields is None:
query = query.values(*self.targets)
else:
query = query._clone()
query = query.query
if query.can_filter():
# If there is no slicing in use, then we can safely drop all ordering
query.clear_ordering(True)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
def relabel_aliases(self, change_map):
self.alias = change_map.get(self.alias, self.alias)
def clone(self):
return self.__class__(
self.alias, self.columns, self.targets,
self.query_object)
| bsd-3-clause |
ininex/geofire-python | resource/lib/python2.7/site-packages/Crypto/SelfTest/Cipher/test_CTR.py | 5 | 16449 | # ===================================================================
#
# Copyright (c) 2015, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Util.py3compat import tobytes, b, unhexlify, bchr
from Crypto.Cipher import AES, DES3
from Crypto.Hash import SHAKE128
from Crypto.Util import Counter
def get_tag_random(tag, length):
return SHAKE128.new(data=tobytes(tag)).read(length)
class CtrTests(unittest.TestCase):
key_128 = get_tag_random("key_128", 16)
key_192 = get_tag_random("key_192", 24)
nonce_32 = get_tag_random("nonce_32", 4)
nonce_64 = get_tag_random("nonce_64", 8)
ctr_64 = Counter.new(32, prefix=nonce_32)
ctr_128 = Counter.new(64, prefix=nonce_64)
def test_loopback_128(self):
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
pt = get_tag_random("plaintext", 16 * 100)
ct = cipher.encrypt(pt)
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
pt2 = cipher.decrypt(ct)
self.assertEqual(pt, pt2)
def test_loopback_64(self):
cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64)
pt = get_tag_random("plaintext", 8 * 100)
ct = cipher.encrypt(pt)
cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64)
pt2 = cipher.decrypt(ct)
self.assertEqual(pt, pt2)
def test_invalid_counter_parameter(self):
# Counter object is required for ciphers with short block size
self.assertRaises(TypeError, DES3.new, self.key_192, AES.MODE_CTR)
# Positional arguments are not allowed (Counter must be passed as
# keyword)
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR, self.ctr_128)
def test_nonce_attribute(self):
# Nonce attribute is the prefix passed to Counter (DES3)
cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64)
self.assertEqual(cipher.nonce, self.nonce_32)
# Nonce attribute is the prefix passed to Counter (AES)
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
self.assertEqual(cipher.nonce, self.nonce_64)
# Nonce attribute is not defined if suffix is used in Counter
counter = Counter.new(64, prefix=self.nonce_32, suffix=self.nonce_32)
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter)
self.failIf(hasattr(cipher, "nonce"))
def test_nonce_parameter(self):
# Nonce parameter becomes nonce attribute
cipher1 = AES.new(self.key_128, AES.MODE_CTR, nonce=self.nonce_64)
self.assertEqual(cipher1.nonce, self.nonce_64)
counter = Counter.new(64, prefix=self.nonce_64, initial_value=0)
cipher2 = AES.new(self.key_128, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher1.nonce, cipher2.nonce)
pt = get_tag_random("plaintext", 65536)
self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt))
# Nonce is implicitly created (for AES) when no parameters are passed
nonce1 = AES.new(self.key_128, AES.MODE_CTR).nonce
nonce2 = AES.new(self.key_128, AES.MODE_CTR).nonce
self.assertNotEqual(nonce1, nonce2)
self.assertEqual(len(nonce1), 8)
# Nonce can be zero-length
cipher = AES.new(self.key_128, AES.MODE_CTR, nonce=b(""))
self.assertEqual(b(""), cipher.nonce)
# Nonce and Counter are mutually exclusive
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR,
counter=self.ctr_128, nonce=self.nonce_64)
def test_initial_value_parameter(self):
# Test with nonce parameter
cipher1 = AES.new(self.key_128, AES.MODE_CTR,
nonce=self.nonce_64, initial_value=0xFFFF)
counter = Counter.new(64, prefix=self.nonce_64, initial_value=0xFFFF)
cipher2 = AES.new(self.key_128, AES.MODE_CTR, counter=counter)
pt = get_tag_random("plaintext", 65536)
self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt))
# Test without nonce parameter
cipher1 = AES.new(self.key_128, AES.MODE_CTR,
initial_value=0xFFFF)
counter = Counter.new(64, prefix=cipher1.nonce, initial_value=0xFFFF)
cipher2 = AES.new(self.key_128, AES.MODE_CTR, counter=counter)
pt = get_tag_random("plaintext", 65536)
self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt))
# Initial_value and Counter are mutually exclusive
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR,
counter=self.ctr_128, initial_value=0)
def test_iv_with_matching_length(self):
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR,
counter=Counter.new(120))
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR,
counter=Counter.new(136))
def test_block_size_128(self):
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
self.assertEqual(cipher.block_size, AES.block_size)
def test_block_size_64(self):
cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64)
self.assertEqual(cipher.block_size, DES3.block_size)
def test_unaligned_data_128(self):
plaintexts = [ b("7777777") ] * 100
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
def test_unaligned_data_64(self):
plaintexts = [ b("7777777") ] * 100
cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
def test_unknown_parameters(self):
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR,
7, counter=self.ctr_128)
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR,
counter=self.ctr_128, unknown=7)
# But some are only known by the base cipher (e.g. use_aesni consumed by the AES module)
AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128, use_aesni=False)
def test_null_encryption_decryption(self):
for func in "encrypt", "decrypt":
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
result = getattr(cipher, func)(b(""))
self.assertEqual(result, b(""))
def test_either_encrypt_or_decrypt(self):
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
cipher.encrypt(b(""))
self.assertRaises(TypeError, cipher.decrypt, b(""))
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128)
cipher.decrypt(b(""))
self.assertRaises(TypeError, cipher.encrypt, b(""))
def test_wrap_around(self):
counter = Counter.new(8, prefix=bchr(9) * 15)
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter)
cipher.encrypt(bchr(9) * 16 * 255)
self.assertRaises(OverflowError, cipher.encrypt, bchr(9) * 16)
cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter)
cipher.decrypt(bchr(9) * 16 * 255)
self.assertRaises(OverflowError, cipher.decrypt, bchr(9) * 16)
class SP800TestVectors(unittest.TestCase):
"""Class exercising the CTR test vectors found in Section F.3
of NIST SP 800-3A"""
def test_aes_128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = '874d6191b620e3261bef6864990db6ce' +\
'9806f66b7970fdff8617187bb9fffdff' +\
'5ae4df3edbd5d35e5b4f09020db03eab' +\
'1e031dda2fbe03d1792170a0f3009cee'
key = '2b7e151628aed2a6abf7158809cf4f3c'
counter = Counter.new(nbits=16,
prefix=unhexlify('f0f1f2f3f4f5f6f7f8f9fafbfcfd'),
initial_value=0xfeff)
key = unhexlify(key)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_192(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = '1abc932417521ca24f2b0459fe7e6e0b' +\
'090339ec0aa6faefd5ccc2c6f4ce8e94' +\
'1e36b26bd1ebc670d1bd1d665620abf7' +\
'4f78a7f6d29809585a97daec58c6b050'
key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
counter = Counter.new(nbits=16,
prefix=unhexlify('f0f1f2f3f4f5f6f7f8f9fafbfcfd'),
initial_value=0xfeff)
key = unhexlify(key)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_256(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = '601ec313775789a5b7a7f504bbf3d228' +\
'f443e3ca4d62b59aca84e990cacaf5c5' +\
'2b0930daa23de94ce87017ba2d84988d' +\
'dfc9c58db67aada613c2dd08457941a6'
key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
counter = Counter.new(nbits=16,
prefix=unhexlify('f0f1f2f3f4f5f6f7f8f9fafbfcfd'),
initial_value=0xfeff)
key = unhexlify(key)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
class RFC3686TestVectors(unittest.TestCase):
# Each item is a test vector with:
# - plaintext
# - ciphertext
# - key (AES 128, 192 or 256 bits)
# - counter prefix
data = (
('53696e676c6520626c6f636b206d7367',
'e4095d4fb7a7b3792d6175a3261311b8',
'ae6852f8121067cc4bf7a5765577f39e',
'00000030'+'0000000000000000'),
('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f',
'5104a106168a72d9790d41ee8edad388eb2e1efc46da57c8fce630df9141be28',
'7e24067817fae0d743d6ce1f32539163',
'006cb6dbc0543b59da48d90b'),
('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223',
'c1cf48a89f2ffdd9cf4652e9efdb72d74540a42bde6d7836d59a5ceaaef3105325b2072f',
'7691be035e5020a8ac6e618529f9a0dc',
'00e0017b27777f3f4a1786f0'),
('53696e676c6520626c6f636b206d7367',
'4b55384fe259c9c84e7935a003cbe928',
'16af5b145fc9f579c175f93e3bfb0eed863d06ccfdb78515',
'0000004836733c147d6d93cb'),
('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f',
'453243fc609b23327edfaafa7131cd9f8490701c5ad4a79cfc1fe0ff42f4fb00',
'7c5cb2401b3dc33c19e7340819e0f69c678c3db8e6f6a91a',
'0096b03b020c6eadc2cb500d'),
('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223',
'96893fc55e5c722f540b7dd1ddf7e758d288bc95c69165884536c811662f2188abee0935',
'02bf391ee8ecb159b959617b0965279bf59b60a786d3e0fe',
'0007bdfd5cbd60278dcc0912'),
('53696e676c6520626c6f636b206d7367',
'145ad01dbf824ec7560863dc71e3e0c0',
'776beff2851db06f4c8a0542c8696f6c6a81af1eec96b4d37fc1d689e6c1c104',
'00000060db5672c97aa8f0b2'),
('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f',
'f05e231b3894612c49ee000b804eb2a9b8306b508f839d6a5530831d9344af1c',
'f6d66d6bd52d59bb0796365879eff886c66dd51a5b6a99744b50590c87a23884',
'00faac24c1585ef15a43d875'),
('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223',
'eb6c52821d0bbbf7ce7594462aca4faab407df866569fd07f48cc0b583d6071f1ec0e6b8',
'ff7a617ce69148e4f1726e2f43581de2aa62d9f805532edff1eed687fb54153d',
'001cc5b751a51d70a1c11148')
)
bindata = []
for tv in data:
bindata.append([unhexlify(x) for x in tv])
def runTest(self):
for pt, ct, key, prefix in self.bindata:
counter = Counter.new(32, prefix=prefix)
cipher = AES.new(key, AES.MODE_CTR, counter=counter)
result = cipher.encrypt(pt)
self.assertEqual(ct, result)
def get_tests(config={}):
tests = []
tests += list_test_cases(CtrTests)
tests += list_test_cases(SP800TestVectors)
tests += [ RFC3686TestVectors() ]
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| mit |
abhattad4/Digi-Menu | digimenu2/django/db/models/sql/subqueries.py | 81 | 7861 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, NO_RESULTS
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
self.get_compiler(using).execute_sql(NO_RESULTS)
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
self.do_query(self.get_meta().db_table, self.where, using=using)
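    # Sketch: with GET_ITERATOR_CHUNK_SIZE == 100, a pk_list of 250 ids runs
    # three DELETE queries over the slices [0:100], [100:200] and [200:250].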
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
this is done by copying the query.query.where to self.query, for
complex queries by using subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if ((not innerq_used_tables or innerq_used_tables == self.tables)
and not len(innerq.having)):
# There is only the base table in use in the query, and there is
# no aggregate filtering going on.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return
self.delete_batch(values, using)
return
else:
innerq.clear_select_clause()
innerq.select = [
pk.get_col(self.get_initial_alias())
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
self.get_compiler(using).execute_sql(NO_RESULTS)
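# Illustrative SQL shapes for the two branches above (table and column
# names are hypothetical, not taken from Django):
#
#     fast path : DELETE FROM app_book WHERE rating < 3
#     subquery  : DELETE FROM app_book
#                 WHERE id IN (SELECT id FROM app_book WHERE ...)
#
# On backends where update_can_self_select is False the pk list is
# materialized in Python and handed to delete_batch() instead.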
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model is not self.get_meta().model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
self.values.extend(values_seq)
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
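# The coalescing above is plain dict-of-lists accumulation; a sketch
# with made-up model/field names:
#
#     related_updates = {}
#     for model, field, value in [('Base', 'a', 1), ('Base', 'b', 2)]:
#         related_updates.setdefault(model, []).append((field, None, value))
#     assert len(related_updates) == 1   # one UPDATE per ancestor model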
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(
with_col_aliases=True,
subquery=True,
)
| bsd-3-clause |
andrewmchen/incubator-airflow | tests/www/api/experimental/test_endpoints.py | 4 | 6128 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime, timedelta
from urllib.parse import quote_plus
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag
import json
class ApiExperimentalTests(unittest.TestCase):
def setUp(self):
from airflow import configuration
configuration.load_test_config()
from airflow.www import app as application
app = application.create_app(testing=True)
self.app = app.test_client()
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.app.get(
url_template.format('example_bash_operator', 'runme_0')
)
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.app.get(
url_template.format('example_bash_operator', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.app.get(
url_template.format('DNE', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
response = self.app.post(
url_template.format('example_bash_operator'),
data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps(dict()),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
hour_from_now = datetime.now() + timedelta(hours=1)
execution_date = datetime(hour_from_now.year,
hour_from_now.month,
hour_from_now.day,
hour_from_now.hour)
datetime_string = execution_date.isoformat()
# Test Correct execution
response = self.app.post(
url_template.format(dag_id),
data=json.dumps(dict(execution_date=datetime_string)),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps(dict(execution_date=execution_date.isoformat())),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.app.post(
url_template.format(dag_id),
data=json.dumps(dict(execution_date='not_a_datetime')),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
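# The same endpoint called from outside the test client, as a sketch;
# the host/port and the `requests` dependency are assumptions, not part
# of this suite:
#
#     import json, requests
#     requests.post(
#         'http://localhost:8080/api/experimental/dags/'
#         'example_bash_operator/dag_runs',
#         data=json.dumps({'execution_date': '2017-01-01T00:00:00'}),
#         headers={'Content-Type': 'application/json'})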
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = datetime.now().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(datetime(1990, 1, 1, 1, 1, 1).isoformat())
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.app.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.app.get(
url_template.format('does_not_exist_dag', datetime_string, task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.app.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.app.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.app.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
| apache-2.0 |
MrLoick/python-for-android | python-build/python-libs/gdata/tests/atom_tests/data_test.py | 87 | 37163 | #!/usr/bin/python
# -*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import sys
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.data
import atom.core
import gdata.test_config as conf
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
class AuthorTest(unittest.TestCase):
def setUp(self):
self.author = atom.data.Author()
def testEmptyAuthorShouldHaveEmptyExtensionLists(self):
self.assertTrue(isinstance(self.author._other_elements, list))
self.assertEqual(len(self.author._other_elements), 0)
self.assertTrue(isinstance(self.author._other_attributes, dict))
self.assertEqual(len(self.author._other_attributes), 0)
def testNormalAuthorShouldHaveNoExtensionElements(self):
self.author.name = atom.data.Name(text='Jeff Scudder')
self.assertEqual(self.author.name.text, 'Jeff Scudder')
self.assertEqual(len(self.author._other_elements), 0)
new_author = atom.core.XmlElementFromString(self.author.ToString(),
atom.data.Author)
self.assertEqual(len(new_author._other_elements), 0)
self.assertEqual(new_author.name.text, 'Jeff Scudder')
self.author.extension_elements.append(atom.data.ExtensionElement(
'foo', text='bar'))
self.assertEqual(len(self.author.extension_elements), 1)
self.assertEqual(self.author.name.text, 'Jeff Scudder')
new_author = atom.core.parse(self.author.ToString(), atom.data.Author)
self.assertEqual(len(self.author.extension_elements), 1)
self.assertEqual(new_author.name.text, 'Jeff Scudder')
def testEmptyAuthorToAndFromStringShouldMatch(self):
string_from_author = self.author.ToString()
new_author = atom.core.XmlElementFromString(string_from_author,
atom.data.Author)
string_from_new_author = new_author.ToString()
self.assertEqual(string_from_author, string_from_new_author)
def testAuthorWithNameToAndFromStringShouldMatch(self):
self.author.name = atom.data.Name()
self.author.name.text = 'Jeff Scudder'
string_from_author = self.author.ToString()
new_author = atom.core.XmlElementFromString(string_from_author,
atom.data.Author)
string_from_new_author = new_author.ToString()
self.assertEqual(string_from_author, string_from_new_author)
self.assertEqual(self.author.name.text, new_author.name.text)
def testExtensionElements(self):
self.author.extension_attributes['foo1'] = 'bar'
self.author.extension_attributes['foo2'] = 'rab'
self.assertEqual(self.author.extension_attributes['foo1'], 'bar')
self.assertEqual(self.author.extension_attributes['foo2'], 'rab')
new_author = atom.core.parse(str(self.author), atom.data.Author)
self.assertEqual(new_author.extension_attributes['foo1'], 'bar')
self.assertEqual(new_author.extension_attributes['foo2'], 'rab')
def testConvertFullAuthorToAndFromString(self):
TEST_AUTHOR = """<?xml version="1.0" encoding="utf-8"?>
<author xmlns="http://www.w3.org/2005/Atom">
<name xmlns="http://www.w3.org/2005/Atom">John Doe</name>
<email xmlns="http://www.w3.org/2005/Atom">john@example.com</email>
<uri>http://www.google.com</uri>
</author>"""
author = atom.core.parse(TEST_AUTHOR, atom.data.Author)
self.assertEqual(author.name.text, 'John Doe')
self.assertEqual(author.email.text, 'john@example.com')
self.assertEqual(author.uri.text, 'http://www.google.com')
class EmailTest(unittest.TestCase):
def setUp(self):
self.email = atom.data.Email()
def testEmailToAndFromString(self):
self.email.text = 'This is a test'
new_email = atom.core.parse(self.email.to_string(), atom.data.Email)
self.assertEqual(self.email.text, new_email.text)
self.assertEqual(self.email.extension_elements,
new_email.extension_elements)
class NameTest(unittest.TestCase):
def setUp(self):
self.name = atom.data.Name()
def testEmptyNameToAndFromStringShouldMatch(self):
string_from_name = self.name.ToString()
new_name = atom.core.XmlElementFromString(string_from_name,
atom.data.Name)
string_from_new_name = new_name.ToString()
self.assertEqual(string_from_name, string_from_new_name)
def testText(self):
self.assertTrue(self.name.text is None)
self.name.text = 'Jeff Scudder'
self.assertEqual(self.name.text, 'Jeff Scudder')
new_name = atom.core.parse(self.name.to_string(), atom.data.Name)
self.assertEqual(new_name.text, self.name.text)
def testExtensionElements(self):
self.name.extension_attributes['foo'] = 'bar'
self.assertEqual(self.name.extension_attributes['foo'], 'bar')
new_name = atom.core.parse(self.name.ToString(), atom.data.Name)
self.assertEqual(new_name.extension_attributes['foo'], 'bar')
class ExtensionElementTest(unittest.TestCase):
def setUp(self):
self.ee = atom.data.ExtensionElement('foo')
self.EXTENSION_TREE = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<g:author xmlns:g="http://www.google.com">
<g:name>John Doe
<g:foo yes="no" up="down">Bar</g:foo>
</g:name>
</g:author>
</feed>"""
def testEmptyEEShouldProduceEmptyString(self):
pass
def testEEParsesTreeCorrectly(self):
deep_tree = atom.core.xml_element_from_string(self.EXTENSION_TREE,
atom.data.ExtensionElement)
self.assertEqual(deep_tree.tag, 'feed')
self.assertEqual(deep_tree.namespace, 'http://www.w3.org/2005/Atom')
self.assert_(deep_tree.children[0].tag == 'author')
self.assert_(deep_tree.children[0].namespace == 'http://www.google.com')
self.assert_(deep_tree.children[0].children[0].tag == 'name')
self.assert_(deep_tree.children[0].children[0].namespace ==
'http://www.google.com')
self.assert_(deep_tree.children[0].children[0].text.strip() == 'John Doe')
self.assert_(deep_tree.children[0].children[0].children[0].text.strip() ==
'Bar')
foo = deep_tree.children[0].children[0].children[0]
self.assert_(foo.tag == 'foo')
self.assert_(foo.namespace == 'http://www.google.com')
self.assert_(foo.attributes['up'] == 'down')
self.assert_(foo.attributes['yes'] == 'no')
self.assert_(foo.children == [])
def testEEToAndFromStringShouldMatch(self):
string_from_ee = self.ee.ToString()
new_ee = atom.core.xml_element_from_string(string_from_ee,
atom.data.ExtensionElement)
string_from_new_ee = new_ee.ToString()
self.assert_(string_from_ee == string_from_new_ee)
deep_tree = atom.core.xml_element_from_string(self.EXTENSION_TREE,
atom.data.ExtensionElement)
string_from_deep_tree = deep_tree.ToString()
new_deep_tree = atom.core.xml_element_from_string(string_from_deep_tree,
atom.data.ExtensionElement)
string_from_new_deep_tree = new_deep_tree.ToString()
self.assert_(string_from_deep_tree == string_from_new_deep_tree)
class LinkTest(unittest.TestCase):
def setUp(self):
self.link = atom.data.Link()
def testLinkToAndFromString(self):
self.link.href = 'test href'
self.link.hreflang = 'english'
self.link.type = 'text/html'
self.link.extension_attributes['foo'] = 'bar'
self.assert_(self.link.href == 'test href')
self.assert_(self.link.hreflang == 'english')
self.assert_(self.link.type == 'text/html')
self.assert_(self.link.extension_attributes['foo'] == 'bar')
new_link = atom.core.parse(self.link.ToString(), atom.data.Link)
self.assert_(self.link.href == new_link.href)
self.assert_(self.link.type == new_link.type)
self.assert_(self.link.hreflang == new_link.hreflang)
self.assert_(self.link.extension_attributes['foo'] ==
new_link.extension_attributes['foo'])
def testLinkType(self):
test_link = atom.data.Link(type='text/html')
self.assertEqual(test_link.type, 'text/html')
class GeneratorTest(unittest.TestCase):
def setUp(self):
self.generator = atom.data.Generator()
def testGeneratorToAndFromString(self):
self.generator.uri = 'www.google.com'
self.generator.version = '1.0'
self.generator.extension_attributes['foo'] = 'bar'
self.assert_(self.generator.uri == 'www.google.com')
self.assert_(self.generator.version == '1.0')
self.assert_(self.generator.extension_attributes['foo'] == 'bar')
new_generator = atom.core.parse(self.generator.ToString(), atom.data.Generator)
self.assert_(self.generator.uri == new_generator.uri)
self.assert_(self.generator.version == new_generator.version)
self.assert_(self.generator.extension_attributes['foo'] ==
new_generator.extension_attributes['foo'])
class TitleTest(unittest.TestCase):
def setUp(self):
self.title = atom.data.Title()
def testTitleToAndFromString(self):
self.title.type = 'text'
self.title.text = 'Less: <'
self.assert_(self.title.type == 'text')
self.assert_(self.title.text == 'Less: <')
new_title = atom.core.parse(str(self.title), atom.data.Title)
self.assert_(self.title.type == new_title.type)
self.assert_(self.title.text == new_title.text)
class SubtitleTest(unittest.TestCase):
def setUp(self):
self.subtitle = atom.data.Subtitle()
def testTitleToAndFromString(self):
self.subtitle.type = 'text'
self.subtitle.text = 'sub & title'
self.assert_(self.subtitle.type == 'text')
self.assert_(self.subtitle.text == 'sub & title')
new_subtitle = atom.core.parse(self.subtitle.ToString(),
atom.data.Subtitle)
self.assert_(self.subtitle.type == new_subtitle.type)
self.assert_(self.subtitle.text == new_subtitle.text)
class SummaryTest(unittest.TestCase):
def setUp(self):
self.summary = atom.data.Summary()
def testTitleToAndFromString(self):
self.summary.type = 'text'
self.summary.text = 'Less: <'
self.assert_(self.summary.type == 'text')
self.assert_(self.summary.text == 'Less: <')
new_summary = atom.core.parse(self.summary.ToString(), atom.data.Summary)
self.assert_(self.summary.type == new_summary.type)
self.assert_(self.summary.text == new_summary.text)
class CategoryTest(unittest.TestCase):
def setUp(self):
self.category = atom.data.Category()
def testCategoryToAndFromString(self):
self.category.term = 'x'
self.category.scheme = 'y'
self.category.label = 'z'
self.assert_(self.category.term == 'x')
self.assert_(self.category.scheme == 'y')
self.assert_(self.category.label == 'z')
new_category = atom.core.parse(self.category.to_string(),
atom.data.Category)
self.assert_(self.category.term == new_category.term)
self.assert_(self.category.scheme == new_category.scheme)
self.assert_(self.category.label == new_category.label)
class ContributorTest(unittest.TestCase):
def setUp(self):
self.contributor = atom.data.Contributor()
def testContributorToAndFromString(self):
self.contributor.name = atom.data.Name(text='J Scud')
self.contributor.email = atom.data.Email(text='nobody@nowhere')
self.contributor.uri = atom.data.Uri(text='http://www.google.com')
self.assert_(self.contributor.name.text == 'J Scud')
self.assert_(self.contributor.email.text == 'nobody@nowhere')
self.assert_(self.contributor.uri.text == 'http://www.google.com')
new_contributor = atom.core.parse(self.contributor.ToString(),
atom.data.Contributor)
self.assert_(self.contributor.name.text == new_contributor.name.text)
self.assert_(self.contributor.email.text == new_contributor.email.text)
self.assert_(self.contributor.uri.text == new_contributor.uri.text)
class IdTest(unittest.TestCase):
def setUp(self):
self.my_id = atom.data.Id()
def testIdToAndFromString(self):
self.my_id.text = 'my nifty id'
self.assert_(self.my_id.text == 'my nifty id')
new_id = atom.core.parse(self.my_id.ToString(), atom.data.Id)
self.assert_(self.my_id.text == new_id.text)
class IconTest(unittest.TestCase):
def setUp(self):
self.icon = atom.data.Icon()
def testIconToAndFromString(self):
self.icon.text = 'my picture'
self.assert_(self.icon.text == 'my picture')
new_icon = atom.core.parse(str(self.icon), atom.data.Icon)
self.assert_(self.icon.text == new_icon.text)
class LogoTest(unittest.TestCase):
def setUp(self):
self.logo = atom.data.Logo()
def testLogoToAndFromString(self):
self.logo.text = 'my logo'
self.assert_(self.logo.text == 'my logo')
new_logo = atom.core.parse(self.logo.ToString(), atom.data.Logo)
self.assert_(self.logo.text == new_logo.text)
class RightsTest(unittest.TestCase):
def setUp(self):
self.rights = atom.data.Rights()
def testContributorToAndFromString(self):
self.rights.text = 'you have the right to remain silent'
self.rights.type = 'text'
self.assert_(self.rights.text == 'you have the right to remain silent')
self.assert_(self.rights.type == 'text')
new_rights = atom.core.parse(self.rights.ToString(), atom.data.Rights)
self.assert_(self.rights.text == new_rights.text)
self.assert_(self.rights.type == new_rights.type)
class UpdatedTest(unittest.TestCase):
def setUp(self):
self.updated = atom.data.Updated()
def testUpdatedToAndFromString(self):
self.updated.text = 'my time'
self.assert_(self.updated.text == 'my time')
new_updated = atom.core.parse(self.updated.ToString(), atom.data.Updated)
self.assert_(self.updated.text == new_updated.text)
class PublishedTest(unittest.TestCase):
def setUp(self):
self.published = atom.data.Published()
def testPublishedToAndFromString(self):
self.published.text = 'pub time'
self.assert_(self.published.text == 'pub time')
new_published = atom.core.parse(self.published.ToString(),
atom.data.Published)
self.assert_(self.published.text == new_published.text)
class FeedEntryParentTest(unittest.TestCase):
"""The test accesses hidden methods in atom.FeedEntryParent"""
def testConvertToAndFromElementTree(self):
# Use entry because FeedEntryParent doesn't have a tag or namespace.
original = atom.data.Entry()
copy = atom.data.FeedEntryParent()
original.author.append(atom.data.Author(name=atom.data.Name(
text='J Scud')))
self.assert_(original.author[0].name.text == 'J Scud')
self.assert_(copy.author == [])
original.id = atom.data.Id(text='test id')
self.assert_(original.id.text == 'test id')
self.assert_(copy.id is None)
copy._harvest_tree(original._to_tree())
self.assert_(original.author[0].name.text == copy.author[0].name.text)
self.assert_(original.id.text == copy.id.text)
class EntryTest(unittest.TestCase):
def testConvertToAndFromString(self):
entry = atom.data.Entry()
entry.author.append(atom.data.Author(name=atom.data.Name(text='js')))
entry.title = atom.data.Title(text='my test entry')
self.assert_(entry.author[0].name.text == 'js')
self.assert_(entry.title.text == 'my test entry')
new_entry = atom.core.parse(entry.ToString(), atom.data.Entry)
self.assert_(new_entry.author[0].name.text == 'js')
self.assert_(new_entry.title.text == 'my test entry')
def testEntryCorrectlyConvertsActualData(self):
entry = atom.core.parse(XML_ENTRY_1, atom.data.Entry)
self.assert_(entry.category[0].scheme ==
'http://base.google.com/categories/itemtypes')
self.assert_(entry.category[0].term == 'products')
self.assert_(entry.id.text == ' http://www.google.com/test/id/url ')
self.assert_(entry.title.text == 'Testing 2000 series laptop')
self.assert_(entry.title.type == 'text')
self.assert_(entry.content.type == 'xhtml')
# TODO: check all other values for the test entry
def testAppControl(self):
TEST_BASE_ENTRY = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<app:control xmlns:app='http://purl.org/atom/app#'>
<app:draft>yes</app:draft>
<gm:disapproved
xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
</app:control>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
entry = atom.core.parse(TEST_BASE_ENTRY, atom.data.Entry)
self.assertEquals(entry.control.draft.text, 'yes')
self.assertEquals(len(entry.control.extension_elements), 1)
self.assertEquals(entry.control.extension_elements[0].tag, 'disapproved')
class ControlTest(unittest.TestCase):
def testVersionRuleGeneration(self):
self.assertEqual(atom.core._get_qname(atom.data.Control, 1),
'{http://purl.org/atom/app#}control')
self.assertEqual(atom.data.Control._get_rules(1)[0],
'{http://purl.org/atom/app#}control')
def testVersionedControlFromString(self):
xml_v1 = """<control xmlns="http://purl.org/atom/app#">
<draft>no</draft></control>"""
xml_v2 = """<control xmlns="http://www.w3.org/2007/app">
<draft>no</draft></control>"""
control_v1 = atom.core.parse(xml_v1, atom.data.Control, 1)
control_v2 = atom.core.parse(xml_v2, atom.data.Control, 2)
self.assertFalse(control_v1 is None)
self.assertFalse(control_v2 is None)
# Parsing with mismatched version numbers should return None.
self.assertTrue(atom.core.parse(xml_v1, atom.data.Control, 2) is None)
self.assertTrue(atom.core.parse(xml_v2, atom.data.Control, 1) is None)
def testConvertToAndFromString(self):
control = atom.data.Control()
control.text = 'some text'
control.draft = atom.data.Draft(text='yes')
self.assertEquals(control.draft.text, 'yes')
self.assertEquals(control.text, 'some text')
self.assertTrue(isinstance(control.draft, atom.data.Draft))
new_control = atom.core.parse(str(control), atom.data.Control)
self.assertEquals(control.draft.text, new_control.draft.text)
self.assertEquals(control.text, new_control.text)
self.assertTrue(isinstance(new_control.draft, atom.data.Draft))
class DraftTest(unittest.TestCase):
def testConvertToAndFromString(self):
draft = atom.data.Draft()
draft.text = 'maybe'
draft.extension_attributes['foo'] = 'bar'
self.assertEquals(draft.text, 'maybe')
self.assertEquals(draft.extension_attributes['foo'], 'bar')
new_draft = atom.core.parse(str(draft), atom.data.Draft)
self.assertEquals(draft.text, new_draft.text)
self.assertEquals(draft.extension_attributes['foo'],
new_draft.extension_attributes['foo'])
class SourceTest(unittest.TestCase):
def testConvertToAndFromString(self):
source = atom.data.Source()
source.author.append(atom.data.Author(name=atom.data.Name(text='js')))
source.title = atom.data.Title(text='my test source')
source.generator = atom.data.Generator(text='gen')
self.assert_(source.author[0].name.text == 'js')
self.assert_(source.title.text == 'my test source')
self.assert_(source.generator.text == 'gen')
new_source = atom.core.parse(source.ToString(), atom.data.Source)
self.assert_(new_source.author[0].name.text == 'js')
self.assert_(new_source.title.text == 'my test source')
self.assert_(new_source.generator.text == 'gen')
class FeedTest(unittest.TestCase):
def testConvertToAndFromString(self):
feed = atom.data.Feed()
feed.author.append(atom.data.Author(name=atom.data.Name(text='js')))
feed.title = atom.data.Title(text='my test source')
feed.generator = atom.data.Generator(text='gen')
feed.entry.append(atom.data.Entry(author=[atom.data.Author(
name=atom.data.Name(text='entry author'))]))
self.assert_(feed.author[0].name.text == 'js')
self.assert_(feed.title.text == 'my test source')
self.assert_(feed.generator.text == 'gen')
self.assert_(feed.entry[0].author[0].name.text == 'entry author')
new_feed = atom.core.parse(feed.ToString(), atom.data.Feed)
self.assert_(new_feed.author[0].name.text == 'js')
self.assert_(new_feed.title.text == 'my test source')
self.assert_(new_feed.generator.text == 'gen')
self.assert_(new_feed.entry[0].author[0].name.text == 'entry author')
def testPreserveEntryOrder(self):
test_xml = (
'<feed xmlns="http://www.w3.org/2005/Atom">'
'<entry><id>0</id></entry>'
'<entry><id>1</id></entry>'
'<title>Testing Order</title>'
'<entry><id>2</id></entry>'
'<entry><id>3</id></entry>'
'<entry><id>4</id></entry>'
'<entry><id>5</id></entry>'
'<entry><id>6</id></entry>'
'<entry><id>7</id></entry>'
'<author/>'
'<entry><id>8</id></entry>'
'<id>feed_id</id>'
'<entry><id>9</id></entry>'
'</feed>')
feed = atom.core.parse(test_xml, atom.data.Feed)
for i in xrange(10):
self.assertEqual(feed.entry[i].id.text, str(i))
feed = atom.core.parse(feed.ToString(), atom.data.Feed)
for i in xrange(10):
self.assertEqual(feed.entry[i].id.text, str(i))
temp = feed.entry[3]
feed.entry[3] = feed.entry[4]
feed.entry[4] = temp
self.assert_(feed.entry[2].id.text == '2')
self.assert_(feed.entry[3].id.text == '4')
self.assert_(feed.entry[4].id.text == '3')
self.assert_(feed.entry[5].id.text == '5')
feed = atom.core.parse(feed.to_string(), atom.data.Feed)
self.assertEqual(feed.entry[2].id.text, '2')
self.assertEqual(feed.entry[3].id.text, '4')
self.assertEqual(feed.entry[4].id.text, '3')
self.assertEqual(feed.entry[5].id.text, '5')
class ContentEntryParentTest(unittest.TestCase):
"""The test accesses hidden methods in atom.FeedEntryParent"""
def setUp(self):
self.content = atom.data.Content()
def testConvertToAndFromElementTree(self):
self.content.text = 'my content'
self.content.type = 'text'
self.content.src = 'my source'
self.assert_(self.content.text == 'my content')
self.assert_(self.content.type == 'text')
self.assert_(self.content.src == 'my source')
new_content = atom.core.parse(self.content.ToString(), atom.data.Content)
self.assert_(self.content.text == new_content.text)
self.assert_(self.content.type == new_content.type)
self.assert_(self.content.src == new_content.src)
def testContentConstructorSetsSrc(self):
new_content = atom.data.Content(src='abcd')
self.assertEquals(new_content.src, 'abcd')
def testContentFromString(self):
content_xml = '<content xmlns="http://www.w3.org/2005/Atom" type="test"/>'
content = atom.core.parse(content_xml, atom.data.Content)
self.assertTrue(isinstance(content, atom.data.Content))
self.assertEqual(content.type, 'test')
class PreserveUnkownElementTest(unittest.TestCase):
"""Tests correct preservation of XML elements which are non Atom"""
def setUp(self):
GBASE_ATTRIBUTE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes</id>
<updated>2006-11-01T20:35:59.578Z</updated>
<category scheme='http://base.google.com/categories/itemtypes'
term='online jobs'></category>
<category scheme='http://base.google.com/categories/itemtypes'
term='jobs'></category>
<title type='text'>histogram for query: [item type:jobs]</title>
<link rel='alternate' type='text/html'
href='http://base.google.com'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/base/attributes/jobs'></link>
<generator version='1.0'
uri='http://base.google.com'>GoogleBase</generator>
<openSearch:totalResults>16</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>16</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/attributes/job+industy</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
</feed>"""
self.feed = atom.core.parse(GBASE_ATTRIBUTE_FEED,
atom.data.Feed)
def testCaptureOpenSearchElements(self):
self.assertEquals(self.feed.FindExtensions('totalResults')[0].tag,
'totalResults')
self.assertEquals(self.feed.FindExtensions('totalResults')[0].namespace,
'http://a9.com/-/spec/opensearchrss/1.0/')
open_search_extensions = self.feed.FindExtensions(
namespace='http://a9.com/-/spec/opensearchrss/1.0/')
self.assertEquals(len(open_search_extensions), 3)
for element in open_search_extensions:
self.assertEquals(element.namespace,
'http://a9.com/-/spec/opensearchrss/1.0/')
def testCaptureMetaElements(self):
meta_elements = self.feed.entry[0].FindExtensions(
namespace='http://base.google.com/ns-metadata/1.0')
self.assertEquals(len(meta_elements), 1)
self.assertEquals(meta_elements[0].attributes['count'], '4416629')
self.assertEquals(len(meta_elements[0].children), 10)
def testCaptureMetaChildElements(self):
meta_elements = self.feed.entry[0].FindExtensions(
namespace='http://base.google.com/ns-metadata/1.0')
meta_children = meta_elements[0].FindChildren(
namespace='http://base.google.com/ns-metadata/1.0')
self.assertEquals(len(meta_children), 10)
for child in meta_children:
self.assertEquals(child.tag, 'value')
class LinkFinderTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(XML_ENTRY_1, atom.data.Entry)
def testLinkFinderGetsLicenseLink(self):
self.assertTrue(isinstance(self.entry.GetLink('license'), atom.data.Link))
self.assertTrue(isinstance(self.entry.GetLicenseLink(), atom.data.Link))
self.assertEquals(self.entry.GetLink('license').href,
'http://creativecommons.org/licenses/by-nc/2.5/rdf')
self.assertEquals(self.entry.get_license_link().href,
'http://creativecommons.org/licenses/by-nc/2.5/rdf')
self.assertEquals(self.entry.GetLink('license').rel, 'license')
self.assertEquals(self.entry.FindLicenseLink(),
'http://creativecommons.org/licenses/by-nc/2.5/rdf')
def testLinkFinderGetsAlternateLink(self):
self.assertTrue(isinstance(self.entry.GetLink('alternate'),
atom.data.Link))
self.assertEquals(self.entry.GetLink('alternate').href,
'http://www.provider-host.com/123456789')
self.assertEquals(self.entry.FindAlternateLink(),
'http://www.provider-host.com/123456789')
self.assertEquals(self.entry.GetLink('alternate').rel, 'alternate')
class AtomBaseTest(unittest.TestCase):
def testAtomBaseConvertsExtensions(self):
# Using Id because it adds no additional members.
atom_base = atom.data.Id()
extension_child = atom.data.ExtensionElement('foo',
namespace='http://ns0.com')
extension_grandchild = atom.data.ExtensionElement('bar',
namespace='http://ns0.com')
extension_child.children.append(extension_grandchild)
atom_base.extension_elements.append(extension_child)
self.assertEquals(len(atom_base.extension_elements), 1)
self.assertEquals(len(atom_base.extension_elements[0].children), 1)
self.assertEquals(atom_base.extension_elements[0].tag, 'foo')
self.assertEquals(atom_base.extension_elements[0].children[0].tag, 'bar')
element_tree = atom_base._to_tree()
self.assert_(element_tree.find('{http://ns0.com}foo') is not None)
self.assert_(element_tree.find('{http://ns0.com}foo').find(
'{http://ns0.com}bar') is not None)
class UtfParsingTest(unittest.TestCase):
def setUp(self):
self.test_xml = u"""<?xml version="1.0" encoding="utf-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'>
<id>http://www.google.com/test/id/url</id>
<title type='αλφα'>αλφα</title>
</entry>"""
def testMemberStringEncoding(self):
atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
self.assertTrue(isinstance(atom_entry.title.type, unicode))
self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
# Setting object members to unicode strings is supported.
atom_entry.title.type = u'\u03B1\u03BB\u03C6\u03B1'
xml = atom_entry.ToString()
# The unicode code points should be converted to XML escaped sequences.
self.assertTrue('&#945;&#955;&#966;&#945;' in xml)
# Make sure that we can use plain text when MEMBER_STRING_ENCODING is utf8
atom_entry.title.type = "plain text"
atom_entry.title.text = "more text"
xml = atom_entry.ToString()
self.assert_("plain text" in xml)
self.assert_("more text" in xml)
# Test something else than utf-8
atom.core.STRING_ENCODING = 'iso8859_7'
atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1')
self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1')
# Test using unicode strings directly for object members
atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1')
self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1')
# Make sure that we can use plain text when MEMBER_STRING_ENCODING is
# unicode
atom_entry.title.type = "plain text"
atom_entry.title.text = "more text"
xml = atom_entry.ToString()
self.assert_("plain text" in xml)
self.assert_("more text" in xml)
def testConvertExampleXML(self):
GBASE_STRING_ENCODING_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:gm='http://base.google.com/ns-metadata/1.0'
xmlns:g='http://base.google.com/ns/1.0'
xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets/1749</id>
<published>2007-12-09T03:13:07.000Z</published>
<updated>2008-01-07T03:26:46.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes'
term='Products'/>
<title type='text'>Digital Camera Cord Fits DSC-R1 S40</title>
<content type='html'>SONY \xC2\xB7 Cybershot Digital Camera Usb
Cable DESCRIPTION This is a 2.5 USB 2.0 A to Mini B (5 Pin)
high quality digital camera cable used for connecting your
Sony Digital Cameras and Camcoders. Backward
Compatible with USB 2.0, 1.0 and 1.1. Fully ...</content>
<link rel='alternate' type='text/html'
href='http://adfarm.mediaplex.com/ad/ck/711-5256-8196-2mm'/>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/base/feeds/snippets/1749'/>
<author>
<name>eBay</name>
</author>
<g:item_type type='text'>Products</g:item_type>
<g:item_language type='text'>EN</g:item_language>
<g:target_country type='text'>US</g:target_country>
<g:price type='floatUnit'>0.99 usd</g:price>
<g:image_link
type='url'>http://www.example.com/pict/27_1.jpg</g:image_link>
<g:category type='text'>Cameras &amp; Photo&gt;Digital Camera
Accessories&gt;Cables</g:category>
<g:category type='text'>Cords &amp; USB Cables</g:category>
<g:customer_id type='int'>11729</g:customer_id>
<g:id type='text'>270195049057</g:id>
<g:expiration_date
type='dateTime'>2008-02-06T03:26:46Z</g:expiration_date>
</entry>"""
try:
entry = atom.core.parse(GBASE_STRING_ENCODING_ENTRY,
atom.data.Entry)
except UnicodeDecodeError:
self.fail('Error when converting XML')
class VersionedXmlTest(unittest.TestCase):
def test_monoversioned_parent_with_multiversioned_child(self):
v2_rules = atom.data.Entry._get_rules(2)
self.assertTrue('{http://www.w3.org/2007/app}control' in v2_rules[1])
entry_xml = """<entry xmlns='http://www.w3.org/2005/Atom'>
<app:control xmlns:app='http://www.w3.org/2007/app'>
<app:draft>yes</app:draft>
</app:control>
</entry>"""
entry = e = atom.core.parse(entry_xml, atom.data.Entry, version=2)
self.assertTrue(entry is not None)
self.assertTrue(entry.control is not None)
self.assertTrue(entry.control.draft is not None)
self.assertEqual(entry.control.draft.text, 'yes')
# v1 rules should not parse v2 XML.
entry = e = atom.core.parse(entry_xml, atom.data.Entry, version=1)
self.assertTrue(entry is not None)
self.assertTrue(entry.control is None)
# The default version should be v1.
entry = e = atom.core.parse(entry_xml, atom.data.Entry)
self.assertTrue(entry is not None)
self.assertTrue(entry.control is None)
def suite():
return conf.build_suite([AuthorTest, EmailTest, NameTest,
ExtensionElementTest, LinkTest, GeneratorTest,
TitleTest, SubtitleTest, SummaryTest, IdTest,
IconTest, LogoTest, RightsTest, UpdatedTest,
PublishedTest, FeedEntryParentTest, EntryTest,
ContentEntryParentTest, PreserveUnkownElementTest,
FeedTest, LinkFinderTest, AtomBaseTest,
UtfParsingTest, VersionedXmlTest])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
civisanalytics/ansible | lib/ansible/modules/cloud/vmware/vmware_host.py | 16 | 7732 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vmware_host
short_description: Add/remove ESXi host to/from vCenter
description:
- This module can be used to add/remove an ESXi host to/from vCenter
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- Name of the datacenter to add the host
required: True
cluster_name:
description:
- Name of the cluster to add the host
required: True
esxi_hostname:
description:
- ESXi hostname to manage
required: True
esxi_username:
description:
- ESXi username
required: True
esxi_password:
description:
- ESXi password
required: True
state:
description:
- Add or remove the host
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example from Ansible playbook
- name: Add ESXi Host to VCSA
local_action:
module: vmware_host
hostname: vcsa_host
username: vcsa_user
password: vcsa_pass
datacenter_name: datacenter_name
cluster_name: cluster_name
esxi_hostname: esxi_hostname
esxi_username: esxi_username
esxi_password: esxi_password
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareHost(object):
def __init__(self, module):
self.module = module
self.datacenter_name = module.params['datacenter_name']
self.cluster_name = module.params['cluster_name']
self.esxi_hostname = module.params['esxi_hostname']
self.esxi_username = module.params['esxi_username']
self.esxi_password = module.params['esxi_password']
self.state = module.params['state']
self.dc = None
self.cluster = None
self.host = None
self.content = connect_to_api(module)
def process_state(self):
try:
# Currently state_update_host is not implemented.
host_states = {
'absent': {
'present': self.state_remove_host,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_exit_unchanged,
'absent': self.state_add_host,
}
}
host_states[self.state][self.check_host_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
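# The nested dict above is a small dispatch table: desired state
# (outer key) x current state (inner key) -> handler. A standalone
# sketch with stand-in handler names:
#
#     transitions = {
#         'absent':  {'present': remove_host, 'absent': exit_unchanged},
#         'present': {'present': exit_unchanged, 'absent': add_host},
#     }
#     transitions[desired_state][current_state]()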
def find_host_by_cluster_datacenter(self):
self.dc = find_datacenter_by_name(self.content, self.datacenter_name)
self.cluster = find_cluster_by_name_datacenter(self.dc, self.cluster_name)
for host in self.cluster.host:
if host.name == self.esxi_hostname:
return host, self.cluster
return None, self.cluster
def add_host_to_vcenter(self):
host_connect_spec = vim.host.ConnectSpec()
host_connect_spec.hostName = self.esxi_hostname
host_connect_spec.userName = self.esxi_username
host_connect_spec.password = self.esxi_password
host_connect_spec.force = True
host_connect_spec.sslThumbprint = ""
as_connected = True
esxi_license = None
resource_pool = None
try:
task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
success, result = wait_for_task(task)
return success, result
except TaskError as add_task_error:
# This is almost certain to fail the first time.
# In order to get the sslThumbprint we first connect
# get the vim.fault.SSLVerifyFault then grab the sslThumbprint
# from that object.
#
# args is a tuple, selecting the first tuple
ssl_verify_fault = add_task_error.args[0]
host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint
task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
success, result = wait_for_task(task)
return success, result
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_remove_host(self):
changed = True
result = None
if not self.module.check_mode:
if not self.host.runtime.inMaintenanceMode:
maintenance_mode_task = self.host.EnterMaintenanceMode_Task(300, True, None)
changed, result = wait_for_task(maintenance_mode_task)
if changed:
task = self.host.Destroy_Task()
changed, result = wait_for_task(task)
else:
raise Exception(result)
self.module.exit_json(changed=changed, result=str(result))
def state_update_host(self):
self.module.exit_json(changed=False, msg="Currently not implemented.")
def state_add_host(self):
changed = True
result = None
if not self.module.check_mode:
changed, result = self.add_host_to_vcenter()
self.module.exit_json(changed=changed, result=str(result))
def check_host_state(self):
self.host, self.cluster = self.find_host_by_cluster_datacenter()
if self.host is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
cluster_name=dict(required=True, type='str'),
esxi_hostname=dict(required=True, type='str'),
esxi_username=dict(required=True, type='str'),
esxi_password=dict(required=True, type='str', no_log=True),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_host = VMwareHost(module)
vmware_host.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/integration/cloud/providers/msazure.py | 2 | 5620 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
import random
import string
from distutils.version import LooseVersion
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Third-Party Libs
from salt.ext.six.moves import range
try:
import azure # pylint: disable=unused-import
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
if HAS_AZURE and not hasattr(azure, '__version__'):
import azure.common
def __random_name(size=6):
'''
Generates a random cloud instance name
'''
return 'CLOUD-TEST-' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(size)
)
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = __random_name()
PROVIDER_NAME = 'azure'
PROFILE_NAME = 'azure-test'
REQUIRED_AZURE = '0.11.1'
def __has_required_azure():
'''
Returns True/False if the required version of the Azure SDK is installed.
'''
if hasattr(azure, '__version__'):
version = LooseVersion(azure.__version__)
else:
version = LooseVersion(azure.common.__version__)
if HAS_AZURE is True and REQUIRED_AZURE <= version:
return True
else:
return False
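# For reference, LooseVersion provides exactly the ordering the check
# above relies on:
assert LooseVersion('0.9.0') < LooseVersion(REQUIRED_AZURE) <= \
    LooseVersion('0.12.0')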
@skipIf(HAS_AZURE is False, 'These tests require the Azure Python SDK to be installed.')
@skipIf(__has_required_azure() is False, 'The Azure Python SDK must be >= 0.11.1.')
class AzureTest(integration.ShellCase):
'''
Integration tests for the Azure cloud provider in Salt-Cloud
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
'''
super(AzureTest, self).setUp()
# check if appropriate cloud provider and profile files are present
provider_str = 'azure-config'
providers = self.run_cloud('--list-providers')
if provider_str + ':' not in providers:
self.skipTest(
'Configuration file for {0} was not found. Check {0}.conf files '
'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
.format(PROVIDER_NAME)
)
# check if subscription_id and certificate_path are present in provider file
provider_config = cloud_providers_config(
os.path.join(
integration.FILES,
'conf',
'cloud.providers.d',
PROVIDER_NAME + '.conf'
)
)
sub_id = provider_config[provider_str][PROVIDER_NAME]['subscription_id']
cert_path = provider_config[provider_str][PROVIDER_NAME]['certificate_path']
if sub_id == '' or cert_path == '':
self.skipTest(
'A subscription_id and certificate_path must be provided to run '
'these tests. Check '
'tests/integration/files/conf/cloud.providers.d/{0}.conf'.format(
PROVIDER_NAME
)
)
# check if ssh_username, ssh_password, and media_link are present
# in the azure configuration file
profile_config = cloud_providers_config(
os.path.join(
integration.FILES,
'conf',
'cloud.profiles.d',
PROVIDER_NAME + '.conf'
)
)
ssh_user = profile_config[PROFILE_NAME][provider_str]['ssh_username']
ssh_pass = profile_config[PROFILE_NAME][provider_str]['ssh_password']
media_link = profile_config[PROFILE_NAME][provider_str]['media_link']
if ssh_user == '' or ssh_pass == '' or media_link == '':
self.skipTest(
'An ssh_username, ssh_password, and media_link must be provided to run '
'these tests. One or more of these elements is missing. Check '
'tests/integration/files/conf/cloud.profiles.d/{0}.conf'.format(
PROVIDER_NAME
)
)
def test_instance(self):
'''
Test creating an instance on Azure
'''
# check if instance with salt installed returned
try:
self.assertIn(
INSTANCE_NAME,
[i.strip() for i in self.run_cloud(
'-p {0} {1}'.format(
PROFILE_NAME,
INSTANCE_NAME
)
)]
)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
raise
# delete the instance
try:
self.assertIn(
INSTANCE_NAME + ':',
[i.strip() for i in self.run_cloud(
'-d {0} --assume-yes'.format(
INSTANCE_NAME
)
)]
)
except AssertionError:
raise
def tearDown(self):
'''
Clean up after tests
'''
query = self.run_cloud('--query')
ret_str = ' {0}:'.format(INSTANCE_NAME)
# if test instance is still present, delete it
if ret_str in query:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(AzureTest)
| apache-2.0 |
iamroot12CD/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently only a few example classes are defined: PerfEvent is the
# base class for all perf event samples, PebsEvent is a HW-based Intel
# x86 PEBS event, and users can add more SW/HW event classes as needed.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type directly,
# so we infer it from the size of the raw buffer: a raw PEBS event with
# load latency data is 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which
# lies in the four 64-bit words right after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
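# Illustrative self-check (values fabricated): an all-zero 176-byte
# buffer is classified by create_event() as a PEBS sample with
# load-latency words, per the size-based dispatch above.
if __name__ == '__main__':
    buf = '\x00' * 176
    ev = create_event("cycles", "bash", "/bin/bash", "main", buf)
    assert isinstance(ev, PebsNHM) and ev.lat == 0
    ev.show()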
| gpl-2.0 |
MaryanMorel/faker | faker/utils/datetime_safe.py | 20 | 2881 | # coding=utf-8
# From django.utils.datetime_safe
# Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
from __future__ import unicode_literals
from datetime import date as real_date
from datetime import datetime as real_datetime
import re
import time
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
def combine(self, date, time):
# positional datetime() args are (year, month, day, hour, minute,
# second, microsecond, tzinfo), so seconds must be passed through
return datetime(date.year, date.month, date.day,
time.hour, time.minute, time.second,
time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
def new_date(d):
"""Generate a safe date from a datetime.date object."""
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
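# Illustrative examples (hypothetical values, assuming the pre-1900
# handling documented above):
#   new_date(real_date(1850, 8, 2)).strftime('%Y/%m/%d')   # '1850/08/02'
#   new_datetime(real_datetime(1850, 8, 2, 12, 30)).strftime('%H:%M')  # '12:30'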
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
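# For example, _findall("19991999", "1999") returns [0, 4]; overlaps are
# counted too, so _findall("aaa", "aa") returns [0, 1].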
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
msg = 'strftime of dates before 1900 does not handle {0}'
raise TypeError(msg.format(illegal_formatting.group(0)))
year = dt.year
# for every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year += off
# move to around the year 2000
year += ((2000 - year) // 28) * 28
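    # worked example (illustrative): 1850 -> +6 -> 1856 -> +140 -> 1996;
    # 1996 and 1996 + 28 share 1850's weekday and leap-year structure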
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return s
| mit |
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/sparse/linalg/eigen/lobpcg/info.py | 10 | 3809 | """
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
Call the function lobpcg - see help for lobpcg.lobpcg. See also lobpcg.as2d,
which can be used in the preconditioner (example below)
Acknowledgements
----------------
lobpcg.py code was written by Robert Cimrman. Many thanks belong to Andrew
Knyazev, the author of the algorithm, for lots of advice and support.
Examples
--------
>>> # Solve A x = lambda B x with constraints and preconditioning.
>>> n = 100
>>> vals = [nm.arange( n, dtype = nm.float64 ) + 1]
>>> # Matrix A.
>>> operatorA = spdiags( vals, 0, n, n )
>>> # Matrix B
>>> operatorB = nm.eye( n, n )
>>> # Constraints.
>>> Y = nm.eye( n, 3 )
>>> # Initial guess for eigenvectors, should have linearly independent
>>> # columns. Column dimension = number of requested eigenvalues.
>>> X = sc.rand( n, 3 )
>>> # Preconditioner - inverse of A.
>>> ivals = [1./vals[0]]
>>> def precond( x ):
invA = spdiags( ivals, 0, n, n )
y = invA * x
if sp.issparse( y ):
y = y.toarray()
return as2d( y )
>>>
>>> # Alternative way of providing the same preconditioner.
>>> #precond = spdiags( ivals, 0, n, n )
>>>
>>> tt = time.clock()
>>> eigs, vecs = lobpcg( X, operatorA, operatorB, blockVectorY = Y,
>>> operatorT = precond,
>>> residualTolerance = 1e-4, maxIterations = 40,
>>> largest = False, verbosityLevel = 1 )
>>> print 'solution time:', time.clock() - tt
>>> print eigs
Notes
-----
In the following ``n`` denotes the matrix size and ``m`` the number
of required eigenvalues (smallest or largest).
The LOBPCG code internally solves eigenproblems of the size 3``m`` on every
iteration by calling the "standard" dense eigensolver, so if ``m`` is not
small enough compared to ``n``, it does not make sense to call the LOBPCG
code, but rather one should use the "standard" eigensolver, e.g. scipy or symeig
function in this case. If one calls the LOBPCG algorithm for 5``m``>``n``,
it will most likely break internally, so the code tries to call the standard
function instead.
It is not that ``n`` should be large for the LOBPCG to work, but rather the
ratio ``n``/``m`` should be large. If you call the LOBPCG code with ``m``=1
and ``n``=10, it should work, though ``n`` is small. The method is intended
for extremely large ``n``/``m``, see e.g., reference [28] in
http://arxiv.org/abs/0705.2626
The convergence speed depends basically on two factors:
1. How well the sought eigenvalues are separated from the rest of
the eigenvalues. One can try to vary ``m`` to make this better.
2. How well conditioned the problem is. This can be changed by using proper
preconditioning. For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
slow, unless efficient preconditioning is used. For this specific problem,
a good simple preconditioner function would be a linear solve for A, which
is easy to code since A is tridiagonal.
References
----------
A. V. Knyazev, Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method. SIAM Journal on Scientific
Computing 23 (2001), no. 2,
pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov, Block Locally
Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) in hypre and PETSc
(2007). http://arxiv.org/abs/0705.2626
A. V. Knyazev's C and MATLAB implementations:
http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/
"""
__docformat__ = "restructuredtext en"
postpone_import = 1
| gpl-3.0 |
ponty/MyElectronicProjects | pavement.py | 1 | 1718 | from easyprocess import Proc
from paver.easy import *
import paver.doctools
import paver.virtual
import paver.misctasks
from paved import *
from paved.dist import *
from paved.util import *
from paved.docs import *
from paved.pycheck import *
from paved.pkg import *
options(
sphinx=Bunch(
docroot='docs',
builddir="_build",
),
# pdf=Bunch(
# builddir='_build',
# builder='latex',
# ),
)
options.paved.clean.rmdirs += ['.tox',
'dist',
'build',
]
options.paved.clean.patterns += ['*.pickle',
'*.doctree',
'*.gz',
'nosetests.xml',
'sloccount.sc',
'*.pdf', '*.tex',
'*_sch_*.png',
'*_brd_*.png',
'*.b#*', '*.s#*', # eagle
#'*.pro',
'*.hex',
'*.zip',
'distribute_setup.py',
'*.bak',
# kicad
'$savepcb.brd',
'*.erc',
'*.000',
]
options.paved.dist.manifest.include.remove('distribute_setup.py')
options.paved.dist.manifest.include.remove('paver-minilib.zip')
@task
@needs(
# 'clean',
'cog',
'html',
'pdf',
)
def alltest():
'all tasks to check'
pass
| bsd-2-clause |
jart/tensorflow | tensorflow/contrib/data/python/kernel_tests/sql_dataset_op_test_base.py | 13 | 4314 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing SqlDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sqlite3
from tensorflow.contrib.data.python.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetTestBase(test.TestCase):
"""Base class for setting up and testing SqlDataset."""
def _createSqlDataset(self, output_types, num_repeats=1):
dataset = readers.SqlDataset(self.driver_name, self.data_source_name,
self.query, output_types).repeat(num_repeats)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
return init_op, get_next
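  # Illustrative use from a subclass (hypothetical query and dtypes):
  #   init_op, get_next = self._createSqlDataset((dtypes.string,))
  #   with self.test_session() as sess:
  #     sess.run(init_op, feed_dict={
  #         self.query: "SELECT first_name FROM students"})
  #     print(sess.run(get_next))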
def setUp(self):
self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
self.driver_name = array_ops.placeholder_with_default(
array_ops.constant("sqlite", dtypes.string), shape=[])
self.query = array_ops.placeholder(dtypes.string, shape=[])
conn = sqlite3.connect(self.data_source_name)
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS students")
c.execute("DROP TABLE IF EXISTS people")
c.execute("DROP TABLE IF EXISTS townspeople")
c.execute(
"CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), "
"school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), "
"desk_number INTEGER, income INTEGER, favorite_number INTEGER, "
"favorite_big_number INTEGER, favorite_negative_number INTEGER, "
"favorite_medium_sized_number INTEGER, brownie_points INTEGER, "
"account_balance INTEGER, registration_complete INTEGER)")
c.executemany(
"INSERT INTO students (first_name, last_name, motto, school_id, "
"favorite_nonsense_word, desk_number, income, favorite_number, "
"favorite_big_number, favorite_negative_number, "
"favorite_medium_sized_number, brownie_points, account_balance, "
"registration_complete) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647,
9223372036854775807, -2, 32767, 0, 0, 1),
("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000,
-2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)])
c.execute(
"CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))")
c.executemany(
"INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)",
[("Benjamin", "Franklin", "Pennsylvania"), ("John", "Doe",
"California")])
c.execute(
"CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY "
"KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories "
"FLOAT, accolades FLOAT, triumphs FLOAT)")
c.executemany(
"INSERT INTO townspeople (first_name, last_name, victories, "
"accolades, triumphs) VALUES (?, ?, ?, ?, ?)",
[("George", "Washington", 20.00,
1331241.321342132321324589798264627463827647382647382643874,
9007199254740991.0),
("John", "Adams", -19.95,
1331241321342132321324589798264627463827647382647382643874.0,
9007199254740992.0)])
conn.commit()
conn.close()
| apache-2.0 |
GoogleCloudPlatform/python-docs-samples | appengine/standard/endpoints-frameworks-v2/quickstart/main_test.py | 1 | 1894 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from endpoints import message_types
import mock
import main
def test_list_greetings(testbed):
api = main.GreetingApi()
response = api.list_greetings(message_types.VoidMessage())
assert len(response.items) == 2
def test_get_greeting(testbed):
api = main.GreetingApi()
request = main.GreetingApi.get_greeting.remote.request_type(id=1)
response = api.get_greeting(request)
assert response.message == 'goodbye world!'
def test_multiply_greeting(testbed):
api = main.GreetingApi()
request = main.GreetingApi.multiply_greeting.remote.request_type(
times=4,
message='help I\'m trapped in a test case.')
response = api.multiply_greeting(request)
assert response.message == 'help I\'m trapped in a test case.' * 4
def test_authed_greet(testbed):
api = main.AuthedGreetingApi()
with mock.patch('main.endpoints.get_current_user') as user_mock:
user_mock.return_value = None
response = api.greet(message_types.VoidMessage())
assert response.message == 'Hello, Anonymous'
user_mock.return_value = mock.Mock()
user_mock.return_value.email.return_value = 'user@example.com'
response = api.greet(message_types.VoidMessage())
assert response.message == 'Hello, user@example.com'
| apache-2.0 |
OSU-CS-325/Project_Two_Coin_Change | run-files/analysisQ7.py | 1 | 2957 | import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
import datetime
# Import the three change making algorithms
sys.path.insert(0, "../divide-conquer/")
sys.path.insert(0, "../dynamic-programming")
sys.path.insert(0, "../greedy")
from changeslow import changeslow
from changegreedy import changegreedy
from changedp import changedp
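# All three algorithms share the same call signature as used below;
# for example (return-value names are assumptions):
#   counts, num_coins = changegreedy([1, 5, 10, 25], 63)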
### QUESTION 7 ###
def Q7(slow, minChange, maxChange):
lenV = []
runtimeGreedy = []
runtimeDP = []
runtimeSlow = []
numExp = 10
maxRange = 1000
if (slow):
maxRange = 10 # some much smaller number
for i in range(1, maxRange): # V can be of length 1 to (maxRange - 1)
print "\n------ running V length=" + str(i) + "------"
lenV.append(i)
#print "lenV:", lenV
runtimeGreedy.append(0)
runtimeDP.append(0)
runtimeSlow.append(0)
for j in range(numExp): # run numExp experiments for this length of V
print "\n ---- running experiment=" + str(j + 1) + " ----"
coinArray = []
for k in range(i): # generate V of size i [1, rand, ..., rand, max=1 + 5*(maxRange-2)]
if (k == 0):
coinArray.append(1)
else:
randFrom = coinArray[len(coinArray) - 1] + 1
randTo = coinArray[len(coinArray) - 1] + 5
coinArray.append(random.randint(randFrom, randTo))
change = random.randint(minChange, maxChange)
#print " coinArray:", coinArray
#print " change:", change
print " running greedy..."
start = datetime.datetime.now()
_, _ = changegreedy(coinArray, change)
end = datetime.datetime.now()
delta = end - start
delta = int(delta.total_seconds() * 1000000)
print " " + str(delta)
runtimeGreedy[i - 1] += delta
print " running DP..."
start = datetime.datetime.now()
_, _ = changedp(coinArray, change)
end = datetime.datetime.now()
delta = end - start
delta = int(delta.total_seconds() * 1000000)
print " " + str(delta)
runtimeDP[i - 1] += delta
if (slow):
print " running slow..."
start = datetime.datetime.now()
_, _ = changeslow(coinArray, change)
end = datetime.datetime.now()
delta = end - start
delta = int(delta.total_seconds() * 1000000)
print " " + str(delta)
runtimeSlow[i - 1] += delta
runtimeGreedy[i - 1] /= numExp
runtimeDP[i - 1] /= numExp
if (slow):
runtimeSlow[i - 1] /= numExp
plt.figure(21)
plt.plot(lenV, runtimeGreedy, 'b-', linewidth=2.0, label='Greedy')
plt.plot(lenV, runtimeDP, 'r--', linewidth=2.0, label='DP')
if (slow):
plt.plot(lenV, runtimeSlow, 'g-.', linewidth=2.0, label='Slow')
plt.legend(loc='upper left')
plt.title('Runtime vs len(V[]) for randomized V[] and A')
plt.ylabel('Avg. Runtime (10^-6 sec)')
plt.xlabel('len(V[])')
plt.grid(True)
if (slow):
plt.savefig('img/Q7slow_runtime.png', bbox_inches='tight')
else:
plt.savefig('img/Q7_runtime.png', bbox_inches='tight')
def main():
Q7(False, 100, 100)
	#Q7(True, 100, 100)
if __name__ == "__main__":
main()
| mit |
looker/sentry | tests/sentry/integrations/github/test_issues.py | 1 | 6535 | from __future__ import absolute_import
import responses
from mock import patch
from exam import fixture
from django.test import RequestFactory
from sentry.integrations.github.integration import GitHubIntegration
from sentry.models import Integration, ExternalIssue
from sentry.testutils import TestCase
from sentry.utils import json
class GitHubIssueBasicTest(TestCase):
@fixture
def request(self):
return RequestFactory()
def setUp(self):
self.user = self.create_user()
self.organization = self.create_organization(owner=self.user)
self.model = Integration.objects.create(
provider='github',
external_id='github_external_id',
name='getsentry',
)
self.model.add_organization(self.organization.id)
self.integration = GitHubIntegration(self.model, self.organization.id)
@responses.activate
@patch('sentry.integrations.github.client.get_jwt', return_value='jwt_token_1')
def test_get_allowed_assignees(self, mock_get_jwt):
responses.add(
responses.POST,
'https://api.github.com/installations/github_external_id/access_tokens',
json={'token': 'token_1', 'expires_at': '2018-10-11T22:14:10Z'}
)
responses.add(
responses.GET,
'https://api.github.com/repos/getsentry/sentry/assignees',
json=[{'login': 'MeredithAnya'}]
)
repo = 'getsentry/sentry'
assert self.integration.get_allowed_assignees(repo) == (
('', 'Unassigned'),
('MeredithAnya', 'MeredithAnya')
)
request = responses.calls[0].request
assert request.headers['Authorization'] == 'Bearer jwt_token_1'
request = responses.calls[1].request
assert request.headers['Authorization'] == 'token token_1'
@responses.activate
@patch('sentry.integrations.github.client.get_jwt', return_value='jwt_token_1')
def test_create_issue(self, mock_get_jwt):
responses.add(
responses.POST,
'https://api.github.com/installations/github_external_id/access_tokens',
json={'token': 'token_1', 'expires_at': '2018-10-11T22:14:10Z'}
)
responses.add(
responses.POST,
'https://api.github.com/repos/getsentry/sentry/issues',
json={'number': 321, 'title': 'hello', 'body': 'This is the description'}
)
form_data = {
'repo': 'getsentry/sentry',
'title': 'hello',
'description': 'This is the description',
}
assert self.integration.create_issue(form_data) == {
'key': 321,
'description': 'This is the description',
'title': 'hello',
'repo': 'getsentry/sentry',
}
request = responses.calls[0].request
assert request.headers['Authorization'] == 'Bearer jwt_token_1'
request = responses.calls[1].request
assert request.headers['Authorization'] == 'token token_1'
payload = json.loads(request.body)
assert payload == {'body': 'This is the description', 'assignee': None, 'title': 'hello'}
@responses.activate
@patch('sentry.integrations.github.client.get_jwt', return_value='jwt_token_1')
def test_get_repo_issues(self, mock_get_jwt):
responses.add(
responses.POST,
'https://api.github.com/installations/github_external_id/access_tokens',
json={'token': 'token_1', 'expires_at': '2018-10-11T22:14:10Z'}
)
responses.add(
responses.GET,
'https://api.github.com/repos/getsentry/sentry/issues',
json=[{'number': 321, 'title': 'hello', 'body': 'This is the description'}]
)
repo = 'getsentry/sentry'
assert self.integration.get_repo_issues(repo) == ((321, '#321 hello'),)
request = responses.calls[0].request
assert request.headers['Authorization'] == 'Bearer jwt_token_1'
request = responses.calls[1].request
assert request.headers['Authorization'] == 'token token_1'
@responses.activate
@patch('sentry.integrations.github.client.get_jwt', return_value='jwt_token_1')
def test_link_issue(self, mock_get_jwt):
issue_id = 321
responses.add(
responses.POST,
'https://api.github.com/installations/github_external_id/access_tokens',
json={'token': 'token_1', 'expires_at': '2018-10-11T22:14:10Z'}
)
responses.add(
responses.GET,
'https://api.github.com/repos/getsentry/sentry/issues/321',
json={'number': issue_id, 'title': 'hello', 'body': 'This is the description'}
)
data = {
'repo': 'getsentry/sentry',
'externalIssue': issue_id,
'comment': 'hello',
}
assert self.integration.get_issue(issue_id, data=data) == {
'key': issue_id,
'description': 'This is the description',
'title': 'hello',
'repo': 'getsentry/sentry',
}
request = responses.calls[0].request
assert request.headers['Authorization'] == 'Bearer jwt_token_1'
request = responses.calls[1].request
assert request.headers['Authorization'] == 'token token_1'
@responses.activate
@patch('sentry.integrations.github.client.get_jwt', return_value='jwt_token_1')
    def test_after_link_issue(self, mock_get_jwt):
responses.add(
responses.POST,
'https://api.github.com/installations/github_external_id/access_tokens',
json={'token': 'token_1', 'expires_at': '2018-10-11T22:14:10Z'}
)
responses.add(
responses.POST,
'https://api.github.com/repos/getsentry/sentry/issues/321/comments',
json={'body': 'hello'}
)
data = {'comment': 'hello'}
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.model.id,
key='hello#321',
)
self.integration.after_link_issue(external_issue, data=data)
request = responses.calls[0].request
assert request.headers['Authorization'] == 'Bearer jwt_token_1'
request = responses.calls[1].request
assert request.headers['Authorization'] == 'token token_1'
payload = json.loads(request.body)
assert payload == {'body': 'hello'}
| bsd-3-clause |
lochiiconnectivity/boto | boto/vpc/dhcpoptions.py | 17 | 2479 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a DHCP Options set
"""
from boto.ec2.ec2object import TaggedEC2Object
class DhcpValueSet(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'value':
self.append(value)
class DhcpConfigSet(dict):
def startElement(self, name, attrs, connection):
if name == 'valueSet':
if self._name not in self:
self[self._name] = DhcpValueSet()
return self[self._name]
def endElement(self, name, value, connection):
if name == 'key':
self._name = value
class DhcpOptions(TaggedEC2Object):
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
self.options = None
def __repr__(self):
return 'DhcpOptions:%s' % self.id
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
if retval is not None:
return retval
if name == 'dhcpConfigurationSet':
self.options = DhcpConfigSet()
return self.options
def endElement(self, name, value, connection):
if name == 'dhcpOptionsId':
self.id = value
else:
setattr(self, name, value)
| mit |
mikedh/trimesh | trimesh/creation.py | 1 | 40606 | """
creation.py
--------------
Create meshes from primitives, or with operations.
"""
from .base import Trimesh
from .constants import log, tol
from .geometry import faces_to_edges, align_vectors, plane_transform
from . import util
from . import grouping
from . import triangles
from . import transformations as tf
import numpy as np
import collections
try:
# shapely is a soft dependency
from shapely.geometry import Polygon
from shapely.wkb import loads as load_wkb
except BaseException as E:
# shapely will sometimes raise OSErrors
# on import rather than just ImportError
from . import exceptions
# re-raise the exception when someone tries
# to use the module that they don't have
Polygon = exceptions.closure(E)
load_wkb = exceptions.closure(E)
def revolve(linestring,
angle=None,
sections=None,
transform=None,
**kwargs):
"""
Revolve a 2D line string around the 2D Y axis, with a result with
the 2D Y axis pointing along the 3D Z axis.
This function is intended to handle the complexity of indexing
and is intended to be used to create all radially symmetric primitives,
eventually including cylinders, annular cylinders, capsules, cones,
and UV spheres.
Note that if your linestring is closed, it needs to be counterclockwise
if you would like face winding and normals facing outwards.
Parameters
-------------
linestring : (n, 2) float
Lines in 2D which will be revolved
angle : None or float
Angle in radians to revolve curve by
sections : None or int
Number of sections result should have
If not specified default is 32 per revolution
transform : None or (4, 4) float
Transform to apply to mesh after construction
**kwargs : dict
Passed to Trimesh constructor
Returns
--------------
revolved : Trimesh
Mesh representing revolved result
"""
linestring = np.asanyarray(linestring, dtype=np.float64)
# linestring must be ordered 2D points
if len(linestring.shape) != 2 or linestring.shape[1] != 2:
raise ValueError('linestring must be 2D!')
if angle is None:
# default to closing the revolution
angle = np.pi * 2
closed = True
else:
# check passed angle value
closed = angle >= ((np.pi * 2) - 1e-8)
if sections is None:
# default to 32 sections for a full revolution
sections = int(angle / (np.pi * 2) * 32)
# change to face count
sections += 1
# create equally spaced angles
theta = np.linspace(0, angle, sections)
# 2D points around the revolution
points = np.column_stack((np.cos(theta), np.sin(theta)))
# how many points per slice
per = len(linestring)
# use the 2D X component as radius
radius = linestring[:, 0]
# use the 2D Y component as the height along revolution
height = linestring[:, 1]
# a lot of tiling to get our 3D vertices
vertices = np.column_stack((
np.tile(points, (1, per)).reshape((-1, 2)) *
np.tile(radius, len(points)).reshape((-1, 1)),
np.tile(height, len(points))))
if closed:
# should be a duplicate set of vertices
assert np.allclose(vertices[:per],
vertices[-per:])
# chop off duplicate vertices
vertices = vertices[:-per]
if transform is not None:
# apply transform to vertices
vertices = tf.transform_points(vertices, transform)
# how many slices of the pie
slices = len(theta) - 1
# start with a quad for every segment
# this is a superset which will then be reduced
quad = np.array([0, per, 1,
1, per, per + 1])
# stack the faces for a single slice of the revolution
single = np.tile(quad, per).reshape((-1, 3))
# `per` is basically the stride of the vertices
single += np.tile(np.arange(per), (2, 1)).T.reshape((-1, 1))
# remove any zero-area triangle
# this covers many cases without having to think too much
single = single[triangles.area(vertices[single]) > tol.merge]
# how much to offset each slice
# note arange multiplied by vertex stride
# but tiled by the number of faces we actually have
offset = np.tile(np.arange(slices) * per,
(len(single), 1)).T.reshape((-1, 1))
# stack a single slice into N slices
stacked = np.tile(single.ravel(), slices).reshape((-1, 3))
if tol.strict:
# make sure we didn't screw up stacking operation
assert np.allclose(stacked.reshape((-1, single.shape[0], 3)) - single, 0)
# offset stacked and wrap vertices
faces = (stacked + offset) % len(vertices)
# create the mesh from our vertices and faces
mesh = Trimesh(vertices=vertices, faces=faces,
**kwargs)
# strict checks run only in unit tests
    if tol.strict and (np.allclose(radius[[0, -1]], 0.0) or
                       np.allclose(linestring[0], linestring[-1])):
# if revolved curve starts and ends with zero radius
# it should really be a valid volume, unless the sign
# reversed on the input linestring
assert mesh.is_volume
return mesh
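# Illustrative sketch (not part of the original source): revolving a
# counterclockwise rectangular profile yields a closed cylinder, e.g.
#   mesh = revolve([[0, -1], [1, -1], [1, 1], [0, 1]])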
def extrude_polygon(polygon,
height,
transform=None,
triangle_args=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh
Parameters
----------
polygon : shapely.geometry.Polygon
2D geometry to extrude
height : float
Distance to extrude polygon along Z
triangle_args : str or None
Passed to triangle
**kwargs:
passed to Trimesh
Returns
----------
mesh : trimesh.Trimesh
Resulting extrusion as watertight body
"""
# create a triangulation from the polygon
vertices, faces = triangulate_polygon(
polygon, triangle_args=triangle_args, **kwargs)
# extrude that triangulation along Z
mesh = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
transform=transform,
**kwargs)
return mesh
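# Illustrative sketch (hypothetical values): extrude a unit square to a cube
#   mesh = extrude_polygon(Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
#                          height=1.0)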
def sweep_polygon(polygon,
path,
angles=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh along an
arbitrary 3D path. Doesn't handle sharp curvature well.
Parameters
----------
polygon : shapely.geometry.Polygon
Profile to sweep along path
path : (n, 3) float
A path in 3D
angles : (n,) float
Optional rotation angle relative to prior vertex
at each vertex
Returns
-------
mesh : trimesh.Trimesh
Geometry of result
"""
path = np.asanyarray(path, dtype=np.float64)
if not util.is_shape(path, (-1, 3)):
raise ValueError('Path must be (n, 3)!')
# Extract 2D vertices and triangulation
verts_2d = np.array(polygon.exterior)[:-1]
base_verts_2d, faces_2d = triangulate_polygon(polygon, **kwargs)
n = len(verts_2d)
# Create basis for first planar polygon cap
x, y, z = util.generate_basis(path[0] - path[1])
    tf_mat = np.eye(4)
tf_mat[:3, :3] = np.c_[x, y, z]
tf_mat[:3, 3] = path[0]
# Compute 3D locations of those vertices
verts_3d = np.c_[verts_2d, np.zeros(n)]
verts_3d = tf.transform_points(verts_3d, tf_mat)
base_verts_3d = np.c_[base_verts_2d,
np.zeros(len(base_verts_2d))]
base_verts_3d = tf.transform_points(base_verts_3d,
tf_mat)
# keep matching sequence of vertices and 0- indexed faces
vertices = [base_verts_3d]
faces = [faces_2d]
# Compute plane normals for each turn --
# each turn induces a plane halfway between the two vectors
v1s = util.unitize(path[1:-1] - path[:-2])
v2s = util.unitize(path[1:-1] - path[2:])
norms = np.cross(np.cross(v1s, v2s), v1s + v2s)
norms[(norms == 0.0).all(1)] = v1s[(norms == 0.0).all(1)]
norms = util.unitize(norms)
final_v1 = util.unitize(path[-1] - path[-2])
norms = np.vstack((norms, final_v1))
v1s = np.vstack((v1s, final_v1))
# Create all side walls by projecting the 3d vertices into each plane
# in succession
for i in range(len(norms)):
verts_3d_prev = verts_3d
# Rotate if needed
if angles is not None:
tf_mat = tf.rotation_matrix(angles[i],
norms[i],
path[i])
verts_3d_prev = tf.transform_points(verts_3d_prev,
tf_mat)
# Project vertices onto plane in 3D
ds = np.einsum('ij,j->i', (path[i + 1] - verts_3d_prev), norms[i])
ds = ds / np.dot(v1s[i], norms[i])
verts_3d_new = np.einsum('i,j->ij', ds, v1s[i]) + verts_3d_prev
# Add to face and vertex lists
new_faces = [[i + n, (i + 1) % n, i] for i in range(n)]
new_faces.extend([[(i - 1) % n + n, i + n, i] for i in range(n)])
# save faces and vertices into a sequence
faces.append(np.array(new_faces))
vertices.append(np.vstack((verts_3d, verts_3d_new)))
verts_3d = verts_3d_new
# do the main stack operation from a sequence to (n,3) arrays
# doing one vstack provides a substantial speedup by
# avoiding a bunch of temporary allocations
vertices, faces = util.append_faces(vertices, faces)
# Create final cap
x, y, z = util.generate_basis(path[-1] - path[-2])
vecs = verts_3d - path[-1]
coords = np.c_[np.einsum('ij,j->i', vecs, x),
np.einsum('ij,j->i', vecs, y)]
base_verts_2d, faces_2d = triangulate_polygon(Polygon(coords))
base_verts_3d = (np.einsum('i,j->ij', base_verts_2d[:, 0], x) +
np.einsum('i,j->ij', base_verts_2d[:, 1], y)) + path[-1]
faces = np.vstack((faces, faces_2d + len(vertices)))
vertices = np.vstack((vertices, base_verts_3d))
return Trimesh(vertices, faces)
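# Illustrative sketch (hypothetical values): sweep a small square along
# a straight path on Z
#   square = Polygon([(-.1, -.1), (.1, -.1), (.1, .1), (-.1, .1)])
#   mesh = sweep_polygon(square, [[0, 0, 0], [0, 0, 1], [0, 0, 2]])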
def extrude_triangulation(vertices,
faces,
height,
transform=None,
**kwargs):
"""
Extrude a 2D triangulation into a watertight mesh.
Parameters
----------
vertices : (n, 2) float
2D vertices
faces : (m, 3) int
Triangle indexes of vertices
height : float
Distance to extrude triangulation
**kwargs : dict
Passed to Trimesh constructor
Returns
---------
mesh : trimesh.Trimesh
Mesh created from extrusion
"""
vertices = np.asanyarray(vertices, dtype=np.float64)
height = float(height)
faces = np.asanyarray(faces, dtype=np.int64)
if not util.is_shape(vertices, (-1, 2)):
raise ValueError('Vertices must be (n,2)')
if not util.is_shape(faces, (-1, 3)):
raise ValueError('Faces must be (n,3)')
if np.abs(height) < tol.merge:
raise ValueError('Height must be nonzero!')
# make sure triangulation winding is pointing up
normal_test = triangles.normals(
[util.stack_3D(vertices[faces[0]])])[0]
normal_dot = np.dot(normal_test,
[0.0, 0.0, np.sign(height)])[0]
# make sure the triangulation is aligned with the sign of
# the height we've been passed
if normal_dot < 0.0:
faces = np.fliplr(faces)
# stack the (n,3) faces into (3*n, 2) edges
edges = faces_to_edges(faces)
edges_sorted = np.sort(edges, axis=1)
# edges which only occur once are on the boundary of the polygon
# since the triangulation may have subdivided the boundary of the
# shapely polygon, we need to find it again
edges_unique = grouping.group_rows(
edges_sorted, require_count=1)
# (n, 2, 2) set of line segments (positions, not references)
boundary = vertices[edges[edges_unique]]
# we are creating two vertical triangles for every 2D line segment
# on the boundary of the 2D triangulation
vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
vertical = np.column_stack((vertical,
np.tile([0, height, 0, height],
len(boundary))))
vertical_faces = np.tile([3, 1, 2, 2, 1, 0],
(len(boundary), 1))
vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
vertical_faces = vertical_faces.reshape((-1, 3))
# stack the (n,2) vertices with zeros to make them (n, 3)
vertices_3D = util.stack_3D(vertices)
# a sequence of zero- indexed faces, which will then be appended
# with offsets to create the final mesh
faces_seq = [faces[:, ::-1],
faces.copy(),
vertical_faces]
vertices_seq = [vertices_3D,
vertices_3D.copy() + [0.0, 0, height],
vertical]
# append sequences into flat nicely indexed arrays
vertices, faces = util.append_faces(vertices_seq, faces_seq)
if transform is not None:
# apply transform here to avoid later bookkeeping
vertices = tf.transform_points(
vertices, transform)
# if the transform flips the winding flip faces back
# so that the normals will be facing outwards
if tf.flips_winding(transform):
# fliplr makes arrays non-contiguous
faces = np.ascontiguousarray(np.fliplr(faces))
# create mesh object with passed keywords
mesh = Trimesh(vertices=vertices,
faces=faces,
**kwargs)
# only check in strict mode (unit tests)
if tol.strict:
assert mesh.volume > 0.0
return mesh
def triangulate_polygon(polygon,
triangle_args=None,
engine=None,
**kwargs):
"""
Given a shapely polygon create a triangulation using a
python interface to `triangle.c` or mapbox-earcut.
> pip install triangle
> pip install mapbox_earcut
Parameters
---------
polygon : Shapely.geometry.Polygon
Polygon object to be triangulated
triangle_args : str or None
Passed to triangle.triangulate i.e: 'p', 'pq30'
engine : None or str
Any value other than 'earcut' will use `triangle`
Returns
--------------
vertices : (n, 2) float
Points in space
faces : (n, 3) int
Index of vertices that make up triangles
"""
if engine == 'earcut':
from mapbox_earcut import triangulate_float64
# get vertices as sequence where exterior is the first value
vertices = [np.array(polygon.exterior)]
vertices.extend(np.array(i) for i in polygon.interiors)
# record the index from the length of each vertex array
rings = np.cumsum([len(v) for v in vertices])
# stack vertices into (n, 2) float array
vertices = np.vstack(vertices)
# run triangulation
faces = triangulate_float64(vertices, rings).reshape(
(-1, 3)).astype(np.int64).reshape((-1, 3))
return vertices, faces
# do the import here for soft requirement
from triangle import triangulate
# set default triangulation arguments if not specified
if triangle_args is None:
triangle_args = 'p'
# turn the polygon in to vertices, segments, and hole points
arg = _polygon_to_kwargs(polygon)
# run the triangulation
result = triangulate(arg, triangle_args)
return result['vertices'], result['triangles']
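# Illustrative sketch (hypothetical values):
#   v, f = triangulate_polygon(
#       Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]), engine='earcut')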
def _polygon_to_kwargs(polygon):
"""
Given a shapely polygon generate the data to pass to
the triangle mesh generator
Parameters
---------
polygon : Shapely.geometry.Polygon
Input geometry
Returns
--------
result : dict
Has keys: vertices, segments, holes
"""
if not polygon.is_valid:
raise ValueError('invalid shapely polygon passed!')
def round_trip(start, length):
"""
Given a start index and length, create a series of (n, 2) edges which
create a closed traversal.
Examples
---------
start, length = 0, 3
returns: [(0,1), (1,2), (2,0)]
"""
tiled = np.tile(np.arange(start, start + length).reshape((-1, 1)), 2)
tiled = tiled.reshape(-1)[1:-1].reshape((-1, 2))
tiled = np.vstack((tiled, [tiled[-1][-1], tiled[0][0]]))
return tiled
def add_boundary(boundary, start):
# coords is an (n, 2) ordered list of points on the polygon boundary
# the first and last points are the same, and there are no
# guarantees on points not being duplicated (which will
# later cause meshpy/triangle to shit a brick)
coords = np.array(boundary.coords)
# find indices points which occur only once, and sort them
# to maintain order
unique = np.sort(grouping.unique_rows(coords)[0])
cleaned = coords[unique]
vertices.append(cleaned)
facets.append(round_trip(start, len(cleaned)))
# holes require points inside the region of the hole, which we find
# by creating a polygon from the cleaned boundary region, and then
# using a representative point. You could do things like take the mean of
# the points, but this is more robust (to things like concavity), if
# slower.
test = Polygon(cleaned)
holes.append(np.array(test.representative_point().coords)[0])
return len(cleaned)
# sequence of (n,2) points in space
vertices = collections.deque()
# sequence of (n,2) indices of vertices
facets = collections.deque()
# list of (2) vertices in interior of hole regions
holes = collections.deque()
start = add_boundary(polygon.exterior, 0)
for interior in polygon.interiors:
try:
start += add_boundary(interior, start)
except BaseException:
log.warning('invalid interior, continuing')
continue
# create clean (n,2) float array of vertices
# and (m, 2) int array of facets
# by stacking the sequence of (p,2) arrays
vertices = np.vstack(vertices)
facets = np.vstack(facets).tolist()
# shapely polygons can include a Z component
# strip it out for the triangulation
if vertices.shape[1] == 3:
vertices = vertices[:, :2]
result = {'vertices': vertices,
'segments': facets}
# holes in meshpy lingo are a (h, 2) list of (x,y) points
# which are inside the region of the hole
# we added a hole for the exterior, which we slice away here
holes = np.array(holes)[1:]
if len(holes) > 0:
result['holes'] = holes
return result
def box(extents=None, transform=None, **kwargs):
"""
Return a cuboid.
Parameters
------------
extents : float, or (3,) float
Edge lengths
transform: (4, 4) float
Transformation matrix
**kwargs:
passed to Trimesh to create box
Returns
------------
geometry : trimesh.Trimesh
Mesh of a cuboid
"""
# vertices of the cube
vertices = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1,
1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
order='C',
dtype=np.float64).reshape((-1, 3))
vertices -= 0.5
# resize cube based on passed extents
if extents is not None:
extents = np.asanyarray(extents, dtype=np.float64)
if extents.shape != (3,):
raise ValueError('Extents must be (3,)!')
vertices *= extents
else:
extents = np.asarray((1.0, 1.0, 1.0), dtype=np.float64)
# hardcoded face indices
faces = [1, 3, 0, 4, 1, 0, 0, 3, 2, 2, 4, 0, 1, 7, 3, 5, 1, 4,
5, 7, 1, 3, 7, 2, 6, 4, 2, 2, 7, 6, 6, 5, 4, 7, 5, 6]
faces = np.array(faces, order='C', dtype=np.int64).reshape((-1, 3))
face_normals = [-1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 0, 0, 1, 0, -1,
0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 1, 0, 0]
face_normals = np.asanyarray(face_normals,
order='C',
dtype=np.float64).reshape(-1, 3)
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'box',
'extents': extents})
box = Trimesh(vertices=vertices,
faces=faces,
face_normals=face_normals,
process=False,
**kwargs)
# do the transform here to preserve face normals
if transform is not None:
box.apply_transform(transform)
return box
def icosahedron():
"""
Create an icosahedron, a 20 faced polyhedron.
Returns
-------------
ico : trimesh.Trimesh
      Icosahedron centered at the origin.
"""
t = (1.0 + 5.0**.5) / 2.0
vertices = [-1, t, 0, 1, t, 0, -1, -t, 0, 1, -t, 0, 0, -1, t, 0, 1, t,
0, -1, -t, 0, 1, -t, t, 0, -1, t, 0, 1, -t, 0, -1, -t, 0, 1]
faces = [0, 11, 5, 0, 5, 1, 0, 1, 7, 0, 7, 10, 0, 10, 11,
1, 5, 9, 5, 11, 4, 11, 10, 2, 10, 7, 6, 7, 1, 8,
3, 9, 4, 3, 4, 2, 3, 2, 6, 3, 6, 8, 3, 8, 9,
4, 9, 5, 2, 4, 11, 6, 2, 10, 8, 6, 7, 9, 8, 1]
# scale vertices so each vertex radius is 1.0
vertices = np.reshape(vertices, (-1, 3)) / np.sqrt(2.0 + t)
faces = np.reshape(faces, (-1, 3))
mesh = Trimesh(vertices=vertices,
faces=faces,
process=False)
return mesh
def icosphere(subdivisions=3, radius=1.0, color=None):
"""
    Create an icosphere centered at the origin.
Parameters
----------
subdivisions : int
How many times to subdivide the mesh.
Note that the number of faces will grow as function of
4 ** subdivisions, so you probably want to keep this under ~5
radius : float
Desired radius of sphere
color: (3,) float or uint8
Desired color of sphere
Returns
---------
ico : trimesh.Trimesh
Meshed sphere
"""
def refine_spherical():
vectors = ico.vertices
scalar = (vectors ** 2).sum(axis=1)**.5
unit = vectors / scalar.reshape((-1, 1))
offset = radius - scalar
ico.vertices += unit * offset.reshape((-1, 1))
ico = icosahedron()
ico._validate = False
for j in range(subdivisions):
ico = ico.subdivide()
refine_spherical()
ico._validate = True
if color is not None:
ico.visual.face_colors = color
ico.metadata.update({'shape': 'sphere',
'radius': radius})
return ico
def uv_sphere(radius=1.0,
count=[32, 32],
theta=None,
phi=None):
"""
Create a UV sphere (latitude + longitude) centered at the
origin. Roughly one order of magnitude faster than an
icosphere but slightly uglier.
Parameters
----------
radius : float
Radius of sphere
count : (2,) int
Number of latitude and longitude lines
theta : (n,) float
Optional theta angles in radians
phi : (n,) float
Optional phi angles in radians
Returns
----------
mesh : trimesh.Trimesh
Mesh of UV sphere with specified parameters
"""
count = np.array(count, dtype=np.int64)
count += np.mod(count, 2)
count[1] *= 2
# generate vertices on a sphere using spherical coordinates
if theta is None:
theta = np.linspace(0, np.pi, count[0])
if phi is None:
phi = np.linspace(0, np.pi * 2, count[1])[:-1]
spherical = np.dstack((np.tile(phi, (len(theta), 1)).T,
np.tile(theta, (len(phi), 1)))).reshape((-1, 2))
vertices = util.spherical_to_vector(spherical) * radius
# generate faces by creating a bunch of pie wedges
c = len(theta)
# a quad face as two triangles
pairs = np.array([[c, 0, 1],
[c + 1, c, 1]])
# increment both triangles in each quad face by the same offset
incrementor = np.tile(np.arange(c - 1), (2, 1)).T.reshape((-1, 1))
# create the faces for a single pie wedge of the sphere
strip = np.tile(pairs, (c - 1, 1))
strip += incrementor
# the first and last faces will be degenerate since the first
# and last vertex are identical in the two rows
strip = strip[1:-1]
# tile pie wedges into a sphere
faces = np.vstack([strip + (i * c) for i in range(len(phi))])
# poles are repeated in every strip, so a mask to merge them
mask = np.arange(len(vertices))
# the top pole are all the same vertex
mask[0::c] = 0
# the bottom pole are all the same vertex
mask[c - 1::c] = c - 1
# faces masked to remove the duplicated pole vertices
# and mod to wrap to fill in the last pie wedge
faces = mask[np.mod(faces, len(vertices))]
# we save a lot of time by not processing again
# since we did some bookkeeping mesh is watertight
mesh = Trimesh(vertices=vertices, faces=faces, process=False,
metadata={'shape': 'sphere',
'radius': radius})
return mesh
def capsule(height=1.0,
radius=1.0,
count=[32, 32]):
"""
Create a mesh of a capsule, or a cylinder with hemispheric ends.
Parameters
----------
height : float
Center to center distance of two spheres
radius : float
Radius of the cylinder and hemispheres
count : (2,) int
Number of sections on latitude and longitude
Returns
----------
capsule : trimesh.Trimesh
Capsule geometry with:
- cylinder axis is along Z
- one hemisphere is centered at the origin
- other hemisphere is centered along the Z axis at height
"""
height = float(height)
radius = float(radius)
count = np.array(count, dtype=np.int64)
count += np.mod(count, 2)
# create a theta where there is a double band around the equator
# so that we can offset the top and bottom of a sphere to
# get a nicely meshed capsule
theta = np.linspace(0, np.pi, count[0])
center = np.clip(np.arctan(tol.merge / radius),
tol.merge, np.inf)
offset = np.array([-center, center]) + (np.pi / 2)
theta = np.insert(theta,
int(len(theta) / 2),
offset)
capsule = uv_sphere(radius=radius,
count=count,
theta=theta)
top = capsule.vertices[:, 2] > tol.zero
capsule.vertices[top] += [0, 0, height]
capsule.metadata.update({'shape': 'capsule',
'height': height,
'radius': radius})
return capsule
def cone(radius,
height,
sections=None,
transform=None,
**kwargs):
"""
Create a mesh of a cone along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int or None
How many pie wedges per revolution
transform : (4, 4) float or None
Transform to apply after creation
**kwargs : dict
Passed to Trimesh constructor
Returns
----------
cone: trimesh.Trimesh
Resulting mesh of a cone
"""
# create the 2D outline of a cone
linestring = [[0, 0],
[radius, 0],
[0, height]]
# revolve the profile to create a cone
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'cone',
'radius': radius,
'height': height})
cone = revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
return cone
def cylinder(radius,
height=None,
sections=None,
segment=None,
transform=None,
**kwargs):
"""
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float or None
The height of the cylinder
sections : int or None
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
"""
if segment is not None:
# override transform and height with the segment
transform, height = _segment_to_cylinder(segment=segment)
if height is None:
raise ValueError('either `height` or `segment` must be passed!')
half = abs(float(height)) / 2.0
# create a profile to revolve
linestring = [[0, -half],
[radius, -half],
[radius, half],
[0, half]]
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'cylinder',
'height': height,
'radius': radius})
# generate cylinder through simple revolution
return revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
def annulus(r_min,
r_max,
height=None,
sections=None,
transform=None,
segment=None,
**kwargs):
"""
Create a mesh of an annular cylinder along Z centered at the origin.
Parameters
----------
r_min : float
The inner radius of the annular cylinder
r_max : float
The outer radius of the annular cylinder
height : float
The height of the annular cylinder
sections : int or None
How many pie wedges should the annular cylinder have
transform : (4, 4) float or None
Transform to apply to move result from the origin
segment : None or (2, 3) float
Override transform and height with a line segment
**kwargs:
passed to Trimesh to create annulus
Returns
----------
annulus : trimesh.Trimesh
Mesh of annular cylinder
"""
if segment is not None:
# override transform and height with the segment if passed
transform, height = _segment_to_cylinder(segment=segment)
if height is None:
raise ValueError('either `height` or `segment` must be passed!')
r_min = abs(float(r_min))
# if center radius is zero this is a cylinder
if r_min < tol.merge:
return cylinder(radius=r_max,
height=height,
sections=sections,
transform=transform)
r_max = abs(float(r_max))
# we're going to center at XY plane so take half the height
half = abs(float(height)) / 2.0
# create counter-clockwise rectangle
linestring = [[r_min, -half],
[r_max, -half],
[r_max, half],
[r_min, half],
[r_min, -half]]
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'annulus',
'r_min': r_min,
'r_max': r_max,
'height': height})
# revolve the curve
annulus = revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
return annulus
def _segment_to_cylinder(segment):
"""
Convert a line segment to a transform and height for a cylinder
or cylinder-like primitive.
Parameters
-----------
segment : (2, 3) float
3D line segment in space
Returns
-----------
transform : (4, 4) float
Matrix to move a Z-extruded origin cylinder to segment
height : float
The height of the cylinder needed
"""
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError('segment must be 2 3D points!')
vector = segment[1] - segment[0]
# override height with segment length
height = np.linalg.norm(vector)
# point in middle of line
midpoint = segment[0] + (vector * 0.5)
# align Z with our desired direction
rotation = align_vectors([0, 0, 1], vector)
# translate to midpoint of segment
translation = tf.translation_matrix(midpoint)
# compound the rotation and translation
transform = np.dot(translation, rotation)
return transform, height
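# Illustrative sketch: a segment from the origin to [0, 0, 2] yields
# height 2.0 and a transform whose translation is the midpoint [0, 0, 1]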
def random_soup(face_count=100):
"""
Return random triangles as a Trimesh
Parameters
-----------
face_count : int
Number of faces desired in mesh
Returns
-----------
soup : trimesh.Trimesh
Geometry with face_count random faces
"""
vertices = np.random.random((face_count * 3, 3)) - 0.5
faces = np.arange(face_count * 3).reshape((-1, 3))
soup = Trimesh(vertices=vertices, faces=faces)
return soup
def axis(origin_size=0.04,
transform=None,
origin_color=None,
axis_radius=None,
axis_length=None):
"""
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
"""
# the size of the ball representing the origin
origin_size = float(origin_size)
# set the transform and use origin-relative
# sized for other parameters if not specified
if transform is None:
transform = np.eye(4)
if origin_color is None:
origin_color = [255, 255, 255, 255]
if axis_radius is None:
axis_radius = origin_size / 5.0
if axis_length is None:
axis_length = origin_size * 10.0
# generate a ball for the origin
axis_origin = uv_sphere(radius=origin_size,
count=[10, 10])
axis_origin.apply_transform(transform)
# apply color to the origin ball
axis_origin.visual.face_colors = origin_color
# create the cylinder for the z-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
z_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(translation))
# XYZ->RGB, Z is blue
z_axis.visual.face_colors = [0, 0, 255]
# create the cylinder for the y-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
rotation = tf.rotation_matrix(np.radians(-90),
[1, 0, 0])
y_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, Y is green
y_axis.visual.face_colors = [0, 255, 0]
# create the cylinder for the x-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
rotation = tf.rotation_matrix(np.radians(90),
[0, 1, 0])
x_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, X is red
x_axis.visual.face_colors = [255, 0, 0]
# append the sphere and three cylinders
marker = util.concatenate([axis_origin,
x_axis,
y_axis,
z_axis])
return marker
def camera_marker(camera,
marker_height=0.4,
origin_size=None):
"""
Create a visual marker for a camera object, including an axis and FOV.
Parameters
---------------
camera : trimesh.scene.Camera
Camera object with FOV and transform defined
marker_height : float
How far along the camera Z should FOV indicators be
origin_size : float
Sphere radius of the origin (default: marker_height / 10.0)
Returns
------------
meshes : list
Contains Trimesh and Path3D objects which can be visualized
"""
# create sane origin size from marker height
if origin_size is None:
origin_size = marker_height / 10.0
# append the visualizations to an array
meshes = [axis(origin_size=origin_size)]
try:
# path is a soft dependency
from .path.exchange.load import load_path
except ImportError:
# they probably don't have shapely installed
log.warning('unable to create FOV visualization!',
exc_info=True)
return meshes
# calculate vertices from camera FOV angles
x = marker_height * np.tan(np.deg2rad(camera.fov[0]) / 2.0)
y = marker_height * np.tan(np.deg2rad(camera.fov[1]) / 2.0)
z = marker_height
# combine the points into the vertices of an FOV visualization
points = np.array(
[(0, 0, 0),
(-x, -y, z),
(x, -y, z),
(x, y, z),
(-x, y, z)],
dtype=float)
# create line segments for the FOV visualization
# a segment from the origin to each bound of the FOV
segments = np.column_stack(
(np.zeros_like(points), points)).reshape(
(-1, 3))
# add a loop for the outside of the FOV then reshape
# the whole thing into multiple line segments
segments = np.vstack((segments,
points[[1, 2,
2, 3,
3, 4,
4, 1]])).reshape((-1, 2, 3))
# add a single Path3D object for all line segments
meshes.append(load_path(segments))
return meshes
def truncated_prisms(tris, origin=None, normal=None):
"""
Return a mesh consisting of multiple watertight prisms below
a list of triangles, truncated by a specified plane.
Parameters
-------------
    tris : (n, 3, 3) float
Triangles in space
origin : None or (3,) float
Origin of truncation plane
normal : None or (3,) float
Unit normal vector of truncation plane
Returns
-----------
mesh : trimesh.Trimesh
Triangular mesh
"""
if origin is None:
transform = np.eye(4)
else:
transform = plane_transform(origin=origin, normal=normal)
# transform the triangles to the specified plane
transformed = tf.transform_points(
tris.reshape((-1, 3)), transform).reshape((-1, 9))
# stack triangles such that every other one is repeated
vs = np.column_stack((transformed, transformed)).reshape((-1, 3, 3))
# set the Z value of the second triangle to zero
vs[1::2, :, 2] = 0
# reshape triangles to a flat array of points and transform back to original frame
vertices = tf.transform_points(
vs.reshape((-1, 3)), matrix=np.linalg.inv(transform))
# face indexes for a *single* truncated triangular prism
f = np.array([[2, 1, 0],
[3, 4, 5],
[0, 1, 4],
[1, 2, 5],
[2, 0, 3],
[4, 3, 0],
[5, 4, 1],
[3, 5, 2]])
# find the projection of each triangle with the normal vector
cross = np.dot([0, 0, 1], triangles.cross(transformed.reshape((-1, 3, 3))).T)
# stack faces into one prism per triangle
f_seq = np.tile(f, (len(transformed), 1)).reshape((-1, len(f), 3))
# if the normal of the triangle was positive flip the winding
f_seq[cross > 0] = np.fliplr(f)
# offset stacked faces to create correct indices
faces = (f_seq + (np.arange(len(f_seq)) * 6).reshape((-1, 1, 1))).reshape((-1, 3))
# create a mesh from the data
mesh = Trimesh(vertices=vertices, faces=faces, process=False)
return mesh
| mit |
zstackorg/zstack-woodpecker | integrationtest/vm/multihosts/ha/test_2nfs_vm_ha_net_discon_host_vm_killed.py | 2 | 5911 | '''
New integration test for KVM VM HA: disconnect the host network and check that
the VM has been killed on the original host. This test is specific to the 2-NFS setup.
@author: turnyouon
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.ha_operations as ha_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import apibinding.inventory as inventory
import time
import os
vm = None
host_uuid = None
host_ip = None
max_attempts = None
storagechecker_timeout = None
test_stub = test_lib.lib_get_test_stub()
def test():
global vm
global host_uuid
global host_ip
global max_attempts
global storagechecker_timeout
must_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)
if test_lib.lib_get_ha_enable() != 'true':
test_util.test_skip("vm ha not enabled. Skip test")
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
#l3_name = os.environ.get('l3NoVlanNetworkName1')
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
test_lib.clean_up_all_vr()
#vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
#vr_host_ips = []
#for vr in vrs:
# vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
# if test_lib.lib_is_vm_running(vr) != True:
# vm_ops.start_vm(vr.uuid)
#time.sleep(60)
mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
#for vr_host_ip in vr_host_ips:
# conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
vm_creation_option.set_host_uuid(host_uuid)
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name('multihost_basic_vm')
vm = test_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
vm_creation_option.set_name('multihost_basic_vm2')
vm2 = test_vm_header.ZstackTestVm()
vm2.set_creation_option(vm_creation_option)
vm2.create()
vm_creation_option.set_name('multihost_basic_vm3')
vm3 = test_vm_header.ZstackTestVm()
vm3.set_creation_option(vm_creation_option)
vm3.create()
vr_hosts = test_stub.get_host_has_vr()
mn_hosts = test_stub.get_host_has_mn()
nfs_hosts = test_stub.get_host_has_nfs()
if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid, vr_hosts+mn_hosts+nfs_hosts):
test_util.test_fail("Not find out a suitable host")
#vm.check()
host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
host_port = test_lib.lib_get_host_port(host_ip)
test_util.test_logger("host %s is disconnecting" %(host_ip))
ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
ha_ops.set_vm_instance_ha_level(vm2.get_vm().uuid, "NeverStop")
ha_ops.set_vm_instance_ha_level(vm3.get_vm().uuid, "NeverStop")
test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    #We originally waited 180 seconds here for all VMs to be killed, but
    #test results show no wait is needed: the VMs are killed very quickly.
test_util.test_logger("wait for 30 seconds")
time.sleep(30)
    if test_stub.check_vm_running_on_host(vm.vm.uuid, host_ip):
        test_util.test_fail("VM1 is expected to have been killed on the original host")
    if test_stub.check_vm_running_on_host(vm2.vm.uuid, host_ip):
        test_util.test_fail("VM2 is expected to have been killed on the original host")
    if test_stub.check_vm_running_on_host(vm3.vm.uuid, host_ip):
        test_util.test_fail("VM3 is expected to have been killed on the original host")
test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
host_ops.reconnect_host(kvm_host_uuid)
vm.set_state(vm_header.RUNNING)
vm2.set_state(vm_header.RUNNING)
vm3.set_state(vm_header.RUNNING)
time.sleep(60)
vm.check()
vm2.check()
vm3.check()
vm.destroy()
vm2.destroy()
vm3.destroy()
test_util.test_pass('Test VM ha on host failure Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global host_uuid
global host_ip
global max_attempts
global storagechecker_timeout
if vm:
try:
vm.destroy()
except:
pass
def env_recover():
global host_ip
test_util.test_logger("recover host: %s" % (test_host.ip_))
try:
test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
except:
pass
conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
host_ops.reconnect_host(kvm_host_uuid)
| apache-2.0 |
marcwebbie/youtube-dl | youtube_dl/extractor/francetv.py | 7 | 16004 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
parse_duration,
determine_ext,
)
from .dailymotion import (
DailymotionIE,
DailymotionCloudIE,
)
class FranceTVBaseInfoExtractor(InfoExtractor):
def _extract_video(self, video_id, catalogue):
info = self._download_json(
'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
% (video_id, catalogue),
video_id, 'Downloading video JSON')
if info.get('status') == 'NOK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
allowed_countries = info['videos'][0].get('geoblocage')
if allowed_countries:
georestricted = True
geo_info = self._download_json(
'http://geo.francetv.fr/ws/edgescape.json', video_id,
'Downloading geo restriction info')
country = geo_info['reponse']['geo_info']['country_code']
if country not in allowed_countries:
raise ExtractorError(
'The video is not available from your location',
expected=True)
else:
georestricted = False
formats = []
for video in info['videos']:
if video['statut'] != 'ONLINE':
continue
video_url = video['url']
if not video_url:
continue
format_id = video['format']
ext = determine_ext(video_url)
if ext == 'f4m':
if georestricted:
# See https://github.com/rg3/youtube-dl/issues/3963
# m3u8 urls work fine
continue
f4m_url = self._download_webpage(
'http://hdfauth.francetv.fr/esi/TA?url=%s' % video_url,
video_id, 'Downloading f4m manifest token', fatal=False)
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
video_id, f4m_id=format_id, fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False))
elif video_url.startswith('rtmp'):
formats.append({
'url': video_url,
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
})
else:
if self._is_valid_url(video_url, video_id, format_id):
formats.append({
'url': video_url,
'format_id': format_id,
})
self._sort_formats(formats)
title = info['titre']
subtitle = info.get('sous_titre')
if subtitle:
title += ' - %s' % subtitle
title = title.strip()
subtitles = {}
subtitles_list = [{
'url': subformat['url'],
'ext': subformat.get('format'),
} for subformat in info.get('subtitles', []) if subformat.get('url')]
if subtitles_list:
subtitles['fr'] = subtitles_list
return {
'id': video_id,
'title': title,
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
'subtitles': subtitles,
}
class PluzzIE(FranceTVBaseInfoExtractor):
IE_NAME = 'pluzz.francetv.fr'
_VALID_URL = r'https?://(?:m\.)?pluzz\.francetv\.fr/videos/(?P<id>.+?)\.html'
# Can't use tests, videos expire in 7 days
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._html_search_meta(
'id_video', webpage, 'video id', default=None)
if not video_id:
video_id = self._search_regex(
r'data-diffusion=["\'](\d+)', webpage, 'video id')
return self._extract_video(video_id, 'Pluzz')
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetvinfo.fr'
_VALID_URL = r'https?://(?:www|mobile|france3-regions)\.francetvinfo\.fr/(?:[^/]+/)*(?P<title>[^/?#&.]+)'
_TESTS = [{
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'info_dict': {
'id': '84981923',
'ext': 'mp4',
'title': 'Soir 3',
'upload_date': '20130826',
'timestamp': 1377548400,
'subtitles': {
'fr': 'mincount:2',
},
},
'params': {
# m3u8 downloads
'skip_download': True,
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
'info_dict': {
'id': 'EV_20019',
'ext': 'mp4',
'title': 'Débat des candidats à la Commission européenne',
'description': 'Débat des candidats à la Commission européenne',
},
'params': {
            'skip_download': 'HLS (requires ffmpeg)'
},
'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
}, {
'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
'md5': 'f485bda6e185e7d15dbc69b72bae993e',
'info_dict': {
'id': 'NI_173343',
'ext': 'mp4',
'title': 'Les entreprises familiales : le secret de la réussite',
'thumbnail': 're:^https?://.*\.jpe?g$',
'timestamp': 1433273139,
'upload_date': '20150602',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
}, {
'url': 'http://france3-regions.francetvinfo.fr/bretagne/cotes-d-armor/thalassa-echappee-breizh-ce-venredi-dans-les-cotes-d-armor-954961.html',
'md5': 'f485bda6e185e7d15dbc69b72bae993e',
'info_dict': {
'id': 'NI_657393',
'ext': 'mp4',
'title': 'Olivier Monthus, réalisateur de "Bretagne, le choix de l’Armor"',
'description': 'md5:a3264114c9d29aeca11ced113c37b16c',
'thumbnail': 're:^https?://.*\.jpe?g$',
'timestamp': 1458300695,
'upload_date': '20160318',
},
'params': {
'skip_download': True,
},
}, {
# Dailymotion embed
'url': 'http://www.francetvinfo.fr/politique/notre-dame-des-landes/video-sur-france-inter-cecile-duflot-denonce-le-regard-meprisant-de-patrick-cohen_1520091.html',
'md5': 'ee7f1828f25a648addc90cb2687b1f12',
'info_dict': {
'id': 'x4iiko0',
'ext': 'mp4',
'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
'description': 'Au lendemain de la victoire du "oui" au référendum sur l\'aéroport de Notre-Dame-des-Landes, l\'ancienne ministre écologiste est l\'invitée de Patrick Cohen. Plus d\'info : https://www.franceinter.fr/emissions/le-7-9/le-7-9-27-juin-2016',
'timestamp': 1467011958,
'upload_date': '20160627',
'uploader': 'France Inter',
'uploader_id': 'x2q2ez',
},
'add_ie': ['Dailymotion'],
}, {
'url': 'http://france3-regions.francetvinfo.fr/limousin/emissions/jt-1213-limousin',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, DailymotionCloudIE.ie_key())
dailymotion_urls = DailymotionIE._extract_urls(webpage)
if dailymotion_urls:
return self.playlist_result([
self.url_result(dailymotion_url, DailymotionIE.ie_key())
for dailymotion_url in dailymotion_urls])
video_id, catalogue = self._search_regex(
(r'id-video=([^@]+@[^"]+)',
r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"'),
webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
class FranceTVIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetv'
IE_DESC = 'France 2, 3, 4, 5 and Ô'
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?france[2345o]\.fr/
(?:
emissions/[^/]+/(?:videos|diffusions)|
emission/[^/]+|
videos|
jt
)
/|
embed\.francetv\.fr/\?ue=
)
(?P<id>[^/?]+)
'''
_TESTS = [
# france2
{
'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
'md5': 'c03fc87cb85429ffd55df32b9fc05523',
'info_dict': {
'id': '109169362',
'ext': 'flv',
'title': '13h15, le dimanche...',
'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
'upload_date': '20140914',
'timestamp': 1410693600,
},
},
# france3
{
'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
'md5': '679bb8f8921f8623bd658fa2f8364da0',
'info_dict': {
'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
'ext': 'mp4',
'title': 'Le scandale du prix des médicaments',
'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
'upload_date': '20131113',
'timestamp': 1384380000,
},
},
# france4
{
'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
'info_dict': {
'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'ext': 'mp4',
'title': 'Hero Corp Making of - Extrait 1',
'description': 'md5:c87d54871b1790679aec1197e73d650a',
'upload_date': '20131106',
'timestamp': 1383766500,
},
},
# france5
{
'url': 'http://www.france5.fr/emissions/c-a-dire/videos/quels_sont_les_enjeux_de_cette_rentree_politique__31-08-2015_908948?onglet=tous&page=1',
'md5': 'f6c577df3806e26471b3d21631241fd0',
'info_dict': {
'id': '123327454',
'ext': 'flv',
'title': 'C à dire ?! - Quels sont les enjeux de cette rentrée politique ?',
'description': 'md5:4a0d5cb5dce89d353522a84462bae5a4',
'upload_date': '20150831',
'timestamp': 1441035120,
},
},
# franceo
{
'url': 'http://www.franceo.fr/jt/info-soir/18-07-2015',
'md5': '47d5816d3b24351cdce512ad7ab31da8',
'info_dict': {
'id': '125377621',
'ext': 'flv',
'title': 'Infô soir',
'description': 'md5:01b8c6915a3d93d8bbbd692651714309',
'upload_date': '20150718',
'timestamp': 1437241200,
'duration': 414,
},
},
{
# francetv embed
'url': 'http://embed.francetv.fr/?ue=8d7d3da1e3047c42ade5a5d7dfd3fc87',
'info_dict': {
'id': 'EV_30231',
'ext': 'flv',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
},
{
'url': 'http://www.france4.fr/emission/highlander/diffusion-du-17-07-2015-04h05',
'only_matching': True,
},
{
'url': 'http://www.franceo.fr/videos/125377617',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id, catalogue = self._html_search_regex(
r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)
class GenerationQuoiIE(InfoExtractor):
IE_NAME = 'france2.fr:generation-quoi'
_VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://generation-quoi.france2.fr/portrait/garde-a-vous',
'info_dict': {
'id': 'k7FJX8VBcvvLmX4wA5Q',
'ext': 'mp4',
'title': 'Génération Quoi - Garde à Vous',
'uploader': 'Génération Quoi',
},
'params': {
# It uses Dailymotion
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % display_id)
info_json = self._download_webpage(info_url, display_id)
info = json.loads(info_json)
return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
ie='Dailymotion')
class CultureboxIE(FranceTVBaseInfoExtractor):
IE_NAME = 'culturebox.francetvinfo.fr'
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
_TEST = {
'url': 'http://culturebox.francetvinfo.fr/live/musique/musique-classique/le-livre-vermeil-de-montserrat-a-la-cathedrale-delne-214511',
'md5': '9b88dc156781c4dbebd4c3e066e0b1d6',
'info_dict': {
'id': 'EV_50111',
'ext': 'flv',
'title': "Le Livre Vermeil de Montserrat à la Cathédrale d'Elne",
'description': 'md5:f8a4ad202e8fe533e2c493cc12e739d9',
'upload_date': '20150320',
'timestamp': 1426892400,
'duration': 2760.9,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
if ">Ce live n'est plus disponible en replay<" in webpage:
raise ExtractorError('Video %s is not available' % name, expected=True)
video_id, catalogue = self._search_regex(
r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
| unlicense |
stormvirux/vturra-cli | vturra/asys.py | 1 | 1936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# from scipy import stats
# import statsmodels.api as sm
# from numpy.random import randn
import matplotlib as mpl
# import seaborn as sns
# sns.set_color_palette("deep", desat=.6)
mpl.rc("figure", figsize=(8, 4))
def Compavg():
data=Total()
markMax=[]
markAvg=[]
N = 5
ind = np.arange(N)
width = 0.35
fig = plt.figure()
ax = fig.add_subplot(111)
markMax.extend((data["Total"].max(),data["Total.1"].max(),data["Total.2"].max(),data["Total.3"].max(),data["Total.4"].max()))
markAvg.extend((data["Total"].mean(),data["Total.1"].mean(),data["Total.2"].mean(),data["Total.3"].mean(),data["Total.4"].mean()))
rects1 = ax.bar(ind, markMax, width, color='black')
rects2 = ax.bar(ind+width, markAvg, width, color='green')
ax.set_xlim(-width,len(ind)+width)
ax.set_ylim(0,120)
ax.set_ylabel('Marks')
ax.set_title('Max, Mean and Your Marks')
xTickMarks = ['Subject'+str(i) for i in range(1,6)]
ax.set_xticks(ind+width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=10, fontsize=10)
ax.legend( (rects1[0], rects2[0]), ('Max', 'Mean') )
plt.show()
def compSub():
# max_data = np.r_[data["Total"]].max()
# bins = np.linspace(0, max_data, max_data + 1)
data=Total()
plt.hist(data['Total'],linewidth=0, alpha=.7)
plt.hist(data['Total.1'],linewidth=0,alpha=.7)
plt.hist(data['Total.2'],linewidth=0,alpha=.7)
plt.hist(data['Total.3'],linewidth=0,alpha=.7)
plt.hist(data['Total.4'],linewidth=0,alpha=.7)
plt.title("Total marks Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
def Total():
data=pd.read_csv("output10cs.csv")
df3=data[['Total','Total.1','Total.2','Total.3','Total.4','Total.5','Total.6','Total.7']]
data["Main Total"]=df3.sum(axis=1)
data = data.dropna()
data.reset_index(drop=True)
return data
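# Minimal usage sketch (assumes output10cs.csv provides the Total..Total.7
# columns read by Total()):
#   Compavg() # grouped bars of per-subject max vs. mean totals
#   compSub() # overlaid histograms of the first five Total columns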
#compSub()
# Compavg()
| mit |
Axam/nsx-web | fuelmenu/fuelmenu/fuelmenu.py | 2 | 15653 | #!/usr/bin/env python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import operator
from optparse import OptionParser
import os
from settings import Settings
import subprocess
import sys
import urwid
import urwid.raw_display
import urwid.web_display
# set up logging
logging.basicConfig(filename='/var/log/fuelmenu.log',
format="%(asctime)s %(levelname)s %(message)s",
level=logging.DEBUG)
log = logging.getLogger('fuelmenu.loader')
class Loader(object):
def __init__(self, parent):
self.modlist = []
self.choices = []
self.child = None
self.children = []
self.childpage = None
self.parent = parent
def load_modules(self, module_dir):
if module_dir not in sys.path:
sys.path.append(module_dir)
modules = [os.path.splitext(f)[0] for f in os.listdir(module_dir)
if f.endswith('.py')]
for module in modules:
log.info('loading module %s' % module)
try:
imported = __import__(module)
except ImportError as e:
log.error('module could not be imported: %s' % e)
continue
clsobj = getattr(imported, module, None)
modobj = clsobj(self.parent)
# add the module to the list
self.modlist.append(modobj)
# sort modules
self.modlist.sort(key=operator.attrgetter('priority'))
for module in self.modlist:
self.choices.append(module.name)
return (self.modlist, self.choices)
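# Illustrative behavior: given modules/welcome.py defining class
# welcome(parent) with .priority and .name attributes, load_modules returns
# ([<welcome instance>, ...] sorted by priority, [<module name>, ...]).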
class FuelSetup(object):
def __init__(self):
self.footer = None
self.frame = None
self.screen = None
self.defaultsettingsfile = "%s/settings.yaml" \
% (os.path.dirname(__file__))
self.settingsfile = "/etc/fuel/astute.yaml"
self.managediface = "eth0"
#Set to true to move all settings to end
self.globalsave = True
self.version = self.getVersion("/etc/fuel/version.yaml")
self.main()
self.choices = []
def menu(self, title, choices):
body = [urwid.Text(title), urwid.Divider()]
for c in choices:
button = urwid.Button(c)
urwid.connect_signal(button, 'click', self.menu_chosen, c)
body.append(urwid.AttrMap(button, None, focus_map='reversed'))
return urwid.ListBox(urwid.SimpleListWalker(body))
#return urwid.ListBox(urwid.SimpleFocusListWalker(body))
def menu_chosen(self, button, choice):
size = self.screen.get_cols_rows()
self.screen.draw_screen(size, self.frame.render(size))
for item in self.menuitems.body.contents:
try:
if item.original_widget and \
item.original_widget.get_label() == choice:
item.set_attr_map({None: 'header'})
else:
item.set_attr_map({None: None})
except Exception as e:
log.info("%s" % item)
log.error("%s" % e)
self.setChildScreen(name=choice)
def setChildScreen(self, name=None):
if name is None:
self.child = self.children[0]
else:
self.child = self.children[int(self.choices.index(name))]
if not self.child.screen:
self.child.screen = self.child.screenUI()
self.childpage = self.child.screen
self.childfill = urwid.Filler(self.childpage, 'top', 40)
self.childbox = urwid.BoxAdapter(self.childfill, 40)
self.cols = urwid.Columns(
[
('fixed', 20, urwid.Pile([
urwid.AttrMap(self.menubox, 'bright'),
urwid.Divider(" ")])),
('weight', 3, urwid.Pile([
urwid.Divider(" "),
self.childbox,
urwid.Divider(" ")]))
], 1)
self.child.refresh()
self.listwalker[:] = [self.cols]
def refreshScreen(self):
size = self.screen.get_cols_rows()
self.screen.draw_screen(size, self.frame.render(size))
def refreshChildScreen(self, name):
child = self.children[int(self.choices.index(name))]
#Refresh child listwalker
child.listwalker[:] = child.listbox_content
#reassign childpage top level objects
self.childpage = urwid.ListBox(child.listwalker)
self.childfill = urwid.Filler(self.childpage, 'middle', 22)
self.childbox = urwid.BoxAdapter(self.childfill, 22)
self.cols = urwid.Columns(
[
('fixed', 20, urwid.Pile([
urwid.AttrMap(self.menubox, 'bright'),
urwid.Divider(" ")])),
('weight', 3, urwid.Pile([
urwid.Divider(" "),
self.childbox,
urwid.Divider(" ")]))
], 1)
#Refresh top level listwalker
#self.listwalker[:] = [self.cols]
def getVersion(self, versionfile):
try:
versiondata = Settings().read(versionfile)
return versiondata['release']
except (IOError, KeyError):
log.error("Unable to set Fuel version from %s" % versionfile)
return ""
def main(self):
#Disable kernel print messages. They make our UI ugly
noout = open('/dev/null', 'w')
subprocess.call(["sysctl", "-w", "kernel.printk=4 1 1 7"],
stdout=noout, stderr=noout)
text_header = (u"Fuel %s setup "
u"Use Up/Down/Left/Right to navigate. F8 exits."
% self.version)
text_footer = (u"Status messages go here.")
#Top and bottom lines of frame
self.header = urwid.AttrWrap(urwid.Text(text_header), 'header')
self.footer = urwid.AttrWrap(urwid.Text(text_footer), 'footer')
#Prepare submodules
loader = Loader(self)
moduledir = "%s/modules" % (os.path.dirname(__file__))
self.children, self.choices = loader.load_modules(module_dir=moduledir)
if len(self.children) == 0:
import sys
sys.exit(1)
#Build list of choices excluding visible
self.visiblechoices = []
for child, choice in zip(self.children, self.choices):
if child.visible:
self.visiblechoices.append(choice)
self.menuitems = self.menu(u'Menu', self.visiblechoices)
menufill = urwid.Filler(self.menuitems, 'top', 40)
self.menubox = urwid.BoxAdapter(menufill, 40)
self.child = self.children[0]
self.childpage = self.child.screenUI()
self.childfill = urwid.Filler(self.childpage, 'top', 22)
self.childbox = urwid.BoxAdapter(self.childfill, 22)
self.cols = urwid.Columns(
[
('fixed', 20, urwid.Pile([
urwid.AttrMap(self.menubox, 'bright'),
urwid.Divider(" ")])),
('weight', 3, urwid.Pile([
urwid.Divider(" "),
self.childbox,
urwid.Divider(" ")]))
], 1)
self.listwalker = urwid.SimpleListWalker([self.cols])
#self.listwalker = urwid.TreeWalker([self.cols])
self.listbox = urwid.ListBox(self.listwalker)
#listbox = urwid.ListBox(urwid.SimpleListWalker(listbox_content))
self.frame = urwid.Frame(urwid.AttrWrap(self.listbox, 'body'),
header=self.header, footer=self.footer)
palette = \
[
('body', 'black', 'light gray', 'standout'),
('reverse', 'light gray', 'black'),
('header', 'white', 'dark red', 'bold'),
('important', 'dark blue', 'light gray',
('standout', 'underline')),
('editfc', 'white', 'dark blue', 'bold'),
('editbx', 'light gray', 'dark blue'),
('editcp', 'black', 'light gray', 'standout'),
('bright', 'dark gray', 'light gray', ('bold', 'standout')),
('buttn', 'black', 'dark cyan'),
('buttnf', 'white', 'dark blue', 'bold'),
('light gray', 'white', 'light gray', 'bold'),
('red', 'dark red', 'light gray', 'bold'),
('black', 'black', 'black', 'bold'),
]
# use appropriate Screen class
if urwid.web_display.is_web_request():
self.screen = urwid.web_display.Screen()
else:
self.screen = urwid.raw_display.Screen()
def unhandled(key):
if key == 'f8':
raise urwid.ExitMainLoop()
if key == 'shift tab':
self.child.walker.tab_prev()
if key == 'tab':
self.child.walker.tab_next()
self.mainloop = urwid.MainLoop(self.frame, palette, self.screen,
unhandled_input=unhandled)
#Initialize each module completely before any events are handled
for child in reversed(self.children):
self.setChildScreen(name=child.name)
#Prepare DNS for resolution
dnsobj = self.children[int(self.choices.index("DNS & Hostname"))]
dnsobj.setEtcResolv()
self.mainloop.run()
def exit_program(self, button):
#return kernel logging to normal
noout = open('/dev/null', 'w')
subprocess.call(["sysctl", "-w", "kernel.printk=7 4 1 7"],
stdout=noout, stderr=noout)
#Fix /etc/hosts and /etc/resolv.conf before quitting
dnsobj = self.children[int(self.choices.index("DNS & Hostname"))]
dnsobj.fixEtcHosts()
dnsobj.setEtcResolv('127.0.0.1')
raise urwid.ExitMainLoop()
def global_save(self):
#Runs save function for every module
for module, modulename in zip(self.children, self.choices):
#Run invisible modules. They may not have screen methods
if not module.visible:
try:
module.apply(None)
except Exception as e:
log.error("Unable to save module %s: %s" % (modulename, e))
continue
else:
try:
log.info("Checking and applying module: %s"
% modulename)
self.footer.set_text("Checking and applying module: %s"
% modulename)
self.refreshScreen()
module.refresh()
if module.apply(None):
log.info("Saving module: %s" % modulename)
else:
return False, modulename
except AttributeError as e:
log.debug("Module %s does not have save function: %s"
% (modulename, e))
return True, None
def setup():
urwid.web_display.set_preferences("Fuel Setup")
# try to handle short web requests quickly
if urwid.web_display.handle_short_request():
return
FuelSetup()
def save_only(iface):
import common.network as network
from common import pwgen
import netifaces
#Calculate and set Static/DHCP pool fields
#Max IPs = net size - 2 (master node + bcast)
try:
ip = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
netmask = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['netmask']
mac = netifaces.ifaddresses(iface)[netifaces.AF_LINK][0]['addr']
except Exception:
print("Interface %s is missing either IP address or netmask"
% (iface))
sys.exit(1)
net_ip_list = network.getNetwork(ip, netmask)
try:
half = int(len(net_ip_list) / 2)
#In most cases, skip 10.XXX.0.1
static_pool = list(net_ip_list[1:half])
dhcp_pool = list(net_ip_list[half:])
static_start = str(static_pool[0])
static_end = str(static_pool[-1])
dynamic_start = str(dhcp_pool[0])
dynamic_end = str(dhcp_pool[-1])
except Exception:
print("Unable to define DHCP pools")
sys.exit(1)
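    # Worked example (illustrative): for an interface at 10.20.0.2/24,
    # getNetwork yields the usable addresses 10.20.0.1-10.20.0.254; the
    # lower half (minus the first address) becomes the static pool and the
    # upper half the dynamic (DHCP) pool.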
try:
hostname, sep, domain = os.uname()[1].partition('.')
except Exception:
print("Unable to calculate hostname and domain")
sys.exit(1)
settings = \
{
"ADMIN_NETWORK/interface": iface,
"ADMIN_NETWORK/ipaddress": ip,
"ADMIN_NETWORK/netmask": netmask,
"ADMIN_NETWORK/mac": mac,
"ADMIN_NETWORK/dhcp_pool_start": dynamic_start,
"ADMIN_NETWORK/dhcp_pool_end": dynamic_end,
"ADMIN_NETWORK/static_pool_start": static_start,
"ADMIN_NETWORK/static_pool_end": static_end,
"HOSTNAME": hostname,
"DNS_DOMAIN": domain,
"DNS_SEARCH": domain,
"astute/user": "naily",
"astute/password": pwgen.password(),
"cobbler/user": "cobbler",
"cobbler/password": pwgen.password(),
"keystone/admin_token": pwgen.password(),
"mcollective/user": "mcollective",
"mcollective/password": pwgen.password(),
"postgres/keystone_dbname": "keystone",
"postgres/keystone_user": "keystone",
"postgres/keystone_password": pwgen.password(),
"postgres/nailgun_dbname": "nailgun",
"postgres/nailgun_user": "nailgun",
"postgres/nailgun_password": pwgen.password(),
"postgres/ostf_dbname": "ostf",
"postgres/ostf_user": "ostf",
"postgres/ostf_password": pwgen.password(),
"FUEL_ACCESS/user": "admin",
"FUEL_ACCESS/password": "admin",
}
newsettings = dict()
for setting in settings.keys():
if "/" in setting:
part1, part2 = setting.split("/")
if part1 not in newsettings.keys():
newsettings[part1] = {}
newsettings[part1][part2] = settings[setting]
else:
newsettings[setting] = settings[setting]
#Write astute.yaml
Settings().write(newsettings, defaultsfile=None,
outfn="/etc/fuel/astute.yaml")
def main(*args, **kwargs):
if urwid.VERSION < (1, 1, 0):
print("This program requires urwid 1.1.0 or greater.")
parser = OptionParser()
parser.add_option("-s", "--save-only", dest="save_only",
action="store_true",
help="Save default values and exit.")
parser.add_option("-i", "--iface", dest="iface", metavar="IFACE",
default="eth0", help="Set IFACE as primary.")
options, args = parser.parse_args()
if options.save_only:
save_only(options.iface)
else:
setup()
if '__main__' == __name__ or urwid.web_display.is_web_request():
setup()
| apache-2.0 |
hall1467/wikidata_usage_tracking | wbc_usage/utilities/determine_wikis.py | 1 | 2123 | """
Prints all wikis to stdout.
Usage:
determine_wikis (-h|--help)
determine_wikis [--debug]
[--verbose]
Options:
-h, --help This help message is printed
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import logging
import mwapi
import sys
import json
import docopt
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.WARNING if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
verbose = args['--verbose']
run(verbose)
# Contacts API to return list of wikis
# Code credit: https://github.com/WikiEducationFoundation/academic_classification/blob/master/pageclassifier/revgather.py
def run(verbose):
session = mwapi.Session(
'https://en.wikipedia.org',
user_agent='hall1467'
)
results = session.get(
action='sitematrix'
)
for database_dictionary in extract_query_results(results):
if verbose:
sys.stderr.write("Printing json for the database: " +
database_dictionary['dbname'] + "\n")
sys.stderr.flush()
sys.stdout.write(json.dumps(database_dictionary) + "\n")
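# Each line run() writes to stdout is a standalone JSON object, e.g.
# (illustrative values): {"dbname": "enwiki", "wikiurl": "https://en.wikipedia.org"}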
# Code credit: https://github.com/WikiEducationFoundation/academic_classification/blob/master/pageclassifier/revgather.py
def extract_query_results(results):
results = results['sitematrix']
for entry in results:
if entry == 'count':
continue
if entry == 'specials':
for special_entry in results[entry]:
yield ({
"dbname" : special_entry['dbname'],
"wikiurl" : special_entry['url']
})
continue
for wiki in results[entry]['site']:
yield {
"dbname" : wiki['dbname'],
"wikiurl" : wiki['url']
}
| mit |
simod/geonode | geonode/layers/views.py | 1 | 58343 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import sys
import logging
import shutil
import base64
import traceback
import uuid
import decimal
import re
from django.db.models import Q
from celery.exceptions import TimeoutError
from django.contrib.gis.geos import GEOSGeometry
from django.template.response import TemplateResponse
from requests import Request
from itertools import chain
from six import string_types
from owslib.wfs import WebFeatureService
from owslib.feature.schema import get_schema
from guardian.shortcuts import get_perms
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.conf import settings
from django.utils.translation import ugettext as _
from geonode import geoserver, qgis_server
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.utils.html import escape
from django.template.defaultfilters import slugify
from django.forms.models import inlineformset_factory
from django.db import transaction
from django.db.models import F
from django.forms.utils import ErrorList
from geonode.services.models import Service
from geonode.layers.forms import LayerForm, LayerUploadForm, NewLayerUploadForm, LayerAttributeForm
from geonode.base.forms import CategoryForm, TKeywordForm
from geonode.layers.models import Layer, Attribute, UploadSession
from geonode.base.enumerations import CHARSETS
from geonode.base.models import TopicCategory
from geonode.groups.models import GroupProfile
from geonode.utils import (resolve_object,
default_map_config,
check_ogc_backend,
llbbox_to_mercator,
bbox_to_projection,
GXPLayer,
GXPMap)
from geonode.layers.utils import file_upload, is_raster, is_vector
from geonode.people.forms import ProfileForm, PocForm
from geonode.security.views import _perms_info_json
from geonode.documents.models import get_related_documents
from geonode.utils import build_social_links
from geonode.base.views import batch_modify
from geonode.base.models import Thesaurus
from geonode.maps.models import Map
from geonode.geoserver.helpers import (gs_catalog,
ogc_server_settings,
set_layer_style) # cascading_delete
from .tasks import delete_layer
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.helpers import _render_thumbnail
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
from geonode.qgis_server.models import QGISServerLayer
CONTEXT_LOG_FILE = ogc_server_settings.LOG_FILE
logger = logging.getLogger("geonode.layers.views")
DEFAULT_SEARCH_BATCH_SIZE = 10
MAX_SEARCH_BATCH_SIZE = 25
GENERIC_UPLOAD_ERROR = _("There was an error while attempting to upload your data. \
Please try again, or contact an administrator if the problem continues.")
METADATA_UPLOADED_PRESERVE_ERROR = _("Note: this layer's original metadata was \
populated and preserved by importing a metadata XML file. This metadata cannot be edited.")
_PERMISSION_MSG_DELETE = _("You are not permitted to delete this layer")
_PERMISSION_MSG_GENERIC = _('You do not have permissions for this layer.')
_PERMISSION_MSG_MODIFY = _("You are not permitted to modify this layer")
_PERMISSION_MSG_METADATA = _(
"You are not permitted to modify this layer's metadata")
_PERMISSION_MSG_VIEW = _("You are not permitted to view this layer")
def log_snippet(log_file):
if not log_file or not os.path.isfile(log_file):
return "No log file at %s" % log_file
with open(log_file, "r") as f:
f.seek(0, 2) # Seek @ EOF
fsize = f.tell() # Get Size
f.seek(max(fsize - 10024, 0), 0) # Set pos @ last n chars
return f.read()
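# log_snippet is used below to attach roughly the last 10 KB of the OGC
# server log (e.g. log_snippet(CONTEXT_LOG_FILE)) to a failed UploadSession.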
def _resolve_layer(request, alternate, permission='base.view_resourcebase',
msg=_PERMISSION_MSG_GENERIC, **kwargs):
"""
Resolve the layer by the provided typename (which may include service name) and check the optional permission.
"""
service_typename = alternate.split(":", 1)
if Service.objects.filter(name=service_typename[0]).exists():
service = Service.objects.filter(name=service_typename[0])
return resolve_object(
request,
Layer,
{
'alternate': service_typename[1] if service[0].method != "C" else alternate},
permission=permission,
permission_msg=msg,
**kwargs)
else:
return resolve_object(request,
Layer,
{'alternate': alternate},
permission=permission,
permission_msg=msg,
**kwargs)
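# For example, _resolve_layer(request, "geonode:roads") resolves the Layer
# whose alternate is "geonode:roads"; if the prefix names a registered
# Service, the service's method decides whether the prefix is kept
# (layer and service names here are illustrative).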
# Basic Layer Views #
@login_required
def layer_upload(request, template='upload/layer_upload.html'):
if request.method == 'GET':
mosaics = Layer.objects.filter(is_mosaic=True).order_by('name')
ctx = {
'mosaics': mosaics,
'charsets': CHARSETS,
'is_layer': True,
}
return render(request, template, context=ctx)
elif request.method == 'POST':
name = None
form = NewLayerUploadForm(request.POST, request.FILES)
tempdir = None
saved_layer = None
errormsgs = []
out = {'success': False}
if form.is_valid():
title = form.cleaned_data["layer_title"]
            # Replace dots in the filename (works around a GeoServer REST API
            # upload bug) and avoid any other invalid characters.
# Use the title if possible, otherwise default to the filename
if title is not None and len(title) > 0:
name_base = title
else:
name_base, __ = os.path.splitext(
form.cleaned_data["base_file"].name)
title = slugify(name_base.replace(".", "_"))
name = slugify(name_base.replace(".", "_"))
if form.cleaned_data["abstract"] is not None and len(
form.cleaned_data["abstract"]) > 0:
abstract = form.cleaned_data["abstract"]
else:
abstract = "No abstract provided."
try:
# Moved this inside the try/except block because it can raise
# exceptions when unicode characters are present.
# This should be followed up in upstream Django.
tempdir, base_file = form.write_files()
if not form.cleaned_data["style_upload_form"]:
saved_layer = file_upload(
base_file,
name=name,
user=request.user,
overwrite=False,
charset=form.cleaned_data["charset"],
abstract=abstract,
title=title,
metadata_uploaded_preserve=form.cleaned_data[
"metadata_uploaded_preserve"],
metadata_upload_form=form.cleaned_data["metadata_upload_form"])
else:
saved_layer = Layer.objects.get(alternate=title)
if not saved_layer:
msg = 'Failed to process. Could not find matching layer.'
raise Exception(msg)
sld = open(base_file).read()
set_layer_style(saved_layer, title, base_file, sld)
except Exception as e:
exception_type, error, tb = sys.exc_info()
logger.exception(e)
out['success'] = False
try:
out['errors'] = u''.join(error).encode('utf-8')
except BaseException:
try:
out['errors'] = str(error)
except BaseException:
try:
tb = traceback.format_exc()
out['errors'] = tb
except BaseException:
pass
# Assign the error message to the latest UploadSession from
# that user.
latest_uploads = UploadSession.objects.filter(
user=request.user).order_by('-date')
if latest_uploads.count() > 0:
upload_session = latest_uploads[0]
upload_session.error = str(error)
upload_session.traceback = traceback.format_exc(tb)
upload_session.context = log_snippet(CONTEXT_LOG_FILE)
upload_session.save()
out['traceback'] = upload_session.traceback
out['context'] = upload_session.context
out['upload_session'] = upload_session.id
else:
out['success'] = True
if hasattr(saved_layer, 'info'):
out['info'] = saved_layer.info
out['url'] = reverse(
'layer_detail', args=[
saved_layer.service_typename])
if hasattr(saved_layer, 'bbox_string'):
out['bbox'] = saved_layer.bbox_string
if hasattr(saved_layer, 'srid'):
out['crs'] = {
'type': 'name',
'properties': saved_layer.srid
}
out['ogc_backend'] = settings.OGC_SERVER['default']['BACKEND']
upload_session = saved_layer.upload_session
if upload_session:
upload_session.processed = True
upload_session.save()
permissions = form.cleaned_data["permissions"]
if permissions is not None and len(permissions.keys()) > 0:
saved_layer.set_permissions(permissions)
saved_layer.handle_moderated_uploads()
finally:
if tempdir is not None:
shutil.rmtree(tempdir)
else:
for e in form.errors.values():
errormsgs.extend([escape(v) for v in e])
out['errors'] = form.errors
out['errormsgs'] = errormsgs
if out['success']:
status_code = 200
else:
status_code = 400
if settings.MONITORING_ENABLED:
if saved_layer or name:
layer_name = saved_layer.alternate if hasattr(
saved_layer, 'alternate') else name
request.add_resource('layer', layer_name)
return HttpResponse(
json.dumps(out),
content_type='application/json',
status=status_code)
def layer_detail(request, layername, template='layers/layer_detail.html'):
layer = _resolve_layer(
request,
layername,
'base.view_resourcebase',
_PERMISSION_MSG_VIEW)
# assert False, str(layer_bbox)
config = layer.attribute_config()
# Add required parameters for GXP lazy-loading
layer_bbox = layer.bbox[0:4]
bbox = layer_bbox[:]
bbox[0] = float(layer_bbox[0])
bbox[1] = float(layer_bbox[2])
bbox[2] = float(layer_bbox[1])
bbox[3] = float(layer_bbox[3])
    def decimal_encode(bbox):
        # coerce every coordinate (including decimal.Decimal values) to
        # float so the bbox is JSON-serializable
        return [float(coord) for coord in bbox]
def sld_definition(style):
from urllib import quote
_sld = {
"title": style.sld_title or style.name,
"legend": {
"height": "40",
"width": "22",
"href": layer.ows_url +
"?service=wms&request=GetLegendGraphic&format=image%2Fpng&width=20&height=20&layer=" +
quote(layer.service_typename, safe=''),
"format": "image/png"
},
"name": style.name
}
return _sld
if hasattr(layer, 'srid'):
config['crs'] = {
'type': 'name',
'properties': layer.srid
}
# Add required parameters for GXP lazy-loading
attribution = "%s %s" % (layer.owner.first_name,
layer.owner.last_name) if layer.owner.first_name or layer.owner.last_name else str(
layer.owner)
srs = getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:3857')
srs_srid = int(srs.split(":")[1]) if srs != "EPSG:900913" else 3857
config["attribution"] = "<span class='gx-attribution-title'>%s</span>" % attribution
config["format"] = getattr(
settings, 'DEFAULT_LAYER_FORMAT', 'image/png')
config["title"] = layer.title
config["wrapDateLine"] = True
config["visibility"] = True
config["srs"] = srs
config["bbox"] = decimal_encode(
bbox_to_projection([float(coord) for coord in layer_bbox] + [layer.srid, ],
target_srid=int(srs.split(":")[1]))[:4])
config["capability"] = {
"abstract": layer.abstract,
"name": layer.alternate,
"title": layer.title,
"queryable": True,
"storeType": layer.storeType,
"bbox": {
layer.srid: {
"srs": layer.srid,
"bbox": decimal_encode(bbox)
},
srs: {
"srs": srs,
"bbox": decimal_encode(
bbox_to_projection([float(coord) for coord in layer_bbox] + [layer.srid, ],
target_srid=srs_srid)[:4])
},
"EPSG:4326": {
"srs": "EPSG:4326",
"bbox": decimal_encode(bbox) if layer.srid == 'EPSG:4326' else
decimal_encode(bbox_to_projection(
[float(coord) for coord in layer_bbox] + [layer.srid, ], target_srid=4326)[:4])
},
"EPSG:900913": {
"srs": "EPSG:900913",
"bbox": decimal_encode(bbox) if layer.srid == 'EPSG:900913' else
decimal_encode(bbox_to_projection(
[float(coord) for coord in layer_bbox] + [layer.srid, ], target_srid=3857)[:4])
}
},
"srs": {
srs: True
},
"formats": ["image/png", "application/atom xml", "application/atom+xml", "application/json;type=utfgrid",
"application/openlayers", "application/pdf", "application/rss xml", "application/rss+xml",
"application/vnd.google-earth.kml", "application/vnd.google-earth.kml xml",
"application/vnd.google-earth.kml+xml", "application/vnd.google-earth.kml+xml;mode=networklink",
"application/vnd.google-earth.kmz", "application/vnd.google-earth.kmz xml",
"application/vnd.google-earth.kmz+xml", "application/vnd.google-earth.kmz;mode=networklink",
"atom", "image/geotiff", "image/geotiff8", "image/gif", "image/gif;subtype=animated",
"image/jpeg", "image/png8", "image/png; mode=8bit", "image/svg", "image/svg xml",
"image/svg+xml", "image/tiff", "image/tiff8", "image/vnd.jpeg-png",
"kml", "kmz", "openlayers", "rss", "text/html; subtype=openlayers", "utfgrid"],
"attribution": {
"title": attribution
},
"infoFormats": ["text/plain", "application/vnd.ogc.gml", "text/xml", "application/vnd.ogc.gml/3.1.1",
"text/xml; subtype=gml/3.1.1", "text/html", "application/json"],
"styles": [sld_definition(s) for s in layer.styles.all()],
"prefix": layer.alternate.split(":")[0] if ":" in layer.alternate else "",
"keywords": [k.name for k in layer.keywords.all()] if layer.keywords else [],
"llbbox": decimal_encode(bbox) if layer.srid == 'EPSG:4326' else
decimal_encode(bbox_to_projection(
[float(coord) for coord in layer_bbox] + [layer.srid, ], target_srid=4326)[:4])
}
all_times = None
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.views import get_capabilities
workspace, layername = layer.alternate.split(
":") if ":" in layer.alternate else (None, layer.alternate)
# WARNING Please make sure to have enabled DJANGO CACHE as per
# https://docs.djangoproject.com/en/2.0/topics/cache/#filesystem-caching
wms_capabilities_resp = get_capabilities(
request, layer.id, tolerant=True)
if wms_capabilities_resp.status_code >= 200 and wms_capabilities_resp.status_code < 400:
wms_capabilities = wms_capabilities_resp.getvalue()
if wms_capabilities:
import xml.etree.ElementTree as ET
e = ET.fromstring(wms_capabilities)
for atype in e.findall(
"./[Name='%s']/Extent[@name='time']" % (layername)):
dim_name = atype.get('name')
if dim_name:
dim_name = str(dim_name).lower()
if dim_name == 'time':
dim_values = atype.text
if dim_values:
all_times = dim_values.split(",")
break
if all_times:
config["capability"]["dimensions"] = {
"time": {
"name": "time",
"units": "ISO8601",
"unitsymbol": None,
"nearestVal": False,
"multipleVal": False,
"current": False,
"default": "current",
"values": all_times
}
}
if layer.storeType == "remoteStore":
service = layer.remote_service
source_params = {}
if service.type in ('REST_MAP', 'REST_IMG'):
source_params = {
"ptype": service.ptype,
"remote": True,
"url": service.service_url,
"name": service.name,
"title": "[R] %s" % service.title}
maplayer = GXPLayer(
name=layer.alternate,
ows_url=layer.ows_url,
layer_params=json.dumps(config),
source_params=json.dumps(source_params)
)
else:
maplayer = GXPLayer(
name=layer.alternate,
ows_url=layer.ows_url,
layer_params=json.dumps(config)
)
# Update count for popularity ranking,
    # but does not include admins or resource owners
layer.view_count_up(request.user)
# center/zoom don't matter; the viewer will center on the layer bounds
map_obj = GXPMap(
sender=Layer,
projection=getattr(
settings,
'DEFAULT_MAP_CRS',
'EPSG:3857'))
NON_WMS_BASE_LAYERS = [
la for la in default_map_config(request)[1] if la.ows_url is None]
metadata = layer.link_set.metadata().filter(
name__in=settings.DOWNLOAD_FORMATS_METADATA)
granules = None
all_granules = None
all_times = None
filter = None
if layer.is_mosaic:
try:
cat = gs_catalog
cat._cache.clear()
store = cat.get_store(layer.name)
coverages = cat.mosaic_coverages(store)
filter = None
try:
if request.GET["filter"]:
filter = request.GET["filter"]
except BaseException:
pass
offset = 10 * (request.page - 1)
granules = cat.mosaic_granules(
coverages['coverages']['coverage'][0]['name'],
store,
limit=10,
offset=offset,
filter=filter)
all_granules = cat.mosaic_granules(
coverages['coverages']['coverage'][0]['name'], store, filter=filter)
except BaseException:
granules = {"features": []}
all_granules = {"features": []}
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.views import get_capabilities
workspace, layername = layer.alternate.split(
":") if ":" in layer.alternate else (None, layer.alternate)
# WARNING Please make sure to have enabled DJANGO CACHE as per
# https://docs.djangoproject.com/en/2.0/topics/cache/#filesystem-caching
wms_capabilities_resp = get_capabilities(
request, layer.id, tolerant=True)
if wms_capabilities_resp.status_code >= 200 and wms_capabilities_resp.status_code < 400:
wms_capabilities = wms_capabilities_resp.getvalue()
if wms_capabilities:
import xml.etree.ElementTree as ET
e = ET.fromstring(wms_capabilities)
for atype in e.findall(
"./[Name='%s']/Extent[@name='time']" % (layername)):
dim_name = atype.get('name')
if dim_name:
dim_name = str(dim_name).lower()
if dim_name == 'time':
dim_values = atype.text
if dim_values:
all_times = dim_values.split(",")
break
group = None
if layer.group:
try:
group = GroupProfile.objects.get(slug=layer.group.name)
except GroupProfile.DoesNotExist:
group = None
# a flag to be used for qgis server
show_popup = False
if 'show_popup' in request.GET and request.GET["show_popup"]:
show_popup = True
context_dict = {
'resource': layer,
'group': group,
'perms_list': get_perms(request.user, layer.get_self_resource()),
"permissions_json": _perms_info_json(layer),
"documents": get_related_documents(layer),
"metadata": metadata,
"is_layer": True,
"wps_enabled": settings.OGC_SERVER['default']['WPS_ENABLED'],
"granules": granules,
"all_granules": all_granules,
"all_times": all_times,
"show_popup": show_popup,
"filter": filter,
"storeType": layer.storeType,
# "online": (layer.remote_service.probe == 200) if layer.storeType == "remoteStore" else True
}
if request and 'access_token' in request.session:
access_token = request.session['access_token']
else:
u = uuid.uuid1()
access_token = u.hex
context_dict["viewer"] = json.dumps(map_obj.viewer_json(
request, * (NON_WMS_BASE_LAYERS + [maplayer])))
context_dict["preview"] = getattr(
settings,
'GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY',
'geoext')
context_dict["crs"] = getattr(
settings,
'DEFAULT_MAP_CRS',
'EPSG:3857')
# provide bbox in EPSG:4326 for leaflet
if context_dict["preview"] == 'leaflet':
srid, wkt = layer.geographic_bounding_box.split(';')
srid = re.findall(r'\d+', srid)
geom = GEOSGeometry(wkt, srid=int(srid[0]))
geom.transform(4326)
context_dict["layer_bbox"] = ','.join([str(c) for c in geom.extent])
if layer.storeType == 'dataStore':
links = layer.link_set.download().filter(
Q(name__in=settings.DOWNLOAD_FORMATS_VECTOR) |
Q(link_type='original'))
else:
links = layer.link_set.download().filter(
Q(name__in=settings.DOWNLOAD_FORMATS_RASTER) |
Q(link_type='original'))
    links_view = [item for idx, item in enumerate(links) if
                  item.url and ('wms' in item.url or 'gwc' in item.url)]
links_download = [item for idx, item in enumerate(
links) if item.url and 'wms' not in item.url and 'gwc' not in item.url]
for item in links_view:
if item.url and access_token and 'access_token' not in item.url:
params = {'access_token': access_token}
item.url = Request('GET', item.url, params=params).prepare().url
for item in links_download:
if item.url and access_token and 'access_token' not in item.url:
params = {'access_token': access_token}
item.url = Request('GET', item.url, params=params).prepare().url
if request.user.has_perm('view_resourcebase', layer.get_self_resource()):
context_dict["links"] = links_view
if request.user.has_perm(
'download_resourcebase',
layer.get_self_resource()):
if layer.storeType == 'dataStore':
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_VECTOR)
else:
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_RASTER)
context_dict["links_download"] = links_download
if settings.SOCIAL_ORIGINS:
context_dict["social_links"] = build_social_links(request, layer)
layers_names = layer.alternate
try:
if settings.DEFAULT_WORKSPACE and settings.DEFAULT_WORKSPACE in layers_names:
workspace, name = layers_names.split(':', 1)
else:
name = layers_names
except BaseException:
logger.error("Can not identify workspace type and layername")
context_dict["layer_name"] = json.dumps(layers_names)
try:
# get type of layer (raster or vector)
if layer.storeType == 'coverageStore':
context_dict["layer_type"] = "raster"
elif layer.storeType == 'dataStore':
if layer.has_time:
context_dict["layer_type"] = "vector_time"
else:
context_dict["layer_type"] = "vector"
location = "{location}{service}".format(** {
'location': settings.OGC_SERVER['default']['LOCATION'],
'service': 'wms',
})
# get schema for specific layer
username = settings.OGC_SERVER['default']['USER']
password = settings.OGC_SERVER['default']['PASSWORD']
schema = get_schema(
location,
name,
username=username,
password=password)
# get the name of the column which holds the geometry
if 'the_geom' in schema['properties']:
schema['properties'].pop('the_geom', None)
elif 'geom' in schema['properties']:
schema['properties'].pop("geom", None)
        # collect the attribute names present in the schema
layer_attributes_schema = []
for key in schema['properties'].keys():
layer_attributes_schema.append(key)
filtered_attributes = layer_attributes_schema
context_dict["schema"] = schema
context_dict["filtered_attributes"] = filtered_attributes
except BaseException:
logger.error(
"Possible error with OWSLib. Turning all available properties to string")
if settings.GEOTIFF_IO_ENABLED:
from geonode.contrib.geotiffio import create_geotiff_io_url
context_dict["link_geotiff_io"] = create_geotiff_io_url(layer, access_token)
    # maps owned by the user, needed to fill the "add to existing map" section in the template
if request.user.is_authenticated():
context_dict["maps"] = Map.objects.filter(owner=request.user)
return TemplateResponse(
request, template, context=context_dict)
# Loads the data using the OWS lib when the "Do you want to filter it"
# button is clicked.
def load_layer_data(request, template='layers/layer_detail.html'):
context_dict = {}
data_dict = json.loads(request.POST.get('json_data'))
layername = data_dict['layer_name']
filtered_attributes = [x for x in data_dict['filtered_attributes'] if '/load_layer_data' not in x]
workspace, name = layername.split(':')
location = "{location}{service}".format(** {
'location': settings.OGC_SERVER['default']['LOCATION'],
'service': 'wms',
})
try:
# TODO: should be improved by using OAuth2 token (or at least user
# related to it) instead of super-powers
username = settings.OGC_SERVER['default']['USER']
password = settings.OGC_SERVER['default']['PASSWORD']
wfs = WebFeatureService(
location,
version='1.1.0',
username=username,
password=password)
response = wfs.getfeature(
typename=name,
propertyname=filtered_attributes,
outputFormat='application/json')
x = response.read()
x = json.loads(x)
features_response = json.dumps(x)
decoded = json.loads(features_response)
decoded_features = decoded['features']
properties = {}
for key in decoded_features[0]['properties']:
properties[key] = []
        # loop over the features and, for each property, collect every
        # non-empty value that appears
from collections import Iterable
for i in range(len(decoded_features)):
for key, value in decoded_features[i]['properties'].iteritems():
if value != '' and isinstance(value, (string_types, int, float)) and (
(isinstance(value, Iterable) and '/load_layer_data' not in value) or value):
properties[key].append(value)
for key in properties:
properties[key] = list(set(properties[key]))
properties[key].sort()
context_dict["feature_properties"] = properties
except BaseException:
import traceback
traceback.print_exc()
logger.error("Possible error with OWSLib.")
return HttpResponse(json.dumps(context_dict),
content_type="application/json")
def layer_feature_catalogue(
request,
layername,
template='../../catalogue/templates/catalogue/feature_catalogue.xml'):
layer = _resolve_layer(request, layername)
if layer.storeType != 'dataStore':
out = {
'success': False,
'errors': 'layer is not a feature type'
}
return HttpResponse(
json.dumps(out),
content_type='application/json',
status=400)
attributes = []
for attrset in layer.attribute_set.order_by('display_order'):
attr = {
'name': attrset.attribute,
'type': attrset.attribute_type
}
attributes.append(attr)
context_dict = {
'layer': layer,
'attributes': attributes,
'metadata': settings.PYCSW['CONFIGURATION']['metadata:main']
}
return render(
request,
template,
context=context_dict,
content_type='application/xml')
@login_required
def layer_metadata(
request,
layername,
template='layers/layer_metadata.html',
ajax=True):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase_metadata',
_PERMISSION_MSG_METADATA)
layer_attribute_set = inlineformset_factory(
Layer,
Attribute,
extra=0,
form=LayerAttributeForm,
)
topic_category = layer.category
poc = layer.poc
metadata_author = layer.metadata_author
# assert False, str(layer_bbox)
config = layer.attribute_config()
# Add required parameters for GXP lazy-loading
layer_bbox = layer.bbox
bbox = [float(coord) for coord in list(layer_bbox[0:4])]
if hasattr(layer, 'srid'):
config['crs'] = {
'type': 'name',
'properties': layer.srid
}
config["srs"] = getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:3857')
config["bbox"] = bbox if config["srs"] != 'EPSG:3857' \
else llbbox_to_mercator([float(coord) for coord in bbox])
config["title"] = layer.title
config["queryable"] = True
if layer.storeType == "remoteStore":
service = layer.remote_service
source_params = {}
if service.type in ('REST_MAP', 'REST_IMG'):
source_params = {
"ptype": service.ptype,
"remote": True,
"url": service.service_url,
"name": service.name,
"title": "[R] %s" % service.title}
maplayer = GXPLayer(
name=layer.alternate,
ows_url=layer.ows_url,
layer_params=json.dumps(config),
source_params=json.dumps(source_params)
)
else:
maplayer = GXPLayer(
name=layer.alternate,
ows_url=layer.ows_url,
layer_params=json.dumps(config))
# Update count for popularity ranking,
# but do not include admins or resource owners
if request.user != layer.owner and not request.user.is_superuser:
Layer.objects.filter(
id=layer.id).update(popular_count=F('popular_count') + 1)
# center/zoom don't matter; the viewer will center on the layer bounds
map_obj = GXPMap(
projection=getattr(
settings,
'DEFAULT_MAP_CRS',
'EPSG:3857'))
NON_WMS_BASE_LAYERS = [
la for la in default_map_config(request)[1] if la.ows_url is None]
if request.method == "POST":
if layer.metadata_uploaded_preserve: # layer metadata cannot be edited
out = {
'success': False,
'errors': METADATA_UPLOADED_PRESERVE_ERROR
}
return HttpResponse(
json.dumps(out),
content_type='application/json',
status=400)
layer_form = LayerForm(request.POST, instance=layer, prefix="resource")
if not layer_form.is_valid():
out = {
'success': False,
'errors': layer_form.errors
}
return HttpResponse(
json.dumps(out),
content_type='application/json',
status=400)
attribute_form = layer_attribute_set(
request.POST,
instance=layer,
prefix="layer_attribute_set",
queryset=Attribute.objects.order_by('display_order'))
category_form = CategoryForm(
    request.POST,
    prefix="category_choice_field",
    initial=int(request.POST["category_choice_field"])
    if "category_choice_field" in request.POST else None)
tkeywords_form = TKeywordForm(
request.POST,
prefix="tkeywords")
else:
layer_form = LayerForm(instance=layer, prefix="resource")
attribute_form = layer_attribute_set(
instance=layer,
prefix="layer_attribute_set",
queryset=Attribute.objects.order_by('display_order'))
category_form = CategoryForm(
prefix="category_choice_field",
initial=topic_category.id if topic_category else None)
# Keywords from THESAURI management
layer_tkeywords = layer.tkeywords.all()
tkeywords_list = ''
lang = 'en' # TODO: use user's language
if layer_tkeywords and len(layer_tkeywords) > 0:
tkeywords_ids = layer_tkeywords.values_list('id', flat=True)
if hasattr(settings, 'THESAURI'):
for el in settings.THESAURI:
thesaurus_name = el['name']
try:
t = Thesaurus.objects.get(identifier=thesaurus_name)
for tk in t.thesaurus.filter(pk__in=tkeywords_ids):
tkl = tk.keyword.filter(lang=lang)
if len(tkl) > 0:
tkl_ids = ",".join(
map(str, tkl.values_list('id', flat=True)))
tkeywords_list += "," + \
tkl_ids if len(
tkeywords_list) > 0 else tkl_ids
except BaseException:
tb = traceback.format_exc()
logger.error(tb)
tkeywords_form = TKeywordForm(
prefix="tkeywords",
initial={'tkeywords': tkeywords_list})
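# For reference, a minimal sketch of the settings.THESAURI structure this
# block (and the matching POST branch below) assumes; the thesaurus name is
# a made-up example and real entries may carry additional fields:
#
#   THESAURI = [{'name': 'inspire-theme'}]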
if request.method == "POST" and layer_form.is_valid() and attribute_form.is_valid(
) and category_form.is_valid() and tkeywords_form.is_valid():
new_poc = layer_form.cleaned_data['poc']
new_author = layer_form.cleaned_data['metadata_author']
if new_poc is None:
if poc is None:
poc_form = ProfileForm(
request.POST,
prefix="poc",
instance=poc)
else:
poc_form = ProfileForm(request.POST, prefix="poc")
if poc_form.is_valid():
if len(poc_form.cleaned_data['profile']) == 0:
# FIXME use form.add_error in django > 1.7
errors = poc_form._errors.setdefault(
'profile', ErrorList())
errors.append(
_('You must set a point of contact for this resource'))
poc = None
if poc_form.has_changed() and poc_form.is_valid():
new_poc = poc_form.save()
if new_author is None:
if metadata_author is None:
author_form = ProfileForm(request.POST, prefix="author",
instance=metadata_author)
else:
author_form = ProfileForm(request.POST, prefix="author")
if author_form.is_valid():
if len(author_form.cleaned_data['profile']) == 0:
# FIXME use form.add_error in django > 1.7
errors = author_form._errors.setdefault(
'profile', ErrorList())
errors.append(
_('You must set an author for this resource'))
metadata_author = None
if author_form.has_changed() and author_form.is_valid():
new_author = author_form.save()
new_category = TopicCategory.objects.get(
id=category_form.cleaned_data['category_choice_field'])
for form in attribute_form.cleaned_data:
la = Attribute.objects.get(id=int(form['id'].id))
la.description = form["description"]
la.attribute_label = form["attribute_label"]
la.visible = True if form["attribute_label"] else False # form["visible"]
la.display_order = form["display_order"]
la.save()
if new_poc is not None or new_author is not None:
if new_poc is not None:
layer.poc = new_poc
if new_author is not None:
layer.metadata_author = new_author
new_keywords = layer_form.cleaned_data['keywords']
if new_keywords is not None:
layer.keywords.clear()
layer.keywords.add(*new_keywords)
new_regions = [x.strip() for x in layer_form.cleaned_data['regions']]
if new_regions is not None:
layer.regions.clear()
layer.regions.add(*new_regions)
the_layer = layer_form.instance
the_layer.save()
up_sessions = UploadSession.objects.filter(layer=the_layer.id)
if up_sessions.count() > 0 and up_sessions[0].user != the_layer.owner:
up_sessions.update(user=the_layer.owner)
if new_category is not None:
Layer.objects.filter(id=the_layer.id).update(
category=new_category
)
if getattr(settings, 'SLACK_ENABLED', False):
try:
from geonode.contrib.slack.utils import build_slack_message_layer, send_slack_messages
send_slack_messages(
build_slack_message_layer(
"layer_edit", the_layer))
except BaseException:
logger.error("Could not send slack message.")
if not ajax:
return HttpResponseRedirect(
reverse(
'layer_detail',
args=(
layer.service_typename,
)))
message = layer.alternate
try:
# Keywords from THESAURI management
tkeywords_to_add = []
tkeywords_cleaned = tkeywords_form.clean()
if tkeywords_cleaned and len(tkeywords_cleaned) > 0:
tkeywords_ids = []
for i, val in enumerate(tkeywords_cleaned):
try:
cleaned_data = [
    value for key, value in tkeywords_cleaned[i].items()
    if 'tkeywords-tkeywords' in key.lower() and
    'autocomplete' not in key.lower()]
except BaseException:
pass
if hasattr(settings, 'THESAURI'):
for el in settings.THESAURI:
thesaurus_name = el['name']
try:
t = Thesaurus.objects.get(
identifier=thesaurus_name)
for tk in t.thesaurus.all():
tkl = tk.keyword.filter(pk__in=tkeywords_ids)
if len(tkl) > 0:
tkeywords_to_add.append(tkl[0].keyword_id)
except BaseException:
tb = traceback.format_exc()
logger.error(tb)
layer.tkeywords.add(*tkeywords_to_add)
except BaseException:
tb = traceback.format_exc()
logger.error(tb)
return HttpResponse(json.dumps({'message': message}))
if settings.ADMIN_MODERATE_UPLOADS:
if not request.user.is_superuser:
layer_form.fields['is_published'].widget.attrs.update(
{'disabled': 'true'})
can_change_metadata = request.user.has_perm(
'change_resourcebase_metadata',
layer.get_self_resource())
try:
is_manager = request.user.groupmember_set.all().filter(role='manager').exists()
except BaseException:
is_manager = False
if not is_manager or not can_change_metadata:
layer_form.fields['is_approved'].widget.attrs.update(
{'disabled': 'true'})
if poc is not None:
layer_form.fields['poc'].initial = poc.id
poc_form = ProfileForm(prefix="poc")
poc_form.hidden = True
else:
poc_form = ProfileForm(prefix="poc")
poc_form.hidden = False
if metadata_author is not None:
layer_form.fields['metadata_author'].initial = metadata_author.id
author_form = ProfileForm(prefix="author")
author_form.hidden = True
else:
author_form = ProfileForm(prefix="author")
author_form.hidden = False
viewer = json.dumps(map_obj.viewer_json(
    request, *(NON_WMS_BASE_LAYERS + [maplayer])))
metadataxsl = False
if "geonode.contrib.metadataxsl" in settings.INSTALLED_APPS:
metadataxsl = True
metadata_author_groups = []
if request.user.is_superuser or request.user.is_staff:
metadata_author_groups = GroupProfile.objects.all()
else:
try:
all_metadata_author_groups = chain(
request.user.group_list_all().distinct(),
GroupProfile.objects.exclude(
access="private").exclude(access="public-invite"))
except BaseException:
all_metadata_author_groups = GroupProfile.objects.exclude(
access="private").exclude(access="public-invite")
for item in all_metadata_author_groups:
    if item not in metadata_author_groups:
        metadata_author_groups.append(item)
return render(request, template, context={
"resource": layer,
"layer": layer,
"layer_form": layer_form,
"poc_form": poc_form,
"author_form": author_form,
"attribute_form": attribute_form,
"category_form": category_form,
"tkeywords_form": tkeywords_form,
"viewer": viewer,
"preview": getattr(settings, 'GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY', 'geoext'),
"crs": getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:3857'),
"metadataxsl": metadataxsl,
"freetext_readonly": getattr(
settings,
'FREETEXT_KEYWORDS_READONLY',
False),
"metadata_author_groups": metadata_author_groups,
"GROUP_MANDATORY_RESOURCES":
getattr(settings, 'GROUP_MANDATORY_RESOURCES', False),
})
@login_required
def layer_metadata_advanced(request, layername):
return layer_metadata(
request,
layername,
template='layers/layer_metadata_advanced.html')
@login_required
def layer_change_poc(request, ids, template='layers/layer_change_poc.html'):
layers = Layer.objects.filter(id__in=ids.split('_'))
if settings.MONITORING_ENABLED:
for _l in layers:
if hasattr(_l, 'alternate'):
request.add_resource('layer', _l.alternate)
if request.method == 'POST':
form = PocForm(request.POST)
if form.is_valid():
for layer in layers:
layer.poc = form.cleaned_data['contact']
layer.save()
# Process the data in form.cleaned_data
# ...
# Redirect after POST
return HttpResponseRedirect('/admin/maps/layer')
else:
form = PocForm() # An unbound form
return render(
request, template, context={'layers': layers, 'form': form})
@login_required
def layer_replace(request, layername, template='layers/layer_replace.html'):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase',
_PERMISSION_MSG_MODIFY)
if request.method == 'GET':
ctx = {
'charsets': CHARSETS,
'resource': layer,
'is_featuretype': layer.is_vector(),
'is_layer': True,
}
return render(request, template, context=ctx)
elif request.method == 'POST':
form = LayerUploadForm(request.POST, request.FILES)
tempdir = None
out = {}
if form.is_valid():
try:
tempdir, base_file = form.write_files()
if layer.is_vector() and is_raster(base_file):
out['success'] = False
out['errors'] = _(
"You are attempting to replace a vector layer with a raster.")
elif (not layer.is_vector()) and is_vector(base_file):
out['success'] = False
out['errors'] = _(
"You are attempting to replace a raster layer with a vector.")
else:
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
# delete geoserver's store before upload
# cascading_delete(gs_catalog, layer.alternate)
out['ogc_backend'] = geoserver.BACKEND_PACKAGE
elif check_ogc_backend(qgis_server.BACKEND_PACKAGE):
try:
qgis_layer = QGISServerLayer.objects.get(
layer=layer)
qgis_layer.delete()
except QGISServerLayer.DoesNotExist:
pass
out['ogc_backend'] = qgis_server.BACKEND_PACKAGE
saved_layer = file_upload(
base_file,
title=layer.title,
abstract=layer.abstract,
is_approved=layer.is_approved,
is_published=layer.is_published,
name=layer.name,
user=layer.owner,
# user=request.user,
license=layer.license.name if layer.license else None,
category=layer.category,
keywords=list(layer.keywords.all()),
regions=list(layer.regions.values_list('name', flat=True)),
# date=layer.date,
overwrite=True,
charset=form.cleaned_data["charset"],
)
out['success'] = True
out['url'] = reverse(
'layer_detail', args=[
saved_layer.service_typename])
except BaseException as e:
logger.exception(e)
tb = traceback.format_exc()
out['success'] = False
out['errors'] = str(tb)
finally:
if tempdir is not None:
shutil.rmtree(tempdir)
else:
errormsgs = []
for e in form.errors.values():
    errormsgs.extend([escape(v) for v in e])
out['success'] = False
out['errors'] = form.errors
out['errormsgs'] = errormsgs
if out['success']:
status_code = 200
else:
status_code = 400
return HttpResponse(
json.dumps(out),
content_type='application/json',
status=status_code)
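# A sketch of the JSON responses layer_replace produces (values are
# illustrative): on success, HTTP 200 with
#   {"success": true, "url": "<layer_detail url>", "ogc_backend": "..."}
# and on failure, HTTP 400 with
#   {"success": false, "errors": "...", "errormsgs": [...]}.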
@login_required
def layer_remove(request, layername, template='layers/layer_remove.html'):
layer = _resolve_layer(
request,
layername,
'base.delete_resourcebase',
_PERMISSION_MSG_DELETE)
if (request.method == 'GET'):
return render(request, template, context={
"layer": layer
})
if (request.method == 'POST'):
try:
with transaction.atomic():
# Using Tastypie
# from geonode.api.resourcebase_api import LayerResource
# res = LayerResource()
# request_bundle = res.build_bundle(request=request)
# layer_bundle = res.build_bundle(request=request, obj=layer)
# layer_json = res.serialize(None,
# res.full_dehydrate(layer_bundle),
# "application/json")
# delete_layer.delay(instance=layer_json)
result = delete_layer.delay(layer_id=layer.id)
result.wait(10)
except TimeoutError:
# traceback.print_exc()
pass
except Exception as e:
traceback.print_exc()
message = '{0}: {1}.'.format(
_('Unable to delete layer'), layer.alternate)
if 'referenced by layer group' in getattr(e, 'message', ''):
message = _(
'This layer is a member of a layer group, you must remove the layer from the group '
'before deleting.')
messages.error(request, message)
return render(
request, template, context={"layer": layer})
return HttpResponseRedirect(reverse("layer_browse"))
else:
return HttpResponse("Not allowed", status=403)
@login_required
def layer_granule_remove(
request,
granule_id,
layername,
template='layers/layer_granule_remove.html'):
layer = _resolve_layer(
request,
layername,
'base.delete_resourcebase',
_PERMISSION_MSG_DELETE)
if (request.method == 'GET'):
return render(request, template, context={
"granule_id": granule_id,
"layer": layer
})
if (request.method == 'POST'):
try:
cat = gs_catalog
cat._cache.clear()
store = cat.get_store(layer.name)
coverages = cat.mosaic_coverages(store)
cat.mosaic_delete_granule(
coverages['coverages']['coverage'][0]['name'], store, granule_id)
except Exception as e:
traceback.print_exc()
message = '{0}: {1}.'.format(
_('Unable to delete layer'), layer.alternate)
if 'referenced by layer group' in getattr(e, 'message', ''):
message = _(
'This layer is a member of a layer group, you must remove the layer from the group '
'before deleting.')
messages.error(request, message)
return render(
request, template, context={"layer": layer})
return HttpResponseRedirect(
reverse(
'layer_detail', args=(
layer.service_typename,)))
else:
return HttpResponse("Not allowed", status=403)
def layer_thumbnail(request, layername):
if request.method == 'POST':
layer_obj = _resolve_layer(request, layername)
try:
try:
preview = json.loads(request.body).get('preview', None)
except BaseException:
preview = None
if preview and preview == 'react':
format, image = json.loads(
request.body)['image'].split(';base64,')
image = base64.b64decode(image)
else:
image = _render_thumbnail(request.body)
if not image:
    return HttpResponse(
        content='error generating thumbnail',
        status=500,
        content_type='text/plain'
    )
filename = "layer-%s-thumb.png" % layer_obj.uuid
layer_obj.save_thumbnail(filename, image)
return HttpResponse('Thumbnail saved')
except BaseException:
return HttpResponse(
content='error saving thumbnail',
status=500,
content_type='text/plain'
)
def get_layer(request, layername):
"""Get Layer object as JSON"""
# Function to treat Decimal in json.dumps.
# http://stackoverflow.com/a/16957370/1198772
def decimal_default(obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
raise TypeError
logger.debug('Call get layer')
if request.method == 'GET':
layer_obj = _resolve_layer(request, layername)
logger.debug(layername)
response = {
'typename': layername,
'name': layer_obj.name,
'title': layer_obj.title,
'url': layer_obj.get_tiles_url(),
'bbox_string': layer_obj.bbox_string,
'bbox_x0': layer_obj.bbox_x0,
'bbox_x1': layer_obj.bbox_x1,
'bbox_y0': layer_obj.bbox_y0,
'bbox_y1': layer_obj.bbox_y1,
}
return HttpResponse(json.dumps(
response,
ensure_ascii=False,
default=decimal_default
),
content_type='application/javascript')
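# Worked example of the decimal_default hook above (illustrative):
#
#   >>> import decimal, json
#   >>> json.dumps({'bbox_x0': decimal.Decimal('12.5')}, default=decimal_default)
#   '{"bbox_x0": 12.5}'
#
# Without the hook, json.dumps raises TypeError on Decimal values.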
def layer_metadata_detail(
request,
layername,
template='layers/layer_metadata_detail.html'):
layer = _resolve_layer(
request,
layername,
'view_resourcebase',
_PERMISSION_MSG_METADATA)
group = None
if layer.group:
try:
group = GroupProfile.objects.get(slug=layer.group.name)
except GroupProfile.DoesNotExist:
group = None
site_url = settings.SITEURL.rstrip('/') if settings.SITEURL.startswith('http') else settings.SITEURL
return render(request, template, context={
"resource": layer,
"group": group,
'SITEURL': site_url
})
def layer_metadata_upload(
request,
layername,
template='layers/layer_metadata_upload.html'):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase',
_PERMISSION_MSG_METADATA)
site_url = settings.SITEURL.rstrip('/') if settings.SITEURL.startswith('http') else settings.SITEURL
return render(request, template, context={
"resource": layer,
"layer": layer,
'SITEURL': site_url
})
def layer_sld_upload(
request,
layername,
template='layers/layer_style_upload.html'):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase',
_PERMISSION_MSG_METADATA)
site_url = settings.SITEURL.rstrip('/') if settings.SITEURL.startswith('http') else settings.SITEURL
return render(request, template, context={
"resource": layer,
"layer": layer,
'SITEURL': site_url
})
@login_required
def layer_batch_metadata(request, ids):
return batch_modify(request, ids, 'Layer')
def layer_view_counter(layer_id, viewer):
_l = Layer.objects.get(id=layer_id)
_u = get_user_model().objects.get(username=viewer)
_l.view_count_up(_u, do_local=True)
| gpl-3.0 |
Helais/herostats | lib/heroprotocol/protocol36693.py | 18 | 29680 | # Copyright (c) 2015 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_optional',[13]), #32
('_int',[(-9223372036854775808,64)]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_struct',[[('m_name',9,-18),('m_clanTag',41,-17),('m_clanLogo',42,-16),('m_highestLeague',25,-15),('m_combinedRaceLevels',43,-14),('m_randomSeed',6,-13),('m_racePreference',44,-12),('m_teamPreference',45,-11),('m_testMap',13,-10),('m_testAuto',13,-9),('m_examine',13,-8),('m_customInterface',13,-7),('m_testType',6,-6),('m_observe',24,-5),('m_hero',46,-4),('m_skin',46,-3),('m_mount',46,-2),('m_toonHandle',20,-1)]]), #47
('_array',[(0,5),47]), #48
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_ranked',13,-10),('m_competitive',13,-9),('m_practice',13,-8),('m_cooperative',13,-7),('m_noVictoryOrDefeat',13,-6),('m_heroDuplicatesAllowed',13,-5),('m_fog',24,-4),('m_observers',24,-3),('m_userDifficulty',24,-2),('m_clientDebugFlags',21,-1)]]), #49
('_int',[(1,4)]), #50
('_int',[(1,8)]), #51
('_bitarray',[(0,6)]), #52
('_bitarray',[(0,8)]), #53
('_bitarray',[(0,2)]), #54
('_bitarray',[(0,7)]), #55
('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',53,-3),('m_allowedObserveTypes',54,-2),('m_allowedAIBuilds',55,-1)]]), #56
('_array',[(0,5),56]), #57
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',57,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #58
('_optional',[1]), #59
('_optional',[2]), #60
('_struct',[[('m_color',60,-1)]]), #61
('_array',[(0,4),46]), #62
('_array',[(0,17),6]), #63
('_array',[(0,9),6]), #64
('_struct',[[('m_control',10,-21),('m_userId',59,-20),('m_teamId',1,-19),('m_colorPref',61,-18),('m_racePref',44,-17),('m_difficulty',3,-16),('m_aiBuild',0,-15),('m_handicap',0,-14),('m_observe',24,-13),('m_logoIndex',6,-12),('m_hero',46,-11),('m_skin',46,-10),('m_mount',46,-9),('m_artifacts',62,-8),('m_workingSetSlotId',25,-7),('m_rewards',63,-6),('m_toonHandle',20,-5),('m_licenses',64,-4),('m_tandemLeaderUserId',59,-3),('m_commander',46,-2),('m_commanderLevel',6,-1)]]), #65
('_array',[(0,5),65]), #66
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',66,-8),('m_randomSeed',6,-7),('m_hostUserId',59,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #67
('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',58,-2),('m_lobbyState',67,-1)]]), #68
('_struct',[[('m_syncLobbyState',68,-1)]]), #69
('_struct',[[('m_name',20,-1)]]), #70
('_blob',[(0,6)]), #71
('_struct',[[('m_name',71,-1)]]), #72
('_struct',[[('m_name',71,-3),('m_type',6,-2),('m_data',20,-1)]]), #73
('_struct',[[('m_type',6,-3),('m_name',71,-2),('m_data',34,-1)]]), #74
('_array',[(0,5),10]), #75
('_struct',[[('m_signature',75,-2),('m_toonHandle',20,-1)]]), #76
('_struct',[[('m_gameFullyDownloaded',13,-15),('m_developmentCheatsEnabled',13,-14),('m_testCheatsEnabled',13,-13),('m_multiplayerCheatsEnabled',13,-12),('m_syncChecksummingEnabled',13,-11),('m_isMapToMapTransition',13,-10),('m_startingRally',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #77
('_struct',[[]]), #78
('_int',[(0,16)]), #79
('_struct',[[('x',79,-2),('y',79,-1)]]), #80
('_struct',[[('m_which',12,-2),('m_target',80,-1)]]), #81
('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #82
('_struct',[[('m_sequence',6,-1)]]), #83
('_int',[(-2147483648,32)]), #84
('_struct',[[('x',84,-2),('y',84,-1)]]), #85
('_struct',[[('m_point',85,-4),('m_time',84,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #86
('_struct',[[('m_data',86,-1)]]), #87
('_int',[(0,23)]), #88
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #89
('_optional',[89]), #90
('_null',[]), #91
('_int',[(0,20)]), #92
('_struct',[[('x',92,-3),('y',92,-2),('z',84,-1)]]), #93
('_struct',[[('m_targetUnitFlags',79,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',79,-4),('m_snapshotControlPlayerId',59,-3),('m_snapshotUpkeepPlayerId',59,-2),('m_snapshotPoint',93,-1)]]), #94
('_choice',[(0,2),{0:('None',91),1:('TargetPoint',93),2:('TargetUnit',94),3:('Data',6)}]), #95
('_int',[(1,32)]), #96
('_struct',[[('m_cmdFlags',88,-6),('m_abil',90,-5),('m_data',95,-4),('m_sequence',96,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #97
('_int',[(0,9)]), #98
('_bitarray',[(0,9)]), #99
('_array',[(0,9),98]), #100
('_choice',[(0,2),{0:('None',91),1:('Mask',99),2:('OneIndices',100),3:('ZeroIndices',100)}]), #101
('_struct',[[('m_unitLink',79,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',98,-1)]]), #102
('_array',[(0,9),102]), #103
('_struct',[[('m_subgroupIndex',98,-4),('m_removeMask',101,-3),('m_addSubgroups',103,-2),('m_addUnitTags',64,-1)]]), #104
('_struct',[[('m_controlGroupId',1,-2),('m_delta',104,-1)]]), #105
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',24,-2),('m_mask',101,-1)]]), #106
('_struct',[[('m_count',98,-6),('m_subgroupCount',98,-5),('m_activeSubgroupIndex',98,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #107
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',107,-1)]]), #108
('_array',[(0,3),84]), #109
('_struct',[[('m_recipientId',1,-2),('m_resources',109,-1)]]), #110
('_struct',[[('m_chatMessage',29,-1)]]), #111
('_int',[(-128,8)]), #112
('_struct',[[('x',84,-3),('y',84,-2),('z',84,-1)]]), #113
('_struct',[[('m_beacon',112,-9),('m_ally',112,-8),('m_flags',112,-7),('m_build',112,-6),('m_targetUnitTag',6,-5),('m_targetUnitSnapshotUnitLink',79,-4),('m_targetUnitSnapshotUpkeepPlayerId',112,-3),('m_targetUnitSnapshotControlPlayerId',112,-2),('m_targetPoint',113,-1)]]), #114
('_struct',[[('m_speed',12,-1)]]), #115
('_struct',[[('m_delta',112,-1)]]), #116
('_struct',[[('m_point',85,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',84,-1)]]), #117
('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #118
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #119
('_struct',[[('m_unitTag',6,-1)]]), #120
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #121
('_struct',[[('m_conversationId',84,-2),('m_replyId',84,-1)]]), #122
('_optional',[20]), #123
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',123,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #124
('_array',[(0,5),124]), #125
('_int',[(0,1)]), #126
('_struct',[[('m_userInfos',125,-2),('m_method',126,-1)]]), #127
('_struct',[[('m_purchaseItemId',84,-1)]]), #128
('_struct',[[('m_difficultyLevel',84,-1)]]), #129
('_choice',[(0,3),{0:('None',91),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',84),4:('TextChanged',30),5:('MouseButton',6)}]), #130
('_struct',[[('m_controlId',84,-3),('m_eventType',84,-2),('m_eventData',130,-1)]]), #131
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #132
('_array',[(0,7),6]), #133
('_struct',[[('m_soundHash',133,-2),('m_length',133,-1)]]), #134
('_struct',[[('m_syncInfo',134,-1)]]), #135
('_struct',[[('m_queryId',79,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #136
('_struct',[[('m_queryId',79,-2),('m_lengthMs',6,-1)]]), #137
('_struct',[[('m_animWaitQueryId',79,-1)]]), #138
('_struct',[[('m_sound',6,-1)]]), #139
('_struct',[[('m_transmissionId',84,-2),('m_thread',6,-1)]]), #140
('_struct',[[('m_transmissionId',84,-1)]]), #141
('_optional',[80]), #142
('_optional',[79]), #143
('_optional',[112]), #144
('_struct',[[('m_target',142,-6),('m_distance',143,-5),('m_pitch',143,-4),('m_yaw',143,-3),('m_reason',144,-2),('m_follow',13,-1)]]), #145
('_struct',[[('m_skipType',126,-1)]]), #146
('_int',[(0,11)]), #147
('_struct',[[('x',147,-2),('y',147,-1)]]), #148
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',148,-3),('m_posWorld',93,-2),('m_flags',112,-1)]]), #149
('_struct',[[('m_posUI',148,-3),('m_posWorld',93,-2),('m_flags',112,-1)]]), #150
('_struct',[[('m_achievementLink',79,-1)]]), #151
('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #152
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #153
('_struct',[[('m_soundtrack',6,-1)]]), #154
('_struct',[[('m_planetId',84,-1)]]), #155
('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #156
('_struct',[[('m_resources',109,-1)]]), #157
('_struct',[[('m_fulfillRequestId',84,-1)]]), #158
('_struct',[[('m_cancelRequestId',84,-1)]]), #159
('_struct',[[('m_researchItemId',84,-1)]]), #160
('_struct',[[('m_mercenaryId',84,-1)]]), #161
('_struct',[[('m_battleReportId',84,-2),('m_difficultyLevel',84,-1)]]), #162
('_struct',[[('m_battleReportId',84,-1)]]), #163
('_int',[(0,19)]), #164
('_struct',[[('m_decrementMs',164,-1)]]), #165
('_struct',[[('m_portraitId',84,-1)]]), #166
('_struct',[[('m_functionName',20,-1)]]), #167
('_struct',[[('m_result',84,-1)]]), #168
('_struct',[[('m_gameMenuItemIndex',84,-1)]]), #169
('_struct',[[('m_purchaseCategoryId',84,-1)]]), #170
('_struct',[[('m_button',79,-1)]]), #171
('_struct',[[('m_cutsceneId',84,-2),('m_bookmarkName',20,-1)]]), #172
('_struct',[[('m_cutsceneId',84,-1)]]), #173
('_struct',[[('m_cutsceneId',84,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #174
('_struct',[[('m_cutsceneId',84,-2),('m_conversationLine',20,-1)]]), #175
('_struct',[[('m_leaveReason',1,-1)]]), #176
('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',123,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',59,-1)]]), #177
('_optional',[96]), #178
('_struct',[[('m_state',24,-2),('m_sequence',178,-1)]]), #179
('_struct',[[('m_target',93,-1)]]), #180
('_struct',[[('m_target',94,-1)]]), #181
('_struct',[[('m_catalog',10,-4),('m_entry',79,-3),('m_field',9,-2),('m_value',9,-1)]]), #182
('_struct',[[('m_index',6,-1)]]), #183
('_struct',[[('m_shown',13,-1)]]), #184
('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #185
('_struct',[[('m_recipient',12,-2),('m_point',85,-1)]]), #186
('_struct',[[('m_progress',84,-1)]]), #187
('_struct',[[('m_status',24,-1)]]), #188
('_struct',[[('m_scoreValueMineralsCurrent',84,0),('m_scoreValueVespeneCurrent',84,1),('m_scoreValueMineralsCollectionRate',84,2),('m_scoreValueVespeneCollectionRate',84,3),('m_scoreValueWorkersActiveCount',84,4),('m_scoreValueMineralsUsedInProgressArmy',84,5),('m_scoreValueMineralsUsedInProgressEconomy',84,6),('m_scoreValueMineralsUsedInProgressTechnology',84,7),('m_scoreValueVespeneUsedInProgressArmy',84,8),('m_scoreValueVespeneUsedInProgressEconomy',84,9),('m_scoreValueVespeneUsedInProgressTechnology',84,10),('m_scoreValueMineralsUsedCurrentArmy',84,11),('m_scoreValueMineralsUsedCurrentEconomy',84,12),('m_scoreValueMineralsUsedCurrentTechnology',84,13),('m_scoreValueVespeneUsedCurrentArmy',84,14),('m_scoreValueVespeneUsedCurrentEconomy',84,15),('m_scoreValueVespeneUsedCurrentTechnology',84,16),('m_scoreValueMineralsLostArmy',84,17),('m_scoreValueMineralsLostEconomy',84,18),('m_scoreValueMineralsLostTechnology',84,19),('m_scoreValueVespeneLostArmy',84,20),('m_scoreValueVespeneLostEconomy',84,21),('m_scoreValueVespeneLostTechnology',84,22),('m_scoreValueMineralsKilledArmy',84,23),('m_scoreValueMineralsKilledEconomy',84,24),('m_scoreValueMineralsKilledTechnology',84,25),('m_scoreValueVespeneKilledArmy',84,26),('m_scoreValueVespeneKilledEconomy',84,27),('m_scoreValueVespeneKilledTechnology',84,28),('m_scoreValueFoodUsed',84,29),('m_scoreValueFoodMade',84,30),('m_scoreValueMineralsUsedActiveForces',84,31),('m_scoreValueVespeneUsedActiveForces',84,32),('m_scoreValueMineralsFriendlyFireArmy',84,33),('m_scoreValueMineralsFriendlyFireEconomy',84,34),('m_scoreValueMineralsFriendlyFireTechnology',84,35),('m_scoreValueVespeneFriendlyFireArmy',84,36),('m_scoreValueVespeneFriendlyFireEconomy',84,37),('m_scoreValueVespeneFriendlyFireTechnology',84,38)]]), #189
('_struct',[[('m_playerId',1,0),('m_stats',189,1)]]), #190
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #191
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',59,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #192
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #193
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #194
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',84,2)]]), #195
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #196
('_array',[(0,10),84]), #197
('_struct',[[('m_firstUnitIndex',6,0),('m_items',197,1)]]), #198
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #199
]
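# A note on reading typeinfos (a sketch; the authoritative argument
# semantics live in the decoders module): each entry is a (decoder method,
# argument list) pair. '_int' arguments are (offset, bits) -- entry #0,
# ('_int',[(0,7)]), is a 7-bit integer with offset 0, while entry #33 pairs
# offset -9223372036854775808 with 64 bits to represent signed 64-bit
# values. '_struct' lists (field name, typeid, tag) triples, and '_choice'
# reads a small tag first and then decodes the typeid it maps to.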
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (78, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (77, 'NNet.Game.SUserOptionsEvent'),
9: (70, 'NNet.Game.SBankFileEvent'),
10: (72, 'NNet.Game.SBankSectionEvent'),
11: (73, 'NNet.Game.SBankKeyEvent'),
12: (74, 'NNet.Game.SBankValueEvent'),
13: (76, 'NNet.Game.SBankSignatureEvent'),
14: (81, 'NNet.Game.SCameraSaveEvent'),
21: (82, 'NNet.Game.SSaveGameEvent'),
22: (78, 'NNet.Game.SSaveGameDoneEvent'),
23: (78, 'NNet.Game.SLoadGameDoneEvent'),
25: (83, 'NNet.Game.SCommandManagerResetEvent'),
26: (87, 'NNet.Game.SGameCheatEvent'),
27: (97, 'NNet.Game.SCmdEvent'),
28: (105, 'NNet.Game.SSelectionDeltaEvent'),
29: (106, 'NNet.Game.SControlGroupUpdateEvent'),
30: (108, 'NNet.Game.SSelectionSyncCheckEvent'),
31: (110, 'NNet.Game.SResourceTradeEvent'),
32: (111, 'NNet.Game.STriggerChatMessageEvent'),
33: (114, 'NNet.Game.SAICommunicateEvent'),
34: (115, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (116, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (117, 'NNet.Game.STriggerPingEvent'),
37: (118, 'NNet.Game.SBroadcastCheatEvent'),
38: (119, 'NNet.Game.SAllianceEvent'),
39: (120, 'NNet.Game.SUnitClickEvent'),
40: (121, 'NNet.Game.SUnitHighlightEvent'),
41: (122, 'NNet.Game.STriggerReplySelectedEvent'),
43: (127, 'NNet.Game.SHijackReplayGameEvent'),
44: (78, 'NNet.Game.STriggerSkippedEvent'),
45: (132, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (139, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (140, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (141, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (145, 'NNet.Game.SCameraUpdateEvent'),
50: (78, 'NNet.Game.STriggerAbortMissionEvent'),
51: (128, 'NNet.Game.STriggerPurchaseMadeEvent'),
52: (78, 'NNet.Game.STriggerPurchaseExitEvent'),
53: (129, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
54: (78, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
55: (131, 'NNet.Game.STriggerDialogControlEvent'),
56: (135, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (146, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (149, 'NNet.Game.STriggerMouseClickedEvent'),
59: (150, 'NNet.Game.STriggerMouseMovedEvent'),
60: (151, 'NNet.Game.SAchievementAwardedEvent'),
61: (152, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (153, 'NNet.Game.STriggerTargetModeUpdateEvent'),
63: (78, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
64: (154, 'NNet.Game.STriggerSoundtrackDoneEvent'),
65: (155, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
66: (156, 'NNet.Game.STriggerKeyPressedEvent'),
67: (167, 'NNet.Game.STriggerMovieFunctionEvent'),
68: (78, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
69: (78, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
70: (157, 'NNet.Game.SResourceRequestEvent'),
71: (158, 'NNet.Game.SResourceRequestFulfillEvent'),
72: (159, 'NNet.Game.SResourceRequestCancelEvent'),
73: (78, 'NNet.Game.STriggerResearchPanelExitEvent'),
74: (78, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
75: (160, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
77: (78, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
78: (78, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
79: (161, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
80: (78, 'NNet.Game.STriggerVictoryPanelExitEvent'),
81: (78, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
82: (162, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
83: (163, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
84: (163, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
85: (129, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
86: (78, 'NNet.Game.STriggerMovieStartedEvent'),
87: (78, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (165, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (166, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (168, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (169, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
93: (128, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
94: (170, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
95: (171, 'NNet.Game.STriggerButtonPressedEvent'),
96: (78, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (172, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (173, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (174, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (175, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (176, 'NNet.Game.SGameUserLeaveEvent'),
102: (177, 'NNet.Game.SGameUserJoinEvent'),
103: (179, 'NNet.Game.SCommandManagerStateEvent'),
104: (180, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (181, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (136, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (137, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (138, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (182, 'NNet.Game.SCatalogModifyEvent'),
110: (183, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (78, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (184, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (185, 'NNet.Game.SChatMessage'),
1: (186, 'NNet.Game.SPingMessage'),
2: (187, 'NNet.Game.SLoadingProgressMessage'),
3: (78, 'NNet.Game.SServerPingMessage'),
4: (188, 'NNet.Game.SReconnectNotifyMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
0: (190, 'NNet.Replay.Tracker.SPlayerStatsEvent'),
1: (191, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (192, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (193, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (194, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (195, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (191, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (196, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (198, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (199, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 69
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_user_id:
userid = decoder.instance(replay_userid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_user_id:
event['_userid'] = userid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_user_id=True):
yield event
def decode_replay_tracker_events(contents):
"""Decodes and yields each tracker event from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
tracker_eventid_typeid,
tracker_event_types,
decode_user_id=False):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
count = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip('\x00')
if scope not in attributes['scopes']:
    attributes['scopes'][scope] = {}
if attrid not in attributes['scopes'][scope]:
    attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
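# Minimal usage sketch (not part of the original module): replay files are
# MPQ archives, so the byte strings passed to the decoders above are
# typically extracted with the mpyq package. This assumes mpyq is installed
# and the archive uses the usual replay file layout:
#
#   import mpyq
#   archive = mpyq.MPQArchive('replay.StormReplay')
#   contents = archive.read_file('replay.tracker.events')
#   for event in decode_replay_tracker_events(contents):
#       print event['_event'], event['_gameloop']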
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
| mit |
nboley/grit | grit/simulator/reads_simulator.py | 1 | 21238 | """
Copyright (c) 2011-2015 Nathan Boley
This file is part of GRIT.
GRIT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GRIT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GRIT. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import os.path
import numpy
import pickle
import pysam
import math
from random import random
from collections import defaultdict
import tempfile
DEFAULT_QUALITY_SCORE = 'r'
DEFAULT_BASE = 'A'
DEFAULT_FRAG_LENGTH = 150
DEFAULT_READ_LENGTH = 100
DEFAULT_NUM_FRAGS = 100
NUM_NORM_SDS = 4
FREQ_GTF_STRINGS = [ 'freq', 'frac' ]
# add slide dir to sys.path and import frag_len mod
#sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), ".." ))
sys.path.insert(0, "/home/nboley/grit/grit/")
import grit.frag_len as frag_len
from grit.files.gtf import load_gtf
from grit.files.reads import clean_chr_name
def fix_chr_name(x):
return "chr" + clean_chr_name(x)
def get_transcript_sequence(transcript, fasta):
""" get the mRNA sequence of the transcript from the gene seq
"""
trans_seq = []
for start, stop in transcript.exons:
seq = fasta.fetch(fix_chr_name(transcript.chrm), start, stop+1)
trans_seq.append( seq.upper() )
trans_seq = "".join(trans_seq)
return trans_seq
def get_cigar( transcript, start, stop ):
"""loop through introns within the read and add #N to the cigar for each
intron add #M for portions of read which map to exons
"""
def calc_len(interval):
return interval[1]-interval[0]+1
cigar = []
# find the exon index of the start
genome_start = transcript.genome_pos(start)
start_exon = next(i for i, (e_start, e_stop) in enumerate(transcript.exons)
if genome_start >= e_start and genome_start <= e_stop)
genome_stop = transcript.genome_pos(stop-1)
stop_exon = next(i for i, (e_start, e_stop) in enumerate(transcript.exons)
if genome_stop >= e_start and genome_stop <= e_stop)
if start_exon == stop_exon:
return "%iM" % (stop-start)
tl = 0
# add the first overlap match
skipped_bases = sum(calc_len(e) for e in transcript.exons[:start_exon+1])
cigar.append("%iM" % (skipped_bases-start))
tl += skipped_bases-start
# add the first overlap intron
cigar.append("%iN" % calc_len(transcript.introns[start_exon]))
# add the internal exon and intron matches
for i in xrange(start_exon+1, stop_exon):
cigar.append("%iM" % calc_len(transcript.exons[i]))
cigar.append("%iN" % calc_len(transcript.introns[i]))
tl += calc_len(transcript.exons[i])
# add the last overlap match
skipped_bases = sum(e[1]-e[0]+1 for e in transcript.exons[:stop_exon])
cigar.append("%iM" % (stop-skipped_bases))
tl += stop - skipped_bases
assert tl == (stop-start)
return "".join(cigar)
def build_sam_line( transcript, read_len, offset, read_identifier, quality_string ):
"""build a single ended SAM formatted line with given inforamtion
"""
# set flag to indicate strandedness of read matching that of the transcript
flag = 0
if transcript.strand == '+': flag += 16
# adjust start position to correct genomic position
start = transcript.genome_pos(offset)
# set cigar string corresponding to transcript and read offset
cigar = get_cigar( transcript, offset, (offset + read_len) )
# calculate insert size by difference of genomic offset and genomic offset+read_len
insert_size = transcript.genome_pos(offset+read_len) - transcript.genome_pos(offset)
# get slice of seq from transcript
seq = ( transcript.seq[ offset : (offset + read_len) ]
if transcript.seq != None else '*' )
# initialize sam lines with read identifiers and then add appropriate fields
sam_line = '\t'.join( (
read_identifier, str( flag ), fix_chr_name(transcript.chrm),
str(start+1),
'255', cigar, "*", '0', str( insert_size ), seq, quality_string,
"NM:i:0", "NH:i:1" ) ) + "\n"
return sam_line
def build_sam_lines( transcript, read_len, frag_len, offset,
read_identifier, read_quals ):
"""build paired end SAM formatted lines with given information
"""
# set ordered quals and reverse the qualities for the read on the negative strand
ordered_quals = read_quals
# determine whether read1 should be the 5' read or vice versa
# and initialize attributes that are specific to a read number
# instead of 5' or 3' attribute
if transcript.strand == '+':
up_strm_read, dn_strm_read = (0, 1)
flag = [ 99, 147 ]
ordered_quals[1] = ordered_quals[1][::-1]
else:
up_strm_read, dn_strm_read = (1, 0)
flag = [ 83, 163 ]
ordered_quals[0] = ordered_quals[0][::-1]
# get slice of seq from transcript
seq = ['*', '*']
if transcript.seq != None:
seq[ up_strm_read ] = transcript.seq[offset:(offset + read_len)]
seq[ dn_strm_read ] = transcript.seq[
(offset + frag_len - read_len):(offset + frag_len)]
# adjust five and three prime read start positions to correct genomic positions
start = [ transcript.start, transcript.start ]
start[ up_strm_read ] = transcript.genome_pos(offset)
start[ dn_strm_read ] = transcript.genome_pos(offset + frag_len - read_len)
# set cigar string for five and three prime reads
cigar = [ None, None ]
cigar[ up_strm_read ] = get_cigar( transcript, offset, (offset+read_len) )
cigar[ dn_strm_read ] = get_cigar(
transcript, (offset+frag_len-read_len), (offset + frag_len))
# calculate insert size by difference of the mapped start and end
insert_size = (
transcript.genome_pos(offset+read_len) - transcript.genome_pos(offset))
insert_size = [ insert_size, insert_size ]
insert_size[ dn_strm_read ] *= -1
# initialize sam lines with read identifiers and then add appropriate fields
sam_lines = [ read_identifier + '\t', read_identifier + '\t' ]
for i in (0,1):
other_i = 0 if i else 1
sam_lines[i] += '\t'.join( (
str( flag[i] ), fix_chr_name(transcript.chrm),
str( start[i]+1 ),"255",
cigar[i], "=", str( start[other_i]+1 ), str( insert_size[i] ),
seq[i], ordered_quals[i], "NM:i:0", "NH:i:1" ) ) + "\n"
return sam_lines
def write_fastq_lines( fp1, fp2, transcript, read_len, frag_len, offset,
read_identifier ):
"""STUB for writing fastq lines to running through alignment pipeline
"""
pass
def simulate_reads( genes, fl_dist, fasta, quals, num_frags, single_end,
full_fragment, read_len, assay='RNAseq'):
"""write a SAM format file with the specified options
"""
# global variable that stores the current read number, we use this to
# generate a unique id for each read.
global curr_read_index
curr_read_index = 1
def sample_fragment_length( fl_dist, transcript ):
"""Choose a random fragment length from fl_dist
"""
if assay == 'CAGE':
return read_len
# if the fl_dist is constant
if isinstance( fl_dist, int ):
assert fl_dist <= transcript.calc_length(), 'Transcript which ' + \
'cannot contain a valid fragment was included in transcripts.'
return fl_dist
# Choose a valid fragment length from the distribution
while True:
fl_index = fl_dist.fl_density_cumsum.searchsorted( random() ) - 1
fl = fl_index + fl_dist.fl_min
# if fragment_length is valid return it
if fl <= transcript.calc_length():
return fl
assert False
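# Note on the sampling above: searchsorted against the cumulative density
# is inverse-CDF sampling -- a uniform draw on [0, 1) lands in a
# fragment-length bin with probability equal to that bin's density mass.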
def sample_read_offset( transcript, fl ):
# calculate maximum offset
max_offset = transcript.calc_length() - fl
if assay in ('CAGE', 'RAMPAGE'):
if transcript.strand == '+': return 0
else: return max_offset
elif assay == 'RNAseq':
return int( random() * max_offset )
elif assay == 'PASseq':
if transcript.strand == '-': return 0
else: return max_offset
def get_random_qual_score( read_len ):
# if no quality score were provided
if not quals:
return DEFAULT_QUALITY_SCORE * read_len
# else return quality string from input quality file
# scores are concatenated to match read_len if necessary
else:
qual_string = ''
while len( qual_string ) < read_len:
qual_string += str( quals[ int(random() * len(quals) ) ] )
return qual_string[0:read_len]
def get_random_read_pos( transcript ):
while True:
# find a valid fragment length
fl = sample_fragment_length( fl_dist, transcript )
if (fl >= read_len) or full_fragment: break
# find a valid random read start position
offset = sample_read_offset( transcript, fl )
# get a unique string for this fragment
global curr_read_index
read_identifier = 'SIM:%015d:%s' % (curr_read_index, transcript.id)
curr_read_index += 1
return fl, offset, read_identifier
def build_random_sam_line( transcript, read_len ):
"""build a random single ended sam line
"""
fl, offset, read_identifier = get_random_read_pos( transcript )
if full_fragment:
read_len = fl
# get a random quality scores
if transcript.seq == None:
read_qual = '*'
else:
read_qual = get_random_qual_score( read_len )
# build the sam lines
return build_sam_line(
transcript, read_len, offset, read_identifier, read_qual )
def build_random_sam_lines( transcript, read_len ):
"""build random paired end sam lines
"""
fl, offset, read_identifier = get_random_read_pos( transcript )
# adjust read length so that paired end read covers the entire fragment
if full_fragment:
read_len = int( math.ceil( fl / float(2) ) )
# get two random quality scores
if transcript.seq == None:
read_quals = ['*', '*']
else:
read_quals = [ get_random_qual_score( read_len ),
get_random_qual_score( read_len ) ]
sam_lines = build_sam_lines(
transcript, read_len, fl, offset, read_identifier, read_quals )
return sam_lines
def get_fl_min():
if isinstance( fl_dist, int ):
return fl_dist
else:
return fl_dist.fl_min
def calc_scale_factor(t):
if assay in ('RNAseq',):
length = t.calc_length()
if length < fl_dist.fl_min: return 0
fl_min, fl_max = fl_dist.fl_min, min(length, fl_dist.fl_max)
allowed_fl_lens = numpy.arange(fl_min, fl_max+1)
weights = fl_dist.fl_density[
fl_min-fl_dist.fl_min:fl_max-fl_dist.fl_min+1]
mean_fl_len = float((allowed_fl_lens*weights).sum())
return length - mean_fl_len
elif assay in ('CAGE', 'RAMPAGE', 'PASseq'):
return 1.0
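# A small worked example of the RNAseq scaling above (numbers illustrative):
# a 1000 bp transcript with a density-weighted mean fragment length of 150
# gets scale factor 1000 - 150 = 850, roughly the number of distinct
# placements a fragment has on it, so its weight becomes FPKM * 850 rather
# than the FPKM alone.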
# initialize the transcript objects, and calculate their relative weights
transcript_weights = []
transcripts = []
contig_lens = defaultdict(int)
min_transcript_length = get_fl_min()
for gene in genes:
contig_lens[fix_chr_name(gene.chrm)] = max(
gene.stop+1000, contig_lens[fix_chr_name(gene.chrm)])
for transcript in gene.transcripts:
if fasta != None:
transcript.seq = get_transcript_sequence(transcript, fasta)
else:
transcript.seq = None
if transcript.fpkm != None:
weight = transcript.fpkm*calc_scale_factor(transcript)
elif transcript.frac != None:
assert len(genes) == 1
weight = transcript.frac
else:
weight = 1./len(gene.transcripts)
#assert False, "Transcript has neither an FPKM nor a frac"
transcripts.append( transcript )
transcript_weights.append( weight )
#assert False
assert len( transcripts ) > 0, "No valid transcripts."
# normalize the transcript weights to be on 0,1
transcript_weights = numpy.array(transcript_weights, dtype=float)
transcript_weights = transcript_weights/transcript_weights.sum()
transcript_weights_cumsum = transcript_weights.cumsum()
# update the contig lens from the fasta file, if available
if fasta != None:
for name, length in zip(fasta.references, fasta.lengths):
if fix_chr_name(name) in contig_lens:
contig_lens[fix_chr_name(name)] = max(
    length, contig_lens[fix_chr_name(name)])
# create the output directory
bam_prefix = assay + ".sorted"
with tempfile.NamedTemporaryFile( mode='w+' ) as sam_fp:
# write out the header
for contig, contig_len in contig_lens.iteritems():
data = ["@SQ", "SN:%s" % contig, "LN:%i" % contig_len]
sam_fp.write("\t".join(data) + "\n")
while curr_read_index <= num_frags:
# pick a transcript to randomly take a read from. Note that they
# should be chosen in proportion to the *expected number of reads*,
# not their relative frequencies.
transcript_index = \
transcript_weights_cumsum.searchsorted( random(), side='left' )
transcript = transcripts[ transcript_index ]
if single_end:
sam_line_s = build_random_sam_line( transcript, read_len )
else:
sam_line_s = build_random_sam_lines( transcript, read_len )
sam_fp.writelines( sam_line_s )
# create sorted bam file and index it
sam_fp.flush()
#sam_fp.seek(0)
#print sam_fp.read()
call = 'samtools view -bS {} | samtools sort - {}'
os.system( call.format( sam_fp.name, bam_prefix ) )
os.system( 'samtools index {}.bam'.format( bam_prefix ) )
return
def build_objs( gtf_fp, fl_dist_const,
fl_dist_norm, full_fragment,
read_len, fasta_fn, qual_fn ):
genes = load_gtf( gtf_fp )
gtf_fp.close()
def build_normal_fl_dist( fl_mean, fl_sd ):
fl_min = max( 0, fl_mean - (fl_sd * NUM_NORM_SDS) )
fl_max = fl_mean + (fl_sd * NUM_NORM_SDS)
fl_dist = frag_len.build_normal_density( fl_min, fl_max, fl_mean, fl_sd )
return fl_dist
if fl_dist_norm:
fl_dist = build_normal_fl_dist( fl_dist_norm[0], fl_dist_norm[1] )
assert fl_dist.fl_max > read_len or full_fragment, \
'Invalid fragment length distribution and read length!!!'
else:
assert read_len < fl_dist_const or full_fragment, \
'Invalid read length and constant fragment length!!!'
fl_dist = fl_dist_const
if fasta_fn:
# create indexed fasta file handle object with pysam
fasta = pysam.Fastafile( fasta_fn )
else:
fasta = None
# if quals_fn is None, quals remains empty and reads will default to
# all base qualities of DEFAULT_BASE_QUALITY_SCORE
quals = []
if qual_fn:
        with open( qual_fn ) as quals_fp:
for line in quals_fp:
quals.append( line.strip() )
quals = numpy.array( quals )
return genes, fl_dist, fasta, quals
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(\
        description='Produce simulated reads in a perfectly aligned BAM file.' )
# gtf is the only required argument
parser.add_argument( 'gtf', type=file, \
help='GTF file from which to produce simulated reads ' + \
                         '(Note: Only the first transcript from this file will ' + \
'be simulated)' )
parser.add_argument(
'--assay', choices=['RNAseq', 'RAMPAGE', 'CAGE', 'PASseq'],
default='RNAseq', help='Which assay type to simulate from' )
# fragment length distribution options
parser.add_argument( '--fl-dist-const', type=int, default=DEFAULT_FRAG_LENGTH, \
help='Constant length fragments. (default: ' + \
'%(default)s)' )
parser.add_argument( '--fl-dist-norm', \
help='Mean and standard deviation (format "mn:sd") ' + \
'used to create normally distributed fragment lengths.' )
    # files providing quality and sequence information
parser.add_argument( '--fasta', '-f', \
help='Fasta file from which to create reads ' + \
'(default: all sequences are "' + DEFAULT_BASE + \
'" * length of sequence)' )
parser.add_argument( '--quality', '-q', \
help='Flat file containing one FASTQ quality score ' + \
'per line, created with get_quals.sh. (default: ' + \
'quality strings are "' + str(DEFAULT_QUALITY_SCORE) + \
'" * length of sequence.)' )
# type and number of fragments requested
parser.add_argument(
'--num-frags', '-n', type=int, default=1000,
        help='Total number of fragments to create across all transcripts')
parser.add_argument('--single-end', action='store_true', default=False,
help='Produce single-end reads.' )
parser.add_argument('--paired-end', dest='single_end', action='store_false',
help='Produce paired-end reads. (default)' )
# XXX not sure if this works
#parser.add_argument(
# '--full-fragment', action='store_true', default=False,
# help='Produce reads spanning the entire fragment.')
parser.add_argument( '--read-len', '-r', type=int, default=DEFAULT_READ_LENGTH, \
help='Length of reads to produce in base pairs ' + \
'(default: %(default)s)' )
# output options
parser.add_argument( '--out_prefix', '-o', default='simulated_reads', \
help='Prefix for output FASTQ/BAM file ' + \
'(default: %(default)s)' )
parser.add_argument( '--verbose', '-v', default=False, action='store_true', \
help='Print status information.' )
args = parser.parse_args()
# set to false, but we may want to bring this option back
args.full_fragment = False
global VERBOSE
VERBOSE = args.verbose
if args.assay == 'CAGE':
args.read_len = 28
args.single_end = True
# parse normal distribution argument
if args.fl_dist_norm:
try:
mean, sd = args.fl_dist_norm.split( ':' )
args.fl_dist_norm = [ int( mean ), int( sd ) ]
except ValueError:
args.fl_dist_norm = None
print >> sys.stderr, \
"WARNING: User input mean and sd are not formatted correctly.\n"+\
"\tUsing default values.\n"
return ( args.gtf, args.fl_dist_const, args.fl_dist_norm,
args.fasta, args.quality, args.num_frags,
args.single_end, args.full_fragment,
args.read_len, args.out_prefix, args.assay )
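# Example invocation (hypothetical script and file names; flags as defined in
# parse_arguments above):
#   python simulate_reads.py genes.gtf --assay RNAseq --fl-dist-norm 300:30 \
#       --fasta genome.fa --num-frags 10000 --out_prefix sim_out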
def main():
( gtf_fp, fl_dist_const, fl_dist_norm, fasta_fn, qual_fn,
num_frags, single_end, full_fragment, read_len, out_prefix, assay )\
= parse_arguments()
try: os.mkdir(out_prefix)
except OSError:
ofname = os.path.join(out_prefix, assay + '.sorted.bam')
if os.path.isfile(ofname):
raise OSError, "File '%s' already exists" % ofname
os.chdir(out_prefix)
genes, fl_dist, fasta, quals = build_objs(
gtf_fp, fl_dist_const,
fl_dist_norm, full_fragment, read_len,
fasta_fn, qual_fn )
"""
for gene in genes:
for t in gene.transcripts:
t.chrm = "chr" + t.chrm
print t.build_gtf_lines(gene.id, {})
assert False
"""
simulate_reads( genes, fl_dist, fasta, quals, num_frags, single_end,
full_fragment, read_len, assay=assay )
if __name__ == "__main__":
main()
| gpl-3.0 |
allenlavoie/tensorflow | tensorflow/contrib/estimator/python/estimator/head.py | 2 | 31202 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def multi_class_head(n_classes,
weight_column=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
name=None):
"""Creates a `_Head` for multi class classification.
Uses `sparse_softmax_cross_entropy` loss.
The head expects `logits` with shape `[D0, D1, ... DN, n_classes]`.
In many applications, the shape is `[batch_size, n_classes]`.
`labels` must be a dense `Tensor` with shape matching `logits`, namely
`[D0, D1, ... DN, 1]`. If `label_vocabulary` given, `labels` must be a string
`Tensor` with values from the vocabulary. If `label_vocabulary` is not given,
`labels` must be an integer `Tensor` with values specifying the class index.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
The loss is the weighted sum over the input dimensions. Namely, if the input
labels have shape `[batch_size, 1]`, the loss is the weighted sum over
`batch_size`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support integer `labels` with
shape `[D0, D1, ... DN, 1]`. Namely, the head applies `label_vocabulary` to
the input labels before passing them to `loss_fn`.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`binary_classification_head`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_vocabulary: A list or tuple of strings representing possible label
values. If it is not given, that means labels are already encoded as an
integer within [0, n_classes). If given, labels must be of string type and
have any value in `label_vocabulary`. Note that errors will be raised if
`label_vocabulary` is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`, namely
weighted sum of losses divided by batch size. See `tf.losses.Reduction`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for multi class classification.
Raises:
ValueError: if `n_classes`, `label_vocabulary` or `loss_reduction` is
invalid.
"""
return head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint:disable=protected-access
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
name=name)
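# Example (hypothetical usage sketch -- the estimator and feature columns are
# illustrative, not defined in this module):
#   my_head = multi_class_head(n_classes=3, name='my_multi_class_head')
#   estimator = tf.contrib.estimator.DNNEstimator(
#       head=my_head,
#       hidden_units=[1024, 512, 256],
#       feature_columns=my_feature_columns)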
def binary_classification_head(
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
name=None):
"""Creates a `_Head` for single label binary classification.
This head uses `sigmoid_cross_entropy_with_logits` loss.
The head expects `logits` with shape `[D0, D1, ... DN, 1]`.
In many applications, the shape is `[batch_size, 1]`.
`labels` must be a dense `Tensor` with shape matching `logits`, namely
`[D0, D1, ... DN, 1]`. If `label_vocabulary` given, `labels` must be a string
`Tensor` with values from the vocabulary. If `label_vocabulary` is not given,
`labels` must be float `Tensor` with values in the interval `[0, 1]`.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
The loss is the weighted sum over the input dimensions. Namely, if the input
labels have shape `[batch_size, 1]`, the loss is the weighted sum over
`batch_size`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support float `labels` with
shape `[D0, D1, ... DN, 1]`. Namely, the head applies `label_vocabulary` to
the input labels before passing them to `loss_fn`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
thresholds: Iterable of floats in the range `(0, 1)`. For binary
classification metrics such as precision and recall, an eval metric is
generated for each threshold value. This threshold is applied to the
logistic values to determine the binary classification (i.e., above the
      threshold is `true`, below is `false`).
label_vocabulary: A list or tuple of strings representing possible label
values. If it is not given, labels must be float with values within
[0, 1]. If given, labels must be string type and have any value in
`label_vocabulary`. Note that errors will be raised if `label_vocabulary`
is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`, namely
weighted sum of losses divided by batch size. See `tf.losses.Reduction`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for binary classification.
Raises:
ValueError: If `thresholds` contains a value outside of `(0, 1)`.
ValueError: If `loss_reduction` is invalid.
"""
return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint:disable=protected-access
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
name=name)
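# Example (hypothetical sketch): with string labels, the vocabulary maps them
# to {0, 1}, and precision/recall metrics are emitted for each threshold.
#   head = binary_classification_head(
#       label_vocabulary=['negative', 'positive'],
#       thresholds=[0.25, 0.5, 0.75])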
def regression_head(weight_column=None,
label_dimension=1,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
inverse_link_fn=None,
name=None):
"""Creates a `_Head` for regression using the `mean_squared_error` loss.
The loss is the weighted sum over all input dimensions. Namely, if the input
labels have shape `[batch_size, label_dimension]`, the loss is the weighted
sum over both `batch_size` and `label_dimension`.
The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
In many applications, the shape is `[batch_size, label_dimension]`.
The `labels` shape must match `logits`, namely
`[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
`[D0, D1, ... DN]` is also supported.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
`[D0, D1, ... DN, label_dimension]`.
Supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, label_dimension]`.
Also supports custom `inverse_link_fn`, also known as 'mean function'.
`inverse_link_fn` takes `logits` as argument and returns predicted values.
This function is the inverse of the link function defined in
https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function
Namely, for poisson regression, set `inverse_link_fn=tf.exp`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch and label dimension. Defaults to
`SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
`batch size * label_dimension`. See `tf.losses.Reduction`.
loss_fn: Optional loss function. Defaults to `mean_squared_error`.
inverse_link_fn: Optional inverse link function, also known as 'mean
function'. Defaults to identity.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for linear regression.
Raises:
ValueError: If `label_dimension` or `loss_reduction` is invalid.
"""
return head_lib._regression_head_with_mean_squared_error_loss( # pylint:disable=protected-access
weight_column=weight_column,
label_dimension=label_dimension,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
inverse_link_fn=inverse_link_fn,
name=name)
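# Example (hypothetical sketch): a log-link regression head, i.e. predictions
# are exp(logits), matching the poisson example in the docstring above.
#   head = regression_head(
#       label_dimension=1,
#       inverse_link_fn=tf.exp)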
def poisson_regression_head(
weight_column=None,
label_dimension=1,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
compute_full_loss=True,
name=None):
"""Creates a `_Head` for poisson regression using `tf.nn.log_poisson_loss`.
The loss is the weighted sum over all input dimensions. Namely, if the input
labels have shape `[batch_size, label_dimension]`, the loss is the weighted
sum over both `batch_size` and `label_dimension`.
The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
In many applications, the shape is `[batch_size, label_dimension]`.
The `labels` shape must match `logits`, namely
`[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
`[D0, D1, ... DN]` is also supported.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
`[D0, D1, ... DN, label_dimension]`.
This is implemented as a generalized linear model, see
https://en.wikipedia.org/wiki/Generalized_linear_model.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch and label dimension. Defaults to
`SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
`batch size * label_dimension`. See `tf.losses.Reduction`.
compute_full_loss: Whether to include the constant `log(z!)` term in
computing the poisson loss. See `tf.nn.log_poisson_loss` for the full
documentation.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for poisson regression.
Raises:
ValueError: If `label_dimension` or `loss_reduction` is invalid.
"""
def _poisson_loss(labels, logits):
return nn.log_poisson_loss(
targets=labels, log_input=logits, compute_full_loss=compute_full_loss)
return head_lib._regression_head_with_mean_squared_error_loss( # pylint:disable=protected-access
weight_column=weight_column,
label_dimension=label_dimension,
loss_reduction=loss_reduction,
loss_fn=_poisson_loss,
inverse_link_fn=math_ops.exp,
name=name)
def multi_label_head(n_classes,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
name=None):
"""Creates a `_Head` for multi-label classification.
Multi-label classification handles the case where each example may have zero
or more associated labels, from a discrete set. This is distinct from
`multi_class_head` which has exactly one label per example.
Uses `sigmoid_cross_entropy` loss average over classes and weighted sum over
the batch. Namely, if the input logits have shape `[batch_size, n_classes]`,
the loss is the average over `n_classes` and the weighted sum over
`batch_size`.
The head expects `logits` with shape `[D0, D1, ... DN, n_classes]`. In many
applications, the shape is `[batch_size, n_classes]`.
Labels can be:
* A multi-hot tensor of shape `[D0, D1, ... DN, n_classes]`
* An integer `SparseTensor` of class indices. The `dense_shape` must be
`[D0, D1, ... DN, ?]` and the values within `[0, n_classes)`.
* If `label_vocabulary` is given, a string `SparseTensor`. The `dense_shape`
must be `[D0, D1, ... DN, ?]` and the values within `label_vocabulary`.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support indicator `labels` with
shape `[D0, D1, ... DN, n_classes]`. Namely, the head applies
`label_vocabulary` to the input labels before passing them to `loss_fn`.
Args:
n_classes: Number of classes, must be greater than 1 (for 1 class, use
`binary_classification_head`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. Per-class weighting is
not supported.
thresholds: Iterable of floats in the range `(0, 1)`. Accuracy, precision
and recall metrics are evaluated for each threshold value. The threshold
is applied to the predicted probabilities, i.e. above the threshold is
`true`, below is `false`.
    label_vocabulary: A list of strings representing possible label values. If
      it is not given, labels must already be encoded as an integer within
      [0, n_classes) or as a multi-hot Tensor. If given, labels must be a
      string `SparseTensor` with values in `label_vocabulary`. Note that
      errors will be raised if `label_vocabulary` is not provided but labels
      are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`, namely
weighted sum of losses divided by batch size. See `tf.losses.Reduction`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for multi-label classification.
Raises:
ValueError: if `n_classes`, `thresholds`, `loss_reduction` or `loss_fn` is
invalid.
"""
thresholds = tuple(thresholds) if thresholds else tuple()
if n_classes is None or n_classes < 2:
raise ValueError(
'n_classes must be > 1 for multi-class classification. '
'Given: {}'.format(n_classes))
for threshold in thresholds:
if (threshold <= 0.0) or (threshold >= 1.0):
raise ValueError(
'thresholds must be in (0, 1) range. Given: {}'.format(threshold))
if label_vocabulary is not None:
if not isinstance(label_vocabulary, (list, tuple)):
raise ValueError(
'label_vocabulary must be a list or tuple. '
'Given type: {}'.format(type(label_vocabulary)))
if len(label_vocabulary) != n_classes:
raise ValueError(
'Length of label_vocabulary must be n_classes ({}). '
'Given: {}'.format(n_classes, len(label_vocabulary)))
if loss_fn:
head_lib._validate_loss_fn_args(loss_fn) # pylint:disable=protected-access
if (loss_reduction not in losses.Reduction.all() or
loss_reduction == losses.Reduction.NONE):
raise ValueError('Invalid loss_reduction: {}'.format(loss_reduction))
return _MultiLabelHead(
n_classes=n_classes, weight_column=weight_column, thresholds=thresholds,
label_vocabulary=label_vocabulary, loss_reduction=loss_reduction,
loss_fn=loss_fn, name=name)
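# Example (hypothetical sketch): three classes whose labels may arrive either
# as a multi-hot Tensor of shape [batch_size, 3] or as a string SparseTensor
# drawing values from the vocabulary.
#   head = multi_label_head(
#       n_classes=3,
#       label_vocabulary=['news', 'sports', 'weather'])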
class _MultiLabelHead(head_lib._Head): # pylint:disable=protected-access
"""`_Head` for multi-label classification."""
def __init__(self,
n_classes,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
name=None):
self._n_classes = n_classes
self._weight_column = weight_column
self._thresholds = thresholds
self._label_vocabulary = label_vocabulary
self._loss_reduction = loss_reduction
self._loss_fn = loss_fn
self._name = name
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return self._n_classes
def _process_labels(self, labels):
if labels is None:
raise ValueError(
'You must provide a labels Tensor. Given: None. '
'Suggested troubleshooting steps: Check that your data contain '
'your label feature. Check that your input_fn properly parses and '
'returns labels.')
if isinstance(labels, sparse_tensor.SparseTensor):
if labels.dtype == dtypes.string:
label_ids_values = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels.values)
label_ids = sparse_tensor.SparseTensor(
indices=labels.indices,
values=label_ids_values,
dense_shape=labels.dense_shape)
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
else:
err_msg = (
r'labels must be an integer SparseTensor with values in '
r'[0, {})'.format(self._n_classes))
assert_int = check_ops.assert_integer(
labels.values, message=err_msg)
assert_less = check_ops.assert_less(
labels.values,
ops.convert_to_tensor(self._n_classes, dtype=labels.dtype),
message=err_msg)
assert_greater = check_ops.assert_non_negative(
labels.values, message=err_msg)
with ops.control_dependencies(
[assert_int, assert_less, assert_greater]):
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(labels, self._n_classes))
err_msg = (
r'labels must be an integer indicator Tensor with values in [0, 1]')
return head_lib._assert_range(labels, 2, message=err_msg) # pylint:disable=protected-access,
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode # Unused for this head.
logits = ops.convert_to_tensor(logits)
processed_labels = self._process_labels(labels)
processed_labels = head_lib._check_dense_labels_match_logits_and_reshape( # pylint:disable=protected-access
labels=processed_labels, logits=logits,
expected_labels_dimension=self.logits_dimension)
if self._loss_fn:
unweighted_loss = head_lib._call_loss_fn( # pylint:disable=protected-access
loss_fn=self._loss_fn, labels=processed_labels, logits=logits,
features=features, expected_loss_dim=1)
else:
unweighted_loss = losses.sigmoid_cross_entropy(
multi_class_labels=processed_labels, logits=logits,
reduction=losses.Reduction.NONE)
# Averages loss over classes.
unweighted_loss = math_ops.reduce_mean(
unweighted_loss, axis=-1, keep_dims=True)
weights = head_lib._get_weights_and_check_match_logits( # pylint:disable=protected-access,
features=features, weight_column=self._weight_column, logits=logits)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=self._loss_reduction)
return head_lib.LossSpec(
training_loss=training_loss,
unreduced_loss=unweighted_loss,
weights=weights,
processed_labels=processed_labels)
def create_estimator_spec(
self, features, mode, logits, labels=None, optimizer=None,
train_op_fn=None, regularization_losses=None):
"""Returns an `EstimatorSpec`.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
For many applications, the shape is `[batch_size, n_classes]`.
labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
`dense_shape` `[D0, D1, ... DN, ?]`. `labels` is required argument when
`mode` equals `TRAIN` or `EVAL`.
optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
updates variables and increments `global_step`.
train_op_fn: Function that takes a scalar loss `Tensor` and returns
`train_op`. Used if `optimizer` is `None`.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses. These losses are
usually expressed as a batch average, so for best results users need to
set `loss_reduction=SUM_OVER_BATCH_SIZE` or
`loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
avoid scaling errors.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
mode, or if both are set.
"""
with ops.name_scope(self._name, 'head'):
logits = head_lib._check_logits_final_dim(logits, self.logits_dimension) # pylint:disable=protected-access
# Predict.
pred_keys = prediction_keys.PredictionKeys
with ops.name_scope(None, 'predictions', (logits,)):
probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
predictions = {
pred_keys.LOGITS: logits,
pred_keys.PROBABILITIES: probabilities,
}
if mode == model_fn.ModeKeys.PREDICT:
classifier_output = head_lib._classification_output( # pylint:disable=protected-access
scores=probabilities, n_classes=self._n_classes,
label_vocabulary=self._label_vocabulary)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
_DEFAULT_SERVING_KEY: classifier_output,
head_lib._CLASSIFY_SERVING_KEY: classifier_output, # pylint:disable=protected-access
head_lib._PREDICT_SERVING_KEY: ( # pylint:disable=protected-access
export_output.PredictOutput(predictions))
})
(training_loss, unreduced_loss, weights,
processed_labels) = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
if regularization_losses:
regularization_loss = math_ops.add_n(regularization_losses)
regularized_training_loss = math_ops.add_n(
[training_loss, regularization_loss])
else:
regularization_loss = None
regularized_training_loss = training_loss
# Eval.
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=regularized_training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=processed_labels,
probabilities=probabilities,
weights=weights,
unreduced_loss=unreduced_loss,
regularization_loss=regularization_loss))
# Train.
if optimizer is not None:
if train_op_fn is not None:
raise ValueError('train_op_fn and optimizer cannot both be set.')
train_op = optimizer.minimize(
regularized_training_loss,
global_step=training_util.get_global_step())
elif train_op_fn is not None:
train_op = train_op_fn(regularized_training_loss)
else:
raise ValueError('train_op_fn and optimizer cannot both be None.')
# Only summarize mean_loss for SUM reduction to preserve backwards
# compatibility. Otherwise skip it to avoid unnecessary computation.
if self._loss_reduction == losses.Reduction.SUM:
example_weight_sum = math_ops.reduce_sum(
weights * array_ops.ones_like(unreduced_loss))
mean_loss = training_loss / example_weight_sum
else:
mean_loss = None
with ops.name_scope(''):
keys = metric_keys.MetricKeys
summary.scalar(
head_lib._summary_key(self._name, keys.LOSS), # pylint:disable=protected-access
regularized_training_loss)
if mean_loss is not None:
summary.scalar(
head_lib._summary_key(self._name, keys.LOSS_MEAN), # pylint:disable=protected-access
mean_loss)
if regularization_loss is not None:
summary.scalar(
head_lib._summary_key(self._name, keys.LOSS_REGULARIZATION), # pylint:disable=protected-access
regularization_loss)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=regularized_training_loss,
train_op=train_op)
def _eval_metric_ops(
self, labels, probabilities, weights, unreduced_loss,
regularization_loss):
"""Returns a dict of metrics for eval_metric_ops."""
with ops.name_scope(
None, 'metrics',
[labels, probabilities, weights, unreduced_loss, regularization_loss]):
keys = metric_keys.MetricKeys
metric_ops = {
# Estimator already adds a metric for loss.
head_lib._summary_key(self._name, keys.LOSS_MEAN): # pylint:disable=protected-access
metrics_lib.mean(
values=unreduced_loss,
weights=weights,
name=keys.LOSS_MEAN),
head_lib._summary_key(self._name, keys.AUC): # pylint:disable=protected-access
metrics_lib.auc(labels=labels, predictions=probabilities,
weights=weights, name=keys.AUC),
head_lib._summary_key(self._name, keys.AUC_PR): # pylint:disable=protected-access
metrics_lib.auc(labels=labels, predictions=probabilities,
weights=weights, curve='PR',
name=keys.AUC_PR),
}
if regularization_loss is not None:
loss_regularization_key = head_lib._summary_key( # pylint:disable=protected-access
self._name, keys.LOSS_REGULARIZATION)
metric_ops[loss_regularization_key] = (
metrics_lib.mean(
values=regularization_loss,
name=keys.LOSS_REGULARIZATION))
for threshold in self._thresholds:
accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
metric_ops[head_lib._summary_key(self._name, accuracy_key)] = ( # pylint:disable=protected-access
head_lib._accuracy_at_threshold( # pylint:disable=protected-access
labels=labels,
predictions=probabilities,
weights=weights,
threshold=threshold,
name=accuracy_key))
# Precision for positive examples.
precision_key = keys.PRECISION_AT_THRESHOLD % threshold
metric_ops[head_lib._summary_key(self._name, precision_key)] = ( # pylint:disable=protected-access
head_lib._precision_at_threshold( # pylint:disable=protected-access
labels=labels,
predictions=probabilities,
weights=weights,
threshold=threshold,
name=precision_key))
# Recall for positive examples.
recall_key = keys.RECALL_AT_THRESHOLD % threshold
metric_ops[head_lib._summary_key(self._name, recall_key)] = ( # pylint:disable=protected-access
head_lib._recall_at_threshold( # pylint:disable=protected-access
labels=labels,
predictions=probabilities,
weights=weights,
threshold=threshold,
name=recall_key))
return metric_ops
| apache-2.0 |
michaelhkw/incubator-impala | tests/query_test/test_query_opts.py | 3 | 2932 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Tests for exercising query options that can be set in various ways.
# TODO: Add custom cluster tests for process default_query_options, but we need
# to make it easier to handle startup failures (right now it waits 60sec to
# timeout).
from TCLIService import TCLIService
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_exec_option_dimension
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
class TestQueryOptions(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestQueryOptions, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'text')
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0]))
def test_set_invalid_query_option(self, vector):
ex = self.execute_query_expect_failure(self.client, "select 1", {'foo':'bar'})
assert "invalid query option: foo" in str(ex).lower()
class TestQueryOptionsHS2(HS2TestSuite):
@classmethod
def add_test_dimensions(cls):
    super(TestQueryOptionsHS2, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'text')
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0]))
@needs_session()
def test_set_invalid_query_option(self):
"""Tests that GetOperationStatus returns a valid result for a running query"""
execute_statement_req = TCLIService.TExecuteStatementReq()
execute_statement_req.sessionHandle = self.session_handle
execute_statement_req.confOverlay = {"foo":"bar"}
execute_statement_req.statement = "select 1"
execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
TestQueryOptionsHS2.check_response(execute_statement_resp,
TCLIService.TStatusCode.ERROR_STATUS, "Invalid query option: foo")
| apache-2.0 |
Netflix-Skunkworks/iep-apps | atlas-slotting/src/scripts/lift-data.py | 1 | 4221 | #!/usr/bin/env python3
# Copyright 2014-2019 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gzip
import json
import pprint
from argparse import Namespace
from datetime import datetime
from typing import Dict, List
import boto3
import requests
import sys
from boto3.dynamodb.types import Binary
from botocore.exceptions import ClientError, ProfileNotFound
def parse_args() -> Namespace:
parser = argparse.ArgumentParser(description='Lift slotting data from Edda into DynamoDB')
parser.add_argument('--profile', type=str, required=True,
help='AWS credentials profile used to write to the Atlas Slotting DynamoDB table')
parser.add_argument('--region', type=str, nargs='+', required=True,
choices=['eu-west-1', 'us-east-1', 'us-west-1', 'us-west-2'],
help='List of AWS regions where data will be lifted from Edda into DynamoDB')
parser.add_argument('--edda_name', type=str, required=True,
help='Edda DNS name, with a region placeholder, where data will be read')
parser.add_argument('--slotting_table', type=str, required=True,
help='Atlas Slotting DynamoDB table name, where data will be written')
parser.add_argument('--app_name', type=str, nargs='+', required=True,
help='List of application names that will be lifted')
parser.add_argument('--dryrun', action='store_true', required=False, default=False,
help='Enable dryrun mode, to preview changes')
return parser.parse_args()
def get_edda_data(args: Namespace, region: str) -> List[Dict]:
url = f'http://{args.edda_name.format(region)}/api/v2/group/autoScalingGroups;_expand'
r = requests.get(url)
if not r.ok:
print(f'ERROR: Failed to load Edda data from {url}')
sys.exit(1)
else:
return [asg for asg in r.json() if asg['name'].split('-')[0] in args.app_name]
def get_ddb_table(args: Namespace, region: str):
try:
session = boto3.session.Session(profile_name=args.profile)
except ProfileNotFound:
print(f'ERROR: AWS profile {args.profile} does not exist')
sys.exit(1)
dynamodb = session.resource('dynamodb', region_name=region)
table = dynamodb.Table(args.slotting_table)
try:
table.table_status
except ClientError as e:
code = e.response['Error']['Code']
if code == 'ExpiredTokenException':
print(f'ERROR: Security token in AWS profile {args.profile} has expired')
elif code == 'ResourceNotFoundException':
print(f'ERROR: Table {args.slotting_table} does not exist in {region}')
else:
pprint.pprint(e.response)
sys.exit(1)
return table
def lift_data(args: Namespace, region: str):
asgs = get_edda_data(args, region)
table = get_ddb_table(args, region)
for asg in asgs:
item = {
'name': asg['name'],
'active': True,
'data': Binary(gzip.compress(bytes(json.dumps(asg), encoding='utf-8'))),
'timestamp': int(datetime.utcnow().timestamp() * 1000)
}
if args.dryrun:
print(f'DRYRUN: PUT {asg["name"]}')
else:
print(f'PUT {asg["name"]}')
table.put_item(Item=item)
def main():
args = parse_args()
print('==== config ====')
print(f'AWS Profile: {args.profile}')
print(f'Source Edda: {args.edda_name}')
print(f'Destination Table: {args.slotting_table}')
for region in args.region:
print(f'==== {region} ====')
lift_data(args, region)
if __name__ == "__main__":
main()
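# Example invocation (hypothetical profile, DNS, and table names; note the
# region placeholder "{}" in --edda_name, filled in per region at runtime):
#   ./lift-data.py --profile atlas-rw --region us-east-1 us-west-2 \
#       --edda_name edda-main.{}.example.com --slotting_table atlas.slotting \
#       --app_name atlas_app --dryrun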
| apache-2.0 |
xively/node-red-nodes | hardware/Pibrella/nrgpio.py | 9 | 7471 | #!/usr/bin/python
#
# Copyright 2014,2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import library functions we need
import RPi.GPIO as GPIO
import struct
import sys
import os
import subprocess
from time import sleep
bounce = 25
if sys.version_info >= (3,0):
print("Sorry - currently only configured to work with python 2.x")
sys.exit(1)
if len(sys.argv) > 2:
cmd = sys.argv[1].lower()
pin = int(sys.argv[2])
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
if cmd == "pwm":
#print "Initialised pin "+str(pin)+" to PWM"
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, 100)
p.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
p.ChangeDutyCycle(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print "bad data: "+data
elif cmd == "buzz":
#print "Initialised pin "+str(pin)+" to Buzz"
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, 100)
p.stop()
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
elif float(data) == 0:
p.stop()
else:
p.start(50)
p.ChangeFrequency(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print "bad data: "+data
elif cmd == "out":
#print "Initialised pin "+str(pin)+" to OUT"
GPIO.setup(pin,GPIO.OUT)
if len(sys.argv) == 4:
GPIO.output(pin,int(sys.argv[3]))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except:
data = 0
if data != 0:
data = 1
GPIO.output(pin,data)
elif cmd == "in":
#print "Initialised pin "+str(pin)+" to IN"
bounce = int(sys.argv[4])
def handle_callback(chan):
            sleep(bounce/1000.0)
print GPIO.input(chan)
if sys.argv[3].lower() == "up":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_UP)
elif sys.argv[3].lower() == "down":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_DOWN)
else:
GPIO.setup(pin,GPIO.IN)
print GPIO.input(pin)
GPIO.add_event_detect(pin, GPIO.BOTH, callback=handle_callback, bouncetime=bounce)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
elif cmd == "byte":
#print "Initialised BYTE mode - "+str(pin)+
        pins = [7,11,13,12,15,16,18,22]
        GPIO.setup(pins,GPIO.OUT)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
for bit in range(8):
if pin == 1:
mask = 1 << (7 - bit)
else:
mask = 1 << bit
            GPIO.output(pins[bit], data & mask)
elif cmd == "borg":
#print "Initialised BORG mode - "+str(pin)+
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
r = GPIO.PWM(11, 100)
g = GPIO.PWM(13, 100)
b = GPIO.PWM(15, 100)
r.start(0)
g.start(0)
b.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
c = data.split(",")
r.ChangeDutyCycle(float(c[0]))
g.ChangeDutyCycle(float(c[1]))
b.ChangeDutyCycle(float(c[2]))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
elif cmd == "mouse": # catch mice button events
file = open( "/dev/input/mice", "rb" )
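    # /dev/input/mice emits 3-byte PS/2-style packets; byte 0 carries the
    # button state in its low bits (0x1 left, 0x2 right, 0x4 middle), which is
    # why the pin argument is masked to 0x07 and ANDed against ord(buf[0])
    # below.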
oldbutt = 0
def getMouseEvent():
global oldbutt
global pin
buf = file.read(3)
pin = pin & 0x07
button = ord( buf[0] ) & pin # mask out just the required button(s)
if button != oldbutt: # only send if changed
oldbutt = button
print button
while True:
try:
getMouseEvent()
except:
file.close()
sys.exit(0)
elif cmd == "kbd": # catch keyboard button events
try:
while not os.path.isdir("/dev/input/by-path"):
                sleep(10)
infile = subprocess.check_output("ls /dev/input/by-path/ | grep -m 1 'kbd'", shell=True).strip()
infile_path = "/dev/input/by-path/" + infile
EVENT_SIZE = struct.calcsize('llHHI')
file = open(infile_path, "rb")
event = file.read(EVENT_SIZE)
while event:
(tv_sec, tv_usec, type, code, value) = struct.unpack('llHHI', event)
#if type != 0 or code != 0 or value != 0:
if type == 1:
# type,code,value
print("%u,%u" % (code, value))
event = file.read(EVENT_SIZE)
print "0,0"
file.close()
sys.exit(0)
except:
file.close()
sys.exit(0)
elif len(sys.argv) > 1:
cmd = sys.argv[1].lower()
if cmd == "rev":
print GPIO.RPI_REVISION
elif cmd == "ver":
print GPIO.VERSION
elif cmd == "info":
print GPIO.RPI_INFO
else:
print "Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}"
print " only ver (gpio version) and info (board information) accept no pin parameter."
else:
print "Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}"
| apache-2.0 |
VirtueSecurity/aws-extender | BappModules/boto/roboto/awsqueryservice.py | 153 | 4453 | from __future__ import print_function
import os
import urlparse
import boto
import boto.connection
import boto.jsonresponse
import boto.exception
from boto.roboto import awsqueryrequest
class NoCredentialsError(boto.exception.BotoClientError):
def __init__(self):
s = 'Unable to find credentials'
super(NoCredentialsError, self).__init__(s)
class AWSQueryService(boto.connection.AWSQueryConnection):
Name = ''
Description = ''
APIVersion = ''
Authentication = 'sign-v2'
Path = '/'
Port = 443
Provider = 'aws'
EnvURL = 'AWS_URL'
Regions = []
def __init__(self, **args):
self.args = args
self.check_for_credential_file()
self.check_for_env_url()
if 'host' not in self.args:
if self.Regions:
region_name = self.args.get('region_name',
self.Regions[0]['name'])
for region in self.Regions:
if region['name'] == region_name:
self.args['host'] = region['endpoint']
if 'path' not in self.args:
self.args['path'] = self.Path
if 'port' not in self.args:
self.args['port'] = self.Port
try:
super(AWSQueryService, self).__init__(**self.args)
self.aws_response = None
except boto.exception.NoAuthHandlerFound:
raise NoCredentialsError()
def check_for_credential_file(self):
"""
Checks for the existence of an AWS credential file.
If the environment variable AWS_CREDENTIAL_FILE is
        set and points to a file, that file will be read and
        searched for credentials.
        Note that if credentials have been explicitly passed
into the class constructor, those values always take
precedence.
"""
if 'AWS_CREDENTIAL_FILE' in os.environ:
path = os.environ['AWS_CREDENTIAL_FILE']
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isfile(path):
fp = open(path)
lines = fp.readlines()
fp.close()
for line in lines:
if line[0] != '#':
if '=' in line:
name, value = line.split('=', 1)
if name.strip() == 'AWSAccessKeyId':
if 'aws_access_key_id' not in self.args:
value = value.strip()
self.args['aws_access_key_id'] = value
elif name.strip() == 'AWSSecretKey':
if 'aws_secret_access_key' not in self.args:
value = value.strip()
self.args['aws_secret_access_key'] = value
else:
print('Warning: unable to read AWS_CREDENTIAL_FILE')
def check_for_env_url(self):
"""
First checks to see if a url argument was explicitly passed
in. If so, that will be used. If not, it checks for the
        existence of the environment variable specified in self.EnvURL.
If this is set, it should contain a fully qualified URL to the
service you want to use.
Note that any values passed explicitly to the class constructor
will take precedence.
"""
url = self.args.get('url', None)
if url:
del self.args['url']
if not url and self.EnvURL in os.environ:
url = os.environ[self.EnvURL]
if url:
rslt = urlparse.urlparse(url)
if 'is_secure' not in self.args:
if rslt.scheme == 'https':
self.args['is_secure'] = True
else:
self.args['is_secure'] = False
host = rslt.netloc
port = None
l = host.split(':')
if len(l) > 1:
host = l[0]
port = int(l[1])
if 'host' not in self.args:
self.args['host'] = host
if port and 'port' not in self.args:
self.args['port'] = port
if rslt.path and 'path' not in self.args:
self.args['path'] = rslt.path
def _required_auth_capability(self):
return [self.Authentication]
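# Example (hypothetical service definition): concrete services subclass
# AWSQueryService and fill in the class attributes, e.g.:
#   class ExampleService(AWSQueryService):
#       Name = 'example'
#       APIVersion = '2011-01-01'
#       Regions = [{'name': 'us-east-1',
#                   'endpoint': 'example.us-east-1.amazonaws.com'}]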
| mit |
MartinHjelmare/home-assistant | tests/components/hassio/test_init.py | 5 | 12794 | """The tests for the hassio component."""
import asyncio
import os
from unittest.mock import patch, Mock
import pytest
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.setup import async_setup_component
from homeassistant.components.hassio import STORAGE_KEY
from homeassistant.components import frontend
from tests.common import mock_coro
MOCK_ENVIRON = {
'HASSIO': '127.0.0.1',
'HASSIO_TOKEN': 'abcdefgh',
}
@pytest.fixture(autouse=True)
def mock_all(aioclient_mock):
"""Mock all setup requests."""
aioclient_mock.post(
"http://127.0.0.1/homeassistant/options", json={'result': 'ok'})
aioclient_mock.get(
"http://127.0.0.1/supervisor/ping", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/supervisor/options", json={'result': 'ok'})
aioclient_mock.get(
"http://127.0.0.1/homeassistant/info", json={
'result': 'ok', 'data': {'last_version': '10.0'}})
aioclient_mock.get(
"http://127.0.0.1/ingress/panels", json={
'result': 'ok', 'data': {'panels': {}}})
@asyncio.coroutine
def test_setup_api_ping(hass, aioclient_mock):
"""Test setup with API ping."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {})
assert result
assert aioclient_mock.call_count == 4
assert hass.components.hassio.get_homeassistant_version() == "10.0"
assert hass.components.hassio.is_hassio()
async def test_setup_api_panel(hass, aioclient_mock):
"""Test setup with API ping."""
assert await async_setup_component(hass, 'frontend', {})
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {})
assert result
panels = hass.data[frontend.DATA_PANELS]
assert panels.get('hassio').to_response() == {
'component_name': 'custom',
'icon': 'hass:home-assistant',
'title': 'Hass.io',
'url_path': 'hassio',
'require_admin': True,
'config': {'_panel_custom': {'embed_iframe': True,
'js_url': '/api/hassio/app/entrypoint.js',
'name': 'hassio-main',
'trust_external': False}},
}
@asyncio.coroutine
def test_setup_api_push_api_data(hass, aioclient_mock):
"""Test setup with API push."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {
'http': {
'server_port': 9999
},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 9999
assert aioclient_mock.mock_calls[1][2]['watchdog']
@asyncio.coroutine
def test_setup_api_push_api_data_server_host(hass, aioclient_mock):
"""Test setup with API push with active server host."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {
'http': {
'server_port': 9999,
'server_host': "127.0.0.1"
},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 9999
assert not aioclient_mock.mock_calls[1][2]['watchdog']
async def test_setup_api_push_api_data_default(hass, aioclient_mock,
hass_storage):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {
'http': {},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 8123
refresh_token = aioclient_mock.mock_calls[1][2]['refresh_token']
hassio_user = await hass.auth.async_get_user(
hass_storage[STORAGE_KEY]['data']['hassio_user']
)
assert hassio_user is not None
assert hassio_user.system_generated
assert len(hassio_user.groups) == 1
assert hassio_user.groups[0].id == GROUP_ID_ADMIN
for token in hassio_user.refresh_tokens.values():
if token.token == refresh_token:
break
else:
assert False, 'refresh token not found'
async def test_setup_adds_admin_group_to_user(hass, aioclient_mock,
hass_storage):
"""Test setup with API push default data."""
# Create user without admin
user = await hass.auth.async_create_system_user('Hass.io')
assert not user.is_admin
await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
'data': {'hassio_user': user.id},
'key': STORAGE_KEY,
'version': 1
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {
'http': {},
'hassio': {}
})
assert result
assert user.is_admin
async def test_setup_api_existing_hassio_user(hass, aioclient_mock,
hass_storage):
"""Test setup with API push default data."""
user = await hass.auth.async_create_system_user('Hass.io test')
token = await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
'version': 1,
'data': {
'hassio_user': user.id
}
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {
'http': {},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 8123
assert aioclient_mock.mock_calls[1][2]['refresh_token'] == token.token
@asyncio.coroutine
def test_setup_core_push_timezone(hass, aioclient_mock):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {
'hassio': {},
'homeassistant': {
'time_zone': 'testzone',
},
})
assert result
assert aioclient_mock.call_count == 5
assert aioclient_mock.mock_calls[2][2]['timezone'] == "testzone"
@asyncio.coroutine
def test_setup_hassio_no_additional_data(hass, aioclient_mock):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON), \
patch.dict(os.environ, {'HASSIO_TOKEN': "123456"}):
result = yield from async_setup_component(hass, 'hassio', {
'hassio': {},
})
assert result
assert aioclient_mock.call_count == 4
assert aioclient_mock.mock_calls[-1][3]['X-Hassio-Key'] == "123456"
@asyncio.coroutine
def test_fail_setup_without_environ_var(hass):
"""Fail setup if no environ variable set."""
with patch.dict(os.environ, {}, clear=True):
result = yield from async_setup_component(hass, 'hassio', {})
assert not result
@asyncio.coroutine
def test_warn_when_cannot_connect(hass, caplog):
"""Fail warn when we cannot connect."""
with patch.dict(os.environ, MOCK_ENVIRON), \
patch('homeassistant.components.hassio.HassIO.is_connected',
Mock(return_value=mock_coro(None))):
result = yield from async_setup_component(hass, 'hassio', {})
assert result
assert hass.components.hassio.is_hassio()
assert "Not connected with Hass.io / system to busy!" in caplog.text
@asyncio.coroutine
def test_service_register(hassio_env, hass):
"""Check if service will be setup."""
assert (yield from async_setup_component(hass, 'hassio', {}))
assert hass.services.has_service('hassio', 'addon_start')
assert hass.services.has_service('hassio', 'addon_stop')
assert hass.services.has_service('hassio', 'addon_restart')
assert hass.services.has_service('hassio', 'addon_stdin')
assert hass.services.has_service('hassio', 'host_shutdown')
assert hass.services.has_service('hassio', 'host_reboot')
assert hass.services.has_service('hassio', 'host_reboot')
assert hass.services.has_service('hassio', 'snapshot_full')
assert hass.services.has_service('hassio', 'snapshot_partial')
assert hass.services.has_service('hassio', 'restore_full')
assert hass.services.has_service('hassio', 'restore_partial')
@asyncio.coroutine
def test_service_calls(hassio_env, hass, aioclient_mock):
"""Call service and check the API calls behind that."""
assert (yield from async_setup_component(hass, 'hassio', {}))
aioclient_mock.post(
"http://127.0.0.1/addons/test/start", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/addons/test/stop", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/addons/test/restart", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/addons/test/stdin", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/host/shutdown", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/host/reboot", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/new/full", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/new/partial", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/test/restore/full", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/test/restore/partial",
json={'result': 'ok'})
yield from hass.services.async_call(
'hassio', 'addon_start', {'addon': 'test'})
yield from hass.services.async_call(
'hassio', 'addon_stop', {'addon': 'test'})
yield from hass.services.async_call(
'hassio', 'addon_restart', {'addon': 'test'})
yield from hass.services.async_call(
'hassio', 'addon_stdin', {'addon': 'test', 'input': 'test'})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 6
assert aioclient_mock.mock_calls[-1][2] == 'test'
yield from hass.services.async_call('hassio', 'host_shutdown', {})
yield from hass.services.async_call('hassio', 'host_reboot', {})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 8
yield from hass.services.async_call('hassio', 'snapshot_full', {})
yield from hass.services.async_call('hassio', 'snapshot_partial', {
'addons': ['test'],
'folders': ['ssl'],
'password': "123456",
})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 10
assert aioclient_mock.mock_calls[-1][2] == {
'addons': ['test'], 'folders': ['ssl'], 'password': "123456"}
yield from hass.services.async_call('hassio', 'restore_full', {
'snapshot': 'test',
})
yield from hass.services.async_call('hassio', 'restore_partial', {
'snapshot': 'test',
'homeassistant': False,
'addons': ['test'],
'folders': ['ssl'],
'password': "123456",
})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 12
assert aioclient_mock.mock_calls[-1][2] == {
'addons': ['test'], 'folders': ['ssl'], 'homeassistant': False,
'password': "123456"
}
@asyncio.coroutine
def test_service_calls_core(hassio_env, hass, aioclient_mock):
"""Call core service and check the API calls behind that."""
assert (yield from async_setup_component(hass, 'hassio', {}))
aioclient_mock.post(
"http://127.0.0.1/homeassistant/restart", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/homeassistant/stop", json={'result': 'ok'})
yield from hass.services.async_call('homeassistant', 'stop')
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 3
yield from hass.services.async_call('homeassistant', 'check_config')
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 3
with patch(
'homeassistant.config.async_check_ha_config_file',
return_value=mock_coro()
) as mock_check_config:
yield from hass.services.async_call('homeassistant', 'restart')
yield from hass.async_block_till_done()
assert mock_check_config.called
assert aioclient_mock.call_count == 4
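# A note on the mock indexing used throughout these tests: with Home
# Assistant's aioclient_mock fixture, each mock_calls entry is (as far as I
# can tell) a (method, url, data, headers) tuple, which is why request
# payloads are asserted via index [2] and request headers via index [3].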
| apache-2.0 |
pranjan77/kb_go_express | lib/kb_go_express/baseclient.py | 150 | 11073 | ############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
import json as _json
import requests as _requests
import random as _random
import os as _os
try:
from configparser import ConfigParser as _ConfigParser # py 3
except ImportError:
from ConfigParser import ConfigParser as _ConfigParser # py 2
try:
from urllib.parse import urlparse as _urlparse # py3
except ImportError:
from urlparse import urlparse as _urlparse # py2
import time
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password, auth_svc):
# This is a bandaid helper function until we get a full
# KBase python auth client released
# note that currently globus usernames, and therefore kbase usernames,
# cannot contain non-ascii characters. In python 2, quote doesn't handle
# unicode, so if this changes this client will need to change.
body = ('user_id=' + _requests.utils.quote(user_id) + '&password=' +
_requests.utils.quote(password) + '&fields=token')
ret = _requests.post(auth_svc, data=body, allow_redirects=True)
status = ret.status_code
if status >= 200 and status <= 299:
tok = _json.loads(ret.text)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination for user %s' % (user_id))
else:
raise Exception(ret.text)
return tok['token']
def _read_inifile(file=_os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if _os.path.exists(file):
try:
config = _ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in ('user_id', 'token',
'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception as e:
print('Error while reading INI file {}: {}'.format(file, e))
return authdata
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
super(ServerError, self).__init__(message)
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class BaseClient(object):
'''
The KBase base client.
Required initialization arguments (positional):
url - the url of the the service to contact:
For SDK methods: either the url of the callback service or the
Narrative Job Service Wrapper.
For SDK dynamic services: the url of the Service Wizard.
For other services: the url of the service.
Optional arguments (keywords in positional order):
timeout - methods will fail if they take longer than this value in seconds.
Default 1800.
user_id - a KBase user name.
password - the password corresponding to the user name.
token - a KBase authentication token.
ignore_authrc - if True, don't read auth configuration from
~/.kbase_config.
trust_all_ssl_certificates - set to True to trust self-signed certificates.
If you don't understand the implications, leave as the default, False.
auth_svc - the url of the KBase authorization service.
lookup_url - set to true when contacting KBase dynamic services.
async_job_check_time_ms - the wait time between checking job state for
asynchronous jobs run with the run_job method.
'''
def __init__(
self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False,
trust_all_ssl_certificates=False,
auth_svc='https://kbase.us/services/authorization/Sessions/Login',
lookup_url=False,
async_job_check_time_ms=100,
async_job_check_time_scale_percent=150,
async_job_check_max_time_ms=300000):
if url is None:
raise ValueError('A url is required')
scheme, _, _, _, _, _ = _urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
self.lookup_url = lookup_url
self.async_job_check_time = async_job_check_time_ms / 1000.0
self.async_job_check_time_scale_percent = (
async_job_check_time_scale_percent)
self.async_job_check_max_time = async_job_check_max_time_ms / 1000.0
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(
user_id, password, auth_svc)
elif 'KB_AUTH_TOKEN' in _os.environ:
self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None and
authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'], auth_svc)
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, url, method, params, context=None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if context:
if type(context) is not dict:
raise ValueError('context is not type dict as required.')
arg_hash['context'] = context
body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
ret = _requests.post(url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
ret.encoding = 'utf-8'
if ret.status_code == 500:
if ret.headers.get(_CT) == _AJ:
err = ret.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if not ret.ok:
ret.raise_for_status()
resp = ret.json()
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
if not resp['result']:
return
if len(resp['result']) == 1:
return resp['result'][0]
return resp['result']
def _get_service_url(self, service_method, service_version):
if not self.lookup_url:
return self.url
service, _ = service_method.split('.')
service_status_ret = self._call(
self.url, 'ServiceWizard.get_service_status',
[{'module_name': service, 'version': service_version}])
return service_status_ret['url']
def _set_up_context(self, service_ver=None, context=None):
if service_ver:
if not context:
context = {}
context['service_ver'] = service_ver
return context
def _check_job(self, service, job_id):
return self._call(self.url, service + '._check_job', [job_id])
def _submit_job(self, service_method, args, service_ver=None,
context=None):
context = self._set_up_context(service_ver, context)
mod, meth = service_method.split('.')
return self._call(self.url, mod + '._' + meth + '_submit',
args, context)
def run_job(self, service_method, args, service_ver=None, context=None):
'''
Run a SDK method asynchronously.
Required arguments:
service_method - the service and method to run, e.g. myserv.mymeth.
args - a list of arguments to the method.
Optional arguments:
service_ver - the version of the service to run, e.g. a git hash
or dev/beta/release.
context - the rpc context dict.
'''
mod, _ = service_method.split('.')
job_id = self._submit_job(service_method, args, service_ver, context)
async_job_check_time = self.async_job_check_time
while True:
time.sleep(async_job_check_time)
async_job_check_time = (async_job_check_time *
self.async_job_check_time_scale_percent /
100.0)
if async_job_check_time > self.async_job_check_max_time:
async_job_check_time = self.async_job_check_max_time
job_state = self._check_job(mod, job_id)
if job_state['finished']:
if not job_state['result']:
return
if len(job_state['result']) == 1:
return job_state['result'][0]
return job_state['result']
def call_method(self, service_method, args, service_ver=None,
context=None):
'''
Call a standard or dynamic service synchronously.
Required arguments:
service_method - the service and method to run, e.g. myserv.mymeth.
args - a list of arguments to the method.
Optional arguments:
service_ver - the version of the service to run, e.g. a git hash
or dev/beta/release.
context - the rpc context dict.
'''
url = self._get_service_url(service_method, service_ver)
context = self._set_up_context(service_ver, context)
return self._call(url, service_method, args, context)
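# Illustrative usage sketch (not part of the autogenerated client). This is
# a minimal example assuming a reachable endpoint and a valid token; the URL
# and 'MyModule.my_method' below are placeholders, not real KBase services.
if __name__ == '__main__':
    client = BaseClient('https://kbase.us/services/service_wizard',
                        token='REPLACE_WITH_A_REAL_TOKEN', lookup_url=True)
    # Synchronous call; long-running SDK jobs would instead go through
    # run_job(), which polls _check_job with the backoff implemented above.
    print(client.call_method('MyModule.my_method', [{'param': 1}]))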
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/identity.py | 2 | 1517 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Identity(Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar principal_id: The principal id of resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of resource.
:vartype tenant_id: str
:param type: The identity type. Possible values include: 'SystemAssigned'
:type type: str or
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'ResourceIdentityType'},
}
def __init__(self, type=None):
super(Identity, self).__init__()
self.principal_id = None
self.tenant_id = None
self.type = type
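# Illustrative sketch (not part of the generated code): only 'type' can be
# set by callers, while principal_id and tenant_id are declared readonly in
# _validation above and are populated when a service response is
# deserialized.
if __name__ == '__main__':
    identity = Identity(type='SystemAssigned')
    print('type=%s principal_id=%s tenant_id=%s' % (
        identity.type, identity.principal_id, identity.tenant_id))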
| mit |
drmrd/ansible | lib/ansible/modules/network/junos/junos_static_route.py | 12 | 7438 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_static_route
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage static IP routes on Juniper JUNOS network devices
description:
- This module provides declarative management of static
IP routes on Juniper JUNOS network devices.
options:
address:
description:
- Network address with prefix of the static route.
required: true
aliases: ['prefix']
next_hop:
description:
- Next hop IP of the static route.
required: true
qualified_next_hop:
description:
- Qualified next hop IP of the static route. Qualified next hops allow
a preference to be associated with a particular next-hop address.
preference:
description:
- Global admin preference of the static route.
aliases: ['admin_distance']
qualified_preference:
description:
- Assign preference for qualified next hop.
aggregate:
description: List of static route definitions
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: configure static route
junos_static_route:
address: 192.168.2.0/24
next_hop: 10.0.0.1
preference: 10
qualified_next_hop: 10.0.0.2
qualified_preference: 3
state: present
- name: delete static route
junos_static_route:
address: 192.168.2.0/24
state: absent
- name: deactivate static route configuration
junos_static_route:
address: 192.168.2.0/24
next_hop: 10.0.0.1
preference: 10
qualified_next_hop: 10.0.0.2
qualified_preference: 3
state: present
active: False
- name: activate static route configuration
junos_static_route:
address: 192.168.2.0/24
next_hop: 10.0.0.1
preference: 10
qualified_next_hop: 10.0.0.2
qualified_preference: 3
state: present
active: True
- name: Configure static route using aggregate
junos_static_route:
aggregate:
- { address: 4.4.4.0/24, next_hop: 3.3.3.3, qualified_next_hop: 5.5.5.5, qualified_preference: 30 }
- { address: 5.5.5.0/24, next_hop: 6.6.6.6, qualified_next_hop: 7.7.7.7, qualified_preference: 12 }
preference: 10
- name: Delete static route using aggregate
junos_static_route:
aggregate:
- address: 4.4.4.0/24
- address: 5.5.5.0/24
state: absent
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit routing-options static]
route 2.2.2.0/24 { ... }
+ route 4.4.4.0/24 {
next-hop 3.3.3.3;
qualified-next-hop 5.5.5.5 {
+ preference 30;
}
+ preference 10;
+ }
"""
import collections
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele, to_param_list
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
USE_PERSISTENT_CONNECTION = True
def main():
""" main entry point for module execution
"""
element_spec = dict(
address=dict(aliases=['prefix']),
next_hop=dict(),
preference=dict(type='int', aliases=['admin_distance']),
qualified_next_hop=dict(type='str'),
qualified_preference=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
active=dict(default=True, type='bool')
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['address'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(junos_argument_spec)
required_one_of = [['aggregate', 'address']]
mutually_exclusive = [['aggregate', 'address']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'routing-options/static/route'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('address', {'xpath': 'name', 'is_key': True}),
('next_hop', 'next-hop'),
('preference', 'preference/metric-value'),
('qualified_next_hop', {'xpath': 'name', 'top': 'qualified-next-hop'}),
('qualified_preference', {'xpath': 'preference', 'top': 'qualified-next-hop'})
])
params = to_param_list(module)
requests = list()
for param in params:
# if key doesn't exist in the item, get it from module.params
for key in param:
if param.get(key) is None:
param[key] = module.params[key]
item = param.copy()
if item['state'] == 'present':
if not (item['address'] and item['next_hop']):
module.fail_json(msg="parameters are required together: ['address', 'next_hop']")
want = map_params_to_obj(module, param_to_xpath_map, param=item)
requests.append(map_obj_to_ele(module, want, top, param=item))
with locked_config(module):
for req in requests:
diff = load_config(module, tostring(req), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
mengxn/tensorflow | tensorflow/python/kernel_tests/decode_csv_op_test.py | 57 | 5036 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeCSV op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCSVOpTest(test.TestCase):
def _test(self, args, expected_out=None, expected_err_re=None):
with self.test_session() as sess:
decode = parsing_ops.decode_csv(**args)
if expected_err_re is None:
out = sess.run(decode)
for i, field in enumerate(out):
if field.dtype == np.float32:
self.assertAllClose(field, expected_out[i])
else:
self.assertAllEqual(field, expected_out[i])
else:
with self.assertRaisesOpError(expected_err_re):
sess.run(decode)
def testSimple(self):
args = {
"records": ["1", "2", '"3"'],
"record_defaults": [[1]],
}
expected_out = [[1, 2, 3]]
self._test(args, expected_out)
def testScalar(self):
args = {"records": '1,""', "record_defaults": [[3], [4]]}
expected_out = [1, 4]
self._test(args, expected_out)
def test2D(self):
args = {"records": [["1", "2"], ['""', "4"]], "record_defaults": [[5]]}
expected_out = [[[1, 2], [5, 4]]]
self._test(args, expected_out)
def testInt64(self):
args = {
"records": ["1", "2", '"2147483648"'],
"record_defaults": [np.array(
[], dtype=np.int64)],
}
expected_out = [[1, 2, 2147483648]]
self._test(args, expected_out)
def testComplexString(self):
args = {
"records": ['"1.0"', '"ab , c"', '"a\nbc"', '"ab""c"', " abc "],
"record_defaults": [["1"]]
}
expected_out = [[b"1.0", b"ab , c", b"a\nbc", b'ab"c', b" abc "]]
self._test(args, expected_out)
def testMultiRecords(self):
args = {
"records": ["1.0,4,aa", "0.2,5,bb", "3,6,cc"],
"record_defaults": [[1.0], [1], ["aa"]]
}
expected_out = [[1.0, 0.2, 3], [4, 5, 6], [b"aa", b"bb", b"cc"]]
self._test(args, expected_out)
def testWithDefaults(self):
args = {
"records": [",1,", "0.2,3,bcd", "3.0,,"],
"record_defaults": [[1.0], [0], ["a"]]
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"a"]]
self._test(args, expected_out)
def testWithTabDelim(self):
args = {
"records": ["1\t1", "0.2\t3", "3.0\t"],
"record_defaults": [[1.0], [0]],
"field_delim": "\t"
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0]]
self._test(args, expected_out)
def testWithoutDefaultsError(self):
args = {
"records": [",1", "0.2,3", "3.0,"],
"record_defaults": [[1.0], np.array(
[], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 1 is required but missing in record 2!")
def testWrongFieldIntError(self):
args = {
"records": [",1", "0.2,234a", "3.0,2"],
"record_defaults": [[1.0], np.array(
[], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
def testOutOfRangeError(self):
args = {
"records": ["1", "9999999999999999999999999", "3"],
"record_defaults": [[1]]
}
self._test(
args, expected_err_re="Field 0 in record 1 is not a valid int32: ")
def testWrongFieldFloatError(self):
args = {
"records": [",1", "0.2,2", "3.0adf,3"],
"record_defaults": [[1.0], np.array(
[], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 0 in record 2 is not a valid float: ")
def testWrongFieldStringError(self):
args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}
self._test(
args, expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
def testWrongDefaults(self):
args = {"records": [",1", "0.2,2", "3.0adf,3"], "record_defaults": [[1.0]]}
self._test(args, expected_err_re="Expect 1 fields but have 2 in record 0")
def testShortQuotedString(self):
args = {
"records": ["\""],
"record_defaults": [["default"]],
}
self._test(
args, expected_err_re="Quoted field has to end with quote followed.*")
if __name__ == "__main__":
test.main()
| apache-2.0 |
EricMountain-1A/openshift-ansible | roles/openshift_health_checker/openshift_checks/package_availability.py | 8 | 2460 | """Check that required RPM packages are available."""
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
"""Check that required RPM packages are available."""
name = "package_availability"
tags = ["preflight"]
def is_active(self):
"""Run only when yum is the package manager as the code is specific to it."""
return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
def run(self):
rpm_prefix = self.get_var("openshift", "common", "service_type")
group_names = self.get_var("group_names", default=[])
packages = set()
if "oo_masters_to_config" in group_names:
packages.update(self.master_packages(rpm_prefix))
if "oo_nodes_to_config" in group_names:
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
return self.execute_module_with_retries("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
"""Return a list of RPMs that we expect a master install to have available."""
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-clients".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-master".format(rpm_prefix=rpm_prefix),
"bash-completion",
"cockpit-bridge",
"cockpit-docker",
"cockpit-system",
"cockpit-ws",
"etcd",
"httpd-tools",
]
@staticmethod
def node_packages(rpm_prefix):
"""Return a list of RPMs that we expect a node install to have available."""
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-node".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-sdn-ovs".format(rpm_prefix=rpm_prefix),
"bind",
"ceph-common",
"dnsmasq",
"docker",
"firewalld",
"flannel",
"glusterfs-fuse",
"iptables-services",
"iptables",
"iscsi-initiator-utils",
"libselinux-python",
"nfs-utils",
"ntp",
"openssl",
"pyparted",
"python-httplib2",
"PyYAML",
"yum-utils",
]
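# Illustrative only, assuming the openshift_checks package is importable:
# with a service type of "origin" (normally read from the
# openshift.common.service_type inventory variable), the helpers above
# expand to concrete RPM names such as "origin-master" and "origin-node".
if __name__ == '__main__':
    print(PackageAvailability.master_packages('origin'))
    print(PackageAvailability.node_packages('origin'))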
| apache-2.0 |
nemonik/CoCreateLite | ccl-cookbook/files/default/cocreatelite/cocreate/views/playgrounds.py | 1 | 5229 | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from ..models import VMPlayground
from ..forms import VMPlaygroundForm, VMPlaygroundDescriptionForm, VMPlaygroundUserAccessForm, VMPlaygroundGroupAccessForm
from . import util
from ..util import single_user_mode
"""
View controllers for playground data
"""
@single_user_mode
def index(request):
"""
Show the list of playgrounds for this user.
"""
# determine all of the playgrounds this user has access to
groupids = [group.id for group in request.user.groups.all()]
print ("Group ids: " + str(groupids))
playgrounds = VMPlayground.objects.filter(creator = request.user) | VMPlayground.objects.filter(access_users__id = request.user.id) | VMPlayground.objects.filter(access_groups__id__in = groupids)
# determine all of the demo boxes from a set of playgrounds
demos = []
for playground in playgrounds:
demos = demos + playground.getDemos()
context = {
"playgrounds": playgrounds,
"demos": demos
}
return render(request, "playgrounds.html", util.fillContext(context, request))
@single_user_mode
def add(request):
"""
Add a new playground.
"""
if request.method == 'GET':
form = VMPlaygroundForm()
elif request.method == 'POST':
form = VMPlaygroundForm(request.POST)
if form.is_valid():
# hooray, let's create the playground
playground = VMPlayground.objects.create(
name = form.data['name'],
creator = request.user,
description = form.data['description'],
description_is_markdown = form.data.get('description_is_markdown', False),
environment = form.data['environment'],
)
playground.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form}
return render(request, "addPlayground.html", util.fillContext(opts, request))
@single_user_mode
def remove(request, playground_id):
"""
Remove a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
for sandbox in playground.sandboxes.all():
sandbox.delete()
playground.delete()
return HttpResponseRedirect(reverse("playgrounds"))
@single_user_mode
def playground(request, playground_id):
"""
Show the details for this playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
opts = {"playground": playground}
return render(request, "newPlaygroundDetails.html", util.fillContext(opts, request))
@single_user_mode
def alterUserAccess(request, playground_id):
"""
Alter the access control list for a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundUserAccessForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundUserAccessForm(request.POST, instance=playground)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "alterPlaygroundUserAccess.html", util.fillContext(opts, request))
@single_user_mode
def alterGroupAccess(request, playground_id):
"""
Alter the access control list for a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundGroupAccessForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundGroupAccessForm(request.POST, instance=playground)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "alterPlaygroundGroupAccess.html", util.fillContext(opts, request))
@single_user_mode
def editDesc(request, playground_id):
"""
Alter or edit the description of the playground
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundDescriptionForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundDescriptionForm(request.POST)
if form.is_valid():
playground.description_is_markdown = form.data['description_is_markdown']
playground.description = form.data['description']
playground.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "editPlaygroundDesc.html", util.fillContext(opts, request))
| bsd-3-clause |
roscopecoltran/scraper | .staging/meta-engines/xlinkBook/update/spider.py | 1 | 7851 | #!/usr/bin/env python
#author: wowdd1
#mail: developergf@gmail.com
#data: 2014.12.09
import requests
import json
from bs4 import BeautifulSoup;
import os,sys
import time
import re
from all_subject import subject_dict, need_update_subject_list
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append("..")
from record import Category
class Spider:
google = None
baidu = None
bing = None
yahoo = None
db_dir = None
zh_re = None
shcool = None
subject = None
url = None
count = None
deep_mind = None
category = ''
category_obj = None
proxies = {
"http": "http://127.0.0.1:8087",
"https": "http://127.0.0.1:8087",
}
proxies2 = {
"http": "http://127.0.0.1:8787",
"https": "http://127.0.0.1:8787",
}
def __init__(self):
self.google = "https://www.google.com.hk/?gws_rd=cr,ssl#safe=strict&q="
self.baidu = "http://www.baidu.com/s?word="
self.bing = "http://cn.bing.com/search?q=a+b&go=Submit&qs=n&form=QBLH&pq="
self.yahoo = "https://search.yahoo.com/search;_ylt=Atkyc2y9pQQo09zbTUWM4CWbvZx4?p="
self.db_dir = os.path.abspath('.') + "/../" + "db/"
self.zh_re=re.compile(u"[\u4e00-\u9fa5]+")
self.school = None
self.subject = None
self.url = None
self.count = 0
self.deep_mind = False
self.category_obj = Category()
def doWork(self):
return
def requestWithProxy(self, url):
return requests.get(url, proxies=self.proxies, verify=False)
def requestWithProxy2(self, url):
return requests.get(url, proxies=self.proxies2, verify=False)
def format_subject(self, subject):
match_list = []
for (k, v) in subject_dict.items():
if subject.find('/') != -1 and subject.lower()[0:subject.find('/')].strip().find(k.lower()) != -1:
match_list.append(k)
elif subject.find('/') == -1 and subject.lower().strip().find(k.lower()) != -1:
match_list.append(k)
result = subject
if len(match_list) > 1:
max_len = 0
for key in match_list:
if key.lower() == subject[0: subject.find(' ')].lower().strip():
result = subject_dict[key]
break
if len(key) > max_len:
max_len = len(key)
result = subject_dict[key]
elif len(match_list) == 1:
#print subject_dict[match_list[0]]
result = subject_dict[match_list[0]]
#print subject
if result != subject and subject.find('/') != -1:
last_index = 0
while subject.find('/', last_index + 1) != -1:
last_index = subject.find('/', last_index + 1)
return result + subject[subject.find('/') : last_index + 1]
elif result != subject:
return result + "/"
else:
if subject.strip()[len(subject) - 1 : ] != '/':
return subject + "/"
else:
return subject
def need_update_subject(self, subject):
subject_converted = self.format_subject(subject)
if subject_converted[len(subject_converted) - 1 : ] == '/':
subject_converted = subject_converted[0 : len(subject_converted) - 1]
for item in need_update_subject_list:
if subject_converted.find(item) != -1:
return True
print subject + " is not configured in all_subject.py, ignoring it"
return False
def replace_sp_char(self, text):
while text.find('/') != -1:
text = text[text.find('/') + 1 : ]
return text.replace(",","").replace("&","").replace(":","").replace("-"," ").replace(" "," ").replace(" ","-").lower()
def get_file_name(self, subject, school):
dir_name = self.format_subject(subject)
return self.db_dir + dir_name + self.replace_sp_char(subject) + "-" + school + time.strftime("%Y")
def create_dir_by_file_name(self, file_name):
if os.path.exists(file_name) == False:
index = 0
for i in range(0, len(file_name)):
if file_name[i] == "/":
index = i
if index > 0:
if os.path.exists(file_name[0:index]) == False:
print "creating " + file_name[0:index] + " dir"
os.makedirs(file_name[0:index])
def open_db(self, file_name, append=False):
self.create_dir_by_file_name(file_name)
flag = 'w'
if append:
flag = 'a'
try:
f = open(file_name, flag)
except IOError, err:
print str(err)
raise
return f
def do_upgrade_db(self, file_name):
tmp_file = file_name + ".tmp"
if os.path.exists(file_name) and os.path.exists(tmp_file):
print "upgrading..."
#os.system("diff -y --suppress-common-lines -EbwBi " + file_name + " " + file_name + ".tmp " + "| colordiff")
#print "remove " + file_name[file_name.find("db"):]
os.remove(file_name)
#print "rename " + file_name[file_name.find("db"):] + ".tmp"
os.rename(tmp_file, file_name)
print "upgrade done"
elif os.path.exists(tmp_file):
print "upgrading..."
#print "rename " + file_name[file_name.find("db"):] + ".tmp"
os.rename(tmp_file, file_name)
print "upgrade done"
else:
print "upgrade error"
def cancel_upgrade(self, file_name):
if os.path.exists(file_name + ".tmp"):
os.remove(file_name + ".tmp")
def close_db(self, f):
f.close()
def write_db(self, f, course_num, course_name, url, describe=""):
#if url == "":
# url = self.google + course_num + " " + course_name
if self.category != '' and describe.find('category:') == -1:
describe += ' category:' + self.category
f.write(course_num.strip() + " | " + course_name.replace("|","") + " | " + url + " | " + describe + "\n")
def get_storage_format(self,course_num, course_name, url, describe=""):
if url == "":
url = self.google + course_num + " " + course_name
return course_num.strip() + " | " + course_name.replace("|","") + " | " + url + " | " + describe
def countFileLineNum(self, file_name):
if os.path.exists(file_name):
line_count = len(open(file_name,'rU').readlines())
return line_count
return 0
def truncateUrlData(self, dir_name):
print "truncateUrlData ...."
self.create_dir_by_file_name(get_url_file_name(dir_name))
f = open(get_url_file_name(dir_name), "w+")
f.truncate()
f.close()
def delZh(self, text):
if isinstance(text, unicode):
list_u = self.zh_re.findall(text)
if len(list_u) > 0 :
last_ele = list_u[len(list_u) - 1]
last_pos = text.find(last_ele)
first_pos = text.find(list_u[0])
title = ""
if first_pos == 0:
title = text[last_pos + len(last_ele):]
else:
title = text[0:first_pos] + text[last_pos + len(last_ele):].strip()
if title.find("|") != -1:
title = title.replace("|", "").strip()
return title
return text
def getKeyValue(self, option):
value_pos = option.find("value=") + 7
return option[value_pos : option.find('"', value_pos)], option[option.find(">") + 1 : option.find("</", 2)].replace("&", "").replace("\n", "").strip()
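# Rough smoke test of the helpers above (assumes all_subject.py provides
# subject_dict); the subject and course names here are arbitrary examples,
# not entries known to exist in the db/ tree.
if __name__ == '__main__':
    spider = Spider()
    print spider.format_subject('Computer Science')
    print spider.get_storage_format('CS101', 'Intro to Computer Science', '')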
| mit |
Soya93/Extract-Refactoring | python/lib/Lib/site-packages/django/contrib/gis/gdal/tests/test_geom.py | 93 | 21063 | from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, \
OGRException, OGRIndexError, SpatialReference, CoordTransform, \
gdal_version
from django.utils import unittest
from django.contrib.gis.geometry.test_data import TestDataMixin
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
try:
g = OGRGeomType(1)
g = OGRGeomType(7)
g = OGRGeomType('point')
g = OGRGeomType('GeometrycollectioN')
g = OGRGeomType('LINearrING')
g = OGRGeomType('Unknown')
except:
self.fail('Could not create an OGRGeomType object!')
# Should throw TypeError on this input
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
self.assertEqual(True, OGRGeomType('point') == 'POINT')
self.assertEqual(False, OGRGeomType('point') == 2)
self.assertEqual(True, OGRGeomType('unknown') == 0)
self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertEqual(None, OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.failUnless(OGRGeomType(wkb25bit + 1) == 'Point25D')
self.failUnless(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
# Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex, geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
from django.contrib.gis.gdal.prototypes.geom import GEOJSON
if not GEOJSON: return
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
def test02_points(self):
"Testing Point objects."
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
self.assertEqual(True, linestr != prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
self.assertEqual(True, mlinestr != prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr == OGRGeometry(rr.wkt))
self.assertEqual(True, lr != prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180,-90,180,90)
p = OGRGeometry.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(True, poly == OGRGeometry(p.wkt))
self.assertEqual(True, poly != prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
print "\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n"
try:
c = poly.centroid
except OGRException:
# Should raise an OGR exception, rings are not closed
pass
else:
self.fail('Should have raised an OGRException!')
print "\nEND - expecting IllegalArgumentException; safe to ignore.\n"
# Closing the rings -- doesn't work on GDAL versions 1.4.1 and below:
# http://trac.osgeo.org/gdal/ticket/1673
major, minor1, minor2 = gdal_version().split('.')
if major == '1':
iminor1 = int(minor1)
if iminor1 < 4 or (iminor1 == 4 and minor2.startswith('1')): return
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
prev = OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
# GeometryCollection.add may take an OGRGeometry (if it is another
# collection of the same type, all child geoms will be added individually)
# or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
import cPickle
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = cPickle.loads(cPickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(OGRGeomTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| apache-2.0 |
idovear/odoo | addons/account_voucher/report/account_voucher_sales_receipt.py | 326 | 5808 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class sale_receipt_report(osv.osv):
_name = "sale.receipt.report"
_description = "Sales Receipt Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'price_total_tax': fields.float('Total With Tax', readonly=True),
'nbr':fields.integer('# of Voucher Lines', readonly=True),
'type': fields.selection([
('sale','Sale'),
('purchase','Purchase'),
('payment','Payment'),
('receipt','Receipt'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('posted','Posted'),
('cancel','Cancelled')
], 'Voucher Status', readonly=True),
'pay_now':fields.selection([
('pay_now','Pay Directly'),
('pay_later','Pay Later or Group Funds'),
],'Payment', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'delay_to_pay': fields.float('Avg. Delay To Pay', readonly=True, group_operator="avg"),
'due_delay': fields.float('Avg. Due Delay', readonly=True, group_operator="avg")
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'sale_receipt_report')
cr.execute("""
create or replace view sale_receipt_report as (
select min(avl.id) as id,
av.date as date,
av.partner_id as partner_id,
aj.currency as currency_id,
av.journal_id as journal_id,
rp.user_id as user_id,
av.company_id as company_id,
count(avl.*) as nbr,
av.type as type,
av.state,
av.pay_now,
av.date_due as date_due,
av.account_id as account_id,
sum(av.amount-av.tax_amount)/(select count(l.id) from account_voucher_line as l
left join account_voucher as a ON (a.id=l.voucher_id)
where a.id=av.id) as price_total,
sum(av.amount)/(select count(l.id) from account_voucher_line as l
left join account_voucher as a ON (a.id=l.voucher_id)
where a.id=av.id) as price_total_tax,
sum((select extract(epoch from avg(date_trunc('day',aml.date_created)-date_trunc('day',l.create_date)))/(24*60*60)::decimal(16,2)
from account_move_line as aml
left join account_voucher as a ON (a.move_id=aml.move_id)
left join account_voucher_line as l ON (a.id=l.voucher_id)
where a.id=av.id)) as delay_to_pay,
sum((select extract(epoch from avg(date_trunc('day',a.date_due)-date_trunc('day',a.date)))/(24*60*60)::decimal(16,2)
from account_move_line as aml
left join account_voucher as a ON (a.move_id=aml.move_id)
left join account_voucher_line as l ON (a.id=l.voucher_id)
where a.id=av.id)) as due_delay
from account_voucher_line as avl
left join account_voucher as av on (av.id=avl.voucher_id)
left join res_partner as rp ON (rp.id=av.partner_id)
left join account_journal as aj ON (aj.id=av.journal_id)
where av.type='sale' and aj.type in ('sale','sale_refund')
group by
av.date,
av.id,
av.partner_id,
aj.currency,
av.journal_id,
rp.user_id,
av.company_id,
av.type,
av.state,
av.date_due,
av.account_id,
av.tax_amount,
av.amount,
av.tax_amount,
av.pay_now
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DPaaS-Raksha/horizon | openstack_dashboard/dashboards/settings/user/forms.py | 14 | 2544 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import pytz
from django import shortcuts
from django.conf import settings
from django.utils import translation
from horizon import forms
from horizon import messages
class UserSettingsForm(forms.SelfHandlingForm):
language = forms.ChoiceField()
timezone = forms.ChoiceField()
def __init__(self, *args, **kwargs):
super(UserSettingsForm, self).__init__(*args, **kwargs)
# Languages
languages = [(k, "%s (%s)"
% (translation.get_language_info(k)['name_local'], k))
for k, v in settings.LANGUAGES]
self.fields['language'].choices = languages
# Timezones
d = datetime(datetime.today().year, 1, 1)
timezones = []
for tz in pytz.common_timezones:
try:
utc_offset = pytz.timezone(tz).localize(d).strftime('%z')
utc_offset = " (UTC %s:%s)" % (utc_offset[:3], utc_offset[3:])
except Exception:
utc_offset = ""
if tz != "UTC":
tz_name = "%s%s" % (tz, utc_offset)
else:
tz_name = tz
timezones.append((tz, tz_name))
self.fields['timezone'].choices = timezones
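# NOTE (editorial, illustrative): each choice pairs the tz database name with
# a human-readable label, e.g. ('US/Pacific', 'US/Pacific (UTC -08:00)').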
def handle(self, request, data):
response = shortcuts.redirect(request.build_absolute_uri())
# Language
lang_code = data['language']
if lang_code and translation.check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
# Timezone
request.session['django_timezone'] = pytz.timezone(
data['timezone']).zone
messages.success(request, translation.ugettext("Settings saved."))
return response
| apache-2.0 |
wangdahoo/jinja2 | examples/profile.py | 75 | 1107 | try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
from jinja2 import Environment as JinjaEnvironment
context = {
'page_title': 'mitsuhiko\'s benchmark',
'table': [dict(a=1,b=2,c=3,d=4,e=5,f=6,g=7,h=8,i=9,j=10) for x in range(1000)]
}
source = """\
% macro testmacro(x)
<span>${x}</span>
% endmacro
<!doctype html>
<html>
<head>
<title>${page_title|e}</title>
</head>
<body>
<div class="header">
<h1>${page_title|e}</h1>
</div>
<div class="table">
<table>
% for row in table
<tr>
% for cell in row
<td>${testmacro(cell)}</td>
% endfor
</tr>
% endfor
</table>
</div>
</body>
</html>\
"""
jinja_template = JinjaEnvironment(
line_statement_prefix='%',
variable_start_string="${",
variable_end_string="}"
).from_string(source)
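# NOTE (editorial): the Environment above swaps Jinja2's default delimiters,
# so '% for ...' line statements stand in for '{% for %}' blocks and '${x}'
# stands in for '{{ x }}' expressions in the template source.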
print jinja_template.environment.compile(source, raw=True)
p = Profile()
p.runcall(lambda: jinja_template.render(context))
stats = Stats(p)
stats.sort_stats('time', 'calls')
stats.print_stats()
| bsd-3-clause |
bliz937/kivy | kivy/tools/extensions/make-kivyext.py | 49 | 6573 | #!/usr/bin/env python
"""
make-kivyext
~~~~~~~~~~~~~
Little helper script that helps creating new Kivy extensions.
To use it, just run it::
python make-kivyext.py
:copyright: (c) 2011: Adjusted by the Kivy Authors,
2010: Courtesy of Armin Ronacher
(Originally developed for flask.pocoo.org)
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import getpass
from datetime import datetime
from urllib.parse import quote
_sep_re = re.compile(r'[\s.,;_-]+')
FILE_HEADER_TEMPLATE = '''\
# -*- coding: utf-8 -*-
"""
%(module)s
%(moduledecor)s
Please describe your extension here...
:copyright: (c) %(year)s by %(name)s.
"""
'''
SETUP_PY_TEMPLATE = '''\
"""
%(name)s
%(namedecor)s
To create a Kivy *.kex extension file for this extension, run this file like
so::
python setup.py create_package
That will turn your current Kivy extension development folder into a *.kex Kivy
extension file that you can just drop in one of the extensions/ directories
supported by Kivy.
"""
from distutils.core import setup
from distutils.cmd import Command
import %(extname)s
long_desc = %(extname)s.__doc__
import os
from os.path import join
from shutil import copy
from subprocess import call
import sys
class PackageBuild(Command):
description = 'Create Extension Package'
user_options = []
def run(self):
# Call this file and make a distributable .zip file that has our desired
# folder structure
call([sys.executable, 'setup.py', 'install', '--root', 'output/',
'--install-lib', '/', '--install-platlib', '/', '--install-data',
'/%(extname)s/data', 'bdist', '--formats=zip'])
files = os.listdir('dist')
if not os.path.isdir('kexfiles'):
os.mkdir('kexfiles')
for file in files:
# Simply copy & replace...
copy(join('dist', file), join('kexfiles', file[:-3] + "kex"))
print('The extension files are now available in kexfiles/')
def initialize_options(self):
pass
def finalize_options(self):
pass
cmdclass = {'create_package': PackageBuild}
setup(
name='%(name)s',
version='0.1',
url='<enter URL here>',
license='<specify license here>',
author='%(author)s',
author_email='%(email)s',
description='<enter short description here>',
long_description=long_desc,
packages=['%(extname)s'],
cmdclass=cmdclass,
classifiers=[
# Add your own classifiers here
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
'''
def prompt(name, default=None):
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
rv = input(prompt)
if rv:
return rv
if default is not None:
return default
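# NOTE (editorial, illustrative): prompt('Author', 'alice') displays
# "Author [alice]: " and returns 'alice' if the user just presses enter.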
def prompt_bool(name, default=False):
while True:
rv = prompt(name + '?', default and 'Y' or 'N')
if not rv:
return default
if rv.lower() in ('y', 'yes', '1', 'on', 'true', 't'):
return True
elif rv.lower() in ('n', 'no', '0', 'off', 'false', 'f'):
return False
def prompt_choices(name, choices):
while True:
rv = prompt(name + '? - (%s)' % ', '.join(choices), choices[0])
rv = rv.lower()
if not rv:
return choices[0]
if rv in choices:
if rv == 'none':
return None
else:
return rv
def guess_package(name):
"""Guess the package name"""
words = [x.lower() for x in _sep_re.split(name)]
return '_'.join(words) or None
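# NOTE (editorial, illustrative): the separator regex collapses whitespace,
# dots, commas, semicolons and dashes, e.g.
# guess_package('My-Cool Extension') returns 'my_cool_extension'.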
class Extension(object):
def __init__(self, name, shortname, author, email, output_folder):
self.name = name
self.shortname = shortname
self.author = author
self.email = email
self.output_folder = output_folder
def make_folder(self):
root = os.path.join(self.output_folder, self.shortname)
os.makedirs(root)
os.mkdir(os.path.join(root, 'data'))
def create_files(self):
decor = '~' * len(self.shortname)
with open(os.path.join(self.output_folder, self.shortname,
'__init__.py'), 'w') as f:
f.write(FILE_HEADER_TEMPLATE % dict(
module=self.shortname,
moduledecor=decor,
year=datetime.utcnow().year,
name=self.author,
))
with open(os.path.join(self.output_folder, 'setup.py'), 'w') as f:
f.write(SETUP_PY_TEMPLATE % dict(
name=self.name,
namedecor='~' * len(self.name),
urlname=quote(self.name),
author=self.author,
extname=self.shortname,
email=self.email,
))
def main():
if len(sys.argv) not in (1, 2):
print('usage: make-kivyext.py [output-folder]')
return
msg = 'Welcome to the Kivy Extension Creator Wizard'
print(msg)
print('~' * len(msg))
name = prompt('Extension Name (human readable)')
shortname = prompt('Extension Name (for filesystem)', guess_package(name))
author = prompt('Author', default=getpass.getuser())
email = prompt('EMail', default='')
output_folder = len(sys.argv) == 2 and sys.argv[1] or shortname + '-dev'
while 1:
folder = prompt('Output folder', default=output_folder)
if os.path.isfile(folder):
print('Error: output folder is a file')
elif os.path.isdir(folder) and os.listdir(folder):
if prompt_bool('Warning: output folder is not empty. Continue'):
break
else:
break
output_folder = os.path.abspath(folder)
ext = Extension(name, shortname, author, email, output_folder)
ext.make_folder()
ext.create_files()
msg = '''
Congratulations!
Your initial Kivy extension code skeleton has been created in:
%(output_folder)s
The next step is to look at the files that have been created and to
populate the placeholder values. Obviously you will also need to add the
actual extension code.
''' % dict(output_folder=output_folder)
print(msg)
if __name__ == '__main__':
main()
| mit |
PabloPiaggi/lammps | tools/i-pi/ipi/utils/nmtransform.py | 41 | 9470 | """Contains functions for doing the inverse and forward normal mode transforms.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
nm_trans: Uses matrix multiplication to do normal mode transformations.
nm_rescale: Uses matrix multiplication to do ring polymer contraction
or expansion.
nm_fft: Uses fast-Fourier transforms to do normal modes transformations.
Functions:
mk_nm_matrix: Makes a matrix to transform between the normal mode and bead
representations.
mk_rs_matrix: Makes a matrix to transform between one number of beads and
another. Higher normal modes in the case of an expansion are set to zero.
"""
__all__ = ['nm_trans', 'nm_rescale', 'nm_fft']
import numpy as np
from ipi.utils.messages import verbosity, info
def mk_nm_matrix(nbeads):
"""Gets the matrix that transforms from the bead representation
to the normal mode representation.
If we return from this function a matrix C, then we transform between the
bead and normal mode representation using q_nm = C . q_b, q_b = C.T . q_nm
Args:
nbeads: The number of beads.
"""
b2nm = np.zeros((nbeads,nbeads))
b2nm[0,:] = np.sqrt(1.0)
for j in range(nbeads):
for i in range(1, nbeads/2+1):
b2nm[i,j] = np.sqrt(2.0)*np.cos(2*np.pi*j*i/float(nbeads))
for i in range(nbeads/2+1, nbeads):
b2nm[i,j] = np.sqrt(2.0)*np.sin(2*np.pi*j*i/float(nbeads))
if (nbeads%2) == 0:
b2nm[nbeads/2,0:nbeads:2] = 1.0
b2nm[nbeads/2,1:nbeads:2] = -1.0
return b2nm/np.sqrt(nbeads)
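# NOTE (editorial sketch, not in the original): the transform matrix is
# orthogonal, so a quick self-check is
#
# C = mk_nm_matrix(8)
# assert np.allclose(np.dot(C, C.T), np.eye(8))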
def mk_rs_matrix(nb1, nb2):
"""Gets the matrix that transforms a path with nb1 beads into one with
nb2 beads.
If we return from this function a matrix T, then we transform between the
system with nb1 bead and the system of nb2 beads using q_2 = T . q_1
Args:
nb1: The initial number of beads.
nb2: The final number of beads.
"""
if (nb1 == nb2):
return np.identity(nb1,float)
elif (nb1 > nb2):
b1_nm = mk_nm_matrix(nb1)
nm_b2 = mk_nm_matrix(nb2).T
#builds the "reduction" matrix that picks the normal modes we want to keep
b1_b2 = np.zeros((nb2, nb1), float)
b1_b2[0,0] = 1.0
for i in range(1, nb2/2+1):
b1_b2[i,i] = 1.0
b1_b2[nb2-i, nb1-i] = 1.0
if (nb2 % 2 == 0):
#if we are contracting down to an even number of beads, then we have to
#pick just one of the last degenerate modes to match onto the single
#stiffest mode in the new path
b1_b2[nb2/2, nb1-nb2/2] = 0.0
rs_b1_b2 = np.dot(nm_b2, np.dot(b1_b2, b1_nm))
return rs_b1_b2*np.sqrt(float(nb2)/float(nb1))
else:
return mk_rs_matrix(nb2, nb1).T*(float(nb2)/float(nb1))
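# NOTE (editorial sketch, assumed usage): contracting an 8-bead ring polymer
# onto 4 beads, dropping the 4 stiffest normal modes:
#
# T = mk_rs_matrix(8, 4)      # shape (4, 8)
# q_small = np.dot(T, q_big)  # q_big has 8 rows, one per bead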
class nm_trans:
"""Helper class to perform beads <--> normal modes transformation.
Attributes:
_b2nm: The matrix to transform between the bead and normal mode
representations.
_nm2b: The matrix to transform between the normal mode and bead
representations.
"""
def __init__(self, nbeads):
"""Initializes nm_trans.
Args:
nbeads: The number of beads.
"""
self._b2nm = mk_nm_matrix(nbeads)
self._nm2b = self._b2nm.T
def b2nm(self, q):
"""Transforms a matrix to the normal mode representation.
Args:
q: A matrix with nbeads rows, in the bead representation.
"""
return np.dot(self._b2nm,q)
def nm2b(self, q):
"""Transforms a matrix to the bead representation.
Args:
q: A matrix with nbeads rows, in the normal mode representation.
"""
return np.dot(self._nm2b,q)
class nm_rescale:
"""Helper class to rescale a ring polymer between different number of beads.
Attributes:
_b1tob2: The matrix to transform between a ring polymer with 'nbeads1'
beads and another with 'nbeads2' beads.
_b2tob1: The matrix to transform between a ring polymer with 'nbeads2'
beads and another with 'nbeads1' beads.
"""
def __init__(self, nbeads1, nbeads2):
"""Initializes nm_rescale.
Args:
nbeads1: The initial number of beads.
nbeads2: The rescaled number of beads.
"""
self._b1tob2 = mk_rs_matrix(nbeads1,nbeads2)
self._b2tob1 = self._b1tob2.T*(float(nbeads1)/float(nbeads2))
def b1tob2(self, q):
"""Transforms a matrix from one value of beads to another.
Args:
q: A matrix with nbeads1 rows, in the bead representation.
"""
return np.dot(self._b1tob2,q)
def b2tob1(self, q):
"""Transforms a matrix from one value of beads to another.
Args:
q: A matrix with nbeads2 rows, in the bead representation.
"""
return np.dot(self._b2tob1,q)
class nm_fft:
"""Helper class to perform beads <--> normal modes transformation
using Fast Fourier transforms.
Attributes:
fft: The fast-Fourier transform function to transform between the
bead and normal mode representations.
ifft: The inverse fast-Fourier transform function to transform
between the normal mode and bead representations.
qdummy: A matrix to hold a copy of the bead positions to transform
them to the normal mode representation.
qnmdummy: A matrix to hold a copy of the normal modes to transform
them to the bead representation.
nbeads: The number of beads.
natoms: The number of atoms.
"""
def __init__(self, nbeads, natoms):
"""Initializes nm_trans.
Args:
nbeads: The number of beads.
natoms: The number of atoms.
"""
self.nbeads = nbeads
self.natoms = natoms
try:
import pyfftw
info("Import of PyFFTW successful", verbosity.medium)
self.qdummy = pyfftw.n_byte_align_empty((nbeads, 3*natoms), 16, 'float32')
self.qnmdummy = pyfftw.n_byte_align_empty((nbeads//2+1, 3*natoms), 16, 'complex64')
self.fft = pyfftw.FFTW(self.qdummy, self.qnmdummy, axes=(0,), direction='FFTW_FORWARD')
self.ifft = pyfftw.FFTW(self.qnmdummy, self.qdummy, axes=(0,), direction='FFTW_BACKWARD')
except ImportError: #Uses standard numpy fft library if nothing better
#is available
info("Import of PyFFTW unsuccessful, using NumPy library instead", verbosity.medium)
self.qdummy = np.zeros((nbeads,3*natoms), dtype='float32')
self.qnmdummy = np.zeros((nbeads//2+1,3*natoms), dtype='complex64')
def dummy_fft(self):
self.qnmdummy = np.fft.rfft(self.qdummy, axis=0)
def dummy_ifft(self):
self.qdummy = np.fft.irfft(self.qnmdummy, n=self.nbeads, axis=0)
self.fft = lambda: dummy_fft(self)
self.ifft = lambda: dummy_ifft(self)
def b2nm(self, q):
"""Transforms a matrix to the normal mode representation.
Args:
q: A matrix with nbeads rows and 3*natoms columns,
in the bead representation.
"""
if self.nbeads == 1:
return q
self.qdummy[:] = q
self.fft()
if self.nbeads == 2:
return self.qnmdummy.real/np.sqrt(self.nbeads)
nmodes = self.nbeads/2
self.qnmdummy /= np.sqrt(self.nbeads)
qnm = np.zeros(q.shape)
qnm[0,:] = self.qnmdummy[0,:].real
if self.nbeads % 2 == 0:
self.qnmdummy[1:-1,:] *= np.sqrt(2)
(qnm[1:nmodes,:], qnm[self.nbeads:nmodes:-1,:]) = (self.qnmdummy[1:-1,:].real, self.qnmdummy[1:-1,:].imag)
qnm[nmodes,:] = self.qnmdummy[nmodes,:].real
else:
self.qnmdummy[1:,:] *= np.sqrt(2)
(qnm[1:nmodes+1,:], qnm[self.nbeads:nmodes:-1,:]) = (self.qnmdummy[1:,:].real, self.qnmdummy[1:,:].imag)
return qnm
def nm2b(self, qnm):
"""Transforms a matrix to the bead representation.
Args:
qnm: A matrix with nbeads rows and 3*natoms columns,
in the normal mode representation.
"""
if self.nbeads == 1:
return qnm
if self.nbeads == 2:
self.qnmdummy[:] = qnm
self.ifft()
return self.qdummy*np.sqrt(self.nbeads)
nmodes = self.nbeads/2
odd = self.nbeads - 2*nmodes # 0 if even, 1 if odd
qnm_complex = np.zeros((nmodes+1, len(qnm[0,:])), complex)
qnm_complex[0,:] = qnm[0,:]
if not odd:
(qnm_complex[1:-1,:].real, qnm_complex[1:-1,:].imag) = (qnm[1:nmodes,:], qnm[self.nbeads:nmodes:-1,:])
qnm_complex[1:-1,:] /= np.sqrt(2)
qnm_complex[nmodes,:] = qnm[nmodes,:]
else:
(qnm_complex[1:,:].real, qnm_complex[1:,:].imag) = (qnm[1:nmodes+1,:], qnm[self.nbeads:nmodes:-1,:])
qnm_complex[1:,:] /= np.sqrt(2)
self.qnmdummy[:] = qnm_complex
self.ifft()
return self.qdummy*np.sqrt(self.nbeads)
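# NOTE (editorial sketch, assumed check): b2nm and nm2b are mutual inverses,
# so a round trip should reproduce the bead positions:
#
# nmf = nm_fft(nbeads=8, natoms=2)
# q = np.random.rand(8, 6)
# assert np.allclose(nmf.nm2b(nmf.b2nm(q)), q, atol=1e-5)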
| gpl-2.0 |
ptoraskar/django | django/contrib/gis/geos/linestring.py | 259 | 5843 | from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.point import Point
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class LineString(ProjectInterpolateMixin, GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
has_cs = True
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1:
coords = args[0]
else:
coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords:
ndim = len(coords[0])
else:
raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
# Incrementing through each of the coordinates and verifying
for i in range(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim:
raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2:
raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
for i in range(ncoords):
if numpy_coords:
cs[i] = coords[i, :]
elif isinstance(coords[i], Point):
cs[i] = coords[i].tuple
else:
cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid')
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3):
raise TypeError('Dimension mismatch.')
# #### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in range(len(self))]
if numpy:
return numpy.array(lst) # ARRRR!
else:
return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz:
return None
else:
return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minLength = 4
_init_func = capi.create_linearring
| bsd-3-clause |
MuckRock/muckrock | muckrock/organization/tests/test_models.py | 1 | 8505 | """
Tests the models of the organization application
"""
# Django
from django.test import TestCase
# Standard Library
from datetime import date
# Third Party
from nose.tools import assert_false, assert_raises, assert_true, eq_
# MuckRock
from muckrock.core.factories import UserFactory
from muckrock.foia.exceptions import InsufficientRequestsError
from muckrock.organization.factories import (
EntitlementFactory,
FreeEntitlementFactory,
MembershipFactory,
OrganizationEntitlementFactory,
OrganizationFactory,
)
class TestOrganization(TestCase):
"""Tests for Organization methods"""
def test_has_member(self):
"""Test has_member method"""
org = OrganizationFactory()
users = UserFactory.create_batch(2)
MembershipFactory(user=users[0], organization=org)
assert_true(org.has_member(users[0]))
assert_false(org.has_member(users[1]))
def test_has_admin(self):
"""Test has_admin method"""
org = OrganizationFactory()
users = UserFactory.create_batch(2)
MembershipFactory(user=users[0], organization=org, admin=True)
MembershipFactory(user=users[1], organization=org, admin=False)
assert_true(org.has_admin(users[0]))
assert_false(org.has_admin(users[1]))
def test_make_requests(self):
"""Test Org make_requests method"""
org = OrganizationFactory(monthly_requests=10, number_requests=10)
request_count = org.make_requests(5)
org.refresh_from_db()
eq_(request_count, {"monthly": 5, "regular": 0})
eq_(org.monthly_requests, 5)
eq_(org.number_requests, 10)
request_count = org.make_requests(10)
org.refresh_from_db()
eq_(request_count, {"monthly": 5, "regular": 5})
eq_(org.monthly_requests, 0)
eq_(org.number_requests, 5)
request_count = org.make_requests(4)
org.refresh_from_db()
eq_(request_count, {"monthly": 0, "regular": 4})
eq_(org.monthly_requests, 0)
eq_(org.number_requests, 1)
with assert_raises(InsufficientRequestsError):
request_count = org.make_requests(2)
org.refresh_from_db()
eq_(org.monthly_requests, 0)
eq_(org.number_requests, 1)
def ent_json(entitlement, date_update):
"""Helper function for serializing entitlement data"""
return {
"name": entitlement.name,
"slug": entitlement.slug,
"description": entitlement.description,
"resources": entitlement.resources,
"date_update": date_update,
}
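# NOTE (editorial, illustrative; keys mirror the factories used below, the
# slug value is assumed): ent_json(plus_entitlement, date(2019, 2, 21)) would
# produce something like
# {'name': 'Plus', 'slug': 'plus', 'description': '...',
# 'resources': {'minimum_users': 5, 'base_requests': 100},
# 'date_update': datetime.date(2019, 2, 21)}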
class TestSquareletUpdateData(TestCase):
"""Test cases for updating organization data from squarelet"""
def test_create_subscription(self):
"""Create a new subscription"""
ent = OrganizationEntitlementFactory()
organization = OrganizationFactory()
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, date(2019, 2, 21))],
"max_users": 5,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 50)
eq_(organization.monthly_requests, 50)
def test_cancel_subscription(self):
"""Cancel a subscription"""
ent = FreeEntitlementFactory()
organization = OrganizationFactory(
entitlement=OrganizationEntitlementFactory(),
date_update=date(2019, 2, 21),
requests_per_month=50,
monthly_requests=33,
)
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, None)],
"max_users": 5,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 0)
eq_(organization.monthly_requests, 0)
def test_upgrade_subscription(self):
"""Upgrade a subscription"""
ent = EntitlementFactory(
name="Plus", resources=dict(minimum_users=5, base_requests=100)
)
organization = OrganizationFactory(
entitlement=OrganizationEntitlementFactory(),
date_update=date(2019, 2, 21),
requests_per_month=50,
monthly_requests=33,
)
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, date(2019, 2, 21))],
"max_users": 5,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 100)
eq_(organization.monthly_requests, 83)
def test_downgrade_subscription(self):
"""Downgrade a subscription"""
# Downgrades only happen at monthly restore
ent = OrganizationEntitlementFactory()
plus = EntitlementFactory(
name="Plus", resources=dict(minimum_users=5, base_requests=100)
)
organization = OrganizationFactory(
entitlement=plus,
date_update=date(2019, 2, 21),
requests_per_month=100,
monthly_requests=83,
)
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, date(2019, 3, 21))],
"max_users": 5,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 50)
eq_(organization.monthly_requests, 50)
def test_increase_max_users(self):
"""Increase max users"""
ent = OrganizationEntitlementFactory()
organization = OrganizationFactory(
entitlement=ent,
date_update=date(2019, 2, 21),
requests_per_month=50,
monthly_requests=33,
)
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, date(2019, 2, 21))],
"max_users": 9,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 70)
eq_(organization.monthly_requests, 53)
def test_decrease_max_users(self):
"""Decrease max users"""
ent = OrganizationEntitlementFactory()
organization = OrganizationFactory(
entitlement=ent,
date_update=date(2019, 2, 21),
requests_per_month=75,
monthly_requests=33,
)
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, date(2019, 2, 21))],
"max_users": 7,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 60)
eq_(organization.monthly_requests, 33)
def test_monthly_restore(self):
"""Monthly restore"""
ent = OrganizationEntitlementFactory()
organization = OrganizationFactory(
entitlement=ent,
date_update=date(2019, 2, 21),
requests_per_month=50,
monthly_requests=33,
)
organization.update_data(
{
"name": organization.name,
"slug": organization.slug,
"individual": False,
"private": False,
"entitlements": [ent_json(ent, date(2019, 3, 21))],
"max_users": 5,
"card": "",
}
)
organization.refresh_from_db()
eq_(organization.requests_per_month, 50)
eq_(organization.monthly_requests, 50)
| agpl-3.0 |
Grokzen/redisco | redisco/containers.py | 1 | 34087 | # -*- coding: utf-8 -*-
# doctest: +ELLIPSIS
import collections
from . import default_expire_time
def _parse_values(values):
(_values,) = values if len(values) == 1 else (None,)
if _values and isinstance(_values, list):
return _values
return values
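# NOTE (editorial, illustrative): this lets the container methods accept
# either a single list or plain positional values:
#
# _parse_values((['a', 'b'],))  # -> ['a', 'b']
# _parse_values(('a', 'b'))     # -> ('a', 'b')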
class Container(object):
"""
Base class for all containers. This class should not
be used and does not provide anything except the ``db``
member.
:members:
"""
def __init__(self, key, db=None, pipeline=None):
self._db = db
self.key = key
self.pipeline = pipeline
def clear(self):
"""
Remove the container from the redis storage
>>> s = Set('test')
>>> s.add('1')
1
>>> s.clear()
>>> s.members
set()
"""
del self.db[self.key]
def set_expire(self, time=None):
"""
Allow the key to expire after ``time`` seconds.
>>> s = Set("test")
>>> s.add("1")
1
>>> s.set_expire(1)
>>> # from time import sleep
>>> # sleep(1)
>>> # s.members
# set([])
>>> s.clear()
:param time: time expressed in seconds. If time is not specified, then ``default_expire_time`` will be used.
:rtype: None
"""
if time is None:
time = default_expire_time
self.db.expire(self.key, time)
@property
def db(self):
if self.pipeline is not None:
return self.pipeline
if self._db is not None:
return self._db
if hasattr(self, 'db_cache') and self.db_cache:
return self.db_cache
else:
from redisco import connection
self.db_cache = connection
return self.db_cache
class Set(Container):
"""
.. default-domain:: set
This class represent a Set in redis.
"""
def __repr__(self):
return "<%s '%s' %s>" % (self.__class__.__name__, self.key,
self.members)
def sadd(self, *values):
"""
Add the specified members to the Set.
:param values: a list of values or a simple value.
:rtype: integer representing the number of value added to the set.
>>> s = Set("test")
>>> s.clear()
>>> s.add(["1", "2", "3"])
3
>>> s.add(["4"])
1
>>> import testfixtures
>>> testfixtures.compare(s.members, {'4', '3', '2', '1'})
<identity>
>>> s.clear()
"""
return self.db.sadd(self.key, *_parse_values(values))
def srem(self, *values):
"""
Remove the values from the Set if they are present.
:param values: a list of values or a simple value.
:rtype: integer count of the values actually removed.
>>> s = Set("test")
>>> s.add(["1", "2", "3"])
3
>>> s.srem(["1", "3"])
2
>>> s.clear()
"""
return self.db.srem(self.key, *_parse_values(values))
def spop(self):
"""
Remove and return (pop) a random element from the Set.
:rtype: String representing the value poped.
>>> s = Set("test")
>>> s.add("1")
1
>>> s.spop()
'1'
>>> s.members
set()
"""
return self.db.spop(self.key)
#def __repr__(self):
# return "<%s '%s' %s>" % (self.__class__.__name__, self.key,
# self.members)
def isdisjoint(self, other):
"""
Return True if the set has no elements in common with other.
:param other: another ``Set``
:rtype: boolean
>>> s1 = Set("key1")
>>> s2 = Set("key2")
>>> s1.add(['a', 'b', 'c'])
3
>>> s2.add(['c', 'd', 'e'])
3
>>> s1.isdisjoint(s2)
False
>>> s1.clear()
>>> s2.clear()
"""
return not bool(self.db.sinter([self.key, other.key]))
def issubset(self, other_set):
"""
Test whether every element in the set is in other.
:param other_set: another ``Set`` to compare to.
>>> s1 = Set("key1")
>>> s2 = Set("key2")
>>> s1.add(['a', 'b', 'c'])
3
>>> s2.add('b')
1
>>> s2.issubset(s1)
True
>>> s1.clear()
>>> s2.clear()
"""
return self <= other_set
def __le__(self, other_set):
return self.db.sinter([self.key, other_set.key]) == self.all()
def __lt__(self, other_set):
"""Test whether the set is a true subset of other."""
return self <= other_set and self != other_set
def __eq__(self, other_set):
"""
Test equality of:
1. keys
2. members
"""
if other_set.key == self.key:
return True
slen, olen = len(self), len(other_set)
if olen == slen:
return self.members == other_set.members
else:
return False
def __ne__(self, other_set):
return not self.__eq__(other_set)
def issuperset(self, other_set):
"""
Test whether every element in other is in the set.
:param other_set: another ``Set`` to compare to.
>>> s1 = Set("key1")
>>> s2 = Set("key2")
>>> s1.add(['a', 'b', 'c'])
3
>>> s2.add('b')
1
>>> s1.issuperset(s2)
True
>>> s1.clear()
>>> s2.clear()
"""
return self >= other_set
def __ge__(self, other_set):
"""Test whether every element in other is in the set."""
return self.db.sinter([self.key, other_set.key]) == other_set.all()
def __gt__(self, other_set):
"""Test whether the set is a true superset of other."""
return self >= other_set and self != other_set
# SET Operations
def union(self, key, *other_sets):
"""
Return a new ``Set`` representing the union of *n* sets.
:param key: String representing the key where to store the result (the union)
:param other_sets: list of other ``Set``.
:rtype: ``Set``
>>> s1 = Set('key1')
>>> s2 = Set('key2')
>>> s1.add(['a', 'b', 'c'])
3
>>> s2.add(['d', 'e'])
2
>>> s3 = s1.union('key3', s2)
>>> s3.key
'key3'
>>> import testfixtures
>>> testfixtures.compare(s3.members, {'a', 'c', 'b', 'e', 'd'})
<identity>
>>> s1.clear()
>>> s2.clear()
>>> s3.clear()
"""
if not isinstance(key, str):
raise ValueError("Expect a string as key")
key = str(key)
self.db.sunionstore(key, [self.key] + [o.key for o in other_sets])
return Set(key)
def intersection(self, key, *other_sets):
"""
Return a new ``Set`` representing the intersection of *n* sets.
:param key: String representing the key where to store the result (the union)
:param other_sets: list of other ``Set``.
:rtype: Set
>>> s1 = Set('key1')
>>> s2 = Set('key2')
>>> s1.add(['a', 'b', 'c'])
3
>>> s2.add(['c', 'e'])
2
>>> s3 = s1.intersection('key3', s2)
>>> s3.key
'key3'
>>> s3.members
{'c'}
>>> s1.clear()
>>> s2.clear()
>>> s3.clear()
"""
if not isinstance(key, str):
raise ValueError("Expect a string as key")
key = str(key)
self.db.sinterstore(key, [self.key] + [o.key for o in other_sets])
return Set(key)
def difference(self, key, *other_sets):
"""
Return a new ``Set`` representing the difference of *n* sets.
:param key: String representing the key where to store the result (the union)
:param other_sets: list of other ``Set``.
:rtype: Set
>>> s1 = Set('key1')
>>> s2 = Set('key2')
>>> s1.add(['a', 'b', 'c'])
3
>>> s2.add(['c', 'e'])
2
>>> s3 = s1.difference('key3', s2)
>>> s3.key
'key3'
>>> import testfixtures
>>> testfixtures.compare(s3.members, {'a', 'b'})
<identity>
>>> s1.clear()
>>> s2.clear()
>>> s3.clear()
"""
if not isinstance(key, str):
raise ValueError("Expect a string as key")
key = str(key)
self.db.sdiffstore(key, [self.key] + [o.key for o in other_sets])
return Set(key)
def update(self, *other_sets):
"""Update the set, adding elements from all other_sets.
:param other_sets: list of ``Set``
:rtype: None
"""
self.db.sunionstore(self.key, [self.key] + [o.key for o in other_sets])
def __ior__(self, other_set):
self.db.sunionstore(self.key, [self.key, other_set.key])
return self
def intersection_update(self, *other_sets):
"""
Update the set, keeping only elements found in it and all other_sets.
:param other_sets: list of ``Set``
:rtype: None
"""
self.db.sinterstore(self.key, [self.key] + [o.key for o in other_sets])
def __iand__(self, other_set):
self.db.sinterstore(self.key, [self.key, other_set.key])
return self
def difference_update(self, *other_sets):
"""
Update the set, removing elements found in others.
:param other_sets: list of ``Set``
:rtype: None
"""
self.db.sdiffstore(self.key, [self.key] + [o.key for o in other_sets])
def __isub__(self, other_set):
self.db.sdiffstore(self.key, [self.key, other_set.key])
return self
def all(self):
return self.db.smembers(self.key)
members = property(all)
"""
return the real content of the Set.
"""
def copy(self, key):
"""
Copy the set to another key and return the new Set.
.. WARNING::
If the new key already contains a value, it will be overwritten.
"""
copy = Set(key=key, db=self.db)
copy.clear()
copy |= self
return copy
def __iter__(self):
return self.members.__iter__()
def sinter(self, *other_sets):
"""
Performs an intersection between Sets and return the *RAW* result.
.. NOTE::
This function return an actual ``set`` object (from python) and not a ``Set``. See func:``intersection``.
"""
return self.db.sinter([self.key] + [s.key for s in other_sets])
def sunion(self, *other_sets):
"""
Performs a union between two sets and returns the *RAW* result.
.. NOTE::
This function return an actual ``set`` object (from python) and not a ``Set``.
"""
return self.db.sunion([self.key] + [s.key for s in other_sets])
def sdiff(self, *other_sets):
"""
Performs a difference between two sets and returns the *RAW* result.
.. NOTE::
This function return an actual ``set`` object (from python) and not a ``Set``.
See function difference.
"""
return self.db.sdiff([self.key] + [s.key for s in other_sets])
def scard(self):
"""
Returns the cardinality of the Set.
:rtype: String containing the cardinality.
"""
return self.db.scard(self.key)
def sismember(self, value):
"""
Return ``True`` if the provided value is in the ``Set``.
"""
return self.db.sismember(self.key, value)
def srandmember(self):
"""
Return a random member of the set.
>>> s = Set("test")
>>> s.add(['a', 'b', 'c'])
3
>>> s.srandmember() # doctest: +ELLIPSIS
'...'
>>> # 'a', 'b' or 'c'
"""
return self.db.srandmember(self.key)
add = sadd
"""see sadd"""
pop = spop
"""see spop"""
remove = srem
"""see srem"""
__contains__ = sismember
__len__ = scard
class List(Container):
"""
This class represent a list object as seen in redis.
"""
def all(self):
"""
Returns all items in the list.
"""
return self.lrange(0, -1)
members = property(all)
"""Return all items in the list."""
def llen(self):
"""
Returns the length of the list.
"""
return self.db.llen(self.key)
__len__ = llen
def __getitem__(self, index):
if isinstance(index, int):
return self.lindex(index)
elif isinstance(index, slice):
indices = index.indices(len(self))
return self.lrange(indices[0], indices[1] - 1)
else:
raise TypeError
def __setitem__(self, index, value):
self.lset(index, value)
def lrange(self, start, stop):
"""
Returns a range of items.
:param start: integer representing the start index of the range
:param stop: integer representing the last index of the range (inclusive).
>>> l = List("test")
>>> l.push(['a', 'b', 'c', 'd'])
4
>>> l.lrange(1, 2)
['b', 'c']
>>> l.clear()
"""
return self.db.lrange(self.key, start, stop)
def lpush(self, *values):
"""
Push the value into the list from the *left* side
:param values: a list of values or single value to push
:rtype: long representing the number of values pushed.
>>> l = List("test")
>>> l.lpush(['a', 'b'])
2
>>> l.clear()
"""
return self.db.lpush(self.key, *_parse_values(values))
def rpush(self, *values):
"""
Push the value into the list from the *right* side
:param values: a list of values or single value to push
:rtype: long representing the size of the list.
>>> l = List("test")
>>> l.lpush(['a', 'b'])
2
>>> l.rpush(['c', 'd'])
4
>>> l.members
['b', 'a', 'c', 'd']
>>> l.clear()
"""
return self.db.rpush(self.key, *_parse_values(values))
def extend(self, iterable):
"""
Extend list by appending elements from the iterable.
:param iterable: an iterable objects.
"""
self.rpush(*[e for e in iterable])
def count(self, value):
"""
Return number of occurrences of value.
:param value: a value tha *may* be contained in the list
"""
return self.members.count(value)
def lpop(self):
"""
Pop the first object from the left.
:return: the popped value.
"""
return self.db.lpop(self.key)
def rpop(self):
"""
Pop the first object from the right.
:return: the popped value.
"""
return self.db.rpop(self.key)
def rpoplpush(self, key):
"""
Remove an element from the list,
atomically add it to the head of the list indicated by key
:param key: the key of the list receiving the popped value.
:return: the popped (and pushed) value
>>> l = List('list1')
>>> l.push(['a', 'b', 'c'])
3
>>> l.rpoplpush('list2')
'c'
>>> l2 = List('list2')
>>> l2.members
['c']
>>> l.clear()
>>> l2.clear()
"""
return self.db.rpoplpush(self.key, key)
def lrem(self, value, num=1):
"""
Remove first occurrence of value.
:return: 1 if the value has been removed, 0 otherwise
"""
return self.db.lrem(self.key, num, value)
def reverse(self):
"""
Reverse the list in place.
:return: None
"""
r = self[:]
r.reverse()
self.clear()
self.extend(r)
def copy(self, key):
"""Copy the list to a new list.
..WARNING:
If destination key already contains a value, it clears it before copying.
"""
copy = List(key, self.db)
copy.clear()
copy.extend(self)
return copy
def ltrim(self, start, end):
"""
Trim the list from start to end.
:return: None
"""
return self.db.ltrim(self.key, start, end)
def lindex(self, idx):
"""
Return the value at the index *idx*
:param idx: the index to fetch the value.
:return: the value or None if out of range.
"""
return self.db.lindex(self.key, idx)
def lset(self, idx, value=0):
"""
Set the value in the list at index *idx*
:return: True is the operation succeed.
>>> l = List('test')
>>> l.push(['a', 'b', 'c'])
3
>>> l.lset(0, 'e')
True
>>> l.members
['e', 'b', 'c']
>>> l.clear()
"""
return self.db.lset(self.key, idx, value)
def __iter__(self):
return self.members.__iter__()
def __repr__(self):
return "<%s '%s' %s>" % (self.__class__.__name__, self.key, self.members)
__len__ = llen
remove = lrem
trim = ltrim
shift = lpop
unshift = lpush
pop = rpop
pop_onto = rpoplpush
push = rpush
append = rpush
class TypedList(object):
"""Create a container to store a list of objects in Redis.
Arguments:
key -- the Redis key this container is stored at
target_type -- can be a Python object or a redisco model class.
Optional Arguments:
type_args -- additional args to pass to type constructor (tuple)
type_kwargs -- additional kwargs to pass to type constructor (dict)
If target_type is not a redisco model class, the target_type should
also a callable that casts the (string) value of a list element into
target_type. E.g. str, int, float -- using this format:
target_type(string_val_of_list_elem, *type_args, **type_kwargs)
target_type also accepts a string that refers to a redisco model.
"""
def __init__(self, key, target_type, type_args=[], type_kwargs={}, **kwargs):
self.list = List(key, **kwargs)
self.klass = self.value_type(target_type)
self._klass_args = type_args
self._klass_kwargs = type_kwargs
from .models.base import Model
self._redisco_model = issubclass(self.klass, Model)
def value_type(self, target_type):
if isinstance(target_type, str):
t = target_type
from .models.base import get_model_from_key
target_type = get_model_from_key(target_type)
if target_type is None:
raise ValueError("Unknown Redisco class %s" % t)
return target_type
def typecast_item(self, value):
if self._redisco_model:
return self.klass.objects.get_by_id(value)
else:
return self.klass(value, *self._klass_args, **self._klass_kwargs)
def typecast_iter(self, values):
if self._redisco_model:
return [o for o in [self.klass.objects.get_by_id(v) for v in values] if o is not None]
else:
return [self.klass(v, *self._klass_args, **self._klass_kwargs) for v in values]
def all(self):
"""Returns all items in the list."""
return self.typecast_iter(self.list.all())
def __len__(self):
return len(self.list)
def __getitem__(self, index):
val = self.list[index]
if isinstance(index, slice):
return self.typecast_iter(val)
else:
return self.typecast_item(val)
def typecast_stor(self, value):
if self._redisco_model:
return value.id
else:
return value
def append(self, value):
self.list.append(self.typecast_stor(value))
def extend(self, iter):
self.list.extend(map(lambda i: self.typecast_stor(i), iter))
def __setitem__(self, index, value):
self.list[index] = self.typecast_stor(value)
def __iter__(self):
for i in range(len(self.list)):
yield self[i]
def __repr__(self):
return repr(self.typecast_iter(self.list))
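# NOTE (editorial example, illustrative): a TypedList casts raw Redis strings
# back through the target type on access:
#
# ages = TypedList('ages', int)
# ages.extend([3, 14, 15])
# ages[1] + 1  # -> 15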
class SortedSet(Container):
"""
This class represents a SortedSet in redis.
Use it if you want to arrange your set in any order.
"""
def __getitem__(self, index):
if isinstance(index, slice):
return self.zrange(index.start, index.stop)
else:
return self.zrange(index, index)[0]
def score(self, member):
"""
Returns the score of member.
"""
return self.zscore(member)
def __contains__(self, val):
return self.zscore(val) is not None
@property
def members(self):
"""
Returns the members of the set.
"""
return self.zrange(0, -1)
@property
def revmembers(self):
"""
Returns the members of the set in reverse.
"""
return self.zrevrange(0, -1)
def __iter__(self):
return self.members.__iter__()
def __reversed__(self):
return self.revmembers.__iter__()
# def __repr__(self):
# return "<%s '%s' %s>" % (self.__class__.__name__, self.key,
# self.members)
@property
def _min_score(self):
"""
Returns the minimum score in the SortedSet.
"""
try:
return self.zscore(self.__getitem__(0))
except IndexError:
return None
@property
def _max_score(self):
"""
Returns the maximum score in the SortedSet.
"""
try:
return self.zscore(self.__getitem__(-1))
except IndexError:
return None
def lt(self, v, limit=None, offset=None):
"""
Returns the list of the members of the set that have scores
less than v.
:param v: the score to compare to.
:param limit: limit the result to *n* elements
:param offset: Skip the first *n* elements
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore("-inf", "(%f" % v,
start=offset, num=limit)
def le(self, v, limit=None, offset=None):
"""
Returns the list of the members of the set that have scores
less than or equal to v.
:param v: the score to compare to.
:param limit: limit the result to *n* elements
:param offset: Skip the first *n* elements
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore("-inf", v,
start=offset, num=limit)
def gt(self, v, limit=None, offset=None, withscores=False):
"""Returns the list of the members of the set that have scores
greater than v.
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore("(%f" % v, "+inf",
start=offset, num=limit, withscores=withscores)
def ge(self, v, limit=None, offset=None, withscores=False):
"""Returns the list of the members of the set that have scores
greater than or equal to v.
:param v: the score to compare to.
:param limit: limit the result to *n* elements
:param offset: Skip the first *n* elements
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore("%f" % v, "+inf",
start=offset, num=limit, withscores=withscores)
def between(self, min, max, limit=None, offset=None):
"""
Returns the list of the members of the set that have scores
between min and max.
.. Note::
The min and max are inclusive when comparing the values.
:param min: the minimum score to compare to.
:param max: the maximum score to compare to.
:param limit: limit the result to *n* elements
:param offset: Skip the first *n* elements
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.add('c', 30)
1
>>> s.between(20, 30)
['b', 'c']
>>> s.clear()
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore(min, max,
start=offset, num=limit)
def zadd(self, members, score=1):
"""
Add members in the set and assign them the score.
:param members: a list of item or a single item
:param score: the score the assign to the item(s)
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.clear()
"""
_members = []
if not isinstance(members, dict):
_members = [score, members]
else:
for member, score in members.items():
_members += [score, member]
return self.db.zadd(self.key, *_members)
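# NOTE (editorial): the flat [score1, member1, score2, member2, ...]
# argument order above matches the redis-py 2.x StrictRedis-style zadd;
# redis-py 3.x expects a {member: score} mapping instead, so this call
# would need adapting there.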
def zrem(self, *values):
"""
Remove the values from the SortedSet
:return: the number of values actually removed
>>> s = SortedSet('foo')
>>> s.add('a', 10)
1
>>> s.zrem('a')
1
>>> s.members
[]
>>> s.clear()
"""
return self.db.zrem(self.key, *_parse_values(values))
def zincrby(self, att, value=1):
"""
Increment the score of the item by ``value``
:param att: the member to increment
:param value: the value to add to the current score
:returns: the new score of the member
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.zincrby("a", 10)
20.0
>>> s.clear()
"""
return self.db.zincrby(self.key, att, value)
def zrevrank(self, member):
"""
Returns the ranking in reverse order for the member
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.revrank('a')
1
>>> s.clear()
"""
return self.db.zrevrank(self.key, member)
def zrange(self, start, stop, withscores=False):
"""
Returns the elements ranked between the zero-based indexes ``start``
and ``stop``, both included.
:param withscore: True if the score of the elements should
also be returned
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.add('c', 30)
1
>>> s.zrange(1, 3)
['b', 'c']
>>> s.zrange(1, 3, withscores=True)
[('b', 20.0), ('c', 30.0)]
>>> s.clear()
"""
return self.db.zrange(self.key, start, stop, withscores=withscores)
def zrevrange(self, start, end, **kwargs):
"""
Returns the range of items included between ``start`` and ``stop``
in reverse order (from high to low)
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.add('c', 30)
1
>>> s.zrevrange(1, 2)
['b', 'a']
>>> s.clear()
"""
return self.db.zrevrange(self.key, start, end, **kwargs)
def zrangebyscore(self, min, max, **kwargs):
"""
Returns the elements whose scores are between ``min`` and ``max`` (both included)
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.add('c', 30)
1
>>> s.zrangebyscore(20, 30)
['b', 'c']
>>> s.clear()
"""
return self.db.zrangebyscore(self.key, min, max, **kwargs)
def zrevrangebyscore(self, max, min, **kwargs):
"""
Returns the elements whose scores are between ``min`` and ``max``, ordered from high to low
>>> s = SortedSet("foo")
>>> s.add('a', 10)
1
>>> s.add('b', 20)
1
>>> s.add('c', 30)
1
>>> s.zrevrangebyscore(30, 20)
['c', 'b']
>>> s.clear()
"""
return self.db.zrevrangebyscore(self.key, max, min, **kwargs)
def zcard(self):
"""
Returns the cardinality of the SortedSet.
>>> s = SortedSet("foo")
>>> s.add("a", 1)
1
>>> s.add("b", 2)
1
>>> s.add("c", 3)
1
>>> s.zcard()
3
>>> s.clear()
"""
return self.db.zcard(self.key)
def zscore(self, elem):
"""
Return the score of an element
>>> s = SortedSet("foo")
>>> s.add("a", 10)
1
>>> s.score("a")
10.0
>>> s.clear()
"""
return self.db.zscore(self.key, elem)
def zremrangebyrank(self, start, stop):
"""
Remove a range of element between the rank ``start`` and
``stop`` both included.
:return: the number of item deleted
>>> s = SortedSet("foo")
>>> s.add("a", 10)
1
>>> s.add("b", 20)
1
>>> s.add("c", 30)
1
>>> s.zremrangebyrank(1, 2)
2
>>> s.members
['a']
>>> s.clear()
"""
return self.db.zremrangebyrank(self.key, start, stop)
def zremrangebyscore(self, min_value, max_value):
"""
Remove a range of element by between score ``min_value`` and
``max_value`` both included.
:returns: the number of items deleted.
>>> s = SortedSet("foo")
>>> s.add("a", 10)
1
>>> s.add("b", 20)
1
>>> s.add("c", 30)
1
>>> s.zremrangebyscore(10, 20)
2
>>> s.members
['c']
>>> s.clear()
"""
return self.db.zremrangebyscore(self.key, min_value, max_value)
def zrank(self, elem):
"""
Returns the rank of the element.
>>> s = SortedSet("foo")
>>> s.add("a", 10)
1
>>> s.zrank("a")
0
>>> s.clear()
"""
return self.db.zrank(self.key, elem)
def eq(self, value):
"""
Returns the elements that have ``value`` for score.
"""
return self.zrangebyscore(value, value)
__len__ = zcard
revrank = zrevrank
score = zscore
rank = zrank
incr_by = zincrby
add = zadd
remove = zrem
class NonPersistentList(object):
def __init__(self, l):
self._list = l
@property
def members(self):
return self._list
def __iter__(self):
return iter(self.members)
def __len__(self):
return len(self._list)
class Hash(Container, collections.abc.MutableMapping):
def __iter__(self):
return self.hgetall().__iter__()
def __repr__(self):
return "<%s '%s' %s>" % (self.__class__.__name__,
self.key, self.hgetall())
def _set_dict(self, new_dict):
self.clear()
self.update(new_dict)
def hlen(self):
"""
Returns the number of elements in the Hash.
"""
return self.db.hlen(self.key)
def hset(self, member, value):
"""
Set ``member`` in the Hash at ``value``.
:returns: 1 if member is a new field and the value has been
stored, 0 if the field existed and the value has been
updated.
>>> h = Hash("foo")
>>> h.hset("bar", "value")
1
>>> h.clear()
"""
return self.db.hset(self.key, member, value)
def hdel(self, *members):
"""
Delete one or more hash fields.
:param members: one or more fields to remove.
:return: the number of fields that were removed
>>> h = Hash("foo")
>>> h.hset("bar", "value")
1
>>> h.hdel("bar")
1
>>> h.clear()
"""
return self.db.hdel(self.key, *_parse_values(members))
def hkeys(self):
"""
Returns all fields name in the Hash
"""
return self.db.hkeys(self.key)
def hgetall(self):
"""
Returns all the fields and values in the Hash.
:rtype: dict
"""
return self.db.hgetall(self.key)
def hvals(self):
"""
Returns all the values in the Hash
:rtype: list
"""
return self.db.hvals(self.key)
def hget(self, field):
"""
Returns the value stored in the field, None if the field doesn't exist.
"""
return self.db.hget(self.key, field)
def hexists(self, field):
"""
Returns ``True`` if the field exists, ``False`` otherwise.
"""
return self.db.hexists(self.key, field)
def hincrby(self, field, increment=1):
"""
Increment the value of the field.
:returns: the value of the field after incrementation
>>> h = Hash("foo")
>>> h.hincrby("bar", 10)
10
>>> h.hincrby("bar", 2)
12
>>> h.clear()
"""
return self.db.hincrby(self.key, field, increment)
def hmget(self, fields):
"""
Returns the values stored in the fields.
"""
return self.db.hmget(self.key, fields)
def hmset(self, mapping):
"""
Sets or updates the fields with their corresponding values.
:param mapping: a dict with keys and values
"""
return self.db.hmset(self.key, mapping)
keys = hkeys
values = hvals
_get_dict = hgetall
__getitem__ = hget
__setitem__ = hset
__delitem__ = hdel
__len__ = hlen
__contains__ = hexists
dict = property(_get_dict, _set_dict)
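# NOTE (editorial example, illustrative): the ``dict`` property round-trips a
# plain mapping through Redis:
#
# h = Hash('prefs')
# h.dict = {'theme': 'dark'}  # clears the hash, then stores the mapping
# h['lang'] = 'en'
# sorted(h.keys())            # -> ['lang', 'theme']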
| mit |
johancz/olympia | apps/addons/management/commands/personas_fix_mojibake.py | 13 | 5793 | from getpass import getpass
from optparse import make_option
from time import time
from django.core.management.base import BaseCommand
from django.db import connection as django_connection, transaction
import MySQLdb as mysql
def debake(s):
for c in s:
try:
yield c.encode('windows-1252')
except UnicodeEncodeError:
yield c.encode('latin-1')
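# NOTE (editorial, illustrative): mojibake appears when UTF-8 bytes are
# decoded as windows-1252, turning u'é' into u'\xc3\xa9'; re-encoding each
# character byte-for-byte recovers the original UTF-8 sequence:
#
# ''.join(debake(u'\xc3\xa9'))  # -> '\xc3\xa9', the UTF-8 bytes of u'é'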
class Command(BaseCommand):
"""
Consult the personas database to find and fix mangled descriptions.
`host`: the host of the personas database
`database`: the personas database, eg: personas
`commit`: if yes, actually commit the transaction, for any other value, it
aborts the transaction at the end.
`users`: migrate user accounts?
`favorites`: migrate favorites for users?
"""
option_list = BaseCommand.option_list + (
make_option('--host', action='store',
dest='host', help='The host of MySQL'),
make_option('--db', action='store',
dest='db', help='The database in MySQL'),
make_option('--user', action='store',
dest='user', help='The database user'),
make_option('--commit', action='store',
dest='commit', help='If yes, then commits the run'),
make_option('--start', action='store', type="int",
dest='start', help='An optional offset to start at'),
)
def log(self, msg):
print msg
def commit_or_not(self, gogo):
if gogo == 'yes':
self.log('Committing changes.')
transaction.commit()
else:
self.log('Not committing changes, this is a dry run.')
transaction.rollback()
def connect(self, **options):
options = dict([(k, v) for k, v in options.items() if k in
['host', 'db', 'user'] and v])
options['passwd'] = getpass('MySQL Password: ')
options['charset'] = 'latin1'
options['use_unicode'] = False
if options['host'][0] == '/':
options['unix_socket'] = options['host']
del options['host']
self.connection = mysql.connect(**options)
self.cursor = self.connection.cursor()
self.cursor_z = django_connection.cursor()
def do_fix(self, offset, limit, **options):
self.log('Processing themes %s to %s' % (offset, offset + limit))
ids = []
descs = []
for theme in self.get_themes(limit, offset):
if max(theme[1]) > u'\x7f':
try:
descs.append(''.join(debake(theme[1])))
ids.append(theme[0])
except UnicodeEncodeError:
# probably already done?
print "skipped", theme[0]
else:
print "clean", theme[0]
if ids:
targets = self.find_needed_fixes(ids, descs)
self.fix_descs(targets)
def find_needed_fixes(self, ids, descs):
original_descs = self.get_original_descs(ids)
for id, d, original_d in zip(ids, descs, original_descs):
if d == original_d:
yield id, d
def get_original_descs(self, ids):
qs = ', '.join(['%s'] * len(ids))
self.cursor.execute(
"SELECT description from personas where id in (%s)" % qs, ids)
return (x[0] for x in self.cursor.fetchall())
def fix_descs(self, targets):
for id, desc in targets:
try:
desc.decode('utf-8')
print "FIX", id
except UnicodeDecodeError:
print "SKIPPED", id
continue
self.cursor_z.execute(
'UPDATE translations AS t, personas AS p '
'SET t.localized_string = %s, '
't.localized_string_clean = NULL '
'WHERE t.id = p.description AND p.persona_id = %s', [desc, id])
def count_themes(self):
self.cursor_z.execute('SELECT count(persona_id) from personas')
return self.cursor_z.fetchone()[0]
def get_themes(self, limit, offset):
self.cursor_z.execute(
'SELECT p.persona_id, t.localized_string from personas as p, '
'translations as t where t.id = p.description and '
't.localized_string != "" LIMIT %s OFFSET %s' % (limit, offset))
return self.cursor_z.fetchall()
@transaction.commit_manually
def handle(self, *args, **options):
t_total_start = time()
self.connect(**options)
self.log(
"Fixing mojibake in theme descriptions. Think these mangled "
"strings are bad? Have a look at "
"https://en.wikipedia.org/wiki/File:Letter_to_Russia"
"_with_krokozyabry.jpg")
try:
count = self.count_themes()
self.log("Found %s themes. Hope you're not in a hurry" % count)
step = 2500
start = options.get('start', 0)
self.log("Starting at offset: %s" % start)
for offset in range(start, count, step):
t_start = time()
self.do_fix(offset, step, **options)
self.commit_or_not(options.get('commit'))
t_average = 1 / ((time() - t_total_start) /
(offset - start + step))
print "> %.2fs for %s themes. Averaging %.2f themes/s" % (
time() - t_start, step, t_average)
except:
self.log('Error, not committing changes.')
transaction.rollback()
raise
finally:
self.commit_or_not(options.get('commit'))
self.log("Done. Total time: %s seconds" % (time() - t_total_start))
| bsd-3-clause |
abhishekkrthakur/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
robinro/ansible | lib/ansible/module_utils/facts/utils.py | 9 | 1847 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
try:
datafile = open(path)
data = datafile.read()
if strip:
data = data.strip()
if len(data) == 0:
data = default
finally:
datafile.close()
except:
# ignore errors as some jails/containers might have readable permissions but not allow reads to proc
# done in 2 blocks for 2.4 compat
pass
return data
def get_file_lines(path):
'''get list of lines from file'''
data = get_file_content(path)
if data:
ret = data.splitlines()
else:
ret = []
return ret
def get_mount_size(mountpoint):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(mountpoint)
size_total = statvfs_result.f_frsize * statvfs_result.f_blocks
size_available = statvfs_result.f_frsize * (statvfs_result.f_bavail)
except OSError:
pass
return size_total, size_available
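# Usage sketch for the helpers above (illustrative; the /proc paths are
# examples, not from the original source):
# cmdline = get_file_content('/proc/cmdline', default='')
# mounts = get_file_lines('/proc/mounts')
# size_total, size_available = get_mount_size('/')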
| gpl-3.0 |
JimCircadian/ansible | lib/ansible/modules/clustering/consul_acl.py | 32 | 22127 | #!/usr/bin/python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul_acl
short_description: Manipulate Consul ACL keys and rules
description:
- Allows the addition, modification and deletion of ACL keys and associated
rules in a consul cluster via the agent. For more details on using and
configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
version_added: "2.0"
author:
- Steve Gargan (@sgargan)
- Colin Nolan (@colin-nolan)
options:
mgmt_token:
description:
- a management token is required to manipulate the acl lists
state:
description:
- whether the ACL pair should be present or absent
required: false
choices: ['present', 'absent']
default: present
token_type:
description:
- the type of token that should be created, either management or client
choices: ['client', 'management']
default: client
name:
description:
- the name that should be associated with the acl key; this is opaque
to Consul
required: false
token:
description:
- the token key identifying an ACL rule set. If generated by consul
this will be a UUID
required: false
rules:
description:
- a list of the rules that should be associated with a given token
required: false
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
requirements:
- "python >= 2.6"
- python-consul
- pyhcl
- requests
"""
EXAMPLES = """
- name: create an ACL with rules
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
rules:
- key: "foo"
policy: read
- key: "private/foo"
policy: deny
- name: create an ACL with a specific token
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
token: my-token
rules:
- key: "foo"
policy: read
- name: update the rules associated to an ACL token
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
name: Foo access
token: some_client_token
rules:
- event: "bbq"
policy: write
- key: "foo"
policy: read
- key: "private"
policy: deny
- keyring: write
- node: "hgs4"
policy: write
- operator: read
- query: ""
policy: write
- service: "consul"
policy: write
- session: "standup"
policy: write
- name: remove a token
consul_acl:
host: consul1.example.com
mgmt_token: some_management_acl
token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
state: absent
"""
RETURN = """
token:
description: the token associated to the ACL (the ACL's ID)
returned: success
type: string
sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
rules:
description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
returned: I(status) == "present"
type: string
sample: {
"key": {
"foo": {
"policy": "write"
},
"bar": {
"policy": "deny"
}
}
}
operation:
description: the operation performed on the ACL
returned: changed
type: string
sample: update
"""
try:
import consul
python_consul_installed = True
except ImportError:
python_consul_installed = False
try:
import hcl
pyhcl_installed = True
except ImportError:
pyhcl_installed = False
try:
from requests.exceptions import ConnectionError
has_requests = True
except ImportError:
has_requests = False
from collections import defaultdict
from ansible.module_utils.basic import to_text, AnsibleModule
RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
MANAGEMENT_PARAMETER_NAME = "mgmt_token"
HOST_PARAMETER_NAME = "host"
SCHEME_PARAMETER_NAME = "scheme"
VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
NAME_PARAMETER_NAME = "name"
PORT_PARAMETER_NAME = "port"
RULES_PARAMETER_NAME = "rules"
STATE_PARAMETER_NAME = "state"
TOKEN_PARAMETER_NAME = "token"
TOKEN_TYPE_PARAMETER_NAME = "token_type"
PRESENT_STATE_VALUE = "present"
ABSENT_STATE_VALUE = "absent"
CLIENT_TOKEN_TYPE_VALUE = "client"
MANAGEMENT_TOKEN_TYPE_VALUE = "management"
REMOVE_OPERATION = "remove"
UPDATE_OPERATION = "update"
CREATE_OPERATION = "create"
_POLICY_JSON_PROPERTY = "policy"
_RULES_JSON_PROPERTY = "Rules"
_TOKEN_JSON_PROPERTY = "ID"
_TOKEN_TYPE_JSON_PROPERTY = "Type"
_NAME_JSON_PROPERTY = "Name"
_POLICY_YML_PROPERTY = "policy"
_POLICY_HCL_PROPERTY = "policy"
_ARGUMENT_SPEC = {
MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
HOST_PARAMETER_NAME: dict(default='localhost'),
SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
NAME_PARAMETER_NAME: dict(required=False),
PORT_PARAMETER_NAME: dict(default=8500, type='int'),
RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
TOKEN_PARAMETER_NAME: dict(required=False),
TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
default=CLIENT_TOKEN_TYPE_VALUE)
}
def set_acl(consul_client, configuration):
"""
Sets an ACL based on the given configuration.
:param consul_client: the consul client
:param configuration: the run configuration
:return: the output of setting the ACL
"""
acls_as_json = decode_acls_as_json(consul_client.acl.list())
existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
if None in existing_acls_mapped_by_token:
raise AssertionError("expecting ACL list to be associated to a token: %s" %
existing_acls_mapped_by_token[None])
if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
# No token but name given so can get token from name
configuration.token = existing_acls_mapped_by_name[configuration.name].token
if configuration.token and configuration.token in existing_acls_mapped_by_token:
return update_acl(consul_client, configuration)
else:
if configuration.token in existing_acls_mapped_by_token:
raise AssertionError()
if configuration.name in existing_acls_mapped_by_name:
raise AssertionError()
return create_acl(consul_client, configuration)
def update_acl(consul_client, configuration):
"""
Updates an ACL.
:param consul_client: the consul client
:param configuration: the run configuration
:return: the output of the update
"""
existing_acl = load_acl_with_token(consul_client, configuration.token)
changed = existing_acl.rules != configuration.rules
if changed:
name = configuration.name if configuration.name is not None else existing_acl.name
rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
updated_token = consul_client.acl.update(
configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
if updated_token != configuration.token:
raise AssertionError()
return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
def create_acl(consul_client, configuration):
"""
Creates an ACL.
:param consul_client: the consul client
:param configuration: the run configuration
:return: the output of the creation
"""
rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
token = consul_client.acl.create(
name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
rules = configuration.rules
return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
def remove_acl(consul, configuration):
"""
Removes an ACL.
:param consul: the consul client
:param configuration: the run configuration
:return: the output of the removal
"""
token = configuration.token
changed = consul.acl.info(token) is not None
if changed:
consul.acl.destroy(token)
return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
def load_acl_with_token(consul, token):
"""
Loads the ACL with the given token (token == rule ID).
:param consul: the consul client
:param token: the ACL "token"/ID (not name)
:return: the ACL associated to the given token
:exception ConsulACLNotFoundException: raised if the given token does not exist
"""
acl_as_json = consul.acl.info(token)
if acl_as_json is None:
raise ConsulACLNotFoundException(token)
return decode_acl_as_json(acl_as_json)
def encode_rules_as_hcl_string(rules):
"""
Converts the given rules into the equivalent HCL (string) representation.
:param rules: the rules
:return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
note for justification)
"""
if len(rules) == 0:
# Note: empty string is not valid HCL according to `hcl.load`; however, the ACL `Rule` property will be an empty
# string if there are no rules...
return None
rules_as_hcl = ""
for rule in rules:
rules_as_hcl += encode_rule_as_hcl_string(rule)
return rules_as_hcl
def encode_rule_as_hcl_string(rule):
"""
Converts the given rule into the equivalent HCL (string) representation.
:param rule: the rule
:return: the equivalent HCL (string) representation of the rule
"""
if rule.pattern is not None:
return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
else:
return '%s = "%s"\n' % (rule.scope, rule.policy)
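# For reference, a sketch of the two output shapes produced above (derived
# from the code, illustrative values):
# Rule('key', 'read', 'foo') -> 'key "foo" {\n  policy = "read"\n}\n'
# Rule('operator', 'read')   -> 'operator = "read"\n'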
def decode_rules_as_hcl_string(rules_as_hcl):
"""
Converts the given HCL (string) representation of rules into a list of rule domain models.
:param rules_as_hcl: the HCL (string) representation of a collection of rules
:return: the equivalent domain model to the given rules
"""
rules_as_hcl = to_text(rules_as_hcl)
rules_as_json = hcl.loads(rules_as_hcl)
return decode_rules_as_json(rules_as_json)
def decode_rules_as_json(rules_as_json):
"""
Converts the given JSON representation of rules into a list of rule domain models.
:param rules_as_json: the JSON representation of a collection of rules
:return: the equivalent domain model to the given rules
"""
rules = RuleCollection()
for scope in rules_as_json:
if not isinstance(rules_as_json[scope], dict):
rules.add(Rule(scope, rules_as_json[scope]))
else:
for pattern, policy in rules_as_json[scope].items():
rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
return rules
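# Sketch of the mapping performed above (illustrative values, not from the
# original source):
# {'key': {'foo': {'policy': 'read'}}, 'operator': 'read'}
# decodes to Rule('key', 'read', 'foo') and Rule('operator', 'read')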
def encode_rules_as_json(rules):
"""
Converts the given rules into the equivalent JSON representation according to the documentation:
https://www.consul.io/docs/guides/acl.html#rule-specification.
:param rules: the rules
:return: JSON representation of the given rules
"""
rules_as_json = defaultdict(dict)
for rule in rules:
if rule.pattern is not None:
if rule.pattern in rules_as_json[rule.scope]:
raise AssertionError()
rules_as_json[rule.scope][rule.pattern] = {
_POLICY_JSON_PROPERTY: rule.policy
}
else:
if rule.scope in rules_as_json:
raise AssertionError()
rules_as_json[rule.scope] = rule.policy
return rules_as_json
def decode_rules_as_yml(rules_as_yml):
"""
Converts the given YAML representation of rules into a list of rule domain models.
:param rules_as_yml: the YAML representation of a collection of rules
:return: the equivalent domain model to the given rules
"""
rules = RuleCollection()
if rules_as_yml:
for rule_as_yml in rules_as_yml:
rule_added = False
for scope in RULE_SCOPES:
if scope in rule_as_yml:
if rule_as_yml[scope] is None:
raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
else rule_as_yml[scope]
pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
rules.add(Rule(scope, policy, pattern))
rule_added = True
break
if not rule_added:
raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
return rules
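# Sketch of the mapping performed above (illustrative values, not from the
# original source):
# [{'key': 'foo', 'policy': 'read'}, {'operator': 'read'}]
# decodes to Rule('key', 'read', 'foo') and Rule('operator', 'read')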
def decode_acl_as_json(acl_as_json):
"""
Converts the given JSON representation of an ACL into the equivalent domain model.
:param acl_as_json: the JSON representation of an ACL
:return: the equivalent domain model to the given ACL
"""
rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
else RuleCollection()
return ACL(
rules=rules,
token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
token=acl_as_json[_TOKEN_JSON_PROPERTY],
name=acl_as_json[_NAME_JSON_PROPERTY]
)
def decode_acls_as_json(acls_as_json):
"""
Converts the given JSON representation of ACLs into a list of ACL domain models.
:param acls_as_json: the JSON representation of a collection of ACLs
:return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
"""
return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
class ConsulACLNotFoundException(Exception):
"""
Exception raised if an ACL with the given token is not found.
"""
class Configuration:
"""
Configuration for this module.
"""
def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
rules=None, state=None, token=None, token_type=None):
self.management_token = management_token # type: str
self.host = host # type: str
self.scheme = scheme # type: str
self.validate_certs = validate_certs # type: bool
self.name = name # type: str
self.port = port # type: int
self.rules = rules # type: RuleCollection
self.state = state # type: str
self.token = token # type: str
self.token_type = token_type # type: str
class Output:
"""
Output of an action of this module.
"""
def __init__(self, changed=None, token=None, rules=None, operation=None):
self.changed = changed # type: bool
self.token = token # type: str
self.rules = rules # type: RuleCollection
self.operation = operation # type: str
class ACL:
"""
Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
"""
def __init__(self, rules, token_type, token, name):
self.rules = rules
self.token_type = token_type
self.token = token
self.name = name
def __eq__(self, other):
return other \
and isinstance(other, self.__class__) \
and self.rules == other.rules \
and self.token_type == other.token_type \
and self.token == other.token \
and self.name == other.name
def __hash__(self):
return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
class Rule:
"""
ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
"""
def __init__(self, scope, policy, pattern=None):
self.scope = scope
self.policy = policy
self.pattern = pattern
def __eq__(self, other):
return isinstance(other, self.__class__) \
and self.scope == other.scope \
and self.policy == other.policy \
and self.pattern == other.pattern
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
def __str__(self):
return encode_rule_as_hcl_string(self)
class RuleCollection:
"""
Collection of ACL rules, which are part of a Consul ACL.
"""
def __init__(self):
self._rules = {}
for scope in RULE_SCOPES:
self._rules[scope] = {}
def __iter__(self):
all_rules = []
for scope, pattern_keyed_rules in self._rules.items():
for pattern, rule in pattern_keyed_rules.items():
all_rules.append(rule)
return iter(all_rules)
def __len__(self):
count = 0
for scope in RULE_SCOPES:
count += len(self._rules[scope])
return count
def __eq__(self, other):
return isinstance(other, self.__class__) \
and set(self) == set(other)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return encode_rules_as_hcl_string(self)
def add(self, rule):
"""
Adds the given rule to this collection.
:param rule: model of a rule
:raises ValueError: raised if there already exists a rule for a given scope and pattern
"""
if rule.pattern in self._rules[rule.scope]:
pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
self._rules[rule.scope][rule.pattern] = rule
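# Usage sketch (illustrative, not from the original source):
# rules = RuleCollection()
# rules.add(Rule('key', 'read', 'foo'))
# rules.add(Rule('operator', 'read'))
# len(rules) == 2; adding Rule('key', 'write', 'foo') now raises ValueError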
def get_consul_client(configuration):
"""
Gets a Consul client for the given configuration.
Does not check if the Consul client can connect.
:param configuration: the run configuration
:return: Consul client
"""
token = configuration.management_token
if token is None:
token = configuration.token
if token is None:
raise AssertionError("Expecting the management token to always be set")
return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
verify=configuration.validate_certs, token=token)
def check_dependencies():
"""
Checks that the required dependencies have been imported.
:exception ImportError: if it is detected that any of the required dependencies have not been imported
"""
if not python_consul_installed:
raise ImportError("python-consul required for this module. "
"See: https://python-consul.readthedocs.io/en/latest/#installation")
if not pyhcl_installed:
raise ImportError("pyhcl required for this module. "
"See: https://pypi.org/project/pyhcl/")
if not has_requests:
raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
def main():
"""
Main method.
"""
module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
try:
check_dependencies()
except ImportError as e:
module.fail_json(msg=str(e))
configuration = Configuration(
management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
host=module.params.get(HOST_PARAMETER_NAME),
scheme=module.params.get(SCHEME_PARAMETER_NAME),
validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
name=module.params.get(NAME_PARAMETER_NAME),
port=module.params.get(PORT_PARAMETER_NAME),
rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
state=module.params.get(STATE_PARAMETER_NAME),
token=module.params.get(TOKEN_PARAMETER_NAME),
token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
)
consul_client = get_consul_client(configuration)
try:
if configuration.state == PRESENT_STATE_VALUE:
output = set_acl(consul_client, configuration)
else:
output = remove_acl(consul_client, configuration)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
configuration.host, configuration.port, str(e)))
raise
return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
if output.rules is not None:
return_values["rules"] = encode_rules_as_json(output.rules)
module.exit_json(**return_values)
if __name__ == "__main__":
main()
| gpl-3.0 |
flackr/quickopen | src/prelaunch_test.py | 2 | 1798 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import prelaunch_client
import os
import subprocess
import temporary_daemon
import unittest
import message_loop
from quickopen_test_base import QuickopenTestBase
class PrelaunchTest(unittest.TestCase, QuickopenTestBase):
def setUp(self):
unittest.TestCase.setUp(self)
QuickopenTestBase.setUp(self)
self.daemon = temporary_daemon.TemporaryDaemon()
def qo(self, cmd, *args):
quickopen_script = os.path.join(os.path.dirname(__file__), "../quickopen")
assert os.path.exists(quickopen_script)
full_args = [quickopen_script,
"--host=%s" % self.daemon.host,
"--port=%s" % str(self.daemon.port),
"--no_auto_start",
'prelaunch',
cmd]
full_args.extend(args)
proc = subprocess.Popen(full_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if len(stderr):
print "Error during %s:\n%s\n\n" % (args, stderr)
return stdout
def turn_off_daemon(self):
self.daemon.close()
def tearDown(self):
unittest.TestCase.tearDown(self)
QuickopenTestBase.tearDown(self)
self.daemon.close()
| apache-2.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/slim/python/slim/summaries.py | 63 | 7497 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions for creating summaries.
This module contains various helper functions for quickly and easily adding
tensorflow summaries. These allow users to print summary values
automatically as they are computed and add prefixes to collections of summaries.
Example usage:
import tensorflow as tf
slim = tf.contrib.slim
slim.summaries.add_histogram_summaries(slim.variables.get_model_variables())
slim.summaries.add_scalar_summary(total_loss, 'Total Loss')
slim.summaries.add_scalar_summary(learning_rate, 'Learning Rate')
slim.summaries.add_histogram_summaries(my_tensors)
slim.summaries.add_zero_fraction_summaries(my_tensors)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn_impl as nn
from tensorflow.python.summary import summary
def _get_summary_name(tensor, name=None, prefix=None, postfix=None):
"""Produces the summary name given.
Args:
tensor: A variable or op `Tensor`.
name: The optional name for the summary.
prefix: An optional prefix for the summary name.
postfix: An optional postfix for the summary name.
Returns:
a summary name.
"""
if not name:
name = tensor.op.name
if prefix:
name = prefix + '/' + name
if postfix:
name = name + '/' + postfix
return name
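# Sketch of the name composition above (illustrative tensor name): a tensor
# whose op name is 'tower_0/loss', with prefix='eval' and postfix='mean',
# yields the summary name 'eval/tower_0/loss/mean'.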
def add_histogram_summary(tensor, name=None, prefix=None):
"""Adds a histogram summary for the given tensor.
Args:
tensor: A variable or op tensor.
name: The optional name for the summary.
prefix: An optional prefix for the summary names.
Returns:
A scalar `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
return summary.histogram(
_get_summary_name(tensor, name, prefix), tensor)
def add_image_summary(tensor, name=None, prefix=None, print_summary=False):
"""Adds an image summary for the given tensor.
Args:
tensor: a variable or op tensor with shape [batch,height,width,channels]
name: the optional name for the summary.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
An image `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
summary_name = _get_summary_name(tensor, name, prefix)
# If print_summary, then we need to make sure that this call doesn't add the
# non-printing op to the collection. We'll add it to the collection later.
collections = [] if print_summary else None
op = summary.image(
name=summary_name, tensor=tensor, collections=collections)
if print_summary:
op = logging_ops.Print(op, [tensor], summary_name)
ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
return op
def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False):
"""Adds a scalar summary for the given tensor.
Args:
tensor: a variable or op tensor.
name: the optional name for the summary.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
A scalar `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
collections = [] if print_summary else None
summary_name = _get_summary_name(tensor, name, prefix)
# If print_summary, then we need to make sure that this call doesn't add the
# non-printing op to the collection. We'll add it to the collection later.
op = summary.scalar(
name=summary_name, tensor=tensor, collections=collections)
if print_summary:
op = logging_ops.Print(op, [tensor], summary_name)
ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
return op
def add_zero_fraction_summary(tensor, name=None, prefix=None,
print_summary=False):
"""Adds a summary for the percentage of zero values in the given tensor.
Args:
tensor: a variable or op tensor.
name: the optional name for the summary.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
A scalar `Tensor` of type `string` whose contents are the serialized
`Summary` protocol buffer.
"""
name = _get_summary_name(tensor, name, prefix, 'Fraction of Zero Values')
tensor = nn.zero_fraction(tensor)
return add_scalar_summary(tensor, name, print_summary=print_summary)
def add_histogram_summaries(tensors, prefix=None):
"""Adds a histogram summary for each of the given tensors.
Args:
tensors: A list of variable or op tensors.
prefix: An optional prefix for the summary names.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_histogram_summary(tensor, prefix=prefix))
return summary_ops
def add_image_summaries(tensors, prefix=None):
"""Adds an image summary for each of the given tensors.
Args:
tensors: A list of variable or op tensors.
prefix: An optional prefix for the summary names.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_image_summary(tensor, prefix=prefix))
return summary_ops
def add_scalar_summaries(tensors, prefix=None, print_summary=False):
"""Adds a scalar summary for each of the given tensors.
Args:
tensors: a list of variable or op tensors.
prefix: An optional prefix for the summary names.
print_summary: If `True`, the summary is printed to stdout when the summary
is computed.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_scalar_summary(tensor, prefix=prefix,
print_summary=print_summary))
return summary_ops
def add_zero_fraction_summaries(tensors, prefix=None):
"""Adds a scalar zero-fraction summary for each of the given tensors.
Args:
tensors: a list of variable or op tensors.
prefix: An optional prefix for the summary names.
Returns:
A list of scalar `Tensors` of type `string` whose contents are the
serialized `Summary` protocol buffer.
"""
summary_ops = []
for tensor in tensors:
summary_ops.append(add_zero_fraction_summary(tensor, prefix=prefix))
return summary_ops
| mit |
joshuajan/odoo | addons/l10n_in_hr_payroll/l10n_in_hr_payroll.py | 39 | 13980 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from calendar import isleap
from openerp.tools.translate import _
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
DATETIME_FORMAT = "%Y-%m-%d"
class hr_contract(osv.osv):
"""
Employee contract allows to add different values in fields.
Fields are used in salary rule computation.
"""
_inherit = 'hr.contract'
_description = 'HR Contract'
_columns = {
'tds': fields.float('TDS', digits_compute=dp.get_precision('Payroll'), help="Amount for Tax Deduction at Source"),
'driver_salay': fields.boolean('Driver Salary', help="Check this box if you provide allowance for driver"),
'medical_insurance': fields.float('Medical Insurance', digits_compute=dp.get_precision('Payroll'), help="Deduction towards company provided medical insurance"),
'voluntary_provident_fund': fields.float('Voluntary Provident Fund (%)', digits_compute=dp.get_precision('Payroll'), help="VPF is a safe option wherein you can contribute more than the PF ceiling of 12% that has been mandated by the government and VPF computed as percentage(%)"),
'house_rent_allowance_metro_nonmetro': fields.float('House Rent Allowance (%)', digits_compute=dp.get_precision('Payroll'), help="HRA is an allowance given by the employer to the employee for taking care of his rental or accommodation expenses for metro city it is 50% and for non metro 40%. \nHRA computed as percentage(%)"),
'supplementary_allowance': fields.float('Supplementary Allowance', digits_compute=dp.get_precision('Payroll')),
}
class payroll_advice(osv.osv):
'''
Bank Advice
'''
_name = 'hr.payroll.advice'
_description = 'Bank Advice'
_columns = {
'name':fields.char('Name', size=32, readonly=True, required=True, states={'draft': [('readonly', False)]},),
'note': fields.text('Description'),
'date': fields.date('Date', readonly=True, required=True, states={'draft': [('readonly', False)]}, help="Advice Date is used to search Payslips"),
'state':fields.selection([
('draft', 'Draft'),
('confirm', 'Confirmed'),
('cancel', 'Cancelled'),
], 'Status', select=True, readonly=True),
'number':fields.char('Reference', size=16, readonly=True),
'line_ids':fields.one2many('hr.payroll.advice.line', 'advice_id', 'Employee Salary', states={'draft': [('readonly', False)]}, readonly=True),
'chaque_nos':fields.char('Cheque Numbers', size=256),
'neft': fields.boolean('NEFT Transaction', help="Check this box if your company use online transfer for salary"),
'company_id':fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'bank_id':fields.many2one('res.bank', 'Bank', readonly=True, states={'draft': [('readonly', False)]}, help="Select the Bank from which the salary is going to be paid"),
'batch_id': fields.many2one('hr.payslip.run', 'Batch', readonly=True)
}
_defaults = {
'date': lambda * a: time.strftime('%Y-%m-%d'),
'state': lambda * a: 'draft',
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
'note': "Please make the payroll transfer from above account number to the below mentioned account numbers towards employee salaries:"
}
def compute_advice(self, cr, uid, ids, context=None):
"""
Advice - Create Advice lines in Payment Advice and
compute Advice lines.
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Advice’s IDs
@return: Advice lines
@param context: A standard dictionary for contextual values
"""
payslip_pool = self.pool.get('hr.payslip')
advice_line_pool = self.pool.get('hr.payroll.advice.line')
payslip_line_pool = self.pool.get('hr.payslip.line')
for advice in self.browse(cr, uid, ids, context=context):
old_line_ids = advice_line_pool.search(cr, uid, [('advice_id', '=', advice.id)], context=context)
if old_line_ids:
advice_line_pool.unlink(cr, uid, old_line_ids, context=context)
slip_ids = payslip_pool.search(cr, uid, [('date_from', '<=', advice.date), ('date_to', '>=', advice.date), ('state', '=', 'done')], context=context)
for slip in payslip_pool.browse(cr, uid, slip_ids, context=context):
if not slip.employee_id.bank_account_id and not slip.employee_id.bank_account_id.acc_number:
raise osv.except_osv(_('Error!'), _('Please define bank account for the %s employee') % (slip.employee_id.name))
line_ids = payslip_line_pool.search(cr, uid, [ ('slip_id', '=', slip.id), ('code', '=', 'NET')], context=context)
if line_ids:
line = payslip_line_pool.browse(cr, uid, line_ids, context=context)[0]
advice_line = {
'advice_id': advice.id,
'name': slip.employee_id.bank_account_id.acc_number,
'employee_id': slip.employee_id.id,
'bysal': line.total
}
advice_line_pool.create(cr, uid, advice_line, context=context)
payslip_pool.write(cr, uid, slip_ids, {'advice_id': advice.id}, context=context)
return True
def confirm_sheet(self, cr, uid, ids, context=None):
"""
confirm Advice - confirmed Advice after computing Advice Lines..
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of confirm Advice’s IDs
@return: confirmed Advice lines and set sequence of Advice.
@param context: A standard dictionary for contextual values
"""
seq_obj = self.pool.get('ir.sequence')
for advice in self.browse(cr, uid, ids, context=context):
if not advice.line_ids:
raise osv.except_osv(_('Error!'), _('You can not confirm Payment advice without advice lines.'))
advice_date = datetime.strptime(advice.date, DATETIME_FORMAT)
advice_year = advice_date.strftime('%m') + '-' + advice_date.strftime('%Y')
number = seq_obj.get(cr, uid, 'payment.advice')
sequence_num = 'PAY' + '/' + advice_year + '/' + number
self.write(cr, uid, [advice.id], {'number': sequence_num, 'state': 'confirm'}, context=context)
return True
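# Illustrative example of the reference built above (values assumed): an
# advice dated 2014-07-15 whose sequence yields '00042' is numbered
# 'PAY/07-2014/00042'.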
def set_to_draft(self, cr, uid, ids, context=None):
"""Resets Advice as draft.
"""
return self.write(cr, uid, ids, {'state':'draft'}, context=context)
def cancel_sheet(self, cr, uid, ids, context=None):
"""Marks Advice as cancelled.
"""
return self.write(cr, uid, ids, {'state':'cancel'}, context=context)
def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
res = {}
if company_id:
company = self.pool.get('res.company').browse(cr, uid, [company_id], context=context)[0]
if company.partner_id.bank_ids:
res.update({'bank_id': company.partner_id.bank_ids[0].bank.id})
return {
'value':res
}
class hr_payslip_run(osv.osv):
_inherit = 'hr.payslip.run'
_description = 'Payslip Batches'
_columns = {
'available_advice': fields.boolean('Made Payment Advice?', help="If this box is checked which means that Payment Advice exists for current batch", readonly=False),
}
def copy(self, cr, uid, id, default={}, context=None):
if not default:
default = {}
default.update({'available_advice': False})
return super(hr_payslip_run, self).copy(cr, uid, id, default, context=context)
def draft_payslip_run(self, cr, uid, ids, context=None):
res = super(hr_payslip_run, self).draft_payslip_run(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'available_advice': False}, context=context)
return res
def create_advice(self, cr, uid, ids, context=None):
payslip_pool = self.pool.get('hr.payslip')
payslip_line_pool = self.pool.get('hr.payslip.line')
advice_pool = self.pool.get('hr.payroll.advice')
advice_line_pool = self.pool.get('hr.payroll.advice.line')
users = self.pool.get('res.users').browse(cr, uid, [uid], context=context)
for run in self.browse(cr, uid, ids, context=context):
if run.available_advice:
raise osv.except_osv(_('Error!'), _("Payment advice already exists for %s, 'Set to Draft' to create a new advice.") %(run.name))
advice_data = {
'batch_id': run.id,
'company_id': users[0].company_id.id,
'name': run.name,
'date': run.date_end,
'bank_id': users[0].company_id.bank_ids and users[0].company_id.bank_ids[0].id or False
}
advice_id = advice_pool.create(cr, uid, advice_data, context=context)
slip_ids = []
for slip_id in run.slip_ids:
# TODO is it necessary to interleave the calls ?
payslip_pool.signal_hr_verify_sheet(cr, uid, [slip_id.id])
payslip_pool.signal_process_sheet(cr, uid, [slip_id.id])
slip_ids.append(slip_id.id)
for slip in payslip_pool.browse(cr, uid, slip_ids, context=context):
if not slip.employee_id.bank_account_id or not slip.employee_id.bank_account_id.acc_number:
raise osv.except_osv(_('Error!'), _('Please define bank account for the %s employee') % (slip.employee_id.name))
line_ids = payslip_line_pool.search(cr, uid, [('slip_id', '=', slip.id), ('code', '=', 'NET')], context=context)
if line_ids:
line = payslip_line_pool.browse(cr, uid, line_ids, context=context)[0]
advice_line = {
'advice_id': advice_id,
'name': slip.employee_id.bank_account_id.acc_number,
'employee_id': slip.employee_id.id,
'bysal': line.total
}
advice_line_pool.create(cr, uid, advice_line, context=context)
return self.write(cr, uid, ids, {'available_advice' : True})
class payroll_advice_line(osv.osv):
'''
Bank Advice Lines
'''
def onchange_employee_id(self, cr, uid, ids, employee_id=False, context=None):
res = {}
hr_obj = self.pool.get('hr.employee')
if not employee_id:
return {'value': res}
employee = hr_obj.browse(cr, uid, [employee_id], context=context)[0]
res.update({'name': employee.bank_account_id.acc_number , 'ifsc_code': employee.bank_account_id.bank_bic or ''})
return {'value': res}
_name = 'hr.payroll.advice.line'
_description = 'Bank Advice Lines'
_columns = {
'advice_id': fields.many2one('hr.payroll.advice', 'Bank Advice'),
'name': fields.char('Bank Account No.', size=25, required=True),
'ifsc_code': fields.char('IFSC Code', size=16),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'bysal': fields.float('By Salary', digits_compute=dp.get_precision('Payroll')),
'debit_credit': fields.char('C/D', size=3, required=False),
'company_id': fields.related('advice_id', 'company_id', type='many2one', required=False, relation='res.company', string='Company', store=True),
'ifsc': fields.related('advice_id', 'neft', type='boolean', string='IFSC'),
}
_defaults = {
'debit_credit': 'C',
}
class hr_payslip(osv.osv):
'''
Employee Pay Slip
'''
_inherit = 'hr.payslip'
_description = 'Pay Slips'
_columns = {
'advice_id': fields.many2one('hr.payroll.advice', 'Bank Advice')
}
def copy(self, cr, uid, id, default={}, context=None):
if not default:
default = {}
default.update({'advice_id' : False})
return super(hr_payslip, self).copy(cr, uid, id, default, context=context)
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'dearness_allowance': fields.boolean('Dearness Allowance', help="Check this box if your company provide Dearness Allowance to employee")
}
_defaults = {
'dearness_allowance': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
IONISx/edx-platform | lms/envs/bok_choy.py | 11 | 6630 | """
Settings for Bok Choy tests that are used when running LMS.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
from the same directory.
"""
import os
from path import Path as path
from tempfile import mkdtemp
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
CONFIG_ROOT = path(__file__).abspath().dirname()
TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root"
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy'
os.environ['CONFIG_ROOT'] = CONFIG_ROOT
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
######################### Testing overrides ####################################
# Needed for the reset database management command
INSTALLED_APPS += ('django_extensions',)
# Redirect to the test_root folder within the repo
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = (
(TEST_ROOT / "staticfiles" / "lms").abspath(),
)
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
# Don't use compression during tests
PIPELINE_JS_COMPRESSOR = None
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
###################### Grade Downloads ######################
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': os.path.join(mkdtemp(), 'edx-s3', 'grades'),
}
# Configure the LMS to use our stub XQueue implementation
XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
# Configure the LMS to use our stub ORA implementation
OPEN_ENDED_GRADING_INTERFACE['url'] = 'http://localhost:8041/'
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8042/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8042/api/v1'
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('dd.dogapi', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable Course Discovery
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
########################### Entrance Exams #################################
FEATURES['MILESTONES_APP'] = True
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
############################# SECURITY SETTINGS ################################
# Default to advanced security in common.py, so tests can reset here to use
# a simpler security model
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
FEATURES['ENABLE_MOBILE_REST_API'] = True  # Enable the mobile REST API
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Show video bumper in LMS
FEATURES['SHOW_BUMPER_PERIODICITY'] = 1
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
# Enable courseware search for tests
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
# Enable dashboard search for tests
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
# Set dummy values for profile image settings.
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
| agpl-3.0 |
robinro/ansible | contrib/inventory/apstra_aos.py | 17 | 20303 | #!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
- Copy both files (.py and .ini) in your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
Two modes are currently supported: **device based** or **blueprint based**:
- For **Device based**, the list of devices is taken from the global device list
the serial ID will be used as the inventory_hostname
- For **Blueprint based**, the list of devices is taken from the given blueprint
the Node name will be used as the inventory_hostname
Input parameters can be provided either via the ini file or via Environment Variables:
The following list of Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the Environment Variables
Tested with Apstra AOS 1.1
This script has been inspired by the cobbler.py inventory. thanks
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import os
import sys
import argparse
import re
try:
import json
except ImportError:
import simplejson as json
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
# Initialize inventory
self.inventory = dict() # A dict of groups and the hosts in each group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
# 2 modes are supported: device based or blueprint based
# - For device based, the list of device is taken from the global device list
# the serial ID will be used as the inventory_hostname
# - For Blueprint based, the list of device is taken from the given blueprint
# the Node name will be used as the inventory_hostname
# ----------------------------------------------------
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
# Check for additional attributes to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
# each link has 2 sides [0,1], and it's unknown which one matches this device
# at first we assume the first side (0) matches and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
# if we haven't matched the first time, prepare the peer_id
# for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
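# Hedged illustration (made-up values) of the per-device structure built
# above and exposed as the 'interfaces' hostvar:
#
#     interfaces = {
#         'swp1': {'name': 'swp1', 'ip': '10.0.0.1/31',
#                  'peer': 'spine1', 'peer_ip': '10.0.0.0/31',
#                  'peer_type': 'network'},
#     }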
else:
for device in aos.Devices:
# If not reachable, create by key;
# if reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
# Check if device is associated with a blueprint
# if it is, create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_server = None
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
# Try to read all parameters from the config file; if not available, try the ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except (configparser.NoSectionError, configparser.NoOptionError):
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
try:
self.aos_server_port = config.get('aos', 'port')
except (configparser.NoSectionError, configparser.NoOptionError):
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
try:
self.aos_username = config.get('aos', 'username')
except (configparser.NoSectionError, configparser.NoOptionError):
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
try:
self.aos_password = config.get('aos', 'password')
except (configparser.NoSectionError, configparser.NoOptionError):
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except (configparser.NoSectionError, configparser.NoOptionError):
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except (configparser.NoSectionError, configparser.NoOptionError):
pass
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
# Check if the host exist, if not initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
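# Sketch (this helper is not part of the original script): the group
# initialization duplicated in add_host_to_group and add_var_to_group
# could be factored out as:
#
#     def _ensure_group(self, group):
#         clean_group = self.cleanup_group_name(group)
#         if clean_group not in self.inventory:
#             self.inventory[clean_group] = {'hosts': [], 'vars': {}}
#         return self.inventory[clean_group]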
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
Clean up a group name by:
- Replacing all non-alphanumeric characters with underscores
- Converting to lowercase
"""
rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
return clean_group
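# Worked example (hypothetical input): cleanup_group_name('Ubuntu GNU/Linux')
# -> 'ubuntu_gnu_linux' (the \W+ regex collapses the space and slash into
# single underscores before lowercasing).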
def add_device_status_to_var(self, device_name, device):
if 'status' in device.value.keys():
for key, value in device.value['status'].items():
self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()
| gpl-3.0 |
FirmlyReality/docklet | web/webViews/cluster.py | 2 | 15450 | from flask import session, redirect, request
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
from webViews.dashboard import *
from webViews.checkname import checkname
import time, re
class addClusterView(normalView):
template_path = "addCluster.html"
@classmethod
def get(self):
masterips = dockletRequest.post_to_all()
images = dockletRequest.post("/image/list/",{},masterips[0].split("@")[0]).get("images")
desc = dockletRequest.getdesc(masterips[0].split("@")[1])
result = dockletRequest.post("/user/usageQuery/")
quota = result.get("quota")
usage = result.get("usage")
default = result.get("default")
restcpu = int(quota['cpu']) - int(usage['cpu'])
restmemory = int(quota['memory']) - int(usage['memory'])
restdisk = int(quota['disk']) - int(usage['disk'])
if restcpu >= int(default['cpu']):
defaultcpu = default['cpu']
elif restcpu <= 0:
defaultcpu = "0"
else:
defaultcpu = str(restcpu)
if restmemory >= int(default['memory']):
defaultmemory = default['memory']
elif restmemory <= 0:
defaultmemory = "0"
else:
defaultmemory = str(restmemory)
if restdisk >= int(default['disk']):
defaultdisk = default['disk']
elif restdisk <= 0:
defaultdisk = "0"
else:
defaultdisk = str(restdisk)
defaultsetting = {
'cpu': defaultcpu,
'memory': defaultmemory,
'disk': defaultdisk
}
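# The cpu/memory/disk clamping above (and its duplicate in configView
# further down) reduces to one expression per resource; a hedged
# refactoring sketch:
#
#     def clamp(rest, default):
#         # default if it fits, "0" if the quota is exhausted, else the rest
#         return str(min(max(rest, 0), int(default)))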
if (result):
return self.render(self.template_path, user = session['username'],masterips = masterips, images = images, quota = quota, usage = usage, defaultsetting = defaultsetting, masterdesc=desc)
else:
self.error()
class createClusterView(normalView):
template_path = "dashboard.html"
error_path = "error.html"
@classmethod
def post(self):
masterip = self.masterip
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
checkname(self.clustername)
data = {
"clustername": self.clustername,
'imagename': self.image[:index2],
'imageowner': self.image[index2+1:index1],
'imagetype': self.image[index1+1:],
}
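# Hedged illustration of the rindex-based split above: an image string of
# the assumed form "<name>_<owner>_<type>", e.g. "base_alice_private",
# yields imagename="base", imageowner="alice", imagetype="private".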
result = dockletRequest.post("/cluster/create/", dict(data, **(request.form)), masterip)
if(result.get('success', None) == "true"):
return redirect("/dashboard/")
#return self.render(self.template_path, user = session['username'])
else:
return self.render(self.error_path, message = result.get('message'))
class descriptionMasterView(normalView):
template_path = "description.html"
@classmethod
def get(self):
return self.render(self.template_path, description=self.desc)
class descriptionImageView(normalView):
template_path = "description.html"
@classmethod
def get(self):
masterip = self.masterip
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"imagename": self.image[:index2],
"imageowner": self.image[index2+1:index1],
"imagetype": self.image[index1+1:]
}
result = dockletRequest.post("/image/description/", data, masterip)
if(result):
description = result.get("message")
return self.render(self.template_path, description = description)
else:
self.error()
class scaleoutView(normalView):
error_path = "error.html"
@classmethod
def post(self):
masterip = self.masterip
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"clustername": self.clustername,
'imagename': self.image[:index2],
'imageowner': self.image[index2+1:index1],
'imagetype': self.image[index1+1:]
}
result = dockletRequest.post("/cluster/scaleout/", dict(data, **(request.form)), masterip)
if(result.get('success', None) == "true"):
return redirect("/config/")
else:
return self.render(self.error_path, message = result.get('message'))
class scaleinView(normalView):
error_path = "error.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"clustername": self.clustername,
"containername":self.containername
}
result = dockletRequest.post("/cluster/scalein/", data, masterip)
if(result.get('success', None) == "true"):
return redirect("/config/")
else:
return self.render(self.error_path, message = result.get('message'))
class listClusterView(normalView):
template_path = "listCluster.html"
@classmethod
def get(self):
masterip = self.masterip
result = dockletRequest.post("/cluster/list/", {}, masterip)
clusters = result.get("clusters")
if(result):
return self.render(self.template_path, user = session['username'], clusters = clusters)
else:
self.error()
class startClusterView(normalView):
template_path = "dashboard.html"
error_path = "error.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/start/", data, masterip)
if(result.get('success', None) == "true"):
return redirect("/dashboard/")
#return self.render(self.template_path, user = session['username'])
else:
return self.render(self.error_path, message = result.get('message'))
class stopClusterView(normalView):
template_path = "dashboard.html"
error_path = "error.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/stop/", data, masterip)
if(result.get('success', None) == "true"):
return redirect("/dashboard/")
else:
return self.render(self.error_path, message = result.get('message'))
class flushClusterView(normalView):
success_path = "opsuccess.html"
failed_path = "opfailed.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername,
"from_lxc": self.containername
}
result = dockletRequest.post("/cluster/flush/", data)
if(result):
if result.get('success') == "true":
return self.render(self.success_path, user = session['username'])
else:
return self.render(self.failed_path, user = session['username'])
else:
self.error()
class deleteClusterView(normalView):
template_path = "dashboard.html"
error_path = "error.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/delete/", data, masterip)
if(result.get('success', None) == "true"):
return redirect("/dashboard/")
else:
return self.render(self.error_path, message = result.get('message'))
class detailClusterView(normalView):
template_path = "listcontainer.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/info/", data, masterip)
if(result):
message = result.get('message')
containers = message['containers']
status = message['status']
return self.render(self.template_path, containers = containers, user = session['username'], clustername = self.clustername, status = status)
else:
self.error()
class saveImageView(normalView):
template_path = "saveconfirm.html"
success_path = "opsuccess.html"
error_path = "error.html"
@classmethod
def post(self):
masterip = self.masterip
data = {
"clustername": self.clustername,
"image": self.imagename,
"containername": self.containername,
"description": self.description,
"isforce": self.isforce
}
result = dockletRequest.post("/cluster/save/", data, masterip)
if(result):
if result.get('success') == 'true':
#return self.render(self.success_path, user = session['username'])
return redirect("/config/")
#res = detailClusterView()
#res.clustername = self.clustername
#return res.as_view()
else:
if result.get('reason') == "exists":
return self.render(self.template_path, containername = self.containername, clustername = self.clustername, image = self.imagename, user = session['username'], description = self.description, masterip=masterip)
else:
return self.render(self.error_path, message = result.get('message'))
else:
self.error()
class shareImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"image": self.image
}
result = dockletRequest.post("/image/share/", data, masterip)
if(result):
return redirect("/config/")
else:
self.error()
class unshareImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"image": self.image
}
result = dockletRequest.post("/image/unshare/", data, masterip)
if(result):
return redirect("/config/")
else:
self.error()
class copyImageView(normalView):
error_path = "error.html"
@classmethod
def post(self):
masterip = self.masterip
data = {
"image": self.image,
"target": self.target
}
result = dockletRequest.post("/image/copy/", data, masterip)
if result:
if result.get('success') == 'true':
return redirect("/config/")
else:
return self.render(self.error_path,message=result.get('message'))
else:
self.error()
class deleteImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
masterip = self.masterip
data = {
"image": self.image
}
result = dockletRequest.post("/image/delete/", data, masterip)
if(result):
return redirect("/config/")
else:
self.error()
class addproxyView(normalView):
@classmethod
def post(self):
masterip = self.masterip
data = {
"clustername": self.clustername,
"ip": self.ip,
"port": self.port
}
result = dockletRequest.post("/addproxy/", data, masterip)
if(result):
return redirect("/config/")
else:
self.error()
class deleteproxyView(normalView):
@classmethod
def get(self):
masterip = self.masterip
data = {
"clustername":self.clustername
}
result = dockletRequest.post("/deleteproxy/", data, masterip)
if(result):
return redirect("/config/")
else:
self.error()
@classmethod
def post(self):
return self.get()
class configView(normalView):
@classmethod
def get(self):
masterips = dockletRequest.post_to_all()
allimages = dockletRequest.post_to_all('/image/list/')
for master in allimages:
allimages[master] = allimages[master].get('images')
allclusters = dockletRequest.post_to_all("/cluster/list/")
for master in allclusters:
allclusters[master] = allclusters[master].get('clusters')
allclusters_info = {}
clusters_info = {}
data={}
for master in allclusters:
allclusters_info[master] = {}
for cluster in allclusters[master]:
data["clustername"] = cluster
result = dockletRequest.post("/cluster/info/", data, master.split("@")[0]).get("message")
allclusters_info[master][cluster] = result
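# Hedged shape of the structure built above (keys are the "<ip>@<desc>"
# strings returned by post_to_all; values map cluster names to info dicts):
#
#     allclusters_info = {'10.1.0.2@master1': {'cluster1': {...}}, ...}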
result = dockletRequest.post("/user/usageQuery/")
quota = result.get("quota")
usage = result.get("usage")
default = result.get("default")
restcpu = int(quota['cpu']) - int(usage['cpu'])
restmemory = int(quota['memory']) - int(usage['memory'])
restdisk = int(quota['disk']) - int(usage['disk'])
if restcpu >= int(default['cpu']):
defaultcpu = default['cpu']
elif restcpu <= 0:
defaultcpu = "0"
else:
defaultcpu = str(restcpu)
if restmemory >= int(default['memory']):
defaultmemory = default['memory']
elif restmemory <= 0:
defaultmemory = "0"
else:
defaultmemory = str(restmemory)
if restdisk >= int(default['disk']):
defaultdisk = default['disk']
elif restdisk <= 0:
defaultdisk = "0"
else:
defaultdisk = str(restdisk)
defaultsetting = {
'cpu': defaultcpu,
'memory': defaultmemory,
'disk': defaultdisk
}
return self.render("config.html", allimages = allimages, allclusters = allclusters_info, mysession=dict(session), quota = quota, usage = usage, defaultsetting = defaultsetting, masterips = masterips)
@classmethod
def post(self):
return self.get()
class addPortMappingView(normalView):
template_path = "error.html"
@classmethod
def post(self):
data = {"clustername":request.form["clustername"],"node_name":request.form["node_name"],"node_ip":request.form["node_ip"],"node_port":request.form["node_port"]}
result = dockletRequest.post('/port_mapping/add/',data, self.masterip)
success = result.get("success")
if success == "true":
return redirect("/config/")
else:
return self.render(self.template_path, message = result.get("message"))
@classmethod
def get(self):
return self.post()
class delPortMappingView(normalView):
template_path = "error.html"
@classmethod
def post(self):
data = {"clustername":self.clustername,"node_name":self.node_name,"node_port":self.node_port}
result = dockletRequest.post('/port_mapping/delete/',data, self.masterip)
success = result.get("success")
if success == "true":
return redirect("/config/")
else:
return self.render(self.template_path, message = result.get("message"))
@classmethod
def get(self):
return self.post()
| bsd-3-clause |
ty707/airflow | airflow/migrations/versions/1968acfc09e3_add_is_encrypted_column_to_variable_.py | 62 | 1058 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add is_encrypted column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean,default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
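# Rough DDL equivalent of this migration (sketch; exact SQL depends on the
# dialect). Note that default=False is a client-side SQLAlchemy default and
# does not emit a server-side DEFAULT clause:
#
#     -- upgrade
#     ALTER TABLE variable ADD COLUMN is_encrypted BOOLEAN;
#     -- downgrade
#     ALTER TABLE variable DROP COLUMN is_encrypted;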
| apache-2.0 |
mAzurkovic/flask | tests/test_config.py | 139 | 5543 | # -*- coding: utf-8 -*-
"""
tests.test_config
~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Flask Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import flask
# config keys used for the TestConfig
TEST_KEY = 'foo'
SECRET_KEY = 'devkey'
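# Flask's from_object()/from_pyfile() only pick up UPPERCASE names, which is
# why the keys above are uppercase; a lowercase module-level name such as
# ignored_key = 'bar' (hypothetical) would not appear in app.config.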
def common_object_test(app):
assert app.secret_key == 'devkey'
assert app.config['TEST_KEY'] == 'foo'
assert 'TestConfig' not in app.config
def test_config_from_file():
app = flask.Flask(__name__)
app.config.from_pyfile(__file__.rsplit('.', 1)[0] + '.py')
common_object_test(app)
def test_config_from_object():
app = flask.Flask(__name__)
app.config.from_object(__name__)
common_object_test(app)
def test_config_from_json():
app = flask.Flask(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
app.config.from_json(os.path.join(current_dir, 'static', 'config.json'))
common_object_test(app)
def test_config_from_mapping():
app = flask.Flask(__name__)
app.config.from_mapping({
'SECRET_KEY': 'devkey',
'TEST_KEY': 'foo'
})
common_object_test(app)
app = flask.Flask(__name__)
app.config.from_mapping([
('SECRET_KEY', 'devkey'),
('TEST_KEY', 'foo')
])
common_object_test(app)
app = flask.Flask(__name__)
app.config.from_mapping(
SECRET_KEY='devkey',
TEST_KEY='foo'
)
common_object_test(app)
app = flask.Flask(__name__)
with pytest.raises(TypeError):
app.config.from_mapping(
{}, {}
)
def test_config_from_class():
class Base(object):
TEST_KEY = 'foo'
class Test(Base):
SECRET_KEY = 'devkey'
app = flask.Flask(__name__)
app.config.from_object(Test)
common_object_test(app)
def test_config_from_envvar():
env = os.environ
try:
os.environ = {}
app = flask.Flask(__name__)
try:
app.config.from_envvar('FOO_SETTINGS')
except RuntimeError as e:
assert "'FOO_SETTINGS' is not set" in str(e)
else:
assert 0, 'expected exception'
assert not app.config.from_envvar('FOO_SETTINGS', silent=True)
os.environ = {'FOO_SETTINGS': __file__.rsplit('.', 1)[0] + '.py'}
assert app.config.from_envvar('FOO_SETTINGS')
common_object_test(app)
finally:
os.environ = env
def test_config_from_envvar_missing():
env = os.environ
try:
os.environ = {'FOO_SETTINGS': 'missing.cfg'}
try:
app = flask.Flask(__name__)
app.config.from_envvar('FOO_SETTINGS')
except IOError as e:
msg = str(e)
assert msg.startswith('[Errno 2] Unable to load configuration '
'file (No such file or directory):')
assert msg.endswith("missing.cfg'")
else:
assert False, 'expected IOError'
assert not app.config.from_envvar('FOO_SETTINGS', silent=True)
finally:
os.environ = env
def test_config_missing():
app = flask.Flask(__name__)
try:
app.config.from_pyfile('missing.cfg')
except IOError as e:
msg = str(e)
assert msg.startswith('[Errno 2] Unable to load configuration '
'file (No such file or directory):')
assert msg.endswith("missing.cfg'")
else:
assert 0, 'expected config'
assert not app.config.from_pyfile('missing.cfg', silent=True)
def test_config_missing_json():
app = flask.Flask(__name__)
try:
app.config.from_json('missing.json')
except IOError as e:
msg = str(e)
assert msg.startswith('[Errno 2] Unable to load configuration '
'file (No such file or directory):')
assert msg.endswith("missing.json'")
else:
assert 0, 'expected config'
assert not app.config.from_json('missing.json', silent=True)
def test_custom_config_class():
class Config(flask.Config):
pass
class Flask(flask.Flask):
config_class = Config
app = Flask(__name__)
assert isinstance(app.config, Config)
app.config.from_object(__name__)
common_object_test(app)
def test_session_lifetime():
app = flask.Flask(__name__)
app.config['PERMANENT_SESSION_LIFETIME'] = 42
assert app.permanent_session_lifetime.seconds == 42
def test_get_namespace():
app = flask.Flask(__name__)
app.config['FOO_OPTION_1'] = 'foo option 1'
app.config['FOO_OPTION_2'] = 'foo option 2'
app.config['BAR_STUFF_1'] = 'bar stuff 1'
app.config['BAR_STUFF_2'] = 'bar stuff 2'
foo_options = app.config.get_namespace('FOO_')
assert 2 == len(foo_options)
assert 'foo option 1' == foo_options['option_1']
assert 'foo option 2' == foo_options['option_2']
bar_options = app.config.get_namespace('BAR_', lowercase=False)
assert 2 == len(bar_options)
assert 'bar stuff 1' == bar_options['STUFF_1']
assert 'bar stuff 2' == bar_options['STUFF_2']
foo_options = app.config.get_namespace('FOO_', trim_namespace=False)
assert 2 == len(foo_options)
assert 'foo option 1' == foo_options['foo_option_1']
assert 'foo option 2' == foo_options['foo_option_2']
bar_options = app.config.get_namespace('BAR_', lowercase=False, trim_namespace=False)
assert 2 == len(bar_options)
assert 'bar stuff 1' == bar_options['BAR_STUFF_1']
assert 'bar stuff 2' == bar_options['BAR_STUFF_2']
| bsd-3-clause |
codeforamerica/westsac-urban-land-locator | farmsList/public/views.py | 1 | 5434 | # -*- coding: utf-8 -*-
'''Public section, including homepage and signup.'''
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask_mail import Message
from flask.ext.login import login_user, login_required, logout_user
from farmsList.extensions import mail, login_manager
from farmsList.user.models import User
from farmsList.public.forms import LoginForm, ContactLandOwnerForm
from farmsList.public.models import Farmland
from farmsList.user.forms import RegisterForm
from farmsList.user.models import Email
from farmsList.utils import flash_errors
from farmsList.database import db
blueprint = Blueprint('public', __name__, static_folder="../static")
@login_manager.user_loader
def load_user(id):
return User.get_by_id(int(id))
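# Flask-Login calls this loader whenever a request needs the current user,
# rehydrating it from the user id stored in the session; the contract
# (sketch) is to return a User instance, or None for an unknown id.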
@blueprint.route("/", methods=["GET", "POST"])
def home():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route('/logout/')
@login_required
def logout():
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route("/register/", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
password=form.password.data,
active=True)
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route("/contact-land-owner/<int:farmlandId>", methods=["GET", "POST"])
def contactLandOwner(farmlandId):
form = ContactLandOwnerForm(request.form)
farmland = Farmland.query.filter(Farmland.id == farmlandId).all()[0]
if form.validate_on_submit():
address = "Unknown" if farmland.address is None else farmland.address
mainBodyContent = ("<p style=\"margin-left: 50px;\">"
"<b>Name:</b> " + form.name.data + "<br>"
"<b>Email:</b> " + form.email.data + "<br>"
"<b>Phone:</b> " + form.phone.data + "<br>"
"</p>"
"<p style=\"margin-left: 50px;\">"
"<b>What is your past experience farming?</b><br>"
"" + form.experience.data + "</p>"
"<p><br>Thanks,<br>"
"Acres"
"</p>")
# msg = Message("Inquiry: " + address + " Property", recipients=["aaronl@cityofwestsacramento.org"])
msg = Message("Inquiry: " + address + " Property", recipients=[farmland.email])
msg.html = ("<html>"
"<body>"
"<p>Someone has contacted you about your " + address + " property:</p>"
"" + mainBodyContent + ""
"</body>"
"</html>")
mail.send(msg)
Email.create(sender=msg.sender,
recipients=",".join(msg.recipients),
body=msg.html)
msg = Message("Inquiry: " + address + " Property", recipients=[form.email.data])
msg.html = ("<html>"
"<body>"
"<p>Just a note that we sent your request for more information about the " + address + " property to " + farmland.ownerName + ":</p>"
"" + mainBodyContent + ""
"</body>"
"</html>")
mail.send(msg)
Email.create(sender=msg.sender,
recipients=",".join(msg.recipients),
body=msg.html)
flash("Thanks for your inquiry! We sent your email for more information about the property. " + farmland.ownerName + " will follow up with you shortly.", 'info')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template("public/contact-land-owner.html", form=form, farmland=farmland)
@blueprint.route("/farmland-details/<int:farmlandId>")
def farmlandDetails(farmlandId):
return render_template("public/farmland-details.html")
@blueprint.route("/farmland-approval/<int:farmlandId>")
def farmlandApproval(farmlandId):
return render_template("public/farmland-approval.html")
@blueprint.route("/find-land/")
def find_land():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/find_land.html", form=form)
| bsd-3-clause |
seocam/django | tests/queries/models.py | 36 | 16195 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
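# Hedged sketch of what the Meta.ordering above implies at the SQL level
# (table/column names approximate): the ORM joins ranking -> author ->
# extrainfo -> note and emits roughly
#     ORDER BY note.note ASC, author.name ASC, ranking.rank ASC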
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
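# Hedged usage illustration: ManagedModel.objects.all() only returns rows
# with public=True and a tag named 't1', while
# ManagedModel.normal_manager.all() returns every row.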
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, null=True)
objectb = models.ForeignKey(ObjectB, null=True)
childobjecta = models.ForeignKey(ChildObjectA, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, null=True)
d = models.ForeignKey(ModelD)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, to_field='name')
responsibility = models.ForeignKey('Responsibility', to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, null=True)
b = models.ForeignKey(FK2, null=True)
c = models.ForeignKey(FK3, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter')
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph')
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, related_name='owner')
creator = models.ForeignKey(BaseUser, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, related_name='user')
def __str__(self):
return str(self.staff)
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company)
employee = models.ForeignKey(Person)
title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School)
class Classroom(models.Model):
school = models.ForeignKey(School)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605A(models.Model):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A)
modelc_fk = models.ForeignKey("Ticket23605C")
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
| bsd-3-clause |
basicthinker/Sexain-MemController | gem5-stable/src/mem/SimpleMemory.py | 1 | 3222 | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Andreas Hansson
from m5.params import *
from AbstractMemory import *
class SimpleMemory(AbstractMemory):
type = 'SimpleMemory'
cxx_header = "mem/simple_mem.hh"
port = SlavePort("Slave ports")
latency = Param.Latency('40ns', "Latency on row buffer hit")
latency_miss = Param.Latency('80ns', "Latency on row buffer miss")
latency_var = Param.Latency('0ns', "Request to response latency variance")
# The memory bandwidth limit default is set to 12.8GB/s which is
# representative of a x64 DDR3-1600 channel.
bandwidth = Param.MemoryBandwidth('12.8GB/s',
"Combined read and write bandwidth")
lat_att_operate = Param.Latency('3ns', "ATT operation latency")
lat_buffer_operate = Param.Latency('3ns',
"Version buffer operation latency")
lat_nvm_read = Param.Latency('128ns', "NVM read latency")
lat_nvm_write = Param.Latency('368ns', "NVM write latency")
disable_timing = Param.Bool(True, "If THNVM is not timed")
| apache-2.0 |
toanalien/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/prettydiff.py | 186 | 1857 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool import steps
class PrettyDiff(AbstractSequencedCommand):
name = "pretty-diff"
help_text = "Shows the pretty diff in the default browser"
show_in_main_help = True
steps = [
steps.ConfirmDiff,
]
| bsd-3-clause |
heke123/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/style/filereader_unittest.py | 3 | 6694 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.logtesting import LoggingTestCase
from webkitpy.style.checker import ProcessorBase
from webkitpy.style.filereader import TextFileReader
class TextFileReaderTest(LoggingTestCase):
class MockProcessor(ProcessorBase):
"""A processor for test purposes.
This processor simply records the parameters passed to its process()
method for later checking by the unittest test methods.
"""
def __init__(self):
self.processed = []
"""The parameters passed for all calls to the process() method."""
def should_process(self, file_path):
return not file_path.endswith('should_not_process.txt')
def process(self, lines, file_path, test_kwarg=None):
self.processed.append((lines, file_path, test_kwarg))
def setUp(self):
LoggingTestCase.setUp(self)
# FIXME: This should be a MockFileSystem once TextFileReader is moved entirely on top of FileSystem.
self.filesystem = FileSystem()
self._temp_dir = str(self.filesystem.mkdtemp())
self._processor = TextFileReaderTest.MockProcessor()
self._file_reader = TextFileReader(self.filesystem, self._processor)
def tearDown(self):
LoggingTestCase.tearDown(self)
self.filesystem.rmtree(self._temp_dir)
def _create_file(self, rel_path, text):
"""Create a file with given text and return the path to the file."""
# FIXME: There are better/more secure APIs for creating tmp file paths.
file_path = self.filesystem.join(self._temp_dir, rel_path)
self.filesystem.write_text_file(file_path, text)
return file_path
def _passed_to_processor(self):
"""Return the parameters passed to MockProcessor.process()."""
return self._processor.processed
def _assert_file_reader(self, passed_to_processor, file_count):
"""Assert the state of the file reader."""
self.assertEqual(passed_to_processor, self._passed_to_processor())
self.assertEqual(file_count, self._file_reader.file_count)
def test_process_file__does_not_exist(self):
try:
self._file_reader.process_file('does_not_exist.txt')
except SystemExit, err:
self.assertEqual(str(err), '1')
else:
self.fail('No Exception raised.')
self._assert_file_reader([], 1)
self.assertLog(["ERROR: File does not exist: 'does_not_exist.txt'\n"])
def test_process_file__is_dir(self):
temp_dir = self.filesystem.join(self._temp_dir, 'test_dir')
self.filesystem.maybe_make_directory(temp_dir)
self._file_reader.process_file(temp_dir)
# Because the log message below contains exception text, it is
# possible that the text varies across platforms. For this reason,
# we check only the portion of the log message that we control,
# namely the text at the beginning.
log_messages = self.logMessages()
# We remove the message we are looking at to prevent the tearDown()
# from raising an exception when it asserts that no log messages
# remain.
message = log_messages.pop()
self.assertTrue(message.startswith("WARNING: Could not read file. Skipping: '%s'\n " % temp_dir))
self._assert_file_reader([], 1)
def test_process_file__should_not_process(self):
file_path = self._create_file('should_not_process.txt', 'contents')
self._file_reader.process_file(file_path)
self._assert_file_reader([], 1)
def test_process_file__multiple_lines(self):
file_path = self._create_file('foo.txt', 'line one\r\nline two\n')
self._file_reader.process_file(file_path)
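        # Lines are split on LF only, so the CR from the CRLF pair survives
        # in the first entry and the trailing newline yields an empty final
        # entry, as the expectation below shows.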
processed = [(['line one\r', 'line two', ''], file_path, None)]
self._assert_file_reader(processed, 1)
def test_process_file__file_stdin(self):
file_path = self._create_file('-', 'file contents')
self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
processed = [(['file contents'], file_path, 'foo')]
self._assert_file_reader(processed, 1)
def test_process_file__with_kwarg(self):
file_path = self._create_file('foo.txt', 'file contents')
self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
processed = [(['file contents'], file_path, 'foo')]
self._assert_file_reader(processed, 1)
def test_process_paths(self):
# We test a list of paths that contains both a file and a directory.
dir = self.filesystem.join(self._temp_dir, 'foo_dir')
self.filesystem.maybe_make_directory(dir)
file_path1 = self._create_file('file1.txt', 'foo')
rel_path = self.filesystem.join('foo_dir', 'file2.txt')
file_path2 = self._create_file(rel_path, 'bar')
self._file_reader.process_paths([dir, file_path1])
processed = [(['bar'], file_path2, None),
(['foo'], file_path1, None)]
self._assert_file_reader(processed, 2)
def test_count_delete_only_file(self):
self._file_reader.count_delete_only_file()
delete_only_file_count = self._file_reader.delete_only_file_count
self.assertEqual(delete_only_file_count, 1)
| bsd-3-clause |
ubc/compair | alembic/versions/316f3b73962c_modified_criteria_tables.py | 1 | 2136 | """modified criteria tables
Revision ID: 316f3b73962c
Revises: 2fe3d8183c34
Create Date: 2014-09-10 15:42:55.963855
"""
# revision identifiers, used by Alembic.
revision = '316f3b73962c'
down_revision = '2fe3d8183c34'
import logging
from alembic import op
import sqlalchemy as sa
from sqlalchemy import UniqueConstraint, exc
from sqlalchemy.sql import text
from compair.models import convention
def upgrade():
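    # Try the convention-based constraint name first; some backends create
    # the constraint under the raw name 'name' instead, and SQLite cannot
    # drop unique constraints at all (hence the warning fallback below).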
try:
with op.batch_alter_table('Criteria', naming_convention=convention,
table_args=(UniqueConstraint('name'))) as batch_op:
batch_op.drop_constraint('uq_Criteria_name', type_='unique')
except exc.InternalError:
with op.batch_alter_table('Criteria', naming_convention=convention,
table_args=(UniqueConstraint('name'))) as batch_op:
batch_op.drop_constraint('name', type_='unique')
except ValueError:
        logging.warning('Dropping a unique constraint is not supported for SQLite; dropping uq_Criteria_name ignored!')
# set existing criteria's active attribute to True using server_default
with op.batch_alter_table('CriteriaAndCourses', naming_convention=convention) as batch_op:
batch_op.add_column(sa.Column('active', sa.Boolean(), default=True, server_default='1', nullable=False))
with op.batch_alter_table('Criteria', naming_convention=convention) as batch_op:
batch_op.add_column(sa.Column('public', sa.Boolean(), default=False, server_default='0', nullable=False))
# set the first criteria as public
t = {"name": "Which is better?", "public": True}
    op.get_bind().execute(text("UPDATE Criteria SET public=:public WHERE name=:name"), **t)
def downgrade():
with op.batch_alter_table('Criteria', naming_convention=convention,
table_args=(UniqueConstraint('name'))) as batch_op:
batch_op.create_unique_constraint('uq_Criteria_name', ['name'])
batch_op.drop_column('public')
with op.batch_alter_table('CriteriaAndCourses', naming_convention=convention) as batch_op:
batch_op.drop_column('active')
| gpl-3.0 |
abligh/xen-4.2-live-migrate | tools/xm-test/tests/block-integrity/02_block_device_write_verify.py | 42 | 1838 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2006
# Author: Harry Butterworth <butterwo@uk.ibm.com>
# This test imports a ram disk device as a physical device into a domU.
# The domU initialises the ram disk with data from /dev/urandom and calculates
# the md5 checksum of the data (using tee as it is written so as to avoid
# reading it back from the device which might potentially mask problems).
# The domU is stopped and the md5 checksum of the data on the device is
# calculated by dom0. The test succeeds if the checksums match, indicating
# that all the data written by domU was successfully committed to the device.
import re
from XmTestLib import *
from XmTestLib.block_utils import *
if ENABLE_HVM_SUPPORT:
SKIP("Block-attach not supported for HVM domains")
domain = XmTestDomain()
try:
console = domain.start()
except DomainError, e:
FAIL(str(e))
console.setHistorySaveCmds(value=True)
block_attach(domain, "phy:ram1", "xvda1")
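# dom0's /dev/ram1 is now visible inside the domU as /dev/xvda1, so the
# same device can be checksummed from both sides below.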
console.setTimeout(120)
try:
run = console.runCmd("dd if=/dev/urandom bs=512 count=`cat /sys/block/xvda1/size` | tee /dev/xvda1 | md5sum")
except ConsoleError, e:
FAIL(str(e))
domU_md5sum_match = re.search(r"^[\dA-Fa-f]{32}", run["output"], re.M)
domain.closeConsole()
domain.stop()
s, o = traceCommand("md5sum /dev/ram1")
dom0_md5sum_match = re.search(r"^[\dA-Fa-f]{32}", o, re.M)
if domU_md5sum_match is None:
    FAIL("Failed to get md5sum of data written in domU.")
if dom0_md5sum_match is None:
    FAIL("Failed to get md5sum of data read back in dom0.")
if verbose:
print "md5sum domU:"
print domU_md5sum_match.group()
print "md5sum dom0:"
print dom0_md5sum_match.group()
if domU_md5sum_match.group() != dom0_md5sum_match.group():
FAIL("MISCOMPARE: data read in dom0 did not match data written by domU.")
| gpl-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/carrier_constant_service/transports/__init__.py | 3 | 1067 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import CarrierConstantServiceTransport
from .grpc import CarrierConstantServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[CarrierConstantServiceTransport]]
_transport_registry['grpc'] = CarrierConstantServiceGrpcTransport
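# Client code looks transports up in this registry by name (currently only
# 'grpc'), so supporting a new transport only requires registering it here.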
__all__ = (
'CarrierConstantServiceTransport',
'CarrierConstantServiceGrpcTransport',
)
| apache-2.0 |
craffel/mir_eval | tests/test_pattern.py | 4 | 3010 | """
Some unit tests for the pattern discovery task.
"""
import numpy as np
import json
import mir_eval
import glob
import warnings
import nose.tools
A_TOL = 1e-12
# Path to the fixture files
REF_GLOB = 'data/pattern/ref*.txt'
EST_GLOB = 'data/pattern/est*.txt'
SCORES_GLOB = 'data/pattern/output*.json'
def __unit_test_pattern_function(metric):
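    # Shared sanity checks applied to every pattern metric: empty inputs
    # warn and score 0, malformed patterns raise ValueError, and identical
    # reference/estimate patterns score a perfect 1.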
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# First, test for a warning on empty pattern
metric([[[]]], [[[(100, 20)]]])
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == 'Reference patterns are empty.'
metric([[[(100, 20)]]], [[[]]])
assert len(w) == 2
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == "Estimated patterns are empty."
# And that the metric is 0
assert np.allclose(metric([[[]]], [[[]]]), 0)
# Now test validation function - patterns must contain at least 1 occ
patterns = [[[(100, 20)]], []]
nose.tools.assert_raises(ValueError, metric, patterns, patterns)
# The (onset, midi) tuple must contain 2 elements
patterns = [[[(100, 20, 3)]]]
nose.tools.assert_raises(ValueError, metric, patterns, patterns)
# Valid patterns which are the same produce a score of 1 for all metrics
patterns = [[[(100, 20), (200, 30)]]]
assert np.allclose(metric(patterns, patterns), 1)
def __check_score(sco_f, metric, score, expected_score):
assert np.allclose(score, expected_score, atol=A_TOL)
def test_pattern_functions():
# Load in all files in the same order
ref_files = sorted(glob.glob(REF_GLOB))
est_files = sorted(glob.glob(EST_GLOB))
sco_files = sorted(glob.glob(SCORES_GLOB))
assert len(ref_files) == len(est_files) == len(sco_files) > 0
# Unit tests
for metric in [mir_eval.pattern.standard_FPR,
mir_eval.pattern.establishment_FPR,
mir_eval.pattern.occurrence_FPR,
mir_eval.pattern.three_layer_FPR,
mir_eval.pattern.first_n_three_layer_P,
mir_eval.pattern.first_n_target_proportion_R]:
yield (__unit_test_pattern_function, metric)
# Regression tests
for ref_f, est_f, sco_f in zip(ref_files, est_files, sco_files):
with open(sco_f, 'r') as f:
expected_scores = json.load(f)
# Load in reference and estimated patterns
reference_patterns = mir_eval.io.load_patterns(ref_f)
estimated_patterns = mir_eval.io.load_patterns(est_f)
# Compute scores
scores = mir_eval.pattern.evaluate(reference_patterns,
estimated_patterns)
# Compare them
for metric in scores:
# This is a simple hack to make nosetest's messages more useful
yield (__check_score, sco_f, metric, scores[metric],
expected_scores[metric])
| mit |
matsprea/omim | 3party/protobuf/python/google/protobuf/internal/message_python_test.py | 74 | 2359 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for ..public.message for the pure Python implementation."""
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.message_test import *
class ConfirmPurePythonTest(basetest.TestCase):
def testImplementationSetting(self):
self.assertEqual('python', api_implementation.Type())
if __name__ == '__main__':
basetest.main()
| apache-2.0 |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/encodings/euc_jp.py | 816 | 1027 | #
# euc_jp.py: Python Unicode Codec for EUC_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jp')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jp',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
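# Illustrative use only -- in practice the codecs machinery resolves this
# module through the standard 'encodings' search path:
#
#   >>> 'こんにちは'.encode('euc_jp')
#   b'\xa4\xb3\xa4\xf3\xa4\xcb\xa4\xc1\xa4\xcf'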
| lgpl-3.0 |
tchellomello/home-assistant | homeassistant/components/demo/tts.py | 26 | 1444 | """Support for the demo text-to-speech service."""
import os
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
SUPPORT_LANGUAGES = ["en", "de"]
DEFAULT_LANG = "en"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES)}
)
def get_engine(hass, config, discovery_info=None):
"""Set up Demo speech component."""
return DemoProvider(config.get(CONF_LANG, DEFAULT_LANG))
class DemoProvider(Provider):
"""Demo speech API provider."""
def __init__(self, lang):
"""Initialize demo provider."""
self._lang = lang
self.name = "Demo"
@property
def default_language(self):
"""Return the default language."""
return self._lang
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_options(self):
"""Return list of supported options like voice, emotionen."""
return ["voice", "age"]
def get_tts_audio(self, message, language, options=None):
"""Load TTS from demo."""
filename = os.path.join(os.path.dirname(__file__), "tts.mp3")
try:
with open(filename, "rb") as voice:
data = voice.read()
except OSError:
return (None, None)
return ("mp3", data)
| apache-2.0 |
beiko-lab/gengis | bin/Lib/site-packages/pythonwin/pywin/scintilla/scintillacon.py | 4 | 47114 | # Generated by h2py from Include\scintilla.h
# Included from BaseTsd.h
def HandleToUlong(h): return HandleToULong(h)
def UlongToHandle(ul): return ULongToHandle(ul)
def UlongToPtr(ul): return ULongToPtr(ul)
def UintToPtr(ui): return UIntToPtr(ui)
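# The values below are Scintilla message and style IDs; a host window passes
# them straight to the control, e.g. (illustrative)
# win32api.SendMessage(hwnd, SCI_SETTABWIDTH, 4, 0).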
INVALID_POSITION = -1
SCI_START = 2000
SCI_OPTIONAL_START = 3000
SCI_LEXER_START = 4000
SCI_ADDTEXT = 2001
SCI_ADDSTYLEDTEXT = 2002
SCI_INSERTTEXT = 2003
SCI_CLEARALL = 2004
SCI_CLEARDOCUMENTSTYLE = 2005
SCI_GETLENGTH = 2006
SCI_GETCHARAT = 2007
SCI_GETCURRENTPOS = 2008
SCI_GETANCHOR = 2009
SCI_GETSTYLEAT = 2010
SCI_REDO = 2011
SCI_SETUNDOCOLLECTION = 2012
SCI_SELECTALL = 2013
SCI_SETSAVEPOINT = 2014
SCI_GETSTYLEDTEXT = 2015
SCI_CANREDO = 2016
SCI_MARKERLINEFROMHANDLE = 2017
SCI_MARKERDELETEHANDLE = 2018
SCI_GETUNDOCOLLECTION = 2019
SCWS_INVISIBLE = 0
SCWS_VISIBLEALWAYS = 1
SCWS_VISIBLEAFTERINDENT = 2
SCI_GETVIEWWS = 2020
SCI_SETVIEWWS = 2021
SCI_POSITIONFROMPOINT = 2022
SCI_POSITIONFROMPOINTCLOSE = 2023
SCI_GOTOLINE = 2024
SCI_GOTOPOS = 2025
SCI_SETANCHOR = 2026
SCI_GETCURLINE = 2027
SCI_GETENDSTYLED = 2028
SC_EOL_CRLF = 0
SC_EOL_CR = 1
SC_EOL_LF = 2
SCI_CONVERTEOLS = 2029
SCI_GETEOLMODE = 2030
SCI_SETEOLMODE = 2031
SCI_STARTSTYLING = 2032
SCI_SETSTYLING = 2033
SCI_GETBUFFEREDDRAW = 2034
SCI_SETBUFFEREDDRAW = 2035
SCI_SETTABWIDTH = 2036
SCI_GETTABWIDTH = 2121
SC_CP_UTF8 = 65001
SC_CP_DBCS = 1
SCI_SETCODEPAGE = 2037
SCI_SETUSEPALETTE = 2039
MARKER_MAX = 31
SC_MARK_CIRCLE = 0
SC_MARK_ROUNDRECT = 1
SC_MARK_ARROW = 2
SC_MARK_SMALLRECT = 3
SC_MARK_SHORTARROW = 4
SC_MARK_EMPTY = 5
SC_MARK_ARROWDOWN = 6
SC_MARK_MINUS = 7
SC_MARK_PLUS = 8
SC_MARK_VLINE = 9
SC_MARK_LCORNER = 10
SC_MARK_TCORNER = 11
SC_MARK_BOXPLUS = 12
SC_MARK_BOXPLUSCONNECTED = 13
SC_MARK_BOXMINUS = 14
SC_MARK_BOXMINUSCONNECTED = 15
SC_MARK_LCORNERCURVE = 16
SC_MARK_TCORNERCURVE = 17
SC_MARK_CIRCLEPLUS = 18
SC_MARK_CIRCLEPLUSCONNECTED = 19
SC_MARK_CIRCLEMINUS = 20
SC_MARK_CIRCLEMINUSCONNECTED = 21
SC_MARK_BACKGROUND = 22
SC_MARK_DOTDOTDOT = 23
SC_MARK_ARROWS = 24
SC_MARK_PIXMAP = 25
SC_MARK_FULLRECT = 26
SC_MARK_LEFTRECT = 27
SC_MARK_CHARACTER = 10000
SC_MARKNUM_FOLDEREND = 25
SC_MARKNUM_FOLDEROPENMID = 26
SC_MARKNUM_FOLDERMIDTAIL = 27
SC_MARKNUM_FOLDERTAIL = 28
SC_MARKNUM_FOLDERSUB = 29
SC_MARKNUM_FOLDER = 30
SC_MARKNUM_FOLDEROPEN = 31
SC_MASK_FOLDERS = (-33554432)
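# -33554432 is the signed 32-bit view of 0xFE000000: the mask covering
# marker numbers 25-31, which are reserved for the fold markers above.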
SCI_MARKERDEFINE = 2040
SCI_MARKERSETFORE = 2041
SCI_MARKERSETBACK = 2042
SCI_MARKERADD = 2043
SCI_MARKERDELETE = 2044
SCI_MARKERDELETEALL = 2045
SCI_MARKERGET = 2046
SCI_MARKERNEXT = 2047
SCI_MARKERPREVIOUS = 2048
SCI_MARKERDEFINEPIXMAP = 2049
SCI_MARKERADDSET = 2466
SCI_MARKERSETALPHA = 2476
SC_MARGIN_SYMBOL = 0
SC_MARGIN_NUMBER = 1
SC_MARGIN_BACK = 2
SC_MARGIN_FORE = 3
SCI_SETMARGINTYPEN = 2240
SCI_GETMARGINTYPEN = 2241
SCI_SETMARGINWIDTHN = 2242
SCI_GETMARGINWIDTHN = 2243
SCI_SETMARGINMASKN = 2244
SCI_GETMARGINMASKN = 2245
SCI_SETMARGINSENSITIVEN = 2246
SCI_GETMARGINSENSITIVEN = 2247
STYLE_DEFAULT = 32
STYLE_LINENUMBER = 33
STYLE_BRACELIGHT = 34
STYLE_BRACEBAD = 35
STYLE_CONTROLCHAR = 36
STYLE_INDENTGUIDE = 37
STYLE_CALLTIP = 38
STYLE_LASTPREDEFINED = 39
STYLE_MAX = 255
SC_CHARSET_ANSI = 0
SC_CHARSET_DEFAULT = 1
SC_CHARSET_BALTIC = 186
SC_CHARSET_CHINESEBIG5 = 136
SC_CHARSET_EASTEUROPE = 238
SC_CHARSET_GB2312 = 134
SC_CHARSET_GREEK = 161
SC_CHARSET_HANGUL = 129
SC_CHARSET_MAC = 77
SC_CHARSET_OEM = 255
SC_CHARSET_RUSSIAN = 204
SC_CHARSET_CYRILLIC = 1251
SC_CHARSET_SHIFTJIS = 128
SC_CHARSET_SYMBOL = 2
SC_CHARSET_TURKISH = 162
SC_CHARSET_JOHAB = 130
SC_CHARSET_HEBREW = 177
SC_CHARSET_ARABIC = 178
SC_CHARSET_VIETNAMESE = 163
SC_CHARSET_THAI = 222
SC_CHARSET_8859_15 = 1000
SCI_STYLECLEARALL = 2050
SCI_STYLESETFORE = 2051
SCI_STYLESETBACK = 2052
SCI_STYLESETBOLD = 2053
SCI_STYLESETITALIC = 2054
SCI_STYLESETSIZE = 2055
SCI_STYLESETFONT = 2056
SCI_STYLESETEOLFILLED = 2057
SCI_STYLERESETDEFAULT = 2058
SCI_STYLESETUNDERLINE = 2059
SC_CASE_MIXED = 0
SC_CASE_UPPER = 1
SC_CASE_LOWER = 2
SCI_STYLEGETFORE = 2481
SCI_STYLEGETBACK = 2482
SCI_STYLEGETBOLD = 2483
SCI_STYLEGETITALIC = 2484
SCI_STYLEGETSIZE = 2485
SCI_STYLEGETFONT = 2486
SCI_STYLEGETEOLFILLED = 2487
SCI_STYLEGETUNDERLINE = 2488
SCI_STYLEGETCASE = 2489
SCI_STYLEGETCHARACTERSET = 2490
SCI_STYLEGETVISIBLE = 2491
SCI_STYLEGETCHANGEABLE = 2492
SCI_STYLEGETHOTSPOT = 2493
SCI_STYLESETCASE = 2060
SCI_STYLESETCHARACTERSET = 2066
SCI_STYLESETHOTSPOT = 2409
SCI_SETSELFORE = 2067
SCI_SETSELBACK = 2068
SCI_GETSELALPHA = 2477
SCI_SETSELALPHA = 2478
SCI_GETSELEOLFILLED = 2479
SCI_SETSELEOLFILLED = 2480
SCI_SETCARETFORE = 2069
SCI_ASSIGNCMDKEY = 2070
SCI_CLEARCMDKEY = 2071
SCI_CLEARALLCMDKEYS = 2072
SCI_SETSTYLINGEX = 2073
SCI_STYLESETVISIBLE = 2074
SCI_GETCARETPERIOD = 2075
SCI_SETCARETPERIOD = 2076
SCI_SETWORDCHARS = 2077
SCI_BEGINUNDOACTION = 2078
SCI_ENDUNDOACTION = 2079
INDIC_PLAIN = 0
INDIC_SQUIGGLE = 1
INDIC_TT = 2
INDIC_DIAGONAL = 3
INDIC_STRIKE = 4
INDIC_HIDDEN = 5
INDIC_BOX = 6
INDIC_ROUNDBOX = 7
INDIC_MAX = 31
INDIC_CONTAINER = 8
INDIC0_MASK = 0x20
INDIC1_MASK = 0x40
INDIC2_MASK = 0x80
INDICS_MASK = 0xE0
SCI_INDICSETSTYLE = 2080
SCI_INDICGETSTYLE = 2081
SCI_INDICSETFORE = 2082
SCI_INDICGETFORE = 2083
SCI_INDICSETUNDER = 2510
SCI_INDICGETUNDER = 2511
SCI_SETWHITESPACEFORE = 2084
SCI_SETWHITESPACEBACK = 2085
SCI_SETSTYLEBITS = 2090
SCI_GETSTYLEBITS = 2091
SCI_SETLINESTATE = 2092
SCI_GETLINESTATE = 2093
SCI_GETMAXLINESTATE = 2094
SCI_GETCARETLINEVISIBLE = 2095
SCI_SETCARETLINEVISIBLE = 2096
SCI_GETCARETLINEBACK = 2097
SCI_SETCARETLINEBACK = 2098
SCI_STYLESETCHANGEABLE = 2099
SCI_AUTOCSHOW = 2100
SCI_AUTOCCANCEL = 2101
SCI_AUTOCACTIVE = 2102
SCI_AUTOCPOSSTART = 2103
SCI_AUTOCCOMPLETE = 2104
SCI_AUTOCSTOPS = 2105
SCI_AUTOCSETSEPARATOR = 2106
SCI_AUTOCGETSEPARATOR = 2107
SCI_AUTOCSELECT = 2108
SCI_AUTOCSETCANCELATSTART = 2110
SCI_AUTOCGETCANCELATSTART = 2111
SCI_AUTOCSETFILLUPS = 2112
SCI_AUTOCSETCHOOSESINGLE = 2113
SCI_AUTOCGETCHOOSESINGLE = 2114
SCI_AUTOCSETIGNORECASE = 2115
SCI_AUTOCGETIGNORECASE = 2116
SCI_USERLISTSHOW = 2117
SCI_AUTOCSETAUTOHIDE = 2118
SCI_AUTOCGETAUTOHIDE = 2119
SCI_AUTOCSETDROPRESTOFWORD = 2270
SCI_AUTOCGETDROPRESTOFWORD = 2271
SCI_REGISTERIMAGE = 2405
SCI_CLEARREGISTEREDIMAGES = 2408
SCI_AUTOCGETTYPESEPARATOR = 2285
SCI_AUTOCSETTYPESEPARATOR = 2286
SCI_AUTOCSETMAXWIDTH = 2208
SCI_AUTOCGETMAXWIDTH = 2209
SCI_AUTOCSETMAXHEIGHT = 2210
SCI_AUTOCGETMAXHEIGHT = 2211
SCI_SETINDENT = 2122
SCI_GETINDENT = 2123
SCI_SETUSETABS = 2124
SCI_GETUSETABS = 2125
SCI_SETLINEINDENTATION = 2126
SCI_GETLINEINDENTATION = 2127
SCI_GETLINEINDENTPOSITION = 2128
SCI_GETCOLUMN = 2129
SCI_SETHSCROLLBAR = 2130
SCI_GETHSCROLLBAR = 2131
SC_IV_NONE = 0
SC_IV_REAL = 1
SC_IV_LOOKFORWARD = 2
SC_IV_LOOKBOTH = 3
SCI_SETINDENTATIONGUIDES = 2132
SCI_GETINDENTATIONGUIDES = 2133
SCI_SETHIGHLIGHTGUIDE = 2134
SCI_GETHIGHLIGHTGUIDE = 2135
SCI_GETLINEENDPOSITION = 2136
SCI_GETCODEPAGE = 2137
SCI_GETCARETFORE = 2138
SCI_GETUSEPALETTE = 2139
SCI_GETREADONLY = 2140
SCI_SETCURRENTPOS = 2141
SCI_SETSELECTIONSTART = 2142
SCI_GETSELECTIONSTART = 2143
SCI_SETSELECTIONEND = 2144
SCI_GETSELECTIONEND = 2145
SCI_SETPRINTMAGNIFICATION = 2146
SCI_GETPRINTMAGNIFICATION = 2147
SC_PRINT_NORMAL = 0
SC_PRINT_INVERTLIGHT = 1
SC_PRINT_BLACKONWHITE = 2
SC_PRINT_COLOURONWHITE = 3
SC_PRINT_COLOURONWHITEDEFAULTBG = 4
SCI_SETPRINTCOLOURMODE = 2148
SCI_GETPRINTCOLOURMODE = 2149
SCFIND_WHOLEWORD = 2
SCFIND_MATCHCASE = 4
SCFIND_WORDSTART = 0x00100000
SCFIND_REGEXP = 0x00200000
SCFIND_POSIX = 0x00400000
SCI_FINDTEXT = 2150
SCI_FORMATRANGE = 2151
SCI_GETFIRSTVISIBLELINE = 2152
SCI_GETLINE = 2153
SCI_GETLINECOUNT = 2154
SCI_SETMARGINLEFT = 2155
SCI_GETMARGINLEFT = 2156
SCI_SETMARGINRIGHT = 2157
SCI_GETMARGINRIGHT = 2158
SCI_GETMODIFY = 2159
SCI_SETSEL = 2160
SCI_GETSELTEXT = 2161
SCI_GETTEXTRANGE = 2162
SCI_HIDESELECTION = 2163
SCI_POINTXFROMPOSITION = 2164
SCI_POINTYFROMPOSITION = 2165
SCI_LINEFROMPOSITION = 2166
SCI_POSITIONFROMLINE = 2167
SCI_LINESCROLL = 2168
SCI_SCROLLCARET = 2169
SCI_REPLACESEL = 2170
SCI_SETREADONLY = 2171
SCI_NULL = 2172
SCI_CANPASTE = 2173
SCI_CANUNDO = 2174
SCI_EMPTYUNDOBUFFER = 2175
SCI_UNDO = 2176
SCI_CUT = 2177
SCI_COPY = 2178
SCI_PASTE = 2179
SCI_CLEAR = 2180
SCI_SETTEXT = 2181
SCI_GETTEXT = 2182
SCI_GETTEXTLENGTH = 2183
SCI_GETDIRECTFUNCTION = 2184
SCI_GETDIRECTPOINTER = 2185
SCI_SETOVERTYPE = 2186
SCI_GETOVERTYPE = 2187
SCI_SETCARETWIDTH = 2188
SCI_GETCARETWIDTH = 2189
SCI_SETTARGETSTART = 2190
SCI_GETTARGETSTART = 2191
SCI_SETTARGETEND = 2192
SCI_GETTARGETEND = 2193
SCI_REPLACETARGET = 2194
SCI_REPLACETARGETRE = 2195
SCI_SEARCHINTARGET = 2197
SCI_SETSEARCHFLAGS = 2198
SCI_GETSEARCHFLAGS = 2199
SCI_CALLTIPSHOW = 2200
SCI_CALLTIPCANCEL = 2201
SCI_CALLTIPACTIVE = 2202
SCI_CALLTIPPOSSTART = 2203
SCI_CALLTIPSETHLT = 2204
SCI_CALLTIPSETBACK = 2205
SCI_CALLTIPSETFORE = 2206
SCI_CALLTIPSETFOREHLT = 2207
SCI_CALLTIPUSESTYLE = 2212
SCI_VISIBLEFROMDOCLINE = 2220
SCI_DOCLINEFROMVISIBLE = 2221
SCI_WRAPCOUNT = 2235
SC_FOLDLEVELBASE = 0x400
SC_FOLDLEVELWHITEFLAG = 0x1000
SC_FOLDLEVELHEADERFLAG = 0x2000
SC_FOLDLEVELBOXHEADERFLAG = 0x4000
SC_FOLDLEVELBOXFOOTERFLAG = 0x8000
SC_FOLDLEVELCONTRACTED = 0x10000
SC_FOLDLEVELUNINDENT = 0x20000
SC_FOLDLEVELNUMBERMASK = 0x0FFF
SCI_SETFOLDLEVEL = 2222
SCI_GETFOLDLEVEL = 2223
SCI_GETLASTCHILD = 2224
SCI_GETFOLDPARENT = 2225
SCI_SHOWLINES = 2226
SCI_HIDELINES = 2227
SCI_GETLINEVISIBLE = 2228
SCI_SETFOLDEXPANDED = 2229
SCI_GETFOLDEXPANDED = 2230
SCI_TOGGLEFOLD = 2231
SCI_ENSUREVISIBLE = 2232
SC_FOLDFLAG_LINEBEFORE_EXPANDED = 0x0002
SC_FOLDFLAG_LINEBEFORE_CONTRACTED = 0x0004
SC_FOLDFLAG_LINEAFTER_EXPANDED = 0x0008
SC_FOLDFLAG_LINEAFTER_CONTRACTED = 0x0010
SC_FOLDFLAG_LEVELNUMBERS = 0x0040
SC_FOLDFLAG_BOX = 0x0001
SCI_SETFOLDFLAGS = 2233
SCI_ENSUREVISIBLEENFORCEPOLICY = 2234
SCI_SETTABINDENTS = 2260
SCI_GETTABINDENTS = 2261
SCI_SETBACKSPACEUNINDENTS = 2262
SCI_GETBACKSPACEUNINDENTS = 2263
SC_TIME_FOREVER = 10000000
SCI_SETMOUSEDWELLTIME = 2264
SCI_GETMOUSEDWELLTIME = 2265
SCI_WORDSTARTPOSITION = 2266
SCI_WORDENDPOSITION = 2267
SC_WRAP_NONE = 0
SC_WRAP_WORD = 1
SC_WRAP_CHAR = 2
SCI_SETWRAPMODE = 2268
SCI_GETWRAPMODE = 2269
SC_WRAPVISUALFLAG_NONE = 0x0000
SC_WRAPVISUALFLAG_END = 0x0001
SC_WRAPVISUALFLAG_START = 0x0002
SCI_SETWRAPVISUALFLAGS = 2460
SCI_GETWRAPVISUALFLAGS = 2461
SC_WRAPVISUALFLAGLOC_DEFAULT = 0x0000
SC_WRAPVISUALFLAGLOC_END_BY_TEXT = 0x0001
SC_WRAPVISUALFLAGLOC_START_BY_TEXT = 0x0002
SCI_SETWRAPVISUALFLAGSLOCATION = 2462
SCI_GETWRAPVISUALFLAGSLOCATION = 2463
SCI_SETWRAPSTARTINDENT = 2464
SCI_GETWRAPSTARTINDENT = 2465
SC_CACHE_NONE = 0
SC_CACHE_CARET = 1
SC_CACHE_PAGE = 2
SC_CACHE_DOCUMENT = 3
SCI_SETLAYOUTCACHE = 2272
SCI_GETLAYOUTCACHE = 2273
SCI_SETSCROLLWIDTH = 2274
SCI_GETSCROLLWIDTH = 2275
SCI_SETSCROLLWIDTHTRACKING = 2516
SCI_GETSCROLLWIDTHTRACKING = 2517
SCI_TEXTWIDTH = 2276
SCI_SETENDATLASTLINE = 2277
SCI_GETENDATLASTLINE = 2278
SCI_TEXTHEIGHT = 2279
SCI_SETVSCROLLBAR = 2280
SCI_GETVSCROLLBAR = 2281
SCI_APPENDTEXT = 2282
SCI_GETTWOPHASEDRAW = 2283
SCI_SETTWOPHASEDRAW = 2284
SCI_TARGETFROMSELECTION = 2287
SCI_LINESJOIN = 2288
SCI_LINESSPLIT = 2289
SCI_SETFOLDMARGINCOLOUR = 2290
SCI_SETFOLDMARGINHICOLOUR = 2291
SCI_LINEDOWN = 2300
SCI_LINEDOWNEXTEND = 2301
SCI_LINEUP = 2302
SCI_LINEUPEXTEND = 2303
SCI_CHARLEFT = 2304
SCI_CHARLEFTEXTEND = 2305
SCI_CHARRIGHT = 2306
SCI_CHARRIGHTEXTEND = 2307
SCI_WORDLEFT = 2308
SCI_WORDLEFTEXTEND = 2309
SCI_WORDRIGHT = 2310
SCI_WORDRIGHTEXTEND = 2311
SCI_HOME = 2312
SCI_HOMEEXTEND = 2313
SCI_LINEEND = 2314
SCI_LINEENDEXTEND = 2315
SCI_DOCUMENTSTART = 2316
SCI_DOCUMENTSTARTEXTEND = 2317
SCI_DOCUMENTEND = 2318
SCI_DOCUMENTENDEXTEND = 2319
SCI_PAGEUP = 2320
SCI_PAGEUPEXTEND = 2321
SCI_PAGEDOWN = 2322
SCI_PAGEDOWNEXTEND = 2323
SCI_EDITTOGGLEOVERTYPE = 2324
SCI_CANCEL = 2325
SCI_DELETEBACK = 2326
SCI_TAB = 2327
SCI_BACKTAB = 2328
SCI_NEWLINE = 2329
SCI_FORMFEED = 2330
SCI_VCHOME = 2331
SCI_VCHOMEEXTEND = 2332
SCI_ZOOMIN = 2333
SCI_ZOOMOUT = 2334
SCI_DELWORDLEFT = 2335
SCI_DELWORDRIGHT = 2336
SCI_DELWORDRIGHTEND = 2518
SCI_LINECUT = 2337
SCI_LINEDELETE = 2338
SCI_LINETRANSPOSE = 2339
SCI_LINEDUPLICATE = 2404
SCI_LOWERCASE = 2340
SCI_UPPERCASE = 2341
SCI_LINESCROLLDOWN = 2342
SCI_LINESCROLLUP = 2343
SCI_DELETEBACKNOTLINE = 2344
SCI_HOMEDISPLAY = 2345
SCI_HOMEDISPLAYEXTEND = 2346
SCI_LINEENDDISPLAY = 2347
SCI_LINEENDDISPLAYEXTEND = 2348
SCI_HOMEWRAP = 2349
SCI_HOMEWRAPEXTEND = 2450
SCI_LINEENDWRAP = 2451
SCI_LINEENDWRAPEXTEND = 2452
SCI_VCHOMEWRAP = 2453
SCI_VCHOMEWRAPEXTEND = 2454
SCI_LINECOPY = 2455
SCI_MOVECARETINSIDEVIEW = 2401
SCI_LINELENGTH = 2350
SCI_BRACEHIGHLIGHT = 2351
SCI_BRACEBADLIGHT = 2352
SCI_BRACEMATCH = 2353
SCI_GETVIEWEOL = 2355
SCI_SETVIEWEOL = 2356
SCI_GETDOCPOINTER = 2357
SCI_SETDOCPOINTER = 2358
SCI_SETMODEVENTMASK = 2359
EDGE_NONE = 0
EDGE_LINE = 1
EDGE_BACKGROUND = 2
SCI_GETEDGECOLUMN = 2360
SCI_SETEDGECOLUMN = 2361
SCI_GETEDGEMODE = 2362
SCI_SETEDGEMODE = 2363
SCI_GETEDGECOLOUR = 2364
SCI_SETEDGECOLOUR = 2365
SCI_SEARCHANCHOR = 2366
SCI_SEARCHNEXT = 2367
SCI_SEARCHPREV = 2368
SCI_LINESONSCREEN = 2370
SCI_USEPOPUP = 2371
SCI_SELECTIONISRECTANGLE = 2372
SCI_SETZOOM = 2373
SCI_GETZOOM = 2374
SCI_CREATEDOCUMENT = 2375
SCI_ADDREFDOCUMENT = 2376
SCI_RELEASEDOCUMENT = 2377
SCI_GETMODEVENTMASK = 2378
SCI_SETFOCUS = 2380
SCI_GETFOCUS = 2381
SCI_SETSTATUS = 2382
SCI_GETSTATUS = 2383
SCI_SETMOUSEDOWNCAPTURES = 2384
SCI_GETMOUSEDOWNCAPTURES = 2385
SC_CURSORNORMAL = -1
SC_CURSORWAIT = 4
SCI_SETCURSOR = 2386
SCI_GETCURSOR = 2387
SCI_SETCONTROLCHARSYMBOL = 2388
SCI_GETCONTROLCHARSYMBOL = 2389
SCI_WORDPARTLEFT = 2390
SCI_WORDPARTLEFTEXTEND = 2391
SCI_WORDPARTRIGHT = 2392
SCI_WORDPARTRIGHTEXTEND = 2393
VISIBLE_SLOP = 0x01
VISIBLE_STRICT = 0x04
SCI_SETVISIBLEPOLICY = 2394
SCI_DELLINELEFT = 2395
SCI_DELLINERIGHT = 2396
SCI_SETXOFFSET = 2397
SCI_GETXOFFSET = 2398
SCI_CHOOSECARETX = 2399
SCI_GRABFOCUS = 2400
CARET_SLOP = 0x01
CARET_STRICT = 0x04
CARET_JUMPS = 0x10
CARET_EVEN = 0x08
SCI_SETXCARETPOLICY = 2402
SCI_SETYCARETPOLICY = 2403
SCI_SETPRINTWRAPMODE = 2406
SCI_GETPRINTWRAPMODE = 2407
SCI_SETHOTSPOTACTIVEFORE = 2410
SCI_GETHOTSPOTACTIVEFORE = 2494
SCI_SETHOTSPOTACTIVEBACK = 2411
SCI_GETHOTSPOTACTIVEBACK = 2495
SCI_SETHOTSPOTACTIVEUNDERLINE = 2412
SCI_GETHOTSPOTACTIVEUNDERLINE = 2496
SCI_SETHOTSPOTSINGLELINE = 2421
SCI_GETHOTSPOTSINGLELINE = 2497
SCI_PARADOWN = 2413
SCI_PARADOWNEXTEND = 2414
SCI_PARAUP = 2415
SCI_PARAUPEXTEND = 2416
SCI_POSITIONBEFORE = 2417
SCI_POSITIONAFTER = 2418
SCI_COPYRANGE = 2419
SCI_COPYTEXT = 2420
SC_SEL_STREAM = 0
SC_SEL_RECTANGLE = 1
SC_SEL_LINES = 2
SCI_SETSELECTIONMODE = 2422
SCI_GETSELECTIONMODE = 2423
SCI_GETLINESELSTARTPOSITION = 2424
SCI_GETLINESELENDPOSITION = 2425
SCI_LINEDOWNRECTEXTEND = 2426
SCI_LINEUPRECTEXTEND = 2427
SCI_CHARLEFTRECTEXTEND = 2428
SCI_CHARRIGHTRECTEXTEND = 2429
SCI_HOMERECTEXTEND = 2430
SCI_VCHOMERECTEXTEND = 2431
SCI_LINEENDRECTEXTEND = 2432
SCI_PAGEUPRECTEXTEND = 2433
SCI_PAGEDOWNRECTEXTEND = 2434
SCI_STUTTEREDPAGEUP = 2435
SCI_STUTTEREDPAGEUPEXTEND = 2436
SCI_STUTTEREDPAGEDOWN = 2437
SCI_STUTTEREDPAGEDOWNEXTEND = 2438
SCI_WORDLEFTEND = 2439
SCI_WORDLEFTENDEXTEND = 2440
SCI_WORDRIGHTEND = 2441
SCI_WORDRIGHTENDEXTEND = 2442
SCI_SETWHITESPACECHARS = 2443
SCI_SETCHARSDEFAULT = 2444
SCI_AUTOCGETCURRENT = 2445
SCI_ALLOCATE = 2446
SCI_TARGETASUTF8 = 2447
SCI_SETLENGTHFORENCODE = 2448
SCI_ENCODEDFROMUTF8 = 2449
SCI_FINDCOLUMN = 2456
SCI_GETCARETSTICKY = 2457
SCI_SETCARETSTICKY = 2458
SCI_TOGGLECARETSTICKY = 2459
SCI_SETPASTECONVERTENDINGS = 2467
SCI_GETPASTECONVERTENDINGS = 2468
SCI_SELECTIONDUPLICATE = 2469
SC_ALPHA_TRANSPARENT = 0
SC_ALPHA_OPAQUE = 255
SC_ALPHA_NOALPHA = 256
SCI_SETCARETLINEBACKALPHA = 2470
SCI_GETCARETLINEBACKALPHA = 2471
CARETSTYLE_INVISIBLE = 0
CARETSTYLE_LINE = 1
CARETSTYLE_BLOCK = 2
SCI_SETCARETSTYLE = 2512
SCI_GETCARETSTYLE = 2513
SCI_SETINDICATORCURRENT = 2500
SCI_GETINDICATORCURRENT = 2501
SCI_SETINDICATORVALUE = 2502
SCI_GETINDICATORVALUE = 2503
SCI_INDICATORFILLRANGE = 2504
SCI_INDICATORCLEARRANGE = 2505
SCI_INDICATORALLONFOR = 2506
SCI_INDICATORVALUEAT = 2507
SCI_INDICATORSTART = 2508
SCI_INDICATOREND = 2509
SCI_SETPOSITIONCACHE = 2514
SCI_GETPOSITIONCACHE = 2515
SCI_COPYALLOWLINE = 2519
SCI_GETCHARACTERPOINTER = 2520
SCI_SETKEYSUNICODE = 2521
SCI_GETKEYSUNICODE = 2522
SCI_STARTRECORD = 3001
SCI_STOPRECORD = 3002
SCI_SETLEXER = 4001
SCI_GETLEXER = 4002
SCI_COLOURISE = 4003
SCI_SETPROPERTY = 4004
KEYWORDSET_MAX = 8
SCI_SETKEYWORDS = 4005
SCI_SETLEXERLANGUAGE = 4006
SCI_LOADLEXERLIBRARY = 4007
SCI_GETPROPERTY = 4008
SCI_GETPROPERTYEXPANDED = 4009
SCI_GETPROPERTYINT = 4010
SCI_GETSTYLEBITSNEEDED = 4011
SC_MOD_INSERTTEXT = 0x1
SC_MOD_DELETETEXT = 0x2
SC_MOD_CHANGESTYLE = 0x4
SC_MOD_CHANGEFOLD = 0x8
SC_PERFORMED_USER = 0x10
SC_PERFORMED_UNDO = 0x20
SC_PERFORMED_REDO = 0x40
SC_MULTISTEPUNDOREDO = 0x80
SC_LASTSTEPINUNDOREDO = 0x100
SC_MOD_CHANGEMARKER = 0x200
SC_MOD_BEFOREINSERT = 0x400
SC_MOD_BEFOREDELETE = 0x800
SC_MULTILINEUNDOREDO = 0x1000
SC_STARTACTION = 0x2000
SC_MOD_CHANGEINDICATOR = 0x4000
SC_MOD_CHANGELINESTATE = 0x8000
SC_MODEVENTMASKALL = 0xFFFF
SCEN_CHANGE = 768
SCEN_SETFOCUS = 512
SCEN_KILLFOCUS = 256
SCK_DOWN = 300
SCK_UP = 301
SCK_LEFT = 302
SCK_RIGHT = 303
SCK_HOME = 304
SCK_END = 305
SCK_PRIOR = 306
SCK_NEXT = 307
SCK_DELETE = 308
SCK_INSERT = 309
SCK_ESCAPE = 7
SCK_BACK = 8
SCK_TAB = 9
SCK_RETURN = 13
SCK_ADD = 310
SCK_SUBTRACT = 311
SCK_DIVIDE = 312
SCK_WIN = 313
SCK_RWIN = 314
SCK_MENU = 315
SCMOD_NORM = 0
SCMOD_SHIFT = 1
SCMOD_CTRL = 2
SCMOD_ALT = 4
SCN_STYLENEEDED = 2000
SCN_CHARADDED = 2001
SCN_SAVEPOINTREACHED = 2002
SCN_SAVEPOINTLEFT = 2003
SCN_MODIFYATTEMPTRO = 2004
SCN_KEY = 2005
SCN_DOUBLECLICK = 2006
SCN_UPDATEUI = 2007
SCN_MODIFIED = 2008
SCN_MACRORECORD = 2009
SCN_MARGINCLICK = 2010
SCN_NEEDSHOWN = 2011
SCN_PAINTED = 2013
SCN_USERLISTSELECTION = 2014
SCN_URIDROPPED = 2015
SCN_DWELLSTART = 2016
SCN_DWELLEND = 2017
SCN_ZOOM = 2018
SCN_HOTSPOTCLICK = 2019
SCN_HOTSPOTDOUBLECLICK = 2020
SCN_CALLTIPCLICK = 2021
SCN_AUTOCSELECTION = 2022
SCN_INDICATORCLICK = 2023
SCN_INDICATORRELEASE = 2024
SCN_AUTOCCANCELLED = 2025
SCI_SETCARETPOLICY = 2369
CARET_CENTER = 0x02
CARET_XEVEN = 0x08
CARET_XJUMPS = 0x10
SCN_POSCHANGED = 2012
SCN_CHECKBRACE = 2007
# Generated by h2py from Include\scilexer.h
SCLEX_CONTAINER = 0
SCLEX_NULL = 1
SCLEX_PYTHON = 2
SCLEX_CPP = 3
SCLEX_HTML = 4
SCLEX_XML = 5
SCLEX_PERL = 6
SCLEX_SQL = 7
SCLEX_VB = 8
SCLEX_PROPERTIES = 9
SCLEX_ERRORLIST = 10
SCLEX_MAKEFILE = 11
SCLEX_BATCH = 12
SCLEX_XCODE = 13
SCLEX_LATEX = 14
SCLEX_LUA = 15
SCLEX_DIFF = 16
SCLEX_CONF = 17
SCLEX_PASCAL = 18
SCLEX_AVE = 19
SCLEX_ADA = 20
SCLEX_LISP = 21
SCLEX_RUBY = 22
SCLEX_EIFFEL = 23
SCLEX_EIFFELKW = 24
SCLEX_TCL = 25
SCLEX_NNCRONTAB = 26
SCLEX_BULLANT = 27
SCLEX_VBSCRIPT = 28
SCLEX_BAAN = 31
SCLEX_MATLAB = 32
SCLEX_SCRIPTOL = 33
SCLEX_ASM = 34
SCLEX_CPPNOCASE = 35
SCLEX_FORTRAN = 36
SCLEX_F77 = 37
SCLEX_CSS = 38
SCLEX_POV = 39
SCLEX_LOUT = 40
SCLEX_ESCRIPT = 41
SCLEX_PS = 42
SCLEX_NSIS = 43
SCLEX_MMIXAL = 44
SCLEX_CLW = 45
SCLEX_CLWNOCASE = 46
SCLEX_LOT = 47
SCLEX_YAML = 48
SCLEX_TEX = 49
SCLEX_METAPOST = 50
SCLEX_POWERBASIC = 51
SCLEX_FORTH = 52
SCLEX_ERLANG = 53
SCLEX_OCTAVE = 54
SCLEX_MSSQL = 55
SCLEX_VERILOG = 56
SCLEX_KIX = 57
SCLEX_GUI4CLI = 58
SCLEX_SPECMAN = 59
SCLEX_AU3 = 60
SCLEX_APDL = 61
SCLEX_BASH = 62
SCLEX_ASN1 = 63
SCLEX_VHDL = 64
SCLEX_CAML = 65
SCLEX_BLITZBASIC = 66
SCLEX_PUREBASIC = 67
SCLEX_HASKELL = 68
SCLEX_PHPSCRIPT = 69
SCLEX_TADS3 = 70
SCLEX_REBOL = 71
SCLEX_SMALLTALK = 72
SCLEX_FLAGSHIP = 73
SCLEX_CSOUND = 74
SCLEX_FREEBASIC = 75
SCLEX_INNOSETUP = 76
SCLEX_OPAL = 77
SCLEX_SPICE = 78
SCLEX_D = 79
SCLEX_CMAKE = 80
SCLEX_GAP = 81
SCLEX_PLM = 82
SCLEX_PROGRESS = 83
SCLEX_ABAQUS = 84
SCLEX_ASYMPTOTE = 85
SCLEX_R = 86
SCLEX_MAGIK = 87
SCLEX_POWERSHELL = 88
SCLEX_MYSQL = 89
SCLEX_PO = 90
SCLEX_AUTOMATIC = 1000
SCE_P_DEFAULT = 0
SCE_P_COMMENTLINE = 1
SCE_P_NUMBER = 2
SCE_P_STRING = 3
SCE_P_CHARACTER = 4
SCE_P_WORD = 5
SCE_P_TRIPLE = 6
SCE_P_TRIPLEDOUBLE = 7
SCE_P_CLASSNAME = 8
SCE_P_DEFNAME = 9
SCE_P_OPERATOR = 10
SCE_P_IDENTIFIER = 11
SCE_P_COMMENTBLOCK = 12
SCE_P_STRINGEOL = 13
SCE_P_WORD2 = 14
SCE_P_DECORATOR = 15
SCE_C_DEFAULT = 0
SCE_C_COMMENT = 1
SCE_C_COMMENTLINE = 2
SCE_C_COMMENTDOC = 3
SCE_C_NUMBER = 4
SCE_C_WORD = 5
SCE_C_STRING = 6
SCE_C_CHARACTER = 7
SCE_C_UUID = 8
SCE_C_PREPROCESSOR = 9
SCE_C_OPERATOR = 10
SCE_C_IDENTIFIER = 11
SCE_C_STRINGEOL = 12
SCE_C_VERBATIM = 13
SCE_C_REGEX = 14
SCE_C_COMMENTLINEDOC = 15
SCE_C_WORD2 = 16
SCE_C_COMMENTDOCKEYWORD = 17
SCE_C_COMMENTDOCKEYWORDERROR = 18
SCE_C_GLOBALCLASS = 19
SCE_D_DEFAULT = 0
SCE_D_COMMENT = 1
SCE_D_COMMENTLINE = 2
SCE_D_COMMENTDOC = 3
SCE_D_COMMENTNESTED = 4
SCE_D_NUMBER = 5
SCE_D_WORD = 6
SCE_D_WORD2 = 7
SCE_D_WORD3 = 8
SCE_D_TYPEDEF = 9
SCE_D_STRING = 10
SCE_D_STRINGEOL = 11
SCE_D_CHARACTER = 12
SCE_D_OPERATOR = 13
SCE_D_IDENTIFIER = 14
SCE_D_COMMENTLINEDOC = 15
SCE_D_COMMENTDOCKEYWORD = 16
SCE_D_COMMENTDOCKEYWORDERROR = 17
SCE_TCL_DEFAULT = 0
SCE_TCL_COMMENT = 1
SCE_TCL_COMMENTLINE = 2
SCE_TCL_NUMBER = 3
SCE_TCL_WORD_IN_QUOTE = 4
SCE_TCL_IN_QUOTE = 5
SCE_TCL_OPERATOR = 6
SCE_TCL_IDENTIFIER = 7
SCE_TCL_SUBSTITUTION = 8
SCE_TCL_SUB_BRACE = 9
SCE_TCL_MODIFIER = 10
SCE_TCL_EXPAND = 11
SCE_TCL_WORD = 12
SCE_TCL_WORD2 = 13
SCE_TCL_WORD3 = 14
SCE_TCL_WORD4 = 15
SCE_TCL_WORD5 = 16
SCE_TCL_WORD6 = 17
SCE_TCL_WORD7 = 18
SCE_TCL_WORD8 = 19
SCE_TCL_COMMENT_BOX = 20
SCE_TCL_BLOCK_COMMENT = 21
SCE_H_DEFAULT = 0
SCE_H_TAG = 1
SCE_H_TAGUNKNOWN = 2
SCE_H_ATTRIBUTE = 3
SCE_H_ATTRIBUTEUNKNOWN = 4
SCE_H_NUMBER = 5
SCE_H_DOUBLESTRING = 6
SCE_H_SINGLESTRING = 7
SCE_H_OTHER = 8
SCE_H_COMMENT = 9
SCE_H_ENTITY = 10
SCE_H_TAGEND = 11
SCE_H_XMLSTART = 12
SCE_H_XMLEND = 13
SCE_H_SCRIPT = 14
SCE_H_ASP = 15
SCE_H_ASPAT = 16
SCE_H_CDATA = 17
SCE_H_QUESTION = 18
SCE_H_VALUE = 19
SCE_H_XCCOMMENT = 20
SCE_H_SGML_DEFAULT = 21
SCE_H_SGML_COMMAND = 22
SCE_H_SGML_1ST_PARAM = 23
SCE_H_SGML_DOUBLESTRING = 24
SCE_H_SGML_SIMPLESTRING = 25
SCE_H_SGML_ERROR = 26
SCE_H_SGML_SPECIAL = 27
SCE_H_SGML_ENTITY = 28
SCE_H_SGML_COMMENT = 29
SCE_H_SGML_1ST_PARAM_COMMENT = 30
SCE_H_SGML_BLOCK_DEFAULT = 31
SCE_HJ_START = 40
SCE_HJ_DEFAULT = 41
SCE_HJ_COMMENT = 42
SCE_HJ_COMMENTLINE = 43
SCE_HJ_COMMENTDOC = 44
SCE_HJ_NUMBER = 45
SCE_HJ_WORD = 46
SCE_HJ_KEYWORD = 47
SCE_HJ_DOUBLESTRING = 48
SCE_HJ_SINGLESTRING = 49
SCE_HJ_SYMBOLS = 50
SCE_HJ_STRINGEOL = 51
SCE_HJ_REGEX = 52
SCE_HJA_START = 55
SCE_HJA_DEFAULT = 56
SCE_HJA_COMMENT = 57
SCE_HJA_COMMENTLINE = 58
SCE_HJA_COMMENTDOC = 59
SCE_HJA_NUMBER = 60
SCE_HJA_WORD = 61
SCE_HJA_KEYWORD = 62
SCE_HJA_DOUBLESTRING = 63
SCE_HJA_SINGLESTRING = 64
SCE_HJA_SYMBOLS = 65
SCE_HJA_STRINGEOL = 66
SCE_HJA_REGEX = 67
SCE_HB_START = 70
SCE_HB_DEFAULT = 71
SCE_HB_COMMENTLINE = 72
SCE_HB_NUMBER = 73
SCE_HB_WORD = 74
SCE_HB_STRING = 75
SCE_HB_IDENTIFIER = 76
SCE_HB_STRINGEOL = 77
SCE_HBA_START = 80
SCE_HBA_DEFAULT = 81
SCE_HBA_COMMENTLINE = 82
SCE_HBA_NUMBER = 83
SCE_HBA_WORD = 84
SCE_HBA_STRING = 85
SCE_HBA_IDENTIFIER = 86
SCE_HBA_STRINGEOL = 87
SCE_HP_START = 90
SCE_HP_DEFAULT = 91
SCE_HP_COMMENTLINE = 92
SCE_HP_NUMBER = 93
SCE_HP_STRING = 94
SCE_HP_CHARACTER = 95
SCE_HP_WORD = 96
SCE_HP_TRIPLE = 97
SCE_HP_TRIPLEDOUBLE = 98
SCE_HP_CLASSNAME = 99
SCE_HP_DEFNAME = 100
SCE_HP_OPERATOR = 101
SCE_HP_IDENTIFIER = 102
SCE_HPHP_COMPLEX_VARIABLE = 104
SCE_HPA_START = 105
SCE_HPA_DEFAULT = 106
SCE_HPA_COMMENTLINE = 107
SCE_HPA_NUMBER = 108
SCE_HPA_STRING = 109
SCE_HPA_CHARACTER = 110
SCE_HPA_WORD = 111
SCE_HPA_TRIPLE = 112
SCE_HPA_TRIPLEDOUBLE = 113
SCE_HPA_CLASSNAME = 114
SCE_HPA_DEFNAME = 115
SCE_HPA_OPERATOR = 116
SCE_HPA_IDENTIFIER = 117
SCE_HPHP_DEFAULT = 118
SCE_HPHP_HSTRING = 119
SCE_HPHP_SIMPLESTRING = 120
SCE_HPHP_WORD = 121
SCE_HPHP_NUMBER = 122
SCE_HPHP_VARIABLE = 123
SCE_HPHP_COMMENT = 124
SCE_HPHP_COMMENTLINE = 125
SCE_HPHP_HSTRING_VARIABLE = 126
SCE_HPHP_OPERATOR = 127
SCE_PL_DEFAULT = 0
SCE_PL_ERROR = 1
SCE_PL_COMMENTLINE = 2
SCE_PL_POD = 3
SCE_PL_NUMBER = 4
SCE_PL_WORD = 5
SCE_PL_STRING = 6
SCE_PL_CHARACTER = 7
SCE_PL_PUNCTUATION = 8
SCE_PL_PREPROCESSOR = 9
SCE_PL_OPERATOR = 10
SCE_PL_IDENTIFIER = 11
SCE_PL_SCALAR = 12
SCE_PL_ARRAY = 13
SCE_PL_HASH = 14
SCE_PL_SYMBOLTABLE = 15
SCE_PL_VARIABLE_INDEXER = 16
SCE_PL_REGEX = 17
SCE_PL_REGSUBST = 18
SCE_PL_LONGQUOTE = 19
SCE_PL_BACKTICKS = 20
SCE_PL_DATASECTION = 21
SCE_PL_HERE_DELIM = 22
SCE_PL_HERE_Q = 23
SCE_PL_HERE_QQ = 24
SCE_PL_HERE_QX = 25
SCE_PL_STRING_Q = 26
SCE_PL_STRING_QQ = 27
SCE_PL_STRING_QX = 28
SCE_PL_STRING_QR = 29
SCE_PL_STRING_QW = 30
SCE_PL_POD_VERB = 31
SCE_PL_SUB_PROTOTYPE = 40
SCE_PL_FORMAT_IDENT = 41
SCE_PL_FORMAT = 42
SCE_RB_DEFAULT = 0
SCE_RB_ERROR = 1
SCE_RB_COMMENTLINE = 2
SCE_RB_POD = 3
SCE_RB_NUMBER = 4
SCE_RB_WORD = 5
SCE_RB_STRING = 6
SCE_RB_CHARACTER = 7
SCE_RB_CLASSNAME = 8
SCE_RB_DEFNAME = 9
SCE_RB_OPERATOR = 10
SCE_RB_IDENTIFIER = 11
SCE_RB_REGEX = 12
SCE_RB_GLOBAL = 13
SCE_RB_SYMBOL = 14
SCE_RB_MODULE_NAME = 15
SCE_RB_INSTANCE_VAR = 16
SCE_RB_CLASS_VAR = 17
SCE_RB_BACKTICKS = 18
SCE_RB_DATASECTION = 19
SCE_RB_HERE_DELIM = 20
SCE_RB_HERE_Q = 21
SCE_RB_HERE_QQ = 22
SCE_RB_HERE_QX = 23
SCE_RB_STRING_Q = 24
SCE_RB_STRING_QQ = 25
SCE_RB_STRING_QX = 26
SCE_RB_STRING_QR = 27
SCE_RB_STRING_QW = 28
SCE_RB_WORD_DEMOTED = 29
SCE_RB_STDIN = 30
SCE_RB_STDOUT = 31
SCE_RB_STDERR = 40
SCE_RB_UPPER_BOUND = 41
SCE_B_DEFAULT = 0
SCE_B_COMMENT = 1
SCE_B_NUMBER = 2
SCE_B_KEYWORD = 3
SCE_B_STRING = 4
SCE_B_PREPROCESSOR = 5
SCE_B_OPERATOR = 6
SCE_B_IDENTIFIER = 7
SCE_B_DATE = 8
SCE_B_STRINGEOL = 9
SCE_B_KEYWORD2 = 10
SCE_B_KEYWORD3 = 11
SCE_B_KEYWORD4 = 12
SCE_B_CONSTANT = 13
SCE_B_ASM = 14
SCE_B_LABEL = 15
SCE_B_ERROR = 16
SCE_B_HEXNUMBER = 17
SCE_B_BINNUMBER = 18
SCE_PROPS_DEFAULT = 0
SCE_PROPS_COMMENT = 1
SCE_PROPS_SECTION = 2
SCE_PROPS_ASSIGNMENT = 3
SCE_PROPS_DEFVAL = 4
SCE_PROPS_KEY = 5
SCE_L_DEFAULT = 0
SCE_L_COMMAND = 1
SCE_L_TAG = 2
SCE_L_MATH = 3
SCE_L_COMMENT = 4
SCE_LUA_DEFAULT = 0
SCE_LUA_COMMENT = 1
SCE_LUA_COMMENTLINE = 2
SCE_LUA_COMMENTDOC = 3
SCE_LUA_NUMBER = 4
SCE_LUA_WORD = 5
SCE_LUA_STRING = 6
SCE_LUA_CHARACTER = 7
SCE_LUA_LITERALSTRING = 8
SCE_LUA_PREPROCESSOR = 9
SCE_LUA_OPERATOR = 10
SCE_LUA_IDENTIFIER = 11
SCE_LUA_STRINGEOL = 12
SCE_LUA_WORD2 = 13
SCE_LUA_WORD3 = 14
SCE_LUA_WORD4 = 15
SCE_LUA_WORD5 = 16
SCE_LUA_WORD6 = 17
SCE_LUA_WORD7 = 18
SCE_LUA_WORD8 = 19
SCE_ERR_DEFAULT = 0
SCE_ERR_PYTHON = 1
SCE_ERR_GCC = 2
SCE_ERR_MS = 3
SCE_ERR_CMD = 4
SCE_ERR_BORLAND = 5
SCE_ERR_PERL = 6
SCE_ERR_NET = 7
SCE_ERR_LUA = 8
SCE_ERR_CTAG = 9
SCE_ERR_DIFF_CHANGED = 10
SCE_ERR_DIFF_ADDITION = 11
SCE_ERR_DIFF_DELETION = 12
SCE_ERR_DIFF_MESSAGE = 13
SCE_ERR_PHP = 14
SCE_ERR_ELF = 15
SCE_ERR_IFC = 16
SCE_ERR_IFORT = 17
SCE_ERR_ABSF = 18
SCE_ERR_TIDY = 19
SCE_ERR_JAVA_STACK = 20
SCE_ERR_VALUE = 21
SCE_BAT_DEFAULT = 0
SCE_BAT_COMMENT = 1
SCE_BAT_WORD = 2
SCE_BAT_LABEL = 3
SCE_BAT_HIDE = 4
SCE_BAT_COMMAND = 5
SCE_BAT_IDENTIFIER = 6
SCE_BAT_OPERATOR = 7
SCE_MAKE_DEFAULT = 0
SCE_MAKE_COMMENT = 1
SCE_MAKE_PREPROCESSOR = 2
SCE_MAKE_IDENTIFIER = 3
SCE_MAKE_OPERATOR = 4
SCE_MAKE_TARGET = 5
SCE_MAKE_IDEOL = 9
SCE_DIFF_DEFAULT = 0
SCE_DIFF_COMMENT = 1
SCE_DIFF_COMMAND = 2
SCE_DIFF_HEADER = 3
SCE_DIFF_POSITION = 4
SCE_DIFF_DELETED = 5
SCE_DIFF_ADDED = 6
SCE_DIFF_CHANGED = 7
SCE_CONF_DEFAULT = 0
SCE_CONF_COMMENT = 1
SCE_CONF_NUMBER = 2
SCE_CONF_IDENTIFIER = 3
SCE_CONF_EXTENSION = 4
SCE_CONF_PARAMETER = 5
SCE_CONF_STRING = 6
SCE_CONF_OPERATOR = 7
SCE_CONF_IP = 8
SCE_CONF_DIRECTIVE = 9
SCE_AVE_DEFAULT = 0
SCE_AVE_COMMENT = 1
SCE_AVE_NUMBER = 2
SCE_AVE_WORD = 3
SCE_AVE_STRING = 6
SCE_AVE_ENUM = 7
SCE_AVE_STRINGEOL = 8
SCE_AVE_IDENTIFIER = 9
SCE_AVE_OPERATOR = 10
SCE_AVE_WORD1 = 11
SCE_AVE_WORD2 = 12
SCE_AVE_WORD3 = 13
SCE_AVE_WORD4 = 14
SCE_AVE_WORD5 = 15
SCE_AVE_WORD6 = 16
SCE_ADA_DEFAULT = 0
SCE_ADA_WORD = 1
SCE_ADA_IDENTIFIER = 2
SCE_ADA_NUMBER = 3
SCE_ADA_DELIMITER = 4
SCE_ADA_CHARACTER = 5
SCE_ADA_CHARACTEREOL = 6
SCE_ADA_STRING = 7
SCE_ADA_STRINGEOL = 8
SCE_ADA_LABEL = 9
SCE_ADA_COMMENTLINE = 10
SCE_ADA_ILLEGAL = 11
SCE_BAAN_DEFAULT = 0
SCE_BAAN_COMMENT = 1
SCE_BAAN_COMMENTDOC = 2
SCE_BAAN_NUMBER = 3
SCE_BAAN_WORD = 4
SCE_BAAN_STRING = 5
SCE_BAAN_PREPROCESSOR = 6
SCE_BAAN_OPERATOR = 7
SCE_BAAN_IDENTIFIER = 8
SCE_BAAN_STRINGEOL = 9
SCE_BAAN_WORD2 = 10
SCE_LISP_DEFAULT = 0
SCE_LISP_COMMENT = 1
SCE_LISP_NUMBER = 2
SCE_LISP_KEYWORD = 3
SCE_LISP_KEYWORD_KW = 4
SCE_LISP_SYMBOL = 5
SCE_LISP_STRING = 6
SCE_LISP_STRINGEOL = 8
SCE_LISP_IDENTIFIER = 9
SCE_LISP_OPERATOR = 10
SCE_LISP_SPECIAL = 11
SCE_LISP_MULTI_COMMENT = 12
SCE_EIFFEL_DEFAULT = 0
SCE_EIFFEL_COMMENTLINE = 1
SCE_EIFFEL_NUMBER = 2
SCE_EIFFEL_WORD = 3
SCE_EIFFEL_STRING = 4
SCE_EIFFEL_CHARACTER = 5
SCE_EIFFEL_OPERATOR = 6
SCE_EIFFEL_IDENTIFIER = 7
SCE_EIFFEL_STRINGEOL = 8
SCE_NNCRONTAB_DEFAULT = 0
SCE_NNCRONTAB_COMMENT = 1
SCE_NNCRONTAB_TASK = 2
SCE_NNCRONTAB_SECTION = 3
SCE_NNCRONTAB_KEYWORD = 4
SCE_NNCRONTAB_MODIFIER = 5
SCE_NNCRONTAB_ASTERISK = 6
SCE_NNCRONTAB_NUMBER = 7
SCE_NNCRONTAB_STRING = 8
SCE_NNCRONTAB_ENVIRONMENT = 9
SCE_NNCRONTAB_IDENTIFIER = 10
SCE_FORTH_DEFAULT = 0
SCE_FORTH_COMMENT = 1
SCE_FORTH_COMMENT_ML = 2
SCE_FORTH_IDENTIFIER = 3
SCE_FORTH_CONTROL = 4
SCE_FORTH_KEYWORD = 5
SCE_FORTH_DEFWORD = 6
SCE_FORTH_PREWORD1 = 7
SCE_FORTH_PREWORD2 = 8
SCE_FORTH_NUMBER = 9
SCE_FORTH_STRING = 10
SCE_FORTH_LOCALE = 11
SCE_MATLAB_DEFAULT = 0
SCE_MATLAB_COMMENT = 1
SCE_MATLAB_COMMAND = 2
SCE_MATLAB_NUMBER = 3
SCE_MATLAB_KEYWORD = 4
SCE_MATLAB_STRING = 5
SCE_MATLAB_OPERATOR = 6
SCE_MATLAB_IDENTIFIER = 7
SCE_MATLAB_DOUBLEQUOTESTRING = 8
SCE_SCRIPTOL_DEFAULT = 0
SCE_SCRIPTOL_WHITE = 1
SCE_SCRIPTOL_COMMENTLINE = 2
SCE_SCRIPTOL_PERSISTENT = 3
SCE_SCRIPTOL_CSTYLE = 4
SCE_SCRIPTOL_COMMENTBLOCK = 5
SCE_SCRIPTOL_NUMBER = 6
SCE_SCRIPTOL_STRING = 7
SCE_SCRIPTOL_CHARACTER = 8
SCE_SCRIPTOL_STRINGEOL = 9
SCE_SCRIPTOL_KEYWORD = 10
SCE_SCRIPTOL_OPERATOR = 11
SCE_SCRIPTOL_IDENTIFIER = 12
SCE_SCRIPTOL_TRIPLE = 13
SCE_SCRIPTOL_CLASSNAME = 14
SCE_SCRIPTOL_PREPROCESSOR = 15
SCE_ASM_DEFAULT = 0
SCE_ASM_COMMENT = 1
SCE_ASM_NUMBER = 2
SCE_ASM_STRING = 3
SCE_ASM_OPERATOR = 4
SCE_ASM_IDENTIFIER = 5
SCE_ASM_CPUINSTRUCTION = 6
SCE_ASM_MATHINSTRUCTION = 7
SCE_ASM_REGISTER = 8
SCE_ASM_DIRECTIVE = 9
SCE_ASM_DIRECTIVEOPERAND = 10
SCE_ASM_COMMENTBLOCK = 11
SCE_ASM_CHARACTER = 12
SCE_ASM_STRINGEOL = 13
SCE_ASM_EXTINSTRUCTION = 14
SCE_F_DEFAULT = 0
SCE_F_COMMENT = 1
SCE_F_NUMBER = 2
SCE_F_STRING1 = 3
SCE_F_STRING2 = 4
SCE_F_STRINGEOL = 5
SCE_F_OPERATOR = 6
SCE_F_IDENTIFIER = 7
SCE_F_WORD = 8
SCE_F_WORD2 = 9
SCE_F_WORD3 = 10
SCE_F_PREPROCESSOR = 11
SCE_F_OPERATOR2 = 12
SCE_F_LABEL = 13
SCE_F_CONTINUATION = 14
SCE_CSS_DEFAULT = 0
SCE_CSS_TAG = 1
SCE_CSS_CLASS = 2
SCE_CSS_PSEUDOCLASS = 3
SCE_CSS_UNKNOWN_PSEUDOCLASS = 4
SCE_CSS_OPERATOR = 5
SCE_CSS_IDENTIFIER = 6
SCE_CSS_UNKNOWN_IDENTIFIER = 7
SCE_CSS_VALUE = 8
SCE_CSS_COMMENT = 9
SCE_CSS_ID = 10
SCE_CSS_IMPORTANT = 11
SCE_CSS_DIRECTIVE = 12
SCE_CSS_DOUBLESTRING = 13
SCE_CSS_SINGLESTRING = 14
SCE_CSS_IDENTIFIER2 = 15
SCE_CSS_ATTRIBUTE = 16
SCE_CSS_IDENTIFIER3 = 17
SCE_CSS_PSEUDOELEMENT = 18
SCE_CSS_EXTENDED_IDENTIFIER = 19
SCE_CSS_EXTENDED_PSEUDOCLASS = 20
SCE_CSS_EXTENDED_PSEUDOELEMENT = 21
SCE_POV_DEFAULT = 0
SCE_POV_COMMENT = 1
SCE_POV_COMMENTLINE = 2
SCE_POV_NUMBER = 3
SCE_POV_OPERATOR = 4
SCE_POV_IDENTIFIER = 5
SCE_POV_STRING = 6
SCE_POV_STRINGEOL = 7
SCE_POV_DIRECTIVE = 8
SCE_POV_BADDIRECTIVE = 9
SCE_POV_WORD2 = 10
SCE_POV_WORD3 = 11
SCE_POV_WORD4 = 12
SCE_POV_WORD5 = 13
SCE_POV_WORD6 = 14
SCE_POV_WORD7 = 15
SCE_POV_WORD8 = 16
SCE_LOUT_DEFAULT = 0
SCE_LOUT_COMMENT = 1
SCE_LOUT_NUMBER = 2
SCE_LOUT_WORD = 3
SCE_LOUT_WORD2 = 4
SCE_LOUT_WORD3 = 5
SCE_LOUT_WORD4 = 6
SCE_LOUT_STRING = 7
SCE_LOUT_OPERATOR = 8
SCE_LOUT_IDENTIFIER = 9
SCE_LOUT_STRINGEOL = 10
SCE_ESCRIPT_DEFAULT = 0
SCE_ESCRIPT_COMMENT = 1
SCE_ESCRIPT_COMMENTLINE = 2
SCE_ESCRIPT_COMMENTDOC = 3
SCE_ESCRIPT_NUMBER = 4
SCE_ESCRIPT_WORD = 5
SCE_ESCRIPT_STRING = 6
SCE_ESCRIPT_OPERATOR = 7
SCE_ESCRIPT_IDENTIFIER = 8
SCE_ESCRIPT_BRACE = 9
SCE_ESCRIPT_WORD2 = 10
SCE_ESCRIPT_WORD3 = 11
SCE_PS_DEFAULT = 0
SCE_PS_COMMENT = 1
SCE_PS_DSC_COMMENT = 2
SCE_PS_DSC_VALUE = 3
SCE_PS_NUMBER = 4
SCE_PS_NAME = 5
SCE_PS_KEYWORD = 6
SCE_PS_LITERAL = 7
SCE_PS_IMMEVAL = 8
SCE_PS_PAREN_ARRAY = 9
SCE_PS_PAREN_DICT = 10
SCE_PS_PAREN_PROC = 11
SCE_PS_TEXT = 12
SCE_PS_HEXSTRING = 13
SCE_PS_BASE85STRING = 14
SCE_PS_BADSTRINGCHAR = 15
SCE_NSIS_DEFAULT = 0
SCE_NSIS_COMMENT = 1
SCE_NSIS_STRINGDQ = 2
SCE_NSIS_STRINGLQ = 3
SCE_NSIS_STRINGRQ = 4
SCE_NSIS_FUNCTION = 5
SCE_NSIS_VARIABLE = 6
SCE_NSIS_LABEL = 7
SCE_NSIS_USERDEFINED = 8
SCE_NSIS_SECTIONDEF = 9
SCE_NSIS_SUBSECTIONDEF = 10
SCE_NSIS_IFDEFINEDEF = 11
SCE_NSIS_MACRODEF = 12
SCE_NSIS_STRINGVAR = 13
SCE_NSIS_NUMBER = 14
SCE_NSIS_SECTIONGROUP = 15
SCE_NSIS_PAGEEX = 16
SCE_NSIS_FUNCTIONDEF = 17
SCE_NSIS_COMMENTBOX = 18
SCE_MMIXAL_LEADWS = 0
SCE_MMIXAL_COMMENT = 1
SCE_MMIXAL_LABEL = 2
SCE_MMIXAL_OPCODE = 3
SCE_MMIXAL_OPCODE_PRE = 4
SCE_MMIXAL_OPCODE_VALID = 5
SCE_MMIXAL_OPCODE_UNKNOWN = 6
SCE_MMIXAL_OPCODE_POST = 7
SCE_MMIXAL_OPERANDS = 8
SCE_MMIXAL_NUMBER = 9
SCE_MMIXAL_REF = 10
SCE_MMIXAL_CHAR = 11
SCE_MMIXAL_STRING = 12
SCE_MMIXAL_REGISTER = 13
SCE_MMIXAL_HEX = 14
SCE_MMIXAL_OPERATOR = 15
SCE_MMIXAL_SYMBOL = 16
SCE_MMIXAL_INCLUDE = 17
SCE_CLW_DEFAULT = 0
SCE_CLW_LABEL = 1
SCE_CLW_COMMENT = 2
SCE_CLW_STRING = 3
SCE_CLW_USER_IDENTIFIER = 4
SCE_CLW_INTEGER_CONSTANT = 5
SCE_CLW_REAL_CONSTANT = 6
SCE_CLW_PICTURE_STRING = 7
SCE_CLW_KEYWORD = 8
SCE_CLW_COMPILER_DIRECTIVE = 9
SCE_CLW_RUNTIME_EXPRESSIONS = 10
SCE_CLW_BUILTIN_PROCEDURES_FUNCTION = 11
SCE_CLW_STRUCTURE_DATA_TYPE = 12
SCE_CLW_ATTRIBUTE = 13
SCE_CLW_STANDARD_EQUATE = 14
SCE_CLW_ERROR = 15
SCE_CLW_DEPRECATED = 16
SCE_LOT_DEFAULT = 0
SCE_LOT_HEADER = 1
SCE_LOT_BREAK = 2
SCE_LOT_SET = 3
SCE_LOT_PASS = 4
SCE_LOT_FAIL = 5
SCE_LOT_ABORT = 6
SCE_YAML_DEFAULT = 0
SCE_YAML_COMMENT = 1
SCE_YAML_IDENTIFIER = 2
SCE_YAML_KEYWORD = 3
SCE_YAML_NUMBER = 4
SCE_YAML_REFERENCE = 5
SCE_YAML_DOCUMENT = 6
SCE_YAML_TEXT = 7
SCE_YAML_ERROR = 8
SCE_YAML_OPERATOR = 9
SCE_TEX_DEFAULT = 0
SCE_TEX_SPECIAL = 1
SCE_TEX_GROUP = 2
SCE_TEX_SYMBOL = 3
SCE_TEX_COMMAND = 4
SCE_TEX_TEXT = 5
SCE_METAPOST_DEFAULT = 0
SCE_METAPOST_SPECIAL = 1
SCE_METAPOST_GROUP = 2
SCE_METAPOST_SYMBOL = 3
SCE_METAPOST_COMMAND = 4
SCE_METAPOST_TEXT = 5
SCE_METAPOST_EXTRA = 6
SCE_ERLANG_DEFAULT = 0
SCE_ERLANG_COMMENT = 1
SCE_ERLANG_VARIABLE = 2
SCE_ERLANG_NUMBER = 3
SCE_ERLANG_KEYWORD = 4
SCE_ERLANG_STRING = 5
SCE_ERLANG_OPERATOR = 6
SCE_ERLANG_ATOM = 7
SCE_ERLANG_FUNCTION_NAME = 8
SCE_ERLANG_CHARACTER = 9
SCE_ERLANG_MACRO = 10
SCE_ERLANG_RECORD = 11
SCE_ERLANG_SEPARATOR = 12
SCE_ERLANG_NODE_NAME = 13
SCE_ERLANG_UNKNOWN = 31
SCE_MSSQL_DEFAULT = 0
SCE_MSSQL_COMMENT = 1
SCE_MSSQL_LINE_COMMENT = 2
SCE_MSSQL_NUMBER = 3
SCE_MSSQL_STRING = 4
SCE_MSSQL_OPERATOR = 5
SCE_MSSQL_IDENTIFIER = 6
SCE_MSSQL_VARIABLE = 7
SCE_MSSQL_COLUMN_NAME = 8
SCE_MSSQL_STATEMENT = 9
SCE_MSSQL_DATATYPE = 10
SCE_MSSQL_SYSTABLE = 11
SCE_MSSQL_GLOBAL_VARIABLE = 12
SCE_MSSQL_FUNCTION = 13
SCE_MSSQL_STORED_PROCEDURE = 14
SCE_MSSQL_DEFAULT_PREF_DATATYPE = 15
SCE_MSSQL_COLUMN_NAME_2 = 16
SCE_V_DEFAULT = 0
SCE_V_COMMENT = 1
SCE_V_COMMENTLINE = 2
SCE_V_COMMENTLINEBANG = 3
SCE_V_NUMBER = 4
SCE_V_WORD = 5
SCE_V_STRING = 6
SCE_V_WORD2 = 7
SCE_V_WORD3 = 8
SCE_V_PREPROCESSOR = 9
SCE_V_OPERATOR = 10
SCE_V_IDENTIFIER = 11
SCE_V_STRINGEOL = 12
SCE_V_USER = 19
SCE_KIX_DEFAULT = 0
SCE_KIX_COMMENT = 1
SCE_KIX_STRING1 = 2
SCE_KIX_STRING2 = 3
SCE_KIX_NUMBER = 4
SCE_KIX_VAR = 5
SCE_KIX_MACRO = 6
SCE_KIX_KEYWORD = 7
SCE_KIX_FUNCTIONS = 8
SCE_KIX_OPERATOR = 9
SCE_KIX_IDENTIFIER = 31
SCE_GC_DEFAULT = 0
SCE_GC_COMMENTLINE = 1
SCE_GC_COMMENTBLOCK = 2
SCE_GC_GLOBAL = 3
SCE_GC_EVENT = 4
SCE_GC_ATTRIBUTE = 5
SCE_GC_CONTROL = 6
SCE_GC_COMMAND = 7
SCE_GC_STRING = 8
SCE_GC_OPERATOR = 9
SCE_SN_DEFAULT = 0
SCE_SN_CODE = 1
SCE_SN_COMMENTLINE = 2
SCE_SN_COMMENTLINEBANG = 3
SCE_SN_NUMBER = 4
SCE_SN_WORD = 5
SCE_SN_STRING = 6
SCE_SN_WORD2 = 7
SCE_SN_WORD3 = 8
SCE_SN_PREPROCESSOR = 9
SCE_SN_OPERATOR = 10
SCE_SN_IDENTIFIER = 11
SCE_SN_STRINGEOL = 12
SCE_SN_REGEXTAG = 13
SCE_SN_SIGNAL = 14
SCE_SN_USER = 19
SCE_AU3_DEFAULT = 0
SCE_AU3_COMMENT = 1
SCE_AU3_COMMENTBLOCK = 2
SCE_AU3_NUMBER = 3
SCE_AU3_FUNCTION = 4
SCE_AU3_KEYWORD = 5
SCE_AU3_MACRO = 6
SCE_AU3_STRING = 7
SCE_AU3_OPERATOR = 8
SCE_AU3_VARIABLE = 9
SCE_AU3_SENT = 10
SCE_AU3_PREPROCESSOR = 11
SCE_AU3_SPECIAL = 12
SCE_AU3_EXPAND = 13
SCE_AU3_COMOBJ = 14
SCE_AU3_UDF = 15
SCE_APDL_DEFAULT = 0
SCE_APDL_COMMENT = 1
SCE_APDL_COMMENTBLOCK = 2
SCE_APDL_NUMBER = 3
SCE_APDL_STRING = 4
SCE_APDL_OPERATOR = 5
SCE_APDL_WORD = 6
SCE_APDL_PROCESSOR = 7
SCE_APDL_COMMAND = 8
SCE_APDL_SLASHCOMMAND = 9
SCE_APDL_STARCOMMAND = 10
SCE_APDL_ARGUMENT = 11
SCE_APDL_FUNCTION = 12
SCE_SH_DEFAULT = 0
SCE_SH_ERROR = 1
SCE_SH_COMMENTLINE = 2
SCE_SH_NUMBER = 3
SCE_SH_WORD = 4
SCE_SH_STRING = 5
SCE_SH_CHARACTER = 6
SCE_SH_OPERATOR = 7
SCE_SH_IDENTIFIER = 8
SCE_SH_SCALAR = 9
SCE_SH_PARAM = 10
SCE_SH_BACKTICKS = 11
SCE_SH_HERE_DELIM = 12
SCE_SH_HERE_Q = 13
SCE_ASN1_DEFAULT = 0
SCE_ASN1_COMMENT = 1
SCE_ASN1_IDENTIFIER = 2
SCE_ASN1_STRING = 3
SCE_ASN1_OID = 4
SCE_ASN1_SCALAR = 5
SCE_ASN1_KEYWORD = 6
SCE_ASN1_ATTRIBUTE = 7
SCE_ASN1_DESCRIPTOR = 8
SCE_ASN1_TYPE = 9
SCE_ASN1_OPERATOR = 10
SCE_VHDL_DEFAULT = 0
SCE_VHDL_COMMENT = 1
SCE_VHDL_COMMENTLINEBANG = 2
SCE_VHDL_NUMBER = 3
SCE_VHDL_STRING = 4
SCE_VHDL_OPERATOR = 5
SCE_VHDL_IDENTIFIER = 6
SCE_VHDL_STRINGEOL = 7
SCE_VHDL_KEYWORD = 8
SCE_VHDL_STDOPERATOR = 9
SCE_VHDL_ATTRIBUTE = 10
SCE_VHDL_STDFUNCTION = 11
SCE_VHDL_STDPACKAGE = 12
SCE_VHDL_STDTYPE = 13
SCE_VHDL_USERWORD = 14
SCE_CAML_DEFAULT = 0
SCE_CAML_IDENTIFIER = 1
SCE_CAML_TAGNAME = 2
SCE_CAML_KEYWORD = 3
SCE_CAML_KEYWORD2 = 4
SCE_CAML_KEYWORD3 = 5
SCE_CAML_LINENUM = 6
SCE_CAML_OPERATOR = 7
SCE_CAML_NUMBER = 8
SCE_CAML_CHAR = 9
SCE_CAML_STRING = 11
SCE_CAML_COMMENT = 12
SCE_CAML_COMMENT1 = 13
SCE_CAML_COMMENT2 = 14
SCE_CAML_COMMENT3 = 15
SCE_HA_DEFAULT = 0
SCE_HA_IDENTIFIER = 1
SCE_HA_KEYWORD = 2
SCE_HA_NUMBER = 3
SCE_HA_STRING = 4
SCE_HA_CHARACTER = 5
SCE_HA_CLASS = 6
SCE_HA_MODULE = 7
SCE_HA_CAPITAL = 8
SCE_HA_DATA = 9
SCE_HA_IMPORT = 10
SCE_HA_OPERATOR = 11
SCE_HA_INSTANCE = 12
SCE_HA_COMMENTLINE = 13
SCE_HA_COMMENTBLOCK = 14
SCE_HA_COMMENTBLOCK2 = 15
SCE_HA_COMMENTBLOCK3 = 16
SCE_T3_DEFAULT = 0
SCE_T3_X_DEFAULT = 1
SCE_T3_PREPROCESSOR = 2
SCE_T3_BLOCK_COMMENT = 3
SCE_T3_LINE_COMMENT = 4
SCE_T3_OPERATOR = 5
SCE_T3_KEYWORD = 6
SCE_T3_NUMBER = 7
SCE_T3_IDENTIFIER = 8
SCE_T3_S_STRING = 9
SCE_T3_D_STRING = 10
SCE_T3_X_STRING = 11
SCE_T3_LIB_DIRECTIVE = 12
SCE_T3_MSG_PARAM = 13
SCE_T3_HTML_TAG = 14
SCE_T3_HTML_DEFAULT = 15
SCE_T3_HTML_STRING = 16
SCE_T3_USER1 = 17
SCE_T3_USER2 = 18
SCE_T3_USER3 = 19
SCE_T3_BRACE = 20
SCE_REBOL_DEFAULT = 0
SCE_REBOL_COMMENTLINE = 1
SCE_REBOL_COMMENTBLOCK = 2
SCE_REBOL_PREFACE = 3
SCE_REBOL_OPERATOR = 4
SCE_REBOL_CHARACTER = 5
SCE_REBOL_QUOTEDSTRING = 6
SCE_REBOL_BRACEDSTRING = 7
SCE_REBOL_NUMBER = 8
SCE_REBOL_PAIR = 9
SCE_REBOL_TUPLE = 10
SCE_REBOL_BINARY = 11
SCE_REBOL_MONEY = 12
SCE_REBOL_ISSUE = 13
SCE_REBOL_TAG = 14
SCE_REBOL_FILE = 15
SCE_REBOL_EMAIL = 16
SCE_REBOL_URL = 17
SCE_REBOL_DATE = 18
SCE_REBOL_TIME = 19
SCE_REBOL_IDENTIFIER = 20
SCE_REBOL_WORD = 21
SCE_REBOL_WORD2 = 22
SCE_REBOL_WORD3 = 23
SCE_REBOL_WORD4 = 24
SCE_REBOL_WORD5 = 25
SCE_REBOL_WORD6 = 26
SCE_REBOL_WORD7 = 27
SCE_REBOL_WORD8 = 28
SCE_SQL_DEFAULT = 0
SCE_SQL_COMMENT = 1
SCE_SQL_COMMENTLINE = 2
SCE_SQL_COMMENTDOC = 3
SCE_SQL_NUMBER = 4
SCE_SQL_WORD = 5
SCE_SQL_STRING = 6
SCE_SQL_CHARACTER = 7
SCE_SQL_SQLPLUS = 8
SCE_SQL_SQLPLUS_PROMPT = 9
SCE_SQL_OPERATOR = 10
SCE_SQL_IDENTIFIER = 11
SCE_SQL_SQLPLUS_COMMENT = 13
SCE_SQL_COMMENTLINEDOC = 15
SCE_SQL_WORD2 = 16
SCE_SQL_COMMENTDOCKEYWORD = 17
SCE_SQL_COMMENTDOCKEYWORDERROR = 18
SCE_SQL_USER1 = 19
SCE_SQL_USER2 = 20
SCE_SQL_USER3 = 21
SCE_SQL_USER4 = 22
SCE_SQL_QUOTEDIDENTIFIER = 23
SCE_ST_DEFAULT = 0
SCE_ST_STRING = 1
SCE_ST_NUMBER = 2
SCE_ST_COMMENT = 3
SCE_ST_SYMBOL = 4
SCE_ST_BINARY = 5
SCE_ST_BOOL = 6
SCE_ST_SELF = 7
SCE_ST_SUPER = 8
SCE_ST_NIL = 9
SCE_ST_GLOBAL = 10
SCE_ST_RETURN = 11
SCE_ST_SPECIAL = 12
SCE_ST_KWSEND = 13
SCE_ST_ASSIGN = 14
SCE_ST_CHARACTER = 15
SCE_ST_SPEC_SEL = 16
SCE_FS_DEFAULT = 0
SCE_FS_COMMENT = 1
SCE_FS_COMMENTLINE = 2
SCE_FS_COMMENTDOC = 3
SCE_FS_COMMENTLINEDOC = 4
SCE_FS_COMMENTDOCKEYWORD = 5
SCE_FS_COMMENTDOCKEYWORDERROR = 6
SCE_FS_KEYWORD = 7
SCE_FS_KEYWORD2 = 8
SCE_FS_KEYWORD3 = 9
SCE_FS_KEYWORD4 = 10
SCE_FS_NUMBER = 11
SCE_FS_STRING = 12
SCE_FS_PREPROCESSOR = 13
SCE_FS_OPERATOR = 14
SCE_FS_IDENTIFIER = 15
SCE_FS_DATE = 16
SCE_FS_STRINGEOL = 17
SCE_FS_CONSTANT = 18
SCE_FS_ASM = 19
SCE_FS_LABEL = 20
SCE_FS_ERROR = 21
SCE_FS_HEXNUMBER = 22
SCE_FS_BINNUMBER = 23
SCE_CSOUND_DEFAULT = 0
SCE_CSOUND_COMMENT = 1
SCE_CSOUND_NUMBER = 2
SCE_CSOUND_OPERATOR = 3
SCE_CSOUND_INSTR = 4
SCE_CSOUND_IDENTIFIER = 5
SCE_CSOUND_OPCODE = 6
SCE_CSOUND_HEADERSTMT = 7
SCE_CSOUND_USERKEYWORD = 8
SCE_CSOUND_COMMENTBLOCK = 9
SCE_CSOUND_PARAM = 10
SCE_CSOUND_ARATE_VAR = 11
SCE_CSOUND_KRATE_VAR = 12
SCE_CSOUND_IRATE_VAR = 13
SCE_CSOUND_GLOBAL_VAR = 14
SCE_CSOUND_STRINGEOL = 15
SCE_INNO_DEFAULT = 0
SCE_INNO_COMMENT = 1
SCE_INNO_KEYWORD = 2
SCE_INNO_PARAMETER = 3
SCE_INNO_SECTION = 4
SCE_INNO_PREPROC = 5
SCE_INNO_PREPROC_INLINE = 6
SCE_INNO_COMMENT_PASCAL = 7
SCE_INNO_KEYWORD_PASCAL = 8
SCE_INNO_KEYWORD_USER = 9
SCE_INNO_STRING_DOUBLE = 10
SCE_INNO_STRING_SINGLE = 11
SCE_INNO_IDENTIFIER = 12
SCE_OPAL_SPACE = 0
SCE_OPAL_COMMENT_BLOCK = 1
SCE_OPAL_COMMENT_LINE = 2
SCE_OPAL_INTEGER = 3
SCE_OPAL_KEYWORD = 4
SCE_OPAL_SORT = 5
SCE_OPAL_STRING = 6
SCE_OPAL_PAR = 7
SCE_OPAL_BOOL_CONST = 8
SCE_OPAL_DEFAULT = 32
SCE_SPICE_DEFAULT = 0
SCE_SPICE_IDENTIFIER = 1
SCE_SPICE_KEYWORD = 2
SCE_SPICE_KEYWORD2 = 3
SCE_SPICE_KEYWORD3 = 4
SCE_SPICE_NUMBER = 5
SCE_SPICE_DELIMITER = 6
SCE_SPICE_VALUE = 7
SCE_SPICE_COMMENTLINE = 8
SCE_CMAKE_DEFAULT = 0
SCE_CMAKE_COMMENT = 1
SCE_CMAKE_STRINGDQ = 2
SCE_CMAKE_STRINGLQ = 3
SCE_CMAKE_STRINGRQ = 4
SCE_CMAKE_COMMANDS = 5
SCE_CMAKE_PARAMETERS = 6
SCE_CMAKE_VARIABLE = 7
SCE_CMAKE_USERDEFINED = 8
SCE_CMAKE_WHILEDEF = 9
SCE_CMAKE_FOREACHDEF = 10
SCE_CMAKE_IFDEFINEDEF = 11
SCE_CMAKE_MACRODEF = 12
SCE_CMAKE_STRINGVAR = 13
SCE_CMAKE_NUMBER = 14
SCE_GAP_DEFAULT = 0
SCE_GAP_IDENTIFIER = 1
SCE_GAP_KEYWORD = 2
SCE_GAP_KEYWORD2 = 3
SCE_GAP_KEYWORD3 = 4
SCE_GAP_KEYWORD4 = 5
SCE_GAP_STRING = 6
SCE_GAP_CHAR = 7
SCE_GAP_OPERATOR = 8
SCE_GAP_COMMENT = 9
SCE_GAP_NUMBER = 10
SCE_GAP_STRINGEOL = 11
SCE_PLM_DEFAULT = 0
SCE_PLM_COMMENT = 1
SCE_PLM_STRING = 2
SCE_PLM_NUMBER = 3
SCE_PLM_IDENTIFIER = 4
SCE_PLM_OPERATOR = 5
SCE_PLM_CONTROL = 6
SCE_PLM_KEYWORD = 7
SCE_4GL_DEFAULT = 0
SCE_4GL_NUMBER = 1
SCE_4GL_WORD = 2
SCE_4GL_STRING = 3
SCE_4GL_CHARACTER = 4
SCE_4GL_PREPROCESSOR = 5
SCE_4GL_OPERATOR = 6
SCE_4GL_IDENTIFIER = 7
SCE_4GL_BLOCK = 8
SCE_4GL_END = 9
SCE_4GL_COMMENT1 = 10
SCE_4GL_COMMENT2 = 11
SCE_4GL_COMMENT3 = 12
SCE_4GL_COMMENT4 = 13
SCE_4GL_COMMENT5 = 14
SCE_4GL_COMMENT6 = 15
SCE_4GL_DEFAULT_ = 16
SCE_4GL_NUMBER_ = 17
SCE_4GL_WORD_ = 18
SCE_4GL_STRING_ = 19
SCE_4GL_CHARACTER_ = 20
SCE_4GL_PREPROCESSOR_ = 21
SCE_4GL_OPERATOR_ = 22
SCE_4GL_IDENTIFIER_ = 23
SCE_4GL_BLOCK_ = 24
SCE_4GL_END_ = 25
SCE_4GL_COMMENT1_ = 26
SCE_4GL_COMMENT2_ = 27
SCE_4GL_COMMENT3_ = 28
SCE_4GL_COMMENT4_ = 29
SCE_4GL_COMMENT5_ = 30
SCE_4GL_COMMENT6_ = 31
SCE_ABAQUS_DEFAULT = 0
SCE_ABAQUS_COMMENT = 1
SCE_ABAQUS_COMMENTBLOCK = 2
SCE_ABAQUS_NUMBER = 3
SCE_ABAQUS_STRING = 4
SCE_ABAQUS_OPERATOR = 5
SCE_ABAQUS_WORD = 6
SCE_ABAQUS_PROCESSOR = 7
SCE_ABAQUS_COMMAND = 8
SCE_ABAQUS_SLASHCOMMAND = 9
SCE_ABAQUS_STARCOMMAND = 10
SCE_ABAQUS_ARGUMENT = 11
SCE_ABAQUS_FUNCTION = 12
SCE_ASY_DEFAULT = 0
SCE_ASY_COMMENT = 1
SCE_ASY_COMMENTLINE = 2
SCE_ASY_NUMBER = 3
SCE_ASY_WORD = 4
SCE_ASY_STRING = 5
SCE_ASY_CHARACTER = 6
SCE_ASY_OPERATOR = 7
SCE_ASY_IDENTIFIER = 8
SCE_ASY_STRINGEOL = 9
SCE_ASY_COMMENTLINEDOC = 10
SCE_ASY_WORD2 = 11
SCE_R_DEFAULT = 0
SCE_R_COMMENT = 1
SCE_R_KWORD = 2
SCE_R_BASEKWORD = 3
SCE_R_OTHERKWORD = 4
SCE_R_NUMBER = 5
SCE_R_STRING = 6
SCE_R_STRING2 = 7
SCE_R_OPERATOR = 8
SCE_R_IDENTIFIER = 9
SCE_R_INFIX = 10
SCE_R_INFIXEOL = 11
SCE_MAGIK_DEFAULT = 0
SCE_MAGIK_COMMENT = 1
SCE_MAGIK_HYPER_COMMENT = 16
SCE_MAGIK_STRING = 2
SCE_MAGIK_CHARACTER = 3
SCE_MAGIK_NUMBER = 4
SCE_MAGIK_IDENTIFIER = 5
SCE_MAGIK_OPERATOR = 6
SCE_MAGIK_FLOW = 7
SCE_MAGIK_CONTAINER = 8
SCE_MAGIK_BRACKET_BLOCK = 9
SCE_MAGIK_BRACE_BLOCK = 10
SCE_MAGIK_SQBRACKET_BLOCK = 11
SCE_MAGIK_UNKNOWN_KEYWORD = 12
SCE_MAGIK_KEYWORD = 13
SCE_MAGIK_PRAGMA = 14
SCE_MAGIK_SYMBOL = 15
SCE_POWERSHELL_DEFAULT = 0
SCE_POWERSHELL_COMMENT = 1
SCE_POWERSHELL_STRING = 2
SCE_POWERSHELL_CHARACTER = 3
SCE_POWERSHELL_NUMBER = 4
SCE_POWERSHELL_VARIABLE = 5
SCE_POWERSHELL_OPERATOR = 6
SCE_POWERSHELL_IDENTIFIER = 7
SCE_POWERSHELL_KEYWORD = 8
SCE_POWERSHELL_CMDLET = 9
SCE_POWERSHELL_ALIAS = 10
SCE_MYSQL_DEFAULT = 0
SCE_MYSQL_COMMENT = 1
SCE_MYSQL_COMMENTLINE = 2
SCE_MYSQL_VARIABLE = 3
SCE_MYSQL_SYSTEMVARIABLE = 4
SCE_MYSQL_KNOWNSYSTEMVARIABLE = 5
SCE_MYSQL_NUMBER = 6
SCE_MYSQL_MAJORKEYWORD = 7
SCE_MYSQL_KEYWORD = 8
SCE_MYSQL_DATABASEOBJECT = 9
SCE_MYSQL_PROCEDUREKEYWORD = 10
SCE_MYSQL_STRING = 11
SCE_MYSQL_SQSTRING = 12
SCE_MYSQL_DQSTRING = 13
SCE_MYSQL_OPERATOR = 14
SCE_MYSQL_FUNCTION = 15
SCE_MYSQL_IDENTIFIER = 16
SCE_MYSQL_QUOTEDIDENTIFIER = 17
SCE_MYSQL_USER1 = 18
SCE_MYSQL_USER2 = 19
SCE_MYSQL_USER3 = 20
SCE_PO_DEFAULT = 0
SCE_PO_COMMENT = 1
SCE_PO_MSGID = 2
SCE_PO_MSGID_TEXT = 3
SCE_PO_MSGSTR = 4
SCE_PO_MSGSTR_TEXT = 5
SCE_PO_MSGCTXT = 6
SCE_PO_MSGCTXT_TEXT = 7
SCE_PO_FUZZY = 8
SCLEX_ASP = 29
SCLEX_PHP = 30
| gpl-3.0 |
StackStorm/st2 | contrib/chatops/actions/match_and_execute.py | 3 | 2668 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from st2common.runners.base_action import Action
from st2client.models.action_alias import ActionAliasMatch
from st2client.models.aliasexecution import ActionAliasExecution
from st2client.commands.action import (
LIVEACTION_STATUS_REQUESTED,
LIVEACTION_STATUS_SCHEDULED,
LIVEACTION_STATUS_RUNNING,
LIVEACTION_STATUS_CANCELING,
)
from st2client.client import Client
class ExecuteActionAliasAction(Action):
def __init__(self, config=None):
super(ExecuteActionAliasAction, self).__init__(config=config)
api_url = os.environ.get("ST2_ACTION_API_URL", None)
token = os.environ.get("ST2_ACTION_AUTH_TOKEN", None)
self.client = Client(api_url=api_url, token=token)
def run(self, text, source_channel=None, user=None):
alias_match = ActionAliasMatch()
alias_match.command = text
alias, representation = self.client.managers["ActionAlias"].match(alias_match)
execution = ActionAliasExecution()
execution.name = alias.name
execution.format = representation
execution.command = text
execution.source_channel = source_channel # channel the alias command originated from (e.g. a chat room)
execution.notification_channel = None
execution.notification_route = None
execution.user = user
action_exec_mgr = self.client.managers["ActionAliasExecution"]
execution = action_exec_mgr.create(execution)
self._wait_execution_to_finish(execution.execution["id"])
return execution.execution["id"]
def _wait_execution_to_finish(self, execution_id):
pending_statuses = [
LIVEACTION_STATUS_REQUESTED,
LIVEACTION_STATUS_SCHEDULED,
LIVEACTION_STATUS_RUNNING,
LIVEACTION_STATUS_CANCELING,
]
action_exec_mgr = self.client.managers["LiveAction"]
execution = action_exec_mgr.get_by_id(execution_id)
while execution.status in pending_statuses:
time.sleep(1)
execution = action_exec_mgr.get_by_id(execution_id)
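# Minimal usage sketch (illustrative only; the alias text and user below are
# hypothetical, and ST2_ACTION_API_URL / ST2_ACTION_AUTH_TOKEN must be set in
# the environment for the client built in __init__ to authenticate):
# action = ExecuteActionAliasAction()
# execution_id = action.run("run date on localhost",
# source_channel="chatops", user="stanley")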
| apache-2.0 |
earshel/PokeyPyManager | POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | 16 | 4194 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/SetAvatarResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data import PlayerData_pb2 as POGOProtos_dot_Data_dot_PlayerData__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/SetAvatarResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n7POGOProtos/Networking/Responses/SetAvatarResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Data/PlayerData.proto\"\xd7\x01\n\x11SetAvatarResponse\x12I\n\x06status\x18\x01 \x01(\x0e\x32\x39.POGOProtos.Networking.Responses.SetAvatarResponse.Status\x12\x30\n\x0bplayer_data\x18\x02 \x01(\x0b\x32\x1b.POGOProtos.Data.PlayerData\"E\n\x06Status\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x16\n\x12\x41VATAR_ALREADY_SET\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_PlayerData__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETAVATARRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='POGOProtos.Networking.Responses.SetAvatarResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVATAR_ALREADY_SET', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILURE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=273,
serialized_end=342,
)
_sym_db.RegisterEnumDescriptor(_SETAVATARRESPONSE_STATUS)
_SETAVATARRESPONSE = _descriptor.Descriptor(
name='SetAvatarResponse',
full_name='POGOProtos.Networking.Responses.SetAvatarResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_data', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.player_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SETAVATARRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=127,
serialized_end=342,
)
_SETAVATARRESPONSE.fields_by_name['status'].enum_type = _SETAVATARRESPONSE_STATUS
_SETAVATARRESPONSE.fields_by_name['player_data'].message_type = POGOProtos_dot_Data_dot_PlayerData__pb2._PLAYERDATA
_SETAVATARRESPONSE_STATUS.containing_type = _SETAVATARRESPONSE
DESCRIPTOR.message_types_by_name['SetAvatarResponse'] = _SETAVATARRESPONSE
SetAvatarResponse = _reflection.GeneratedProtocolMessageType('SetAvatarResponse', (_message.Message,), dict(
DESCRIPTOR = _SETAVATARRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.SetAvatarResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.SetAvatarResponse)
))
_sym_db.RegisterMessage(SetAvatarResponse)
# @@protoc_insertion_point(module_scope)
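# Illustrative sketch (not part of the generated file): the generated class
# behaves like any protobuf message, e.g.
# resp = SetAvatarResponse()
# resp.status = SetAvatarResponse.SUCCESS
# payload = resp.SerializeToString()
# round_tripped = SetAvatarResponse.FromString(payload)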
| mit |
BT-jmichaud/l10n-switzerland | l10n_ch_payment_slip/tests/test_payment_slip.py | 1 | 9506 | # -*- coding: utf-8 -*-
# © 2014-2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import time
import re
import odoo.tests.common as test_common
from odoo.report import render_report
class TestPaymentSlip(test_common.TransactionCase):
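# _compile_get_ref matches any non-digit character; test_slip_validity() uses
# it to reduce the invoice number plus move-line id to the digit string
# expected inside the BVR slip reference.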
_compile_get_ref = re.compile(r'[^0-9]')
def make_bank(self):
company = self.env.ref('base.main_company')
self.assertTrue(company)
partner = self.env.ref('base.main_partner')
self.assertTrue(partner)
bank = self.env['res.bank'].create(
{
'name': 'BCV',
'ccp': '01-1234-1',
'bic': '23452345',
'clearing': '234234',
}
)
bank_account = self.env['res.partner.bank'].create(
{
'partner_id': partner.id,
'bank_id': bank.id,
'bank_bic': bank.bic,
'acc_number': '01-1234-1',
'bvr_adherent_num': '1234567',
'print_bank': True,
'print_account': True,
'print_partner': True,
}
)
bank_account.onchange_acc_number_set_swiss_bank()
self.assertEqual(bank_account.ccp, '01-1234-1')
return bank_account
def make_invoice(self):
if not hasattr(self, 'bank_account'):
self.bank_account = self.make_bank()
account_model = self.env['account.account']
account_debtor = account_model.search([('code', '=', '1100')])
if not account_debtor:
account_debtor = account_model.create({
'code': 1100,
'name': 'Debitors',
'user_type_id':
self.env.ref('account.data_account_type_receivable').id,
'reconcile': True,
})
account_sale = account_model.search([('code', '=', '3200')])
if not account_sale:
account_sale = account_model.create({
'code': 3200,
'name': 'Goods sales',
'user_type_id':
self.env.ref('account.data_account_type_revenue').id,
'reconcile': False,
})
invoice = self.env['account.invoice'].create({
'partner_id': self.env.ref('base.res_partner_12').id,
'reference_type': 'none',
'name': 'A customer invoice',
'account_id': account_debtor.id,
'type': 'out_invoice',
'partner_bank_id': self.bank_account.id
})
self.env['account.invoice.line'].create({
'account_id': account_sale.id,
'product_id': False,
'quantity': 1,
'price_unit': 862.50,
'invoice_id': invoice.id,
'name': 'product that cost 862.50 all tax included',
})
invoice.action_invoice_open()
# waiting for the cache to refresh
attempt = 0
while not invoice.move_id:
invoice.refresh()
time.sleep(0.1)
attempt += 1
if attempt > 20:
break
return invoice
def test_invoice_confirmation(self):
"""Test that confirming an invoice generate slips correctly"""
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
for line in invoice.move_id.line_ids:
if line.account_id.user_type_id.type in ('payable', 'receivable'):
self.assertTrue(line.transaction_ref)
else:
self.assertFalse(line.transaction_ref)
for line in invoice.move_id.line_ids:
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
if line.account_id.user_type_id.type in ('payable', 'receivable'):
self.assertTrue(slip)
self.assertEqual(slip.amount_total, 862.50)
self.assertEqual(slip.invoice_id.id, invoice.id)
else:
self.assertFalse(slip)
def test_slip_validity(self):
"""Test that confirming slip are valid"""
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
for line in invoice.move_id.line_ids:
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
if line.account_id.user_type_id.type in ('payable', 'receivable'):
self.assertTrue(slip.reference)
self.assertTrue(slip.scan_line)
self.assertTrue(slip.slip_image)
self.assertTrue(slip.a4_pdf)
inv_num = line.invoice_id.number
line_ident = self._compile_get_ref.sub(
'', "%s%s" % (inv_num, line.id)
)
self.assertIn(line_ident, slip.reference.replace(' ', ''))
def test_print_report(self):
invoice = self.make_invoice()
data, format = render_report(
self.env.cr,
self.env.uid,
[invoice.id],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice',
{},
context={'force_pdf': True},
)
self.assertTrue(data)
self.assertEqual(format, 'pdf')
def test_print_multi_report_merge_in_memory(self):
# default value as in memory
self.assertEqual(self.env.user.company_id.merge_mode, 'in_memory')
invoice1 = self.make_invoice()
invoice2 = self.make_invoice()
data, format = render_report(
self.env.cr,
self.env.uid,
[invoice1.id, invoice2.id],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice',
{},
context={'force_pdf': True},
)
self.assertTrue(data)
self.assertEqual(format, 'pdf')
def test_print_multi_report_merge_on_disk(self):
self.env.user.company_id.merge_mode = 'on_disk'
invoice1 = self.make_invoice()
invoice2 = self.make_invoice()
data, format = render_report(
self.env.cr,
self.env.uid,
[invoice1.id, invoice2.id],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice',
{},
context={'force_pdf': True},
)
self.assertTrue(data)
self.assertEqual(format, 'pdf')
def test_address_format(self):
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
address_lines = slip._get_address_lines(com_partner)
self.assertEqual(
address_lines,
[u'93, Press Avenue', u'', u'73377 Le Bourget du Lac']
)
def test_address_format_no_country(self):
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
com_partner.country_id = False
address_lines = slip._get_address_lines(com_partner)
self.assertEqual(
address_lines,
[u'93, Press Avenue', u'', u'73377 Le Bourget du Lac']
)
def test_address_format_special_format(self):
""" Test special formating without street2 """
ICP = self.env['ir.config_parameter']
ICP.set_param(
'bvr.address.format',
"%(street)s\n%(zip)s %(city)s"
)
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
com_partner.country_id = False
address_lines = slip._get_address_lines(com_partner)
self.assertEqual(
address_lines,
[u'93, Press Avenue', u'73377 Le Bourget du Lac']
)
def test_address_length(self):
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
address_lines = slip._get_address_lines(com_partner)
f_size = 11
len_tests = [
(15, (11, None)),
(23, (11, None)),
(26, (10, None)),
(27, (10, None)),
(30, (9, None)),
(32, (8, 34)),
(34, (8, 34)),
(40, (8, 34))]
for text_len, result in len_tests:
com_partner.name = 'x' * text_len
res = slip._get_address_font_size(
f_size, address_lines, com_partner)
self.assertEqual(res, result, "Wrong result for len %s" % text_len)
def test_print_bvr(self):
invoice = self.make_invoice()
bvr = invoice.print_bvr()
self.assertEqual(bvr['report_name'],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice')
self.assertEqual(bvr['report_file'],
'l10n_ch_payment_slip.one_slip_per_page')
| agpl-3.0 |
ddietze/pyFSRS | available_modules/Devices/dummySensor.py | 2 | 5873 | """
.. module:: dummySensor
:platform: Windows
.. moduleauthor:: Scott R. Ellis <skellis@berkeley.edu>
dummySensor provides a dummy input device for testing purposes.
You can use this file as a starting point when writing your own input-device module for pyFSRS.
..
This file is part of the pyFSRS app.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014-2016 Daniel Dietze <daniel.dietze@berkeley.edu>.
"""
import time
import numpy as np
import core.FSRSModule as module
import core.Optutils as outils
def rosenbrock_val(coord,*params):
"""
evaluates Rosenbrock's function in any dimension. The minimum is at (x,y)=(a,a^2); traditionally a is set to 1 and b to 100.
Parameters
--------------
coord : np.array(dtype=float)
an N-dimensional numpy array to be evaluated by Rosenbrock's function.
params : float
optional positional parameters of length 2(N-1) giving the per-term
coefficients; default is a=1, b=100.
Returns
-------
val : float
value of rosenbrock's function at the given coord.
Notes
--------
https://en.wikipedia.org/wiki/Rosenbrock_function
Examples
---------
>>> print rosenbrock_val([1,1,1])
0
"""
coord=np.array(coord).T
val =0
if len(params)==2*(len(coord)-1):
for i in range(0, len(coord)-1):
val+= (params[2*i]-coord[i])**2+params[2*i+1]*(coord[i+1]-coord[i]**2)**2
else:
for i in range(0, len(coord)-1):
val+= (1-coord[i])**2+100*(coord[i+1]-coord[i]**2)**2
return val
def parabola_val(coord,*params):
"""
evaluates a parabola in any dimension. The minimum is at coordinates of all ones, i.e. (x,y)=(1,1) in 2-D, unless alternative parameters are given.
Parameters
--------------
coord : np.array(dtype=float)
an N-dimensional numpy array to be evaluated.
params : float
optional positional parameters of length N giving the location of the
minimum; default is all ones.
Returns
-------
val : float
value of the parabola at the given coord.
Notes
--------
https://en.wikipedia.org/wiki/Test_functions_for_optimization
Examples
---------
>>> print parabola_val([1,1,1])
0
"""
coord=np.array(coord).T
val =0
if len(params)==len(coord):
for i in range(0, len(coord)):
val+= (coord[i]-params[i])**2
else:
for i in range(0, len(coord)):
val+= (coord[i]-1)**2
return val
def ackley_val(coord):
"""
evaluates Ackley's function in two dimensions; the minimum is at (x,y)=(1,1).
Parameters
--------------
coord : np.array(dtype=float)
a 2-dimensional coordinate (x, y) to be evaluated; this function takes
no additional parameters.
Returns
-------
val : float
value of Ackley's function at the given coord.
Notes
--------
https://en.wikipedia.org/wiki/Test_functions_for_optimization
Examples
---------
>>> print ackley_val([1, 1])
0.0
"""
coord=np.array(coord).T
# np.pi and np.e replace the original hard-coded constants (3.1415259 was a
# digit-transposed pi, 2.71828 a truncated e), which shifted the minimum
# slightly away from exactly 0.
val = (-20*np.exp(-0.2*(0.5*((coord[0]-1)**2 + (coord[1]-1)**2))**0.5)
- np.exp(0.5*(np.cos(2*np.pi*(coord[0]-1)) + np.cos(2*np.pi*(coord[1]-1))))
+ np.e + 20)
return val
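# Quick sanity check (a sketch): all three objectives above share a minimum
# of 0 at coordinates of all ones, e.g.
# >>> rosenbrock_val([1, 1]) # -> 0
# >>> parabola_val([1, 1]) # -> 0
# >>> ackley_val([1, 1]) # -> 0.0 (exact with np.pi / np.e above)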
def howMany():
return 1
class dummySensor(module.Input):
def __init__(self):
module.Input.__init__(self)
self.name = "Dummy Sensor"
prop = []
prop.append({"label": "Amplitude", "type": "input", "value": "0.0"})
prop.append({"label": "Offset", "type": "input", "value": "0.0"})
prop.append({"label": "Wait Time (s)", "type": "input", "value": "0"})
prop.append({"label": "Function", "type": "choice", "value": 0, "choices": ["Rosenbrock", "Parabola", "Ackley"], "event": None})
# convert dictionary to properties object
self.parsePropertiesDict(prop)
# this is the only additional function an input device has to have
# returns some value
def read(self,coord=[]):
# wait number of seconds
time.sleep(abs(float(self.getPropertyByLabel("wait").getValue())))
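# With no coordinate, return pure noise: Amplitude-scaled uniform noise
# centred on Offset. With a coordinate, evaluate the selected test
# function there and add the same noise term, so optimizers can be
# exercised against it.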
if len(coord)==0:
return (np.random.rand(1) - 0.5) * float(self.getPropertyByLabel("amplitude").getValue()) + float(self.getPropertyByLabel("offset").getValue())
elif self.getPropertyByLabel("Function").getValue()==0:
return rosenbrock_val(coord)+(np.random.rand(1) - 0.5) * float(self.getPropertyByLabel("amplitude").getValue()) + float(self.getPropertyByLabel("offset").getValue())
elif self.getPropertyByLabel("Function").getValue()==1:
return parabola_val(coord)+(np.random.rand(1) - 0.5) * float(self.getPropertyByLabel("amplitude").getValue()) + float(self.getPropertyByLabel("offset").getValue())
elif self.getPropertyByLabel("Function").getValue()==2:
return ackley_val(coord)+(np.random.rand(1) - 0.5) * float(self.getPropertyByLabel("amplitude").getValue()) + float(self.getPropertyByLabel("offset").getValue())
| gpl-3.0 |
chuan9/chromium-crosswalk | tools/telemetry/third_party/typ/typ/tests/main_test.py | 35 | 26317 | # Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import os
import sys
import textwrap
from typ import main
from typ import test_case
from typ import Host
from typ import VERSION
from typ.fakes import test_result_server_fake
is_python3 = bool(sys.version_info.major == 3)
if is_python3: # pragma: python3
# redefining built-in 'unicode' pylint: disable=W0622
unicode = str
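# Shorthand alias: the fixtures below are indented triple-quoted strings,
# dedented with d(...) before use.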
d = textwrap.dedent
PASS_TEST_PY = """
import unittest
class PassingTest(unittest.TestCase):
def test_pass(self):
pass
"""
PASS_TEST_FILES = {'pass_test.py': PASS_TEST_PY}
FAIL_TEST_PY = """
import unittest
class FailingTest(unittest.TestCase):
def test_fail(self):
self.fail()
"""
FAIL_TEST_FILES = {'fail_test.py': FAIL_TEST_PY}
OUTPUT_TEST_PY = """
import sys
import unittest
class PassTest(unittest.TestCase):
def test_out(self):
sys.stdout.write("hello on stdout\\n")
sys.stdout.flush()
def test_err(self):
sys.stderr.write("hello on stderr\\n")
class FailTest(unittest.TestCase):
def test_out_err_fail(self):
sys.stdout.write("hello on stdout\\n")
sys.stdout.flush()
sys.stderr.write("hello on stderr\\n")
self.fail()
"""
OUTPUT_TEST_FILES = {'output_test.py': OUTPUT_TEST_PY}
SF_TEST_PY = """
import sys
import unittest
class SkipMethods(unittest.TestCase):
@unittest.skip('reason')
def test_reason(self):
self.fail()
@unittest.skipIf(True, 'reason')
def test_skip_if_true(self):
self.fail()
@unittest.skipIf(False, 'reason')
def test_skip_if_false(self):
self.fail()
class SkipSetup(unittest.TestCase):
def setUp(self):
self.skipTest('setup failed')
def test_notrun(self):
self.fail()
@unittest.skip('skip class')
class SkipClass(unittest.TestCase):
def test_method(self):
self.fail()
class SetupClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.stdout.write('in setupClass\\n')
sys.stdout.flush()
assert False, 'setupClass failed'
def test_method1(self):
pass
def test_method2(self):
pass
class ExpectedFailures(unittest.TestCase):
@unittest.expectedFailure
def test_fail(self):
self.fail()
@unittest.expectedFailure
def test_pass(self):
pass
"""
SF_TEST_FILES = {'sf_test.py': SF_TEST_PY}
LOAD_TEST_PY = """
import unittest
def load_tests(_, _2, _3):
class BaseTest(unittest.TestCase):
pass
def method_fail(self):
self.fail()
def method_pass(self):
pass
setattr(BaseTest, "test_fail", method_fail)
setattr(BaseTest, "test_pass", method_pass)
suite = unittest.TestSuite()
suite.addTest(BaseTest("test_fail"))
suite.addTest(BaseTest("test_pass"))
return suite
"""
LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
path_to_main = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'runner.py')
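# path_to_main resolves to typ/runner.py (two levels up from this test file),
# so TestCli can run the CLI as a real subprocess.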
class TestCli(test_case.MainTestCase):
prog = [sys.executable, path_to_main]
files_to_ignore = ['*.pyc']
def test_bad_arg(self):
self.check(['--bad-arg'], ret=2, out='',
rerr='.*: error: unrecognized arguments: --bad-arg\n')
self.check(['-help'], ret=2, out='',
rerr=(".*: error: argument -h/--help: "
"ignored explicit argument 'elp'\n"))
def test_bad_metadata(self):
self.check(['--metadata', 'foo'], ret=2, err='',
out='Error: malformed --metadata "foo"\n')
def test_basic(self):
self.check([], files=PASS_TEST_FILES,
ret=0,
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test run, 0 failures.\n'), err='')
def test_coverage(self):
try:
import coverage # pylint: disable=W0612
files = {
'pass_test.py': PASS_TEST_PY,
'fail_test.py': FAIL_TEST_PY,
}
self.check(['-c', 'pass_test'], files=files, ret=0, err='',
out=d("""\
[1/1] pass_test.PassingTest.test_pass passed
1 test run, 0 failures.
Name Stmts Miss Cover
-------------------------------
fail_test 4 4 0%
pass_test 4 0 100%
-------------------------------
TOTAL 8 4 50%
"""))
except ImportError: # pragma: no cover
# We can never cover this line, since running coverage means
# that import will succeed.
self.check(['-c'], files=PASS_TEST_FILES, ret=1,
out='Error: coverage is not installed\n', err='')
def test_debugger(self):
if sys.version_info.major == 3: # pragma: python3
return
else: # pragma: python2
_, out, _, _ = self.check(['-d'], stdin='quit()\n',
files=PASS_TEST_FILES, ret=0, err='')
self.assertIn('(Pdb) ', out)
def test_dryrun(self):
self.check(['-n'], files=PASS_TEST_FILES, ret=0, err='',
out=d("""\
[1/1] pass_test.PassingTest.test_pass passed
1 test run, 0 failures.
"""))
def test_error(self):
files = {'err_test.py': d("""\
import unittest
class ErrTest(unittest.TestCase):
def test_err(self):
foo = bar
""")}
_, out, _, _ = self.check([''], files=files, ret=1, err='')
self.assertIn('[1/1] err_test.ErrTest.test_err failed unexpectedly',
out)
self.assertIn('1 test run, 1 failure', out)
def test_fail(self):
_, out, _, _ = self.check([], files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn('fail_test.FailingTest.test_fail failed unexpectedly',
out)
def test_fail_then_pass(self):
files = {'fail_then_pass_test.py': d("""\
import unittest
count = 0
class FPTest(unittest.TestCase):
def test_count(self):
global count
count += 1
if count == 1:
self.fail()
""")}
_, out, _, files = self.check(['--retry-limit', '3',
'--write-full-results-to',
'full_results.json'],
files=files, ret=0, err='')
self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
self.assertIn('1 test run, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
self.assertEqual(results['tests'][
'fail_then_pass_test']['FPTest']['test_count']['actual'],
'FAIL PASS')
def test_failures_are_not_elided(self):
_, out, _, _ = self.check(['--terminal-width=20'],
files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn('[1/1] fail_test.FailingTest.test_fail failed '
'unexpectedly:\n', out)
def test_file_list(self):
files = PASS_TEST_FILES
self.check(['-f', '-'], files=files, stdin='pass_test\n', ret=0)
self.check(['-f', '-'], files=files, stdin='pass_test.PassingTest\n',
ret=0)
self.check(['-f', '-'], files=files,
stdin='pass_test.PassingTest.test_pass\n',
ret=0)
files = {'pass_test.py': PASS_TEST_PY,
'test_list.txt': 'pass_test.PassingTest.test_pass\n'}
self.check(['-f', 'test_list.txt'], files=files, ret=0)
def test_find(self):
files = PASS_TEST_FILES
self.check(['-l'], files=files, ret=0,
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', 'pass_test'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', 'pass_test.py'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', './pass_test.py'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', '.'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', 'pass_test.PassingTest.test_pass'], files=files,
ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', '.'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
def test_find_from_subdirs(self):
files = {
'foo/__init__.py': '',
'foo/pass_test.py': PASS_TEST_PY,
'bar/__init__.py': '',
'bar/tmp': '',
}
self.check(['-l', '../foo/pass_test.py'], files=files, cwd='bar',
ret=0, err='',
out='foo.pass_test.PassingTest.test_pass\n')
self.check(['-l', 'foo'], files=files, cwd='bar',
ret=0, err='',
out='foo.pass_test.PassingTest.test_pass\n')
self.check(['-l', '--path', '../foo', 'pass_test'],
files=files, cwd='bar', ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
def test_help(self):
self.check(['--help'], ret=0, rout='.*', err='')
def test_import_failure_missing_file(self):
self.check(['-l', 'foo'], ret=1, err='',
rout='Failed to load "foo".*')
def test_import_failure_missing_package(self):
files = {'foo.py': d("""\
import unittest
import package_that_does_not_exist
class ImportFailureTest(unittest.TestCase):
def test_case(self):
pass
""")}
self.check(['-l', 'foo.py'], files=files, ret=1, err='',
rout=('Failed to load "foo.py": No module named '
'\'?package_that_does_not_exist\'?\n'))
def test_import_failure_no_tests(self):
files = {'foo.py': 'import unittest'}
self.check(['-l', 'foo.bar'], files=files, ret=1, err='',
rout='Failed to load "foo.bar":.*')
def test_import_failure_syntax_error(self):
files = {'syn_test.py': d("""\
import unittest
class SyntaxErrorTest(unittest.TestCase):
def test_syntax_error_in_test(self):
syntax error
""")}
_, out, _, _ = self.check([], files=files, ret=1, err='')
self.assertIn('Failed to import test module: syn_test', out)
self.assertIn((' syntax error\n'
' ^\n'
'SyntaxError: invalid syntax\n'), out)
def test_interrupt(self):
files = {'interrupt_test.py': d("""\
import unittest
class Foo(unittest.TestCase):
def test_interrupt(self):
raise KeyboardInterrupt()
""")}
self.check(['-j', '1'], files=files, ret=130, out='',
err='interrupted, exiting\n')
def test_isolate(self):
self.check(['--isolate', '*test_pass*'], files=PASS_TEST_FILES, ret=0,
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test run, 0 failures.\n'), err='')
def test_load_tests_failure(self):
files = {'foo_test.py': d("""\
import unittest
def load_tests(_, _2, _3):
raise ValueError('this should fail')
""")}
self.check([], files=files, ret=1, err='',
out=('foo_test.load_tests() failed: this should fail\n'))
def test_load_tests_single_worker(self):
files = LOAD_TEST_FILES
_, out, _, _ = self.check(['-j', '1', '-v'], files=files, ret=1,
err='')
self.assertIn('[1/2] load_test.BaseTest.test_fail failed', out)
self.assertIn('[2/2] load_test.BaseTest.test_pass passed', out)
self.assertIn('2 tests run, 1 failure.\n', out)
def test_load_tests_multiple_workers(self):
_, out, _, _ = self.check([], files=LOAD_TEST_FILES, ret=1, err='')
# The output for this test is nondeterministic since we may run
# two tests in parallel. So, we just test that some of the substrings
# we care about are present.
self.assertIn('test_pass passed', out)
self.assertIn('test_fail failed', out)
self.assertIn('2 tests run, 1 failure.\n', out)
def test_missing_builder_name(self):
self.check(['--test-results-server', 'localhost'], ret=2,
out=('Error: --builder-name must be specified '
'along with --test-result-server\n'
'Error: --master-name must be specified '
'along with --test-result-server\n'
'Error: --test-type must be specified '
'along with --test-result-server\n'), err='')
def test_ninja_status_env(self):
self.check(['-v', 'output_test.PassTest.test_out'],
files=OUTPUT_TEST_FILES, aenv={'NINJA_STATUS': 'ns: '},
out=d("""\
ns: output_test.PassTest.test_out passed
1 test run, 0 failures.
"""), err='')
def test_output_for_failures(self):
_, out, _, _ = self.check(['output_test.FailTest'],
files=OUTPUT_TEST_FILES,
ret=1, err='')
self.assertIn('[1/1] output_test.FailTest.test_out_err_fail '
'failed unexpectedly:\n'
' hello on stdout\n'
' hello on stderr\n', out)
def test_quiet(self):
self.check(['-q'], files=PASS_TEST_FILES, ret=0, err='', out='')
def test_retry_limit(self):
_, out, _, _ = self.check(['--retry-limit', '2'],
files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn('Retrying failed tests', out)
lines = out.splitlines()
self.assertEqual(len([l for l in lines
if 'test_fail failed unexpectedly:' in l]),
3)
def test_skip(self):
self.check(['--skip', '*test_fail*'], files=FAIL_TEST_FILES, ret=1,
out='No tests to run.\n', err='')
files = {'fail_test.py': FAIL_TEST_PY,
'pass_test.py': PASS_TEST_PY}
self.check(['-j', '1', '--skip', '*test_fail*'], files=files, ret=0,
out=('[1/2] fail_test.FailingTest.test_fail was skipped\n'
'[2/2] pass_test.PassingTest.test_pass passed\n'
'2 tests run, 0 failures.\n'), err='')
# This tests that we print test_started updates for skipped tests
# properly. It also tests how overwriting works.
_, out, _, _ = self.check(['-j', '1', '--overwrite', '--skip',
'*test_fail*'], files=files, ret=0,
err='', universal_newlines=False)
# We test this string separately and call out.strip() to
# avoid the trailing \r\n we get on windows, while keeping
# the \r's elsewhere in the string.
self.assertMultiLineEqual(
out.strip(),
('[0/2] fail_test.FailingTest.test_fail\r'
' \r'
'[1/2] fail_test.FailingTest.test_fail was skipped\r'
' \r'
'[1/2] pass_test.PassingTest.test_pass\r'
' \r'
'[2/2] pass_test.PassingTest.test_pass passed\r'
' \r'
'2 tests run, 0 failures.'))
def test_skips_and_failures(self):
_, out, _, _ = self.check(['-j', '1', '-v', '-v'], files=SF_TEST_FILES,
ret=1, err='')
# We do a bunch of assertIn()'s to work around the non-portable
# tracebacks.
self.assertIn(('[1/9] sf_test.ExpectedFailures.test_fail failed:\n'
' Traceback '), out)
self.assertIn(('[2/9] sf_test.ExpectedFailures.test_pass '
'passed unexpectedly'), out)
self.assertIn(('[3/9] sf_test.SetupClass.test_method1 '
'failed unexpectedly:\n'
' in setupClass\n'), out)
self.assertIn(('[4/9] sf_test.SetupClass.test_method2 '
'failed unexpectedly:\n'
' in setupClass\n'), out)
self.assertIn(('[5/9] sf_test.SkipClass.test_method was skipped:\n'
' skip class\n'), out)
self.assertIn(('[6/9] sf_test.SkipMethods.test_reason was skipped:\n'
' reason\n'), out)
self.assertIn(('[7/9] sf_test.SkipMethods.test_skip_if_false '
'failed unexpectedly:\n'
' Traceback'), out)
self.assertIn(('[8/9] sf_test.SkipMethods.test_skip_if_true '
'was skipped:\n'
' reason\n'
'[9/9] sf_test.SkipSetup.test_notrun was skipped:\n'
' setup failed\n'
'9 tests run, 4 failures.\n'), out)
def test_skip_and_all(self):
# --all should override --skip
self.check(['-l', '--skip', '*test_pass'],
files=PASS_TEST_FILES, ret=1, err='',
out='No tests to run.\n')
self.check(['-l', '--all', '--skip', '*test_pass'],
files=PASS_TEST_FILES, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
def test_skip_decorators_and_all(self):
_, out, _, _ = self.check(['--all', '-j', '1', '-v', '-v'],
files=SF_TEST_FILES, ret=1, err='')
self.assertIn('sf_test.SkipClass.test_method failed', out)
self.assertIn('sf_test.SkipMethods.test_reason failed', out)
self.assertIn('sf_test.SkipMethods.test_skip_if_true failed', out)
self.assertIn('sf_test.SkipMethods.test_skip_if_false failed', out)
# --all does not override explicit calls to skipTest(), only
# the decorators.
self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)
def test_subdir(self):
files = {
'foo/__init__.py': '',
'foo/bar/__init__.py': '',
'foo/bar/pass_test.py': PASS_TEST_PY
}
self.check(['foo/bar'], files=files, ret=0, err='',
out=d("""\
[1/1] foo.bar.pass_test.PassingTest.test_pass passed
1 test run, 0 failures.
"""))
def test_timing(self):
self.check(['-t'], files=PASS_TEST_FILES, ret=0, err='',
rout=('\[1/1\] pass_test.PassingTest.test_pass passed '
'\d+.\d+s\n'
'1 test run in \d+.\d+s, 0 failures.'))
def test_test_results_server(self):
server = test_result_server_fake.start()
self.assertNotEqual(server, None, 'could not start fake server')
try:
self.check(['--test-results-server',
'%s:%d' % server.server_address,
'--master-name', 'fake_master',
'--builder-name', 'fake_builder',
'--test-type', 'typ_tests',
'--metadata', 'foo=bar'],
files=PASS_TEST_FILES, ret=0, err='',
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test run, 0 failures.\n'))
finally:
posts = server.stop()
self.assertEqual(len(posts), 1)
payload = posts[0][2].decode('utf8')
self.assertIn('"test_pass": {"expected": "PASS", "actual": "PASS"}',
payload)
self.assertTrue(payload.endswith('--\r\n'))
self.assertNotEqual(server.log.getvalue(), '')
def test_test_results_server_error(self):
server = test_result_server_fake.start(code=500)
self.assertNotEqual(server, None, 'could not start fake server')
try:
self.check(['--test-results-server',
'%s:%d' % server.server_address,
'--master-name', 'fake_master',
'--builder-name', 'fake_builder',
'--test-type', 'typ_tests',
'--metadata', 'foo=bar'],
files=PASS_TEST_FILES, ret=1, err='',
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test run, 0 failures.\n'
'Uploading the JSON results raised '
'"HTTP Error 500: Internal Server Error"\n'))
finally:
_ = server.stop()
def test_test_results_server_not_running(self):
self.check(['--test-results-server', 'localhost:99999',
'--master-name', 'fake_master',
'--builder-name', 'fake_builder',
'--test-type', 'typ_tests',
'--metadata', 'foo=bar'],
files=PASS_TEST_FILES, ret=1, err='',
rout=('\[1/1\] pass_test.PassingTest.test_pass passed\n'
'1 test run, 0 failures.\n'
'Uploading the JSON results raised .*\n'))
def test_verbose(self):
self.check(['-vv', '-j', '1', 'output_test.PassTest'],
files=OUTPUT_TEST_FILES, ret=0,
out=d("""\
[1/2] output_test.PassTest.test_err passed:
hello on stderr
[2/2] output_test.PassTest.test_out passed:
hello on stdout
2 tests run, 0 failures.
"""), err='')
def test_version(self):
self.check('--version', ret=0, out=(VERSION + '\n'))
def test_write_full_results_to(self):
_, _, _, files = self.check(['--write-full-results-to',
'results.json'], files=PASS_TEST_FILES)
self.assertIn('results.json', files)
results = json.loads(files['results.json'])
self.assertEqual(results['interrupted'], False)
self.assertEqual(results['path_delimiter'], '.')
self.assertEqual(results['tests'],
{u'pass_test': {
u'PassingTest': {
u'test_pass': {
u'actual': u'PASS',
u'expected': u'PASS',
}
}
}})
def test_write_trace_to(self):
_, _, _, files = self.check(['--write-trace-to', 'trace.json'],
files=PASS_TEST_FILES)
self.assertIn('trace.json', files)
trace_obj = json.loads(files['trace.json'])
self.assertEqual(trace_obj['otherData'], {})
self.assertEqual(len(trace_obj['traceEvents']), 5)
event = trace_obj['traceEvents'][0]
self.assertEqual(event['name'], 'pass_test.PassingTest.test_pass')
self.assertEqual(event['ph'], 'X')
self.assertEqual(event['tid'], 1)
self.assertEqual(event['args']['expected'], ['Pass'])
self.assertEqual(event['args']['actual'], 'Pass')
class TestMain(TestCli):
prog = []
def make_host(self):
return Host()
def call(self, host, argv, stdin, env):
stdin = unicode(stdin)
host.stdin = io.StringIO(stdin)
if env:
host.getenv = env.get
host.capture_output()
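# Snapshot sys.path and sys.modules: running main() in-process imports
# test modules from temporary directories, which would otherwise leak
# into subsequent test runs.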
orig_sys_path = sys.path[:]
orig_sys_modules = list(sys.modules.keys())
try:
ret = main(argv + ['-j', '1'], host)
finally:
out, err = host.restore_output()
modules_to_unload = []
for k in sys.modules:
if k not in orig_sys_modules:
modules_to_unload.append(k)
for k in modules_to_unload:
del sys.modules[k]
sys.path = orig_sys_path
return ret, out, err
def test_debugger(self):
# TODO: this test seems to hang under coverage.
pass
| bsd-3-clause |
asnir/airflow | airflow/utils/operator_helpers.py | 31 | 1556 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def context_to_airflow_vars(context):
"""
Given a context, this function provides a dictionary of values that can be used to
externally reconstruct relations between dags, dag_runs, tasks and task_instances.
:param context: The context for the task_instance of interest
:type context: dict
"""
params = {}
dag = context.get('dag')
if dag and dag.dag_id:
params['airflow.ctx.dag.dag_id'] = dag.dag_id
dag_run = context.get('dag_run')
if dag_run and dag_run.execution_date:
params['airflow.ctx.dag_run.execution_date'] = dag_run.execution_date.isoformat()
task = context.get('task')
if task and task.task_id:
params['airflow.ctx.task.task_id'] = task.task_id
task_instance = context.get('task_instance')
if task_instance and task_instance.execution_date:
params['airflow.ctx.task_instance.execution_date'] = (
task_instance.execution_date.isoformat()
)
return params
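# Illustrative sketch (hypothetical context values; the keys mirror the
# assignments above):
# airflow_vars = context_to_airflow_vars({
# 'dag': dag, 'dag_run': dag_run,
# 'task': task, 'task_instance': ti,
# })
# # -> {'airflow.ctx.dag.dag_id': 'example_dag', ...}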
| apache-2.0 |
YangSongzhou/django | tests/admin_docs/models.py | 244 | 1941 | """
Models for testing various aspects of the django.contrib.admindocs app
"""
from django.db import models
class Company(models.Model):
name = models.CharField(max_length=200)
class Group(models.Model):
name = models.CharField(max_length=200)
class Family(models.Model):
last_name = models.CharField(max_length=200)
class Person(models.Model):
"""
Stores information about a person, related to :model:`myapp.Company`.
**Notes**
Use ``save_changes()`` when saving this object.
``company``
Field storing :model:`myapp.Company` where the person works.
(DESCRIPTION)
.. raw:: html
:file: admin_docs/evilfile.txt
.. include:: admin_docs/evilfile.txt
"""
first_name = models.CharField(max_length=200, help_text="The person's first name")
last_name = models.CharField(max_length=200, help_text="The person's last name")
company = models.ForeignKey(Company, models.CASCADE, help_text="place of work")
family = models.ForeignKey(Family, models.SET_NULL, related_name='+', null=True)
groups = models.ManyToManyField(Group, help_text="has membership")
def _get_full_name(self):
return "%s %s" % (self.first_name, self.last_name)
def rename_company(self, new_name):
self.company.name = new_name
self.company.save()
return new_name
def dummy_function(self, baz, rox, *some_args, **some_kwargs):
return some_kwargs
def suffix_company_name(self, suffix='ltd'):
return self.company.name + suffix
def add_image(self):
pass
def delete_image(self):
pass
def save_changes(self):
pass
def set_status(self):
pass
def get_full_name(self):
"""
Get the full name of the person
"""
return self._get_full_name()
def get_status_count(self):
return 0
def get_groups_list(self):
return []
| bsd-3-clause |